From 18c6ec2c8a2432aff3538b437f42c4c82593d939 Mon Sep 17 00:00:00 2001
From: hangq
Date: Thu, 10 Sep 2020 23:30:12 +0800
Subject: [PATCH] optimize structure and interface

---
 mindspore/lite/CMakeLists.txt | 9 +-
 mindspore/lite/include/context.h | 26 +--
 mindspore/lite/include/errorcode.h | 2 +-
 mindspore/lite/include/lite_session.h | 8 +-
 mindspore/lite/include/ms_tensor.h | 32 +--
 mindspore/lite/include/train_session.h | 14 +-
 mindspore/lite/internal/CMakeLists.txt | 31 +++
 mindspore/lite/internal/include/context.h | 40 ++++
 mindspore/lite/internal/include/errorcode.h | 55 +++++
 .../lite/internal/include/lite_session.h | 90 ++++++++
 .../include/lite_utils.h} | 23 +-
 mindspore/lite/internal/include/model.h | 59 +++++
 mindspore/lite/internal/include/ms_tensor.h | 142 ++++++++++++
 mindspore/lite/internal/src/lite_session.cc | 68 ++++++
 mindspore/lite/internal/src/ms_tensor.cc | 194 ++++++++++++++++
 .../app/src/main/native/common/jni_utils.h | 1 -
 .../java/app/src/main/native/common/ms_log.h | 1 -
 .../app/src/main/native/runtime/ms_config.cpp | 6 +-
 .../lite/minddata/example/jni-example.cc | 95 ++++----
 .../lite/minddata/example/x86-example.cc | 38 ++--
 mindspore/lite/nnacl/fp16/arithmetic_fp16.h | 2 +-
 mindspore/lite/nnacl/fp16/reduce_fp16.h | 2 +-
 mindspore/lite/nnacl/fp32/reduce.h | 1 -
 mindspore/lite/nnacl/fp32/space_to_batch.h | 3 +-
 mindspore/lite/nnacl/fp32_grad/batch_norm.h | 8 +-
 mindspore/lite/nnacl/int8/slice_int8.h | 1 -
 mindspore/lite/nnacl/l2_norm.h | 3 +-
 mindspore/lite/nnacl/winograd_transform.h | 6 +-
 mindspore/lite/src/CMakeLists.txt | 14 +-
 mindspore/lite/src/common/common.h | 5 +-
 mindspore/lite/src/common/file_utils.cc | 4 +-
 mindspore/lite/src/common/file_utils.h | 7 +-
 mindspore/lite/src/common/file_utils_ext.cc | 2 +-
 mindspore/lite/src/common/file_utils_ext.h | 1 -
 .../lite/src/common/graph_utils_extends.cc | 3 +-
 mindspore/lite/src/common/log_adapter.cc | 52 +++--
 mindspore/lite/src/common/ms_tensor_utils.cc | 41 ----
 mindspore/lite/src/common/op_utils.h | 1 -
 mindspore/lite/src/common/utils.cc | 3 +-
 mindspore/lite/src/common/utils.h | 3 +-
 mindspore/lite/src/context.cc | 31 ---
 mindspore/lite/src/executor.cc | 34 +--
 mindspore/lite/src/executor.h | 12 +-
 mindspore/lite/src/ir/meta_tensor_extends.cc | 28 ---
 mindspore/lite/src/kernel_registry.cc | 7 +-
 mindspore/lite/src/kernel_registry.h | 5 +-
 mindspore/lite/src/lite_kernel.cc | 18 +-
 mindspore/lite/src/lite_kernel.h | 31 ++-
 mindspore/lite/src/lite_session.cc | 98 +++-----
 mindspore/lite/src/lite_session.h | 11 +-
 mindspore/lite/src/model.cc | 4 +-
 mindspore/lite/src/ops/addn.cc | 2 +-
 mindspore/lite/src/ops/addn.h | 2 +-
 mindspore/lite/src/ops/apply_momentum.cc | 3 +-
 mindspore/lite/src/ops/apply_momentum.h | 2 +-
 mindspore/lite/src/ops/argmax.cc | 2 +-
 mindspore/lite/src/ops/argmax.h | 2 +-
 mindspore/lite/src/ops/argmin.cc | 2 +-
 mindspore/lite/src/ops/argmin.h | 2 +-
 mindspore/lite/src/ops/arithmetic.cc | 4 +-
 mindspore/lite/src/ops/arithmetic.h | 2 +-
 mindspore/lite/src/ops/arithmetic_grad.cc | 5 +-
 mindspore/lite/src/ops/arithmetic_grad.h | 2 +-
 mindspore/lite/src/ops/arithmetic_self.cc | 2 +-
 mindspore/lite/src/ops/arithmetic_self.h | 2 +-
 mindspore/lite/src/ops/batch_to_space.cc | 6 +-
 mindspore/lite/src/ops/batch_to_space.h | 2 +-
 mindspore/lite/src/ops/bias_grad.cc | 3 +-
 mindspore/lite/src/ops/bias_grad.h | 2 +-
 mindspore/lite/src/ops/broadcast_to.cc | 2 +-
 mindspore/lite/src/ops/broadcast_to.h | 2 +-
 mindspore/lite/src/ops/cast.cc | 2 +-
 mindspore/lite/src/ops/cast.h | 2 +-
 mindspore/lite/src/ops/concat.cc | 4 +-
 mindspore/lite/src/ops/concat.h | 2 +-
 mindspore/lite/src/ops/constant_of_shape.cc | 6 +-
 mindspore/lite/src/ops/constant_of_shape.h | 2 +-
 mindspore/lite/src/ops/conv2d.cc | 14 +-
 mindspore/lite/src/ops/conv2d.h | 2 +-
 mindspore/lite/src/ops/conv2d_grad_filter.cc | 4 +-
 mindspore/lite/src/ops/conv2d_grad_filter.h | 2 +-
 mindspore/lite/src/ops/conv2d_grad_input.cc | 4 +-
 mindspore/lite/src/ops/conv2d_grad_input.h | 2 +-
 mindspore/lite/src/ops/crop.cc | 2 +-
 mindspore/lite/src/ops/crop.h | 2 +-
 mindspore/lite/src/ops/deconv2d.cc | 2 +-
 mindspore/lite/src/ops/deconv2d.h | 2 +-
 mindspore/lite/src/ops/dedepthwise_conv2d.cc | 3 +-
 mindspore/lite/src/ops/dedepthwise_conv2d.h | 2 +-
 mindspore/lite/src/ops/depth_to_space.cc | 4 +-
 mindspore/lite/src/ops/depth_to_space.h | 2 +-
 mindspore/lite/src/ops/depthwise_conv2d.cc | 9 +-
 mindspore/lite/src/ops/depthwise_conv2d.h | 2 +-
 .../lite/src/ops/detection_post_process.cc | 3 +-
 .../lite/src/ops/detection_post_process.h | 2 +-
 mindspore/lite/src/ops/embedding_lookup.cc | 2 +-
 mindspore/lite/src/ops/embedding_lookup.h | 2 +-
 mindspore/lite/src/ops/expand_dims.cc | 2 +-
 mindspore/lite/src/ops/expand_dims.h | 2 +-
 mindspore/lite/src/ops/fill.cc | 2 +-
 mindspore/lite/src/ops/fill.h | 2 +-
 mindspore/lite/src/ops/flatten.cc | 2 +-
 mindspore/lite/src/ops/flatten.h | 2 +-
 mindspore/lite/src/ops/full_connection.cc | 3 +-
 mindspore/lite/src/ops/full_connection.h | 2 +-
 mindspore/lite/src/ops/gather.cc | 4 +-
 mindspore/lite/src/ops/gather.h | 2 +-
 mindspore/lite/src/ops/gather_nd.cc | 2 +-
 mindspore/lite/src/ops/gather_nd.h | 2 +-
 mindspore/lite/src/ops/lstm.cc | 2 +-
 mindspore/lite/src/ops/lstm.h | 2 +-
 mindspore/lite/src/ops/matmul.cc | 2 +-
 mindspore/lite/src/ops/matmul.h | 2 +-
 mindspore/lite/src/ops/mean.cc | 2 +-
 mindspore/lite/src/ops/mean.h | 2 +-
 mindspore/lite/src/ops/nchw2nhwc.cc | 4 +-
 mindspore/lite/src/ops/nchw2nhwc.h | 2 +-
 mindspore/lite/src/ops/nhwc2nchw.cc | 4 +-
 mindspore/lite/src/ops/nhwc2nchw.h | 2 +-
 mindspore/lite/src/ops/one_hot.cc | 4 +-
 mindspore/lite/src/ops/one_hot.h | 2 +-
 mindspore/lite/src/ops/pad.cc | 2 +-
 mindspore/lite/src/ops/pad.h | 2 +-
 mindspore/lite/src/ops/pooling.cc | 10 +-
 mindspore/lite/src/ops/pooling.h | 2 +-
 mindspore/lite/src/ops/pooling_grad.cc | 2 +-
 mindspore/lite/src/ops/pooling_grad.h | 2 +-
 mindspore/lite/src/ops/power.cc | 4 +-
 mindspore/lite/src/ops/power.h | 2 +-
 mindspore/lite/src/ops/primitive_c.cc | 54 ++---
 mindspore/lite/src/ops/primitive_c.h | 19 +-
 mindspore/lite/src/ops/prior_box.cc | 2 +-
 mindspore/lite/src/ops/prior_box.h | 2 +-
 mindspore/lite/src/ops/quant_dtype_cast.cc | 2 +-
 mindspore/lite/src/ops/quant_dtype_cast.h | 2 +-
 mindspore/lite/src/ops/range.cc | 2 +-
 mindspore/lite/src/ops/range.h | 2 +-
 mindspore/lite/src/ops/rank.cc | 2 +-
 mindspore/lite/src/ops/rank.h | 2 +-
 mindspore/lite/src/ops/reduce.cc | 2 +-
 mindspore/lite/src/ops/reduce.h | 2 +-
 mindspore/lite/src/ops/reshape.cc | 20 +-
 mindspore/lite/src/ops/reshape.h | 4 +-
 mindspore/lite/src/ops/resize.cc | 2 +-
 mindspore/lite/src/ops/resize.h | 2 +-
 mindspore/lite/src/ops/return.cc | 2 +-
 mindspore/lite/src/ops/return.h | 2 +-
 mindspore/lite/src/ops/reverse_sequence.cc | 2 +-
 mindspore/lite/src/ops/reverse_sequence.h | 2 +-
 mindspore/lite/src/ops/roi_pooling.cc | 2 +-
 mindspore/lite/src/ops/roi_pooling.h | 2 +-
 mindspore/lite/src/ops/scatter_nd.cc | 6 +-
 mindspore/lite/src/ops/scatter_nd.h | 2 +-
 mindspore/lite/src/ops/shape.cc | 12 +-
 mindspore/lite/src/ops/shape.h | 2 +-
 mindspore/lite/src/ops/slice.cc | 4 +-
 mindspore/lite/src/ops/slice.h | 2 +-
 mindspore/lite/src/ops/softmax.cc | 2 +-
 mindspore/lite/src/ops/softmax.h | 2 +-
 .../lite/src/ops/softmax_cross_entropy.cc | 2 +-
 .../lite/src/ops/softmax_cross_entropy.h | 2 +-
 mindspore/lite/src/ops/space_to_batch.cc | 4 +-
 mindspore/lite/src/ops/space_to_batch.h | 2 +-
 mindspore/lite/src/ops/space_to_batch_nd.cc | 5 +-
 mindspore/lite/src/ops/space_to_batch_nd.h | 2 +-
 mindspore/lite/src/ops/space_to_depth.cc | 4 +-
 mindspore/lite/src/ops/space_to_depth.h | 2 +-
 mindspore/lite/src/ops/split.cc | 2 +-
 mindspore/lite/src/ops/split.h | 2 +-
 mindspore/lite/src/ops/squeeze.cc | 2 +-
 mindspore/lite/src/ops/squeeze.h | 2 +-
 mindspore/lite/src/ops/stack.cc | 2 +-
 mindspore/lite/src/ops/stack.h | 2 +-
 mindspore/lite/src/ops/strided_slice.cc | 2 +-
 mindspore/lite/src/ops/strided_slice.h | 2 +-
 mindspore/lite/src/ops/tile.cc | 2 +-
 mindspore/lite/src/ops/tile.h | 2 +-
 mindspore/lite/src/ops/topk.cc | 2 +-
 mindspore/lite/src/ops/topk.h | 2 +-
 mindspore/lite/src/ops/transpose.cc | 2 +-
 mindspore/lite/src/ops/transpose.h | 2 +-
 mindspore/lite/src/ops/unique.cc | 2 +-
 mindspore/lite/src/ops/unique.h | 2 +-
 mindspore/lite/src/ops/unsqueeze.cc | 4 +-
 mindspore/lite/src/ops/unsqueeze.h | 2 +-
 mindspore/lite/src/ops/unstack.cc | 2 +-
 mindspore/lite/src/ops/unstack.h | 2 +-
 mindspore/lite/src/ops/where.cc | 2 +-
 mindspore/lite/src/ops/where.h | 2 +-
 mindspore/lite/src/ops/zeros_like.cc | 2 +-
 mindspore/lite/src/ops/zeros_like.h | 2 +-
 mindspore/lite/src/param_value_lite.h | 5 +-
 mindspore/lite/src/populate_parameter.cc | 7 +-
 mindspore/lite/src/runtime/allocator.cc | 1 -
 mindspore/lite/src/runtime/allocator.h | 1 -
 .../kernel/arm/base/arg_min_max_base.cc | 23 +-
 .../kernel/arm/base/arg_min_max_base.h | 4 +-
 .../kernel/arm/base/batch_to_space_base.cc | 10 +-
 .../kernel/arm/base/batch_to_space_base.h | 4 +-
 .../runtime/kernel/arm/base/concat_base.cc | 21 +-
 .../src/runtime/kernel/arm/base/concat_base.h | 4 +-
 .../kernel/arm/base/convolution_base.cc | 20 +-
 .../kernel/arm/base/convolution_base.h | 8 +-
 .../src/runtime/kernel/arm/base/crop_base.cc | 21 +-
 .../src/runtime/kernel/arm/base/crop_base.h | 4 +-
 .../kernel/arm/base/depth_to_space_base.cc | 10 +-
 .../kernel/arm/base/depth_to_space_base.h | 4 +-
 .../kernel/arm/base/fullconnection_base.cc | 29 ++-
 .../kernel/arm/base/fullconnection_base.h | 4 +-
 .../kernel/arm/base/layout_transform.cc | 16 +-
 .../kernel/arm/base/layout_transform.h | 1 +
 .../kernel/arm/base/leaky_relu_base.cc | 9 +-
 .../runtime/kernel/arm/base/leaky_relu_base.h | 6 +-
 .../runtime/kernel/arm/base/matmul_base.cc | 28 ++-
 .../src/runtime/kernel/arm/base/matmul_base.h | 4 +-
 .../lite/src/runtime/kernel/arm/base/pad.cc | 14 +-
 .../runtime/kernel/arm/base/pooling_base.cc | 14 +-
 .../runtime/kernel/arm/base/pooling_base.h | 4 +-
 .../src/runtime/kernel/arm/base/power_base.cc | 14 +-
 .../src/runtime/kernel/arm/base/power_base.h | 4 +-
 .../src/runtime/kernel/arm/base/prior_box.cc | 13 +-
 .../src/runtime/kernel/arm/base/prior_box.h | 4 +-
 .../kernel/arm/base/quant_dtype_cast.cc | 14 +-
 .../kernel/arm/base/quant_dtype_cast.h | 4 +-
 .../runtime/kernel/arm/base/reduce_base.cc | 23 +-
 .../src/runtime/kernel/arm/base/reduce_base.h | 4 +-
 .../runtime/kernel/arm/base/reshape_base.cc | 21 +-
 .../runtime/kernel/arm/base/reshape_base.h | 4 +-
 .../runtime/kernel/arm/base/resize_base.cc | 14 +-
 .../src/runtime/kernel/arm/base/resize_base.h | 4 +-
 .../src/runtime/kernel/arm/base/slice_base.cc | 14 +-
 .../src/runtime/kernel/arm/base/slice_base.h | 4 +-
 .../runtime/kernel/arm/base/softmax_base.cc | 14 +-
 .../runtime/kernel/arm/base/softmax_base.h | 4 +-
 .../src/runtime/kernel/arm/base/split_base.cc | 21 +-
 .../src/runtime/kernel/arm/base/split_base.h | 4 +-
 .../runtime/kernel/arm/base/squeeze_base.cc | 7 +-
 .../runtime/kernel/arm/base/squeeze_base.h | 4 +-
 .../runtime/kernel/arm/base/strided_slice.cc | 10 +-
 .../runtime/kernel/arm/base/strided_slice.h | 4 +-
 .../kernel/arm/fp16/activation_fp16.cc | 11 +-
 .../runtime/kernel/arm/fp16/activation_fp16.h | 4 +-
 .../kernel/arm/fp16/arithmetic_fp16.cc | 12 +-
 .../runtime/kernel/arm/fp16/arithmetic_fp16.h | 4 +-
 .../runtime/kernel/arm/fp16/batchnorm_fp16.cc | 17 +-
 .../runtime/kernel/arm/fp16/batchnorm_fp16.h | 4 +-
 .../src/runtime/kernel/arm/fp16/cast_fp16.cc | 13 +-
 .../src/runtime/kernel/arm/fp16/cast_fp16.h | 4 +-
 .../runtime/kernel/arm/fp16/common_fp16.cc | 10 +-
 .../src/runtime/kernel/arm/fp16/common_fp16.h | 4 +-
 .../runtime/kernel/arm/fp16/concat_fp16.cc | 15 +-
 .../src/runtime/kernel/arm/fp16/concat_fp16.h | 4 +-
 .../kernel/arm/fp16/convolution_1x1_fp16.cc | 6 +-
 .../kernel/arm/fp16/convolution_1x1_fp16.h | 4 +-
 .../kernel/arm/fp16/convolution_3x3_fp16.cc | 4 +-
 .../kernel/arm/fp16/convolution_3x3_fp16.h | 4 +-
 .../kernel/arm/fp16/convolution_base_fp16.cc | 6 +-
 .../kernel/arm/fp16/convolution_base_fp16.h | 4 +-
 .../arm/fp16/convolution_depthwise_fp16.cc | 9 +-
 .../arm/fp16/convolution_depthwise_fp16.h | 4 +-
 .../convolution_depthwise_slidewindow_fp16.cc | 4 +-
 .../convolution_depthwise_slidewindow_fp16.h | 4 +-
 .../kernel/arm/fp16/convolution_fp16.cc | 11 +-
 .../kernel/arm/fp16/convolution_fp16.h | 4 +-
 .../kernel/arm/fp16/convolution_sw_fp16.cc | 4 +-
 .../kernel/arm/fp16/convolution_sw_fp16.h | 4 +-
 .../arm/fp16/convolution_winograd_fp16.cc | 4 +-
 .../arm/fp16/convolution_winograd_fp16.h | 4 +-
 .../arm/fp16/deconvolution_depthwise_fp16.cc | 11 +-
 .../arm/fp16/deconvolution_depthwise_fp16.h | 4 +-
 .../kernel/arm/fp16/deconvolution_fp16.cc | 13 +-
 .../kernel/arm/fp16/deconvolution_fp16.h | 4 +-
 .../kernel/arm/fp16/fullconnection_fp16.cc | 16 +-
 .../kernel/arm/fp16/fullconnection_fp16.h | 4 +-
 .../kernel/arm/fp16/fused_batchnorm_fp16.cc | 35 ++-
 .../kernel/arm/fp16/fused_batchnorm_fp16.h | 4 +-
 .../kernel/arm/fp16/layout_transform_fp16.cc | 14 +-
 .../runtime/kernel/arm/fp16/matmul_fp16.cc | 29 ++-
 .../src/runtime/kernel/arm/fp16/matmul_fp16.h | 4 +-
 .../runtime/kernel/arm/fp16/pooling_fp16.cc | 9 +-
 .../runtime/kernel/arm/fp16/pooling_fp16.h | 4 +-
 .../runtime/kernel/arm/fp16/reduce_fp16.cc | 26 +--
 .../src/runtime/kernel/arm/fp16/reduce_fp16.h | 4 +-
 .../runtime/kernel/arm/fp16/reshape_fp16.cc | 15 +-
 .../runtime/kernel/arm/fp16/reshape_fp16.h | 4 +-
 .../runtime/kernel/arm/fp16/softmax_fp16.cc | 13 +-
 .../runtime/kernel/arm/fp16/softmax_fp16.h | 4 +-
 .../src/runtime/kernel/arm/fp16/split_fp16.cc | 15 +-
 .../src/runtime/kernel/arm/fp16/split_fp16.h | 4 +-
 .../runtime/kernel/arm/fp16/transpose_fp16.cc | 15 +-
 .../runtime/kernel/arm/fp16/transpose_fp16.h | 4 +-
 .../src/runtime/kernel/arm/fp32/activation.cc | 11 +-
 .../src/runtime/kernel/arm/fp32/activation.h | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/addn.cc | 17 +-
 .../lite/src/runtime/kernel/arm/fp32/addn.h | 4 +-
 .../src/runtime/kernel/arm/fp32/argminmax.cc | 4 +-
 .../src/runtime/kernel/arm/fp32/argminmax.h | 4 +-
 .../src/runtime/kernel/arm/fp32/arithmetic.cc | 21 +-
 .../src/runtime/kernel/arm/fp32/arithmetic.h | 4 +-
 .../kernel/arm/fp32/arithmetic_self.cc | 22 +-
 .../runtime/kernel/arm/fp32/arithmetic_self.h | 4 +-
 .../runtime/kernel/arm/fp32/batch_to_space.cc | 4 +-
 .../runtime/kernel/arm/fp32/batch_to_space.h | 4 +-
 .../src/runtime/kernel/arm/fp32/batchnorm.cc | 13 +-
 .../src/runtime/kernel/arm/fp32/batchnorm.h | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/bias.cc | 10 +-
 .../lite/src/runtime/kernel/arm/fp32/bias.h | 4 +-
 .../runtime/kernel/arm/fp32/broadcast_to.cc | 8 +-
 .../runtime/kernel/arm/fp32/broadcast_to.h | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/cast.cc | 19 +-
 .../lite/src/runtime/kernel/arm/fp32/cast.h | 4 +-
 .../src/runtime/kernel/arm/fp32/concat.cc | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/concat.h | 4 +-
 .../kernel/arm/fp32/constant_of_shape.cc | 6 +-
 .../kernel/arm/fp32/constant_of_shape.h | 4 +-
 .../runtime/kernel/arm/fp32/convolution.cc | 20 +-
 .../src/runtime/kernel/arm/fp32/convolution.h | 4 +-
 .../kernel/arm/fp32/convolution_1x1.cc | 9 +-
 .../runtime/kernel/arm/fp32/convolution_1x1.h | 4 +-
 .../kernel/arm/fp32/convolution_3x3.cc | 10 +-
 .../runtime/kernel/arm/fp32/convolution_3x3.h | 4 +-
 .../kernel/arm/fp32/convolution_depthwise.cc | 17 +-
 .../kernel/arm/fp32/convolution_depthwise.h | 4 +-
 .../fp32/convolution_depthwise_slidewindow.cc | 8 +-
 .../fp32/convolution_depthwise_slidewindow.h | 4 +-
 .../arm/fp32/convolution_slidewindow.cc | 12 +-
 .../kernel/arm/fp32/convolution_slidewindow.h | 4 +-
 .../kernel/arm/fp32/convolution_winograd.cc | 10 +-
 .../kernel/arm/fp32/convolution_winograd.h | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/crop.cc | 8 +-
 .../lite/src/runtime/kernel/arm/fp32/crop.h | 4 +-
 .../runtime/kernel/arm/fp32/deconvolution.cc | 15 +-
 .../runtime/kernel/arm/fp32/deconvolution.h | 4 +-
 .../arm/fp32/deconvolution_depthwise.cc | 15 +-
 .../kernel/arm/fp32/deconvolution_depthwise.h | 4 +-
 .../runtime/kernel/arm/fp32/depth_to_space.cc | 6 +-
 .../runtime/kernel/arm/fp32/depth_to_space.h | 4 +-
 .../kernel/arm/fp32/detection_post_process.cc | 22 +-
 .../kernel/arm/fp32/detection_post_process.h | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/elu.cc | 8 +-
 .../lite/src/runtime/kernel/arm/fp32/elu.h | 4 +-
 .../kernel/arm/fp32/embedding_lookup.cc | 10 +-
 .../kernel/arm/fp32/embedding_lookup.h | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/exp.cc | 8 +-
 .../lite/src/runtime/kernel/arm/fp32/exp.h | 4 +-
 .../src/runtime/kernel/arm/fp32/expandDims.cc | 8 +-
 .../src/runtime/kernel/arm/fp32/expandDims.h | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/fill.cc | 11 +-
 .../lite/src/runtime/kernel/arm/fp32/fill.h | 4 +-
 .../src/runtime/kernel/arm/fp32/flatten.cc | 11 +-
 .../src/runtime/kernel/arm/fp32/flatten.h | 4 +-
 .../runtime/kernel/arm/fp32/fullconnection.cc | 16 +-
 .../runtime/kernel/arm/fp32/fullconnection.h | 4 +-
 .../kernel/arm/fp32/fused_batchnorm.cc | 16 +-
 .../runtime/kernel/arm/fp32/fused_batchnorm.h | 4 +-
 .../src/runtime/kernel/arm/fp32/gather.cc | 23 +-
 .../lite/src/runtime/kernel/arm/fp32/gather.h | 6 +-
 .../src/runtime/kernel/arm/fp32/gatherNd.cc | 13 +-
 .../src/runtime/kernel/arm/fp32/gatherNd.h | 4 +-
 .../src/runtime/kernel/arm/fp32/l2_norm.cc | 30 +--
 .../src/runtime/kernel/arm/fp32/l2_norm.h | 8 +-
 .../src/runtime/kernel/arm/fp32/leaky_relu.cc | 15 +-
 .../src/runtime/kernel/arm/fp32/leaky_relu.h | 6 +-
 .../kernel/arm/fp32/local_response_norm.cc | 8 +-
 .../kernel/arm/fp32/local_response_norm.h | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/lstm.cc | 22 +-
 .../lite/src/runtime/kernel/arm/fp32/lstm.h | 4 +-
 .../src/runtime/kernel/arm/fp32/matmul.cc | 16 +-
 .../lite/src/runtime/kernel/arm/fp32/matmul.h | 4 +-
 .../src/runtime/kernel/arm/fp32/nchw2nhwc.cc | 17 +-
 .../src/runtime/kernel/arm/fp32/nchw2nhwc.h | 4 +-
 .../src/runtime/kernel/arm/fp32/nhwc2nchw.cc | 17 +-
 .../src/runtime/kernel/arm/fp32/nhwc2nchw.h | 4 +-
 .../src/runtime/kernel/arm/fp32/one_hot.cc | 17 +-
 .../src/runtime/kernel/arm/fp32/one_hot.h | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/pad.cc | 8 +-
 .../lite/src/runtime/kernel/arm/fp32/pad.h | 4 +-
 .../src/runtime/kernel/arm/fp32/pooling.cc | 4 +-
 .../src/runtime/kernel/arm/fp32/pooling.h | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/power.cc | 6 +-
 .../lite/src/runtime/kernel/arm/fp32/power.h | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/prelu.cc | 10 +-
 .../lite/src/runtime/kernel/arm/fp32/prelu.h | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/range.cc | 9 +-
 .../lite/src/runtime/kernel/arm/fp32/range.h | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/rank.cc | 9 +-
 .../lite/src/runtime/kernel/arm/fp32/rank.h | 4 +-
 .../src/runtime/kernel/arm/fp32/reduce.cc | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/reduce.h | 4 +-
 .../src/runtime/kernel/arm/fp32/reshape.cc | 4 +-
 .../src/runtime/kernel/arm/fp32/reshape.h | 4 +-
 .../src/runtime/kernel/arm/fp32/resize.cc | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/resize.h | 4 +-
 .../src/runtime/kernel/arm/fp32/reverse.cc | 11 +-
 .../src/runtime/kernel/arm/fp32/reverse.h | 4 +-
 .../kernel/arm/fp32/reverse_sequence.cc | 10 +-
 .../kernel/arm/fp32/reverse_sequence.h | 4 +-
 .../runtime/kernel/arm/fp32/roi_pooling.cc | 13 +-
 .../src/runtime/kernel/arm/fp32/roi_pooling.h | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/scale.cc | 33 ++-
 .../lite/src/runtime/kernel/arm/fp32/scale.h | 4 +-
 .../src/runtime/kernel/arm/fp32/scatter_nd.cc | 15 +-
 .../src/runtime/kernel/arm/fp32/scatter_nd.h | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/shape.cc | 11 +-
 .../lite/src/runtime/kernel/arm/fp32/shape.h | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/slice.cc | 8 +-
 .../lite/src/runtime/kernel/arm/fp32/slice.h | 4 +-
 .../src/runtime/kernel/arm/fp32/softmax.cc | 4 +-
 .../src/runtime/kernel/arm/fp32/softmax.h | 4 +-
 .../runtime/kernel/arm/fp32/space_to_batch.cc | 26 +--
 .../runtime/kernel/arm/fp32/space_to_batch.h | 4 +-
 .../runtime/kernel/arm/fp32/space_to_depth.cc | 12 +-
 .../runtime/kernel/arm/fp32/space_to_depth.h | 4 +-
 .../kernel/arm/fp32/sparse_to_dense.cc | 14 +-
 .../runtime/kernel/arm/fp32/sparse_to_dense.h | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/split.cc | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/split.h | 4 +-
 .../src/runtime/kernel/arm/fp32/squeeze.cc | 15 +-
 .../src/runtime/kernel/arm/fp32/squeeze.h | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/stack.cc | 19 +-
 .../lite/src/runtime/kernel/arm/fp32/stack.h | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/tile.cc | 8 +-
 .../lite/src/runtime/kernel/arm/fp32/tile.h | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/topk.cc | 12 +-
 .../lite/src/runtime/kernel/arm/fp32/topk.h | 7 +-
 .../src/runtime/kernel/arm/fp32/transpose.cc | 32 ++-
 .../src/runtime/kernel/arm/fp32/transpose.h | 12 +-
 .../src/runtime/kernel/arm/fp32/unique.cc | 12 +-
 .../lite/src/runtime/kernel/arm/fp32/unique.h | 4 +-
 .../src/runtime/kernel/arm/fp32/unsqueeze.cc | 11 +-
 .../src/runtime/kernel/arm/fp32/unsqueeze.h | 4 +-
 .../src/runtime/kernel/arm/fp32/unstack.cc | 10 +-
 .../src/runtime/kernel/arm/fp32/unstack.h | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/where.cc | 15 +-
 .../lite/src/runtime/kernel/arm/fp32/where.h | 4 +-
 .../src/runtime/kernel/arm/fp32/zeroslike.cc | 11 +-
 .../src/runtime/kernel/arm/fp32/zeroslike.h | 4 +-
 .../kernel/arm/fp32_grad/activation_grad.cc | 10 +-
 .../kernel/arm/fp32_grad/activation_grad.h | 4 +-
 .../kernel/arm/fp32_grad/apply_momentum.cc | 26 +--
 .../kernel/arm/fp32_grad/apply_momentum.h | 10 +-
 .../kernel/arm/fp32_grad/arithmetic_grad.cc | 34 +--
 .../kernel/arm/fp32_grad/arithmetic_grad.h | 4 +-
 .../runtime/kernel/arm/fp32_grad/bias_grad.cc | 11 +-
 .../runtime/kernel/arm/fp32_grad/bias_grad.h | 4 +-
 .../runtime/kernel/arm/fp32_grad/bn_grad.cc | 19 +-
 .../runtime/kernel/arm/fp32_grad/bn_grad.h | 9 +-
 .../kernel/arm/fp32_grad/convolution.cc | 14 +-
 .../kernel/arm/fp32_grad/convolution.h | 16 +-
 .../arm/fp32_grad/convolution_grad_filter.cc | 10 +-
 .../arm/fp32_grad/convolution_grad_filter.h | 9 +-
 .../arm/fp32_grad/convolution_grad_input.cc | 10 +-
 .../arm/fp32_grad/convolution_grad_input.h | 6 +-
 .../runtime/kernel/arm/fp32_grad/depend.cc | 19 +-
 .../src/runtime/kernel/arm/fp32_grad/depend.h | 6 +-
 .../runtime/kernel/arm/fp32_grad/make_tuple.h | 6 +-
 .../kernel/arm/fp32_grad/pooling_grad.cc | 12 +-
 .../kernel/arm/fp32_grad/pooling_grad.h | 5 +-
 .../kernel/arm/fp32_grad/power_grad.cc | 14 +-
 .../runtime/kernel/arm/fp32_grad/power_grad.h | 4 +-
 ...parse_softmax_cross_entropy_with_logits.cc | 12 +-
 ...sparse_softmax_cross_entropy_with_logits.h | 11 +-
 .../kernel/arm/fp32_grad/tuple_getitem.cc | 19 +-
 .../kernel/arm/fp32_grad/tuple_getitem.h | 6 +-
 .../src/runtime/kernel/arm/int8/activation.cc | 7 +-
 .../src/runtime/kernel/arm/int8/add_int8.cc | 23 +-
 .../src/runtime/kernel/arm/int8/add_int8.h | 4 +-
 .../runtime/kernel/arm/int8/argminmax_int8.cc | 4 +-
 .../runtime/kernel/arm/int8/argminmax_int8.h | 4 +-
 .../kernel/arm/int8/arithmetic_int8.cc | 21 +-
 .../runtime/kernel/arm/int8/arithmetic_int8.h | 4 +-
 .../kernel/arm/int8/arithmetic_self_int8.cc | 8 +-
 .../kernel/arm/int8/arithmetic_self_int8.h | 4 +-
 .../kernel/arm/int8/batch_to_space_int8.cc | 4 +-
 .../kernel/arm/int8/batch_to_space_int8.h | 4 +-
 .../runtime/kernel/arm/int8/batchnorm_int8.cc | 23 +-
 .../runtime/kernel/arm/int8/batchnorm_int8.h | 4 +-
 .../runtime/kernel/arm/int8/bias_add_int8.cc | 12 +-
 .../runtime/kernel/arm/int8/bias_add_int8.h | 4 +-
 .../runtime/kernel/arm/int8/concat_int8.cc | 17 +-
 .../src/runtime/kernel/arm/int8/concat_int8.h | 4 +-
 .../kernel/arm/int8/convolution_1x1_int8.cc | 12 +-
 .../kernel/arm/int8/convolution_1x1_int8.h | 4 +-
 .../kernel/arm/int8/convolution_3x3_int8.cc | 12 +-
 .../kernel/arm/int8/convolution_3x3_int8.h | 4 +-
 .../arm/int8/convolution_depthwise_int8.cc | 15 +-
 .../arm/int8/convolution_depthwise_int8.h | 4 +-
 .../convolution_depthwise_slidewindow_int8.cc | 8 +-
 .../convolution_depthwise_slidewindow_int8.h | 4 +-
 .../kernel/arm/int8/convolution_int8.cc | 21 +-
 .../kernel/arm/int8/convolution_int8.h | 4 +-
 .../src/runtime/kernel/arm/int8/crop_int8.cc | 4 +-
 .../src/runtime/kernel/arm/int8/crop_int8.h | 4 +-
 .../arm/int8/deconvolution_depthwise_int8.cc | 15 +-
 .../arm/int8/deconvolution_depthwise_int8.h | 4 +-
 .../kernel/arm/int8/deconvolution_int8.cc | 19 +-
 .../kernel/arm/int8/deconvolution_int8.h | 4 +-
 .../kernel/arm/int8/depth_to_space_int8.cc | 4 +-
 .../kernel/arm/int8/depth_to_space_int8.h | 4 +-
 .../src/runtime/kernel/arm/int8/div_int8.cc | 26 +--
 .../src/runtime/kernel/arm/int8/div_int8.h | 4 +-
 .../kernel/arm/int8/fullconnection_int8.cc | 8 +-
 .../kernel/arm/int8/fullconnection_int8.h | 4 +-
 .../runtime/kernel/arm/int8/gatherNd_int8.cc | 13 +-
 .../runtime/kernel/arm/int8/gatherNd_int8.h | 4 +-
 .../runtime/kernel/arm/int8/gather_int8.cc | 13 +-
 .../src/runtime/kernel/arm/int8/gather_int8.h | 4 +-
 .../runtime/kernel/arm/int8/hswish_int8.cc | 8 +-
 .../src/runtime/kernel/arm/int8/hswish_int8.h | 4 +-
 .../kernel/arm/int8/leaky_relu_int8.cc | 4 +-
 .../runtime/kernel/arm/int8/leaky_relu_int8.h | 9 +-
 .../runtime/kernel/arm/int8/matmul_int8.cc | 6 +-
 .../src/runtime/kernel/arm/int8/matmul_int8.h | 4 +-
 .../src/runtime/kernel/arm/int8/mul_int8.cc | 22 +-
 .../src/runtime/kernel/arm/int8/mul_int8.h | 4 +-
 .../src/runtime/kernel/arm/int8/pad_int8.cc | 4 +-
 .../src/runtime/kernel/arm/int8/pad_int8.h | 4 +-
 .../runtime/kernel/arm/int8/pooling_int8.cc | 4 +-
 .../runtime/kernel/arm/int8/pooling_int8.h | 4 +-
 .../src/runtime/kernel/arm/int8/power_int8.cc | 6 +-
 .../src/runtime/kernel/arm/int8/power_int8.h | 9 +-
 .../runtime/kernel/arm/int8/reduce_int8.cc | 28 +--
 .../src/runtime/kernel/arm/int8/reduce_int8.h | 4 +-
 .../src/runtime/kernel/arm/int8/relux_int8.cc | 8 +-
 .../src/runtime/kernel/arm/int8/relux_int8.h | 12 +-
 .../runtime/kernel/arm/int8/reshape_int8.cc | 4 +-
 .../runtime/kernel/arm/int8/reshape_int8.h | 4 +-
 .../runtime/kernel/arm/int8/resize_int8.cc | 4 +-
 .../src/runtime/kernel/arm/int8/resize_int8.h | 4 +-
 .../runtime/kernel/arm/int8/sigmoid_int8.cc | 8 +-
 .../runtime/kernel/arm/int8/sigmoid_int8.h | 4 +-
 .../src/runtime/kernel/arm/int8/slice_int8.cc | 8 +-
 .../src/runtime/kernel/arm/int8/slice_int8.h | 9 +-
 .../runtime/kernel/arm/int8/softmax_int8.cc | 8 +-
 .../runtime/kernel/arm/int8/softmax_int8.h | 4 +-
 .../src/runtime/kernel/arm/int8/split_int8.cc | 4 +-
 .../src/runtime/kernel/arm/int8/split_int8.h | 4 +-
 .../runtime/kernel/arm/int8/squeeze_int8.cc | 10 +-
 .../runtime/kernel/arm/int8/squeeze_int8.h | 4 +-
 .../src/runtime/kernel/arm/int8/sub_int8.cc | 26 +--
 .../src/runtime/kernel/arm/int8/sub_int8.h | 4 +-
 .../src/runtime/kernel/arm/int8/topk_int8.cc | 12 +-
 .../src/runtime/kernel/arm/int8/topk_int8.h | 10 +-
 .../runtime/kernel/arm/int8/unsqueeze_int8.cc | 15 +-
 .../runtime/kernel/arm/int8/unsqueeze_int8.h | 4 +-
 .../src/runtime/kernel/opencl/image_format.h | 1 -
 .../kernel/opencl/kernel/activation.cc | 8 +-
 .../runtime/kernel/opencl/kernel/activation.h | 4 +-
 .../kernel/opencl/kernel/arithmetic.cc | 50 ++---
 .../runtime/kernel/opencl/kernel/arithmetic.h | 6 +-
 .../kernel/opencl/kernel/arithmetic_self.cc | 8 +-
 .../kernel/opencl/kernel/arithmetic_self.h | 4 +-
 .../runtime/kernel/opencl/kernel/batchnorm.cc | 23 +-
 .../runtime/kernel/opencl/kernel/batchnorm.h | 4 +-
 .../runtime/kernel/opencl/kernel/biasadd.cc | 22 +-
 .../runtime/kernel/opencl/kernel/biasadd.h | 6 +-
 .../runtime/kernel/opencl/kernel/concat.cc | 25 +--
 .../src/runtime/kernel/opencl/kernel/concat.h | 4 +-
 .../kernel/opencl/kernel/conv2d_transpose.cc | 18 +-
 .../kernel/opencl/kernel/conv2d_transpose.h | 4 +-
 .../kernel/opencl/kernel/convolution.cc | 42 ++--
 .../kernel/opencl/kernel/convolution.h | 6 +-
 .../kernel/opencl/kernel/depthwise_conv2d.cc | 22 +-
 .../kernel/opencl/kernel/depthwise_conv2d.h | 4 +-
 .../runtime/kernel/opencl/kernel/matmul.cc | 21 +-
 .../src/runtime/kernel/opencl/kernel/matmul.h | 4 +-
 .../runtime/kernel/opencl/kernel/pooling2d.cc | 15 +-
 .../runtime/kernel/opencl/kernel/pooling2d.h | 4 +-
 .../src/runtime/kernel/opencl/kernel/prelu.cc | 26 +--
 .../src/runtime/kernel/opencl/kernel/prelu.h | 6 +-
 .../runtime/kernel/opencl/kernel/reshape.cc | 15 +-
 .../runtime/kernel/opencl/kernel/reshape.h | 4 +-
 .../src/runtime/kernel/opencl/kernel/slice.cc | 13 +-
 .../src/runtime/kernel/opencl/kernel/slice.h | 4 +-
 .../runtime/kernel/opencl/kernel/softmax.cc | 19 +-
 .../runtime/kernel/opencl/kernel/softmax.h | 4 +-
 .../runtime/kernel/opencl/kernel/to_format.cc | 34 +--
 .../runtime/kernel/opencl/kernel/to_format.h | 4 +-
 .../runtime/kernel/opencl/kernel/transpose.cc | 21 +-
 .../runtime/kernel/opencl/kernel/transpose.h | 4 +-
 .../src/runtime/kernel/opencl/opencl_kernel.h | 14 +-
 .../kernel/opencl/subgraph_opencl_kernel.cc | 28 +--
 .../kernel/opencl/subgraph_opencl_kernel.h | 16 +-
 .../lite/src/runtime/kernel/opencl/utils.cc | 5 +-
 .../lite/src/runtime/kernel/opencl/utils.h | 17 +-
 .../src/runtime/opencl/opencl_executor.cc | 7 +-
 .../lite/src/runtime/opencl/opencl_executor.h | 6 +-
 .../lite/src/runtime/opencl/opencl_runtime.cc | 11 +-
 .../lite/src/runtime/parallel_executor.cc | 4 +-
 .../lite/src/runtime/parallel_executor.h | 2 +-
 mindspore/lite/src/runtime/thread_pool.h | 2 +-
 mindspore/lite/src/runtime/workspace_pool.cc | 1 -
 mindspore/lite/src/runtime/workspace_pool.h | 1 -
 mindspore/lite/src/scheduler.cc | 47 ++--
 mindspore/lite/src/scheduler.h | 15 +-
 mindspore/lite/src/{ir => }/tensor.cc | 209 ++++++------------
 mindspore/lite/src/{ir => }/tensor.h | 126 ++++++-----
 mindspore/lite/src/train/loss_kernel.h | 7 +-
 .../src/train/train_populate_parameter.cc | 16 +-
 .../lite/src/train/train_populate_parameter.h | 3 +-
 mindspore/lite/src/train/train_session.cc | 42 ++--
 mindspore/lite/test/CMakeLists.txt | 102 +++------
 mindspore/lite/test/main.cc | 2 +-
 mindspore/lite/test/st/benchmark_test.cc | 45 ++--
 mindspore/lite/test/st/converter_test.cc | 1 -
 mindspore/lite/test/ut/internal/infer_test.cc | 63 ++++++
 .../test/ut/src/dataset/de_tensor_test.cc | 11 +-
 .../lite/test/ut/src/dataset/eager_test.cc | 5 +-
 mindspore/lite/test/ut/src/infer_test.cc | 25 +--
 .../kernel/arm/common/strided_slice_tests.cc | 19 +-
 .../kernel/arm/fp16/convolution_fp16_tests.cc | 2 +-
 .../kernel/arm/fp16/reduce_fp16_tests.cc | 8 +-
 .../kernel/arm/fp32/activation_fp32_test.cc | 8 +-
 .../kernel/arm/fp32/argminmax_fp32_test.cc | 151 +++---------
 .../kernel/arm/fp32/arithmetic_fp32_tests.cc | 82 +++----
 .../arm/fp32/batch_to_space_fp32_test.cc | 12 +-
 .../kernel/arm/fp32/batchnorm_fp32_tests.cc | 40 ++--
 .../arm/fp32/constant_of_shape_fp32_test.cc | 20 +-
 .../kernel/arm/fp32/conv1x1_fp32_tests.cc | 62 +++---
 .../fp32/convolution_depthwise_fp32_tests.cc | 36 +--
 .../runtime/kernel/arm/fp32/crop_fp32_test.cc | 72 ++----
 .../arm/fp32/deconvolution_fp32_tests.cc | 101 ++++-----
 .../arm/fp32/detection_post_process_test.cc | 46 ++--
 .../runtime/kernel/arm/fp32/elu_fp32_test.cc | 10 +-
 .../arm/fp32/embedding_lookup_fp32_test.cc | 18 +-
 .../arm/fp32/fullconnection_fp32_tests.cc | 46 ++--
 .../kernel/arm/fp32/lstm_fp32_tests.cc | 91 ++++----
 .../kernel/arm/fp32/matmul_fp32_tests.cc | 72 +++---
 .../kernel/arm/fp32/power_fp32_tests.cc | 44 ++--
 .../arm/fp32/resize_bilinear_fp32_tests.cc | 10 +-
 .../resize_nearest_neighbor_fp32_tests.cc | 8 +-
 .../arm/fp32/reverse_sequence_fp32_tests.cc | 30 +--
 .../kernel/arm/fp32/roi_pooling_fp32_tests.cc | 26 +--
 .../arm/fp32/space_to_batch_fp32_tests.cc | 76 +++----
 .../arm/fp32/space_to_depth_fp32_tests.cc | 8 +-
 .../kernel/arm/fp32/tile_fp32_tests.cc | 8 +-
 .../kernel/arm/fp32/topk_fp32_tests.cc | 16 +-
 .../kernel/arm/fp32/transpose_fp32_tests.cc | 8 +-
 .../kernel/arm/fp32/unique_fp32_tests.cc | 10 +-
 .../kernel/arm/fp32/unstack_fp32_tests.cc | 26 +--
 .../fp32_grad/activation_grad_fp32_tests.cc | 2 +-
 .../arm/fp32_grad/bias_grad_fp32_tests.cc | 10 +-
 .../kernel/arm/fp32_grad/bn_grad_fp32_test.cc | 24 +-
 .../fp32_grad/convolution_grad_fp32_tests.cc | 121 +++++-----
 .../kernel/arm/fp32_grad/network_test.cc | 21 +-
 .../arm/fp32_grad/pooling_grad_fp32_tests.cc | 96 ++++----
 .../softmax_crossentropy_fp32_tests.cc | 12 +-
 .../runtime/kernel/arm/int8/add_int8_tests.cc | 16 +-
 .../arm/int8/arithmetic_self_int8_tests.cc | 194 ++++++++--------
 .../kernel/arm/int8/batchnorm_int8_test.cc | 48 ++--
 .../kernel/arm/int8/bias_add_int8_tests.cc | 10 +-
 .../kernel/arm/int8/concat_int8_tests.cc | 44 ++--
 .../kernel/arm/int8/conv_1x1_int8_tests.cc | 85 ++++---
 .../kernel/arm/int8/crop_int8_tests.cc | 122 +++++-----
 .../kernel/arm/int8/deconv_int8_tests.cc | 22 +-
 .../runtime/kernel/arm/int8/div_int8_test.cc | 16 +-
 .../arm/int8/fullconnection_int8_tests.cc | 21 +-
 .../kernel/arm/int8/gatherNd_int8_test.cc | 16 +-
 .../kernel/arm/int8/gather_int8_test.cc | 16 +-
 .../kernel/arm/int8/hswish_int8_tests.cc | 12 +-
 .../kernel/arm/int8/matmul_int8_tests.cc | 25 +--
 .../runtime/kernel/arm/int8/mul_int8_tests.cc | 58 ++---
 .../runtime/kernel/arm/int8/pad_int8_tests.cc | 46 ++--
 .../kernel/arm/int8/power_int8_tests.cc | 26 +--
 .../kernel/arm/int8/prelu_int8_tests.cc | 14 +-
 .../kernel/arm/int8/quant_dtype_cast_tests.cc | 20 +-
 .../kernel/arm/int8/reduce_int8_tests.cc | 6 +-
 .../kernel/arm/int8/relux_int8_tests.cc | 24 +-
 .../kernel/arm/int8/reshape_int8_tests.cc | 26 +--
 .../arm/int8/resize_bilinear_int8_tests.cc | 30 +--
 .../resize_nearest_neighbor_int8_tests.cc | 14 +-
 .../kernel/arm/int8/sigmoid_int8_tests.cc | 12 +-
 .../kernel/arm/int8/slice_int8_tests.cc | 12 +-
 .../kernel/arm/int8/softmax_int8_tests.cc | 12 +-
 .../kernel/arm/int8/split_int8_tests.cc | 48 ++--
 .../kernel/arm/int8/squeeze_int8_tests.cc | 14 +-
 .../runtime/kernel/arm/int8/sub_int_tests.cc | 16 +-
 .../kernel/arm/int8/topk_int8_tests.cc | 16 +-
 .../kernel/arm/int8/unsqueeze_int8_tests.cc | 14 +-
 .../runtime/kernel/opencl/activation_tests.cc | 58 ++---
 .../kernel/opencl/arithmetic_self_tests.cc | 14 +-
 .../runtime/kernel/opencl/arithmetic_tests.cc | 32 +--
 .../kernel/opencl/avg_pooling_tests.cc | 19 +-
 .../runtime/kernel/opencl/batchnorm_tests.cc | 80 ++++---
 .../runtime/kernel/opencl/biasadd_tests.cc | 26 +--
 .../src/runtime/kernel/opencl/concat_tests.cc | 46 ++--
 .../kernel/opencl/conv2d_transpose_tests.cc | 20 +-
 .../kernel/opencl/convolution_tests.cc | 18 +-
 .../kernel/opencl/depthwise_conv2d_tests.cc | 24 +-
 .../src/runtime/kernel/opencl/matmul_tests.cc | 22 +-
 .../kernel/opencl/max_pooling_tests.cc | 19 +-
 .../src/runtime/kernel/opencl/prelu_tests.cc | 26 +--
 .../runtime/kernel/opencl/reshape_tests.cc | 19 +-
 .../src/runtime/kernel/opencl/slice_tests.cc | 32 ++-
 .../runtime/kernel/opencl/softmax_tests.cc | 15 +-
 .../runtime/kernel/opencl/to_format_tests.cc | 15 +-
 .../runtime/kernel/opencl/transpose_tests.cc | 18 +-
 .../src/runtime/kernel/opencl/utils_tests.h | 4 +-
 mindspore/lite/test/ut/src/utils_test.cc | 10 +-
 .../tflite/tflite_activation_parser_test.cc | 4 +-
 .../parser/tflite/tflite_addn_parser_test.cc | 4 +-
 .../tflite/tflite_arithmetic_parser_test.cc | 38 +---
 .../parser/tflite/tflite_cast_parser_test.cc | 4 +-
 .../tflite_depth_to_space_parser_test.cc | 4 +-
 .../parser/tflite/tflite_fill_parser_test.cc | 2 +-
 .../tflite/tflite_logical_parser_test.cc | 1 -
 .../parser/tflite/tflite_lrn_parser_test.cc | 4 +-
 .../tflite/tflite_pooling_parser_test.cc | 8 +-
 .../tflite/tflite_reshape_parser_test.cc | 4 +-
 .../tflite_reverse_sequence_parser_test.cc | 4 +-
 .../tflite/tflite_softmax_parser_test.cc | 4 +-
 .../tflite_space_to_batch_nd_parser_test.cc | 4 +-
 .../tflite_space_to_depth_parser_test.cc | 4 +-
 .../tflite_sparse_to_dense_parser_test.cc | 4 +-
 .../tflite_strided_slice_parser_test.cc | 4 +-
 .../parser/tflite/tflite_tile_parser_test.cc | 4 +-
 .../tflite/tflite_topk_v2_parser_test.cc | 4 +-
 .../tflite/tflite_transpose_parser_test.cc | 18 +-
 .../tflite/tflite_unique_parser_test.cc | 4 +-
 .../tflite/tflite_unstack_parser_test.cc | 4 +-
 .../fusion/constant_folding_fusion_test.cc | 24 +-
 .../fusion/conv_activation_fusion_test.cc | 3 +-
 .../fusion/conv_biasadd_fusion_test.cc | 3 +-
 .../optimizer/fusion/conv_bn_fusion_test.cc | 2 -
 .../lite/tools/anf_exporter/anf_exporter.cc | 14 +-
 .../anf_importer/import_from_meta_graphT.cc | 4 +-
 .../anf_importer/import_from_protobuf.cc | 49 ++--
 .../tools/anf_importer/import_from_protobuf.h | 4 +-
 mindspore/lite/tools/benchmark/benchmark.cc | 8 +-
 mindspore/lite/tools/benchmark/main.cc | 1 -
 .../lite/tools/common/converter_op_utils.h | 1 -
 mindspore/lite/tools/common/flag_parser.cc | 3 +-
 mindspore/lite/tools/common/graph_util.h | 3 +-
 mindspore/lite/tools/common/node_util.cc | 110 ++++-----
 mindspore/lite/tools/common/node_util.h | 8 +-
 mindspore/lite/tools/common/option.h | 1 -
 mindspore/lite/tools/common/storage.h | 1 -
 mindspore/lite/tools/common/tensor_util.cc | 2 +-
 mindspore/lite/tools/common/tensor_util.h | 23 +-
 mindspore/lite/tools/converter/CMakeLists.txt | 69 +-----
 .../lite/tools/converter/anf_transform.cc | 14 +-
 mindspore/lite/tools/converter/converter.h | 1 -
 .../lite/tools/converter/converter_flags.h | 13 +-
 .../lite/tools/converter/graphdef_transform.h | 1 -
 .../fusion/batchnorm_fold_fusion_pass.cc | 9 +-
 .../fusion/format_trans_fusion_pass.cc | 2 +-
 .../fusion/format_trans_fusion_pass.h | 1 -
 .../format_trans_transpose_fusion_pass.cc | 2 +-
 .../legacy_optimizer/fusion/fusion_pass.cc | 5 +-
 .../legacy_optimizer/fusion/fusion_pass.h | 3 +-
 .../legacy_optimizer/fusion/fusion_pattern.h | 7 +-
 .../fusion/matmul_biasadd_fusion_pass.cc | 2 +-
 .../fusion/matmul_biasadd_fusion_pass.h | 10 +-
 .../fusion/mul_add_fusion_pass.cc | 4 +-
 .../fusion/mul_add_fusion_pass.h | 4 +-
 .../fusion/quant_cast_fusion_pass.cc | 6 +-
 .../fusion/quant_cast_fusion_pass.h | 1 -
 .../graph/batchnorm_convert_scale_pass.cc | 14 +-
 .../graph/format_trans_pass.cc | 2 +-
 .../legacy_optimizer/graph/infershape_pass.cc | 33 ++-
 .../graph/isolated_node_remove_pass.cc | 1 -
 .../graph/isolated_node_remove_pass.h | 1 -
 .../model_input_format_preprocess_pass.cc | 8 +-
 .../model_input_format_preprocess_pass.h | 1 -
 .../graph/topological_sort_pass.cc | 3 +-
 .../graph/topological_sort_pass.h | 1 -
 .../graph/trans_format_remove_pass.cc | 4 +-
 .../graph/unused_node_remove_pass.cc | 1 -
 .../graph/unused_node_remove_pass.h | 1 -
 .../graph/weight_format_hardcode_pass.cc | 30 +--
 .../graph/weight_format_transform_pass.cc | 30 +--
 .../graph/weight_format_transform_pass.h | 4 +-
 mindspore/lite/tools/converter/main.cc | 1 -
 mindspore/lite/tools/converter/model_parser.h | 8 +-
 mindspore/lite/tools/converter/optimizer.cc | 1 -
 mindspore/lite/tools/converter/optimizer.h | 2 -
 .../parser/caffe/caffe_argmax_parser.cc | 7 +-
 .../parser/caffe/caffe_argmax_parser.h | 5 +-
 .../parser/caffe/caffe_batchnorm_parser.cc | 13 +-
 .../parser/caffe/caffe_batchnorm_parser.h | 5 +-
 .../parser/caffe/caffe_concat_parser.cc | 7 +-
 .../parser/caffe/caffe_concat_parser.h | 5 +-
 .../parser/caffe/caffe_conv_base_parser.cc | 19 +-
 .../parser/caffe/caffe_conv_base_parser.h | 19 +-
 .../converter/parser/caffe/caffe_converter.cc | 5 +-
 .../converter/parser/caffe/caffe_converter.h | 1 -
 .../parser/caffe/caffe_convolution_parser.cc | 15 +-
 .../parser/caffe/caffe_convolution_parser.h | 8 +-
 .../parser/caffe/caffe_crop_parser.cc | 7 +-
 .../parser/caffe/caffe_crop_parser.h | 5 +-
 .../caffe/caffe_deconvolution_parser.cc | 15 +-
 .../parser/caffe/caffe_deconvolution_parser.h | 8 +-
 .../parser/caffe/caffe_eltwise_parser.cc | 6 +-
 .../parser/caffe/caffe_eltwise_parser.h | 5 +-
 .../parser/caffe/caffe_exp_parser.cc | 7 +-
 .../converter/parser/caffe/caffe_exp_parser.h | 4 +-
 .../parser/caffe/caffe_flatten_parser.cc | 6 +-
 .../parser/caffe/caffe_flatten_parser.h | 4 +-
 .../parser/caffe/caffe_innerproduct_parser.cc | 7 +-
 .../parser/caffe/caffe_innerproduct_parser.h | 5 +-
 .../converter/parser/caffe/caffe_inspector.cc | 1 -
 .../converter/parser/caffe/caffe_inspector.h | 1 -
 .../parser/caffe/caffe_interp_parser.cc | 7 +-
 .../parser/caffe/caffe_interp_parser.h | 5 +-
 .../parser/caffe/caffe_model_parser.cc | 6 +-
 .../parser/caffe/caffe_model_parser.h | 6 +-
 .../parser/caffe/caffe_node_parser.cc | 11 +-
 .../parser/caffe/caffe_node_parser.h | 8 +-
 .../caffe/caffe_node_parser_registry.cc | 1 -
 .../parser/caffe/caffe_node_parser_registry.h | 1 -
 .../parser/caffe/caffe_permute_parser.cc | 7 +-
 .../parser/caffe/caffe_permute_parser.h | 5 +-
 .../parser/caffe/caffe_pooling_parser.cc | 21 +-
 .../parser/caffe/caffe_pooling_parser.h | 17 +-
 .../parser/caffe/caffe_power_parser.cc | 8 +-
 .../parser/caffe/caffe_power_parser.h | 9 +-
 .../parser/caffe/caffe_prelu_parser.cc | 7 +-
 .../parser/caffe/caffe_prelu_parser.h | 9 +-
 .../parser/caffe/caffe_relu6_parser.cc | 6 +-
 .../parser/caffe/caffe_relu6_parser.h | 4 +-
 .../parser/caffe/caffe_relu_parser.cc | 7 +-
 .../parser/caffe/caffe_relu_parser.h | 5 +-
 .../parser/caffe/caffe_reshape_parser.cc | 9 +-
 .../parser/caffe/caffe_reshape_parser.h | 5 +-
 .../parser/caffe/caffe_scale_parser.cc | 6 +-
 .../parser/caffe/caffe_scale_parser.h | 8 +-
 .../parser/caffe/caffe_sigmoid_parser.cc | 7 +-
 .../parser/caffe/caffe_sigmoid_parser.h | 5 +-
 .../parser/caffe/caffe_softmax_parser.cc | 7 +-
 .../parser/caffe/caffe_softmax_parser.h | 5 +-
 .../parser/caffe/caffe_tanh_parser.cc | 7 +-
 .../parser/caffe/caffe_tanh_parser.h | 4 +-
 .../parser/caffe/caffe_tile_parser.cc | 7 +-
 .../parser/caffe/caffe_tile_parser.h | 4 +-
 .../parser/onnx/onnx_argmax_parser.cc | 6 +-
 .../parser/onnx/onnx_argmax_parser.h | 5 +-
 .../onnx/onnx_arithmetic_operation_parser.h | 1 -
 .../parser/onnx/onnx_batchnorm_parser.cc | 3 +-
 .../parser/onnx/onnx_batchnorm_parser.h | 5 +-
 .../parser/onnx/onnx_biasadd_parser.cc | 4 +-
 .../parser/onnx/onnx_biasadd_parser.h | 5 +-
 .../converter/parser/onnx/onnx_cast_parser.cc | 5 +-
 .../converter/parser/onnx/onnx_cast_parser.h | 5 +-
 .../converter/parser/onnx/onnx_clip_parser.cc | 4 +-
 .../converter/parser/onnx/onnx_clip_parser.h | 5 +-
 .../parser/onnx/onnx_concat_parser.cc | 4 +-
 .../parser/onnx/onnx_concat_parser.h | 5 +-
 .../parser/onnx/onnx_constant_parser.cc | 4 +-
 .../parser/onnx/onnx_constant_parser.h | 5 +-
 .../converter/parser/onnx/onnx_conv_parser.cc | 11 +-
 .../converter/parser/onnx/onnx_conv_parser.h | 8 +-
 .../converter/parser/onnx/onnx_converter.cc | 5 +-
 .../converter/parser/onnx/onnx_converter.h | 1 -
 .../parser/onnx/onnx_deconv_parser.cc | 10 +-
 .../parser/onnx/onnx_deconv_parser.h | 8 +-
 .../parser/onnx/onnx_depth_to_space_parser.cc | 6 +-
 .../parser/onnx/onnx_depth_to_space_parser.h | 5 +-
 .../parser/onnx/onnx_dropout_parser.cc | 4 +-
 .../parser/onnx/onnx_dropout_parser.h | 5 +-
 .../converter/parser/onnx/onnx_elu_parser.cc | 7 +-
 .../converter/parser/onnx/onnx_elu_parser.h | 5 +-
 .../parser/onnx/onnx_expand_parser.h | 5 +-
 .../parser/onnx/onnx_flatten_parser.cc | 4 +-
 .../parser/onnx/onnx_flatten_parser.h | 5 +-
 .../parser/onnx/onnx_gather_parser.cc | 6 +-
 .../parser/onnx/onnx_gather_parser.h | 5 +-
 .../converter/parser/onnx/onnx_lrn_parser.cc | 8 +-
 .../converter/parser/onnx/onnx_lrn_parser.h | 5 +-
 .../parser/onnx/onnx_matmul_parser.cc | 3 +-
 .../parser/onnx/onnx_matmul_parser.h | 5 +-
 .../parser/onnx/onnx_model_parser.cc | 16 +-
 .../converter/parser/onnx/onnx_model_parser.h | 4 +-
 .../converter/parser/onnx/onnx_node_parser.cc | 5 +-
 .../converter/parser/onnx/onnx_node_parser.h | 9 +-
 .../parser/onnx/onnx_node_parser_registry.h | 4 +-
 .../converter/parser/onnx/onnx_pad_parser.cc | 9 +-
 .../converter/parser/onnx/onnx_pad_parser.h | 5 +-
 .../converter/parser/onnx/onnx_pool_parser.cc | 6 +-
 .../converter/parser/onnx/onnx_pool_parser.h | 5 +-
 .../parser/onnx/onnx_reduce_parser.cc | 4 +-
 .../parser/onnx/onnx_reduce_parser.h | 5 +-
 .../converter/parser/onnx/onnx_relu_parser.cc | 4 +-
 .../converter/parser/onnx/onnx_relu_parser.h | 9 +-
 .../parser/onnx/onnx_reshape_parser.cc | 5 +-
 .../parser/onnx/onnx_reshape_parser.h | 5 +-
 .../parser/onnx/onnx_shape_parser.cc | 4 +-
 .../converter/parser/onnx/onnx_shape_parser.h | 5 +-
 .../parser/onnx/onnx_sigmoid_parser.cc | 4 +-
 .../parser/onnx/onnx_sigmoid_parser.h | 5 +-
 .../parser/onnx/onnx_slice_parser.cc | 2 +-
 .../converter/parser/onnx/onnx_slice_parser.h | 5 +-
 .../parser/onnx/onnx_softmax_parser.cc | 6 +-
 .../parser/onnx/onnx_softmax_parser.h | 5 +-
 .../parser/onnx/onnx_space_to_depth_parser.cc | 3 +-
 .../parser/onnx/onnx_space_to_depth_parser.h | 5 +-
 .../parser/onnx/onnx_squeeze_parser.cc | 4 +-
 .../parser/onnx/onnx_squeeze_parser.h | 5 +-
 .../converter/parser/onnx/onnx_tile_parser.cc | 4 +-
 .../converter/parser/onnx/onnx_tile_parser.h | 5 +-
 .../parser/onnx/onnx_transpose_parser.cc | 4 +-
 .../parser/onnx/onnx_transpose_parser.h | 5 +-
 .../parser/onnx/onnx_unsample_parser.cc | 4 +-
 .../parser/onnx/onnx_unsample_parser.h | 5 +-
 .../parser/onnx/onnx_unsqueeze_parser.h | 5 +-
 .../parser/onnx/onnx_unuseful_node_parser.cc | 4 +-
 .../parser/onnx/onnx_unuseful_node_parser.h | 5 +-
 .../parser/tflite/tflite_activation_parser.cc | 26 +--
 .../parser/tflite/tflite_activation_parser.h | 13 +-
 .../parser/tflite/tflite_addn_parser.cc | 44 ++--
 .../parser/tflite/tflite_addn_parser.h | 36 ++-
 .../parser/tflite/tflite_argmax_parser.cc | 16 +-
 .../parser/tflite/tflite_argmax_parser.h | 8 +-
 .../parser/tflite/tflite_argmin_parser.cc | 17 +-
 .../parser/tflite/tflite_argmin_parser.h | 8 +-
 .../parser/tflite/tflite_arithmetic_parser.cc | 42 ++--
 .../parser/tflite/tflite_arithmetic_parser.h | 27 +--
 .../tflite/tflite_batch_to_space_parser.cc | 43 ++--
 .../tflite/tflite_batch_to_space_parser.h | 36 ++-
 .../tflite/tflite_broadcast_to_parser.cc | 44 ++--
 .../tflite/tflite_broadcast_to_parser.h | 36 ++-
 .../parser/tflite/tflite_cast_parser.cc | 46 ++--
 .../parser/tflite/tflite_cast_parser.h | 36 ++-
 .../parser/tflite/tflite_concat_parser.cc | 16 +-
 .../parser/tflite/tflite_concat_parser.h | 9 +-
 .../parser/tflite/tflite_conv_parser.cc | 30 ++-
 .../parser/tflite/tflite_conv_parser.h | 9 +-
 .../parser/tflite/tflite_converter.cc | 5 +-
 .../parser/tflite/tflite_converter.h | 1 -
 .../parser/tflite/tflite_custom_parser.cc | 2 +-
 .../parser/tflite/tflite_custom_parser.h | 8 +-
 .../parser/tflite/tflite_deconv_parser.cc | 25 +--
 .../parser/tflite/tflite_deconv_parser.h | 8 +-
 .../tflite/tflite_depth_to_space_parser.cc | 45 ++--
 .../tflite/tflite_depth_to_space_parser.h | 36 ++-
 .../tflite/tflite_depthwise_conv_parser.cc | 29 ++-
 .../tflite/tflite_depthwise_conv_parser.h | 9 +-
 .../parser/tflite/tflite_dequantize_parser.cc | 14 +-
 .../parser/tflite/tflite_dequantize_parser.h | 8 +-
 .../tflite/tflite_expand_dims_parser.cc | 8 +-
 .../parser/tflite/tflite_expand_dims_parser.h | 9 +-
 .../parser/tflite/tflite_fill_parser.cc | 14 +-
 .../parser/tflite/tflite_fill_parser.h | 9 +-
 .../tflite/tflite_fullyconnected_parser.cc | 24 +-
 .../tflite/tflite_fullyconnected_parser.h | 9 +-
 .../parser/tflite/tflite_gather_nd_parser.cc | 16 +-
 .../parser/tflite/tflite_gather_nd_parser.h | 9 +-
 .../parser/tflite/tflite_gather_parser.cc | 16 +-
 .../parser/tflite/tflite_gather_parser.h | 9 +-
 .../parser/tflite/tflite_l2norm_parser.cc | 52 +++--
 .../parser/tflite/tflite_l2norm_parser.h | 36 ++-
 .../parser/tflite/tflite_logical_parser.cc | 14 +-
 .../parser/tflite/tflite_logical_parser.h | 8 +-
 .../parser/tflite/tflite_lrn_parser.cc | 16 +-
 .../parser/tflite/tflite_lrn_parser.h | 9 +-
 .../parser/tflite/tflite_model_parser.cc | 9 +-
 .../parser/tflite/tflite_model_parser.h | 22 +-
 .../parser/tflite/tflite_node_parser.h | 37 +---
 .../tflite/tflite_node_parser_registry.cc | 1 -
 .../tflite/tflite_node_parser_registry.h | 1 -
 .../parser/tflite/tflite_one_hot_parser.cc | 14 +-
 .../parser/tflite/tflite_one_hot_parser.h | 8 +-
 .../parser/tflite/tflite_pad_parser.cc | 14 +-
 .../parser/tflite/tflite_pad_parser.h | 8 +-
 .../parser/tflite/tflite_pooling_parser.cc | 22 +-
 .../parser/tflite/tflite_pooling_parser.h | 9 +-
 .../parser/tflite/tflite_quantize_parser.cc | 18 +-
 .../parser/tflite/tflite_quantize_parser.h | 8 +-
 .../parser/tflite/tflite_range_parser.cc | 22 +-
 .../parser/tflite/tflite_range_parser.h | 9 +-
 .../parser/tflite/tflite_rank_parser.cc | 16 +-
 .../parser/tflite/tflite_rank_parser.h | 9 +-
 .../parser/tflite/tflite_reduce_parser.cc | 14 +-
 .../parser/tflite/tflite_reduce_parser.h | 8 +-
 .../parser/tflite/tflite_reshape_parser.cc | 16 +-
 .../parser/tflite/tflite_reshape_parser.h | 9 +-
 .../parser/tflite/tflite_resize_parser.cc | 23 +-
 .../parser/tflite/tflite_resize_parser.h | 9 +-
 .../parser/tflite/tflite_reverse_parser.cc | 16 +-
 .../parser/tflite/tflite_reverse_parser.h | 9 +-
 .../tflite/tflite_reverse_sequence_parser.cc | 47 ++--
 .../tflite/tflite_reverse_sequence_parser.h | 36 ++-
 .../parser/tflite/tflite_scatter_nd_parser.cc | 22 +-
 .../parser/tflite/tflite_scatter_nd_parser.h | 8 +-
 .../parser/tflite/tflite_shape_parser.cc | 14 +-
 .../parser/tflite/tflite_shape_parser.h | 8 +-
 .../parser/tflite/tflite_slice_parser.cc | 17 +-
 .../parser/tflite/tflite_slice_parser.h | 9 +-
 .../parser/tflite/tflite_softmax_parser.cc | 14 +-
 .../parser/tflite/tflite_softmax_parser.h | 9 +-
 .../tflite/tflite_space_to_batch_nd_parser.cc | 43 ++--
 .../tflite/tflite_space_to_batch_nd_parser.h | 36 ++-
 .../tflite/tflite_space_to_depth_parser.cc | 45 ++--
 .../tflite/tflite_space_to_depth_parser.h | 36 ++-
 .../tflite/tflite_sparse_to_dense_parser.cc | 56 +++--
 .../tflite/tflite_sparse_to_dense_parser.h | 36 ++-
 .../parser/tflite/tflite_split_parser.cc | 14 +-
 .../parser/tflite/tflite_split_parser.h | 8 +-
 .../parser/tflite/tflite_split_v_parser.cc | 14 +-
 .../parser/tflite/tflite_split_v_parser.h | 8 +-
 .../parser/tflite/tflite_squeeze_parser.cc | 14 +-
 .../parser/tflite/tflite_squeeze_parser.h | 8 +-
 .../parser/tflite/tflite_stack_parser.cc | 14 +-
 .../parser/tflite/tflite_stack_parser.h | 9 +-
 .../tflite/tflite_strided_slice_parser.cc | 13 +-
 .../tflite/tflite_strided_slice_parser.h | 10 +-
 .../parser/tflite/tflite_tile_parser.cc | 44 ++--
 .../parser/tflite/tflite_tile_parser.h | 36 ++-
 .../parser/tflite/tflite_topk_v2_parser.cc | 44 ++--
 .../parser/tflite/tflite_topk_v2_parser.h | 36 ++-
 .../parser/tflite/tflite_transpose_parser.cc | 18 +-
 .../parser/tflite/tflite_transpose_parser.h | 9 +-
 .../parser/tflite/tflite_unique_parser.cc | 44 ++--
 .../parser/tflite/tflite_unique_parser.h | 36 ++-
 .../parser/tflite/tflite_unstack_parser.cc | 44 ++--
 .../parser/tflite/tflite_unstack_parser.h | 36 ++-
 .../converter/parser/tflite/tflite_util.h | 14 +-
 .../parser/tflite/tflite_where_parser.cc | 44 ++--
 .../parser/tflite/tflite_where_parser.h | 36 ++-
 .../parser/tflite/tflite_zeros_like_parser.cc | 44 ++--
 .../parser/tflite/tflite_zeros_like_parser.h | 36 ++-
 .../converter/quantizer/aware_quantizer.cc | 3 +-
 .../converter/quantizer/aware_quantizer.h | 3 +-
 .../converter/quantizer/calc_quant_param.cc | 3 +-
 .../converter/quantizer/general_bitpacking.cc | 103 +++++----
 .../quantizer/post_training_quantizer.cc | 14 +-
 .../tools/converter/quantizer/quant_cast.cc | 9 +-
 .../tools/converter/quantizer/quantizer.h | 3 +-
 .../converter/quantizer/weight_quantizer.cc | 13 +-
 .../converter/quantizer/weight_quantizer.h | 5 +-
 .../optimizer/common/node_pass_extends.cc | 1 -
 .../fusion/constant_folding_fusion.cc | 21 +-
 .../fusion/constant_folding_fusion.h | 2 +-
 .../optimizer/fusion/conv_biasadd_fusion.h | 1 -
 .../fusion/pooling_activation_fusion.h | 2 +-
 .../lite/tools/time_profile/time_profile.cc | 4 +-
 1035 files changed, 6295 insertions(+), 7046 deletions(-)
 create mode 100644 mindspore/lite/internal/CMakeLists.txt
 create mode 100644 mindspore/lite/internal/include/context.h
 create mode 100644 mindspore/lite/internal/include/errorcode.h
 create mode 100644 mindspore/lite/internal/include/lite_session.h
 rename mindspore/lite/{src/common/ms_tensor_utils.h => internal/include/lite_utils.h} (57%)
 create mode 100644 mindspore/lite/internal/include/model.h
 create mode 100644 mindspore/lite/internal/include/ms_tensor.h
 create mode 100644 mindspore/lite/internal/src/lite_session.cc
 create mode 100644 mindspore/lite/internal/src/ms_tensor.cc
 mode change 100755 => 100644 mindspore/lite/src/common/common.h
 delete mode 100644 mindspore/lite/src/common/ms_tensor_utils.cc
 mode change 100755 => 100644 mindspore/lite/src/common/op_utils.h
 delete mode 100644 mindspore/lite/src/context.cc
 delete mode 100644 mindspore/lite/src/ir/meta_tensor_extends.cc
 rename mindspore/lite/src/{ir => }/tensor.cc (54%)
 rename mindspore/lite/src/{ir => }/tensor.h (64%)
 create mode 100644 mindspore/lite/test/ut/internal/infer_test.cc
 mode change 100755 => 100644 mindspore/lite/tools/common/flag_parser.cc
 mode change 100755 => 100644 mindspore/lite/tools/converter/parser/onnx/onnx_converter.cc
 mode change 100755 => 100644 mindspore/lite/tools/converter/parser/onnx/onnx_converter.h

diff --git a/mindspore/lite/CMakeLists.txt b/mindspore/lite/CMakeLists.txt
index 351cf6f6ae..00c0741633 100644
--- a/mindspore/lite/CMakeLists.txt
+++ b/mindspore/lite/CMakeLists.txt
@@ -53,6 +53,7 @@ set(CORE_DIR ${TOP_DIR}/mindspore/core)
 set(CCSRC_DIR ${TOP_DIR}/mindspore/ccsrc)
 include_directories(${TOP_DIR})
 include_directories(${CORE_DIR})
+include_directories(${CORE_DIR}/ir)
 include_directories(${CCSRC_DIR})
 include_directories(${CMAKE_CURRENT_SOURCE_DIR})
 include_directories(${CMAKE_CURRENT_SOURCE_DIR}/src/runtime/kernel/arm)
@@ -96,7 +97,6 @@ else ()
         set(CMAKE_SHARED_LINKER_FLAGS "-Wl,-z,relro,-z,now -Wl,-z,noexecstack ${CMAKE_SHARED_LINKER_FLAGS}")
         set(CMAKE_EXE_LINKER_FLAGS "-Wl,-z,relro,-z,now -Wl,-z,noexecstack ${CMAKE_EXE_LINKER_FLAGS}")
     endif()
-    string(REPLACE " -g " " " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
 endif ()
@@ -128,12 +128,6 @@ if (WIN32)
     add_compile_definitions(BUILDING_DLL)
 endif ()
-set(CORE_SRC
-        ${CORE_DIR}/ir/meta_tensor.cc
-        ${CORE_DIR}/gvar/logging_level.cc
-        ${CORE_DIR}/gvar/typeid_manager.cc
-        ${CORE_DIR}/base/base.cc
-        )
 if (BUILD_CONVERTER)
     if (PLATFORM_ARM64 OR PLATFORM_ARM32)
         MESSAGE(FATAL_ERROR "Cannot build converter in arm platform")
@@ -224,6 +218,7 @@ endif ()
 if (BUILD_DEVICE)
     add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/src)
+    add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/internal)
     add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/nnacl)
     if (NOT WIN32)
         add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/tools/benchmark)
diff --git a/mindspore/lite/include/context.h b/mindspore/lite/include/context.h
index 233c2441ad..8d3d289b6a 100644
--- a/mindspore/lite/include/context.h
+++ b/mindspore/lite/include/context.h
@@ -41,32 +41,10 @@ typedef enum {
   DT_NPU /**< NPU device type, not supported yet */
 } DeviceType;
 
-/// \brief DeviceContext defined for holding DeviceType.
-typedef struct {
-  DeviceType type; /**< device type */
-} DeviceContext;
-
 /// \brief Context defined for holding environment variables during runtime.
-class MS_API Context {
- public:
-  /// \brief Constructor of MindSpore Lite Context using default value for parameters.
-  ///
-  /// \return Instance of MindSpore Lite Context.
-  Context();
-
-  /// \brief Constructor of MindSpore Lite Context using input value for parameters.
-  ///
-  /// \param[in] thread_num Define the work thread number during the runtime.
-  /// \param[in] allocator Define the allocator for malloc.
-  /// \param[in] device_ctx Define device information during the runtime.
-  Context(int thread_num, std::shared_ptr<Allocator> allocator, DeviceContext device_ctx);
-
-  /// \brief Destructor of MindSpore Lite Context.
-  virtual ~Context();
-
- public:
+struct Context {
   bool float16_priority = false; /**< prior enable float16 inference */
-  DeviceContext device_ctx_{DT_CPU};
+  DeviceType device_type_ = DT_CPU;
   int thread_num_ = 2; /**< thread number config for thread pool */
   std::shared_ptr<Allocator> allocator = nullptr;
   CpuBindMode cpu_bind_mode_ = MID_CPU;
diff --git a/mindspore/lite/include/errorcode.h b/mindspore/lite/include/errorcode.h
index b04c6dda3e..65519b6f73 100644
--- a/mindspore/lite/include/errorcode.h
+++ b/mindspore/lite/include/errorcode.h
@@ -51,7 +51,7 @@ constexpr int RET_OP_EXECUTE_FAILURE = -304; /**< Failed to execution operator. */
 constexpr int RET_FORMAT_ERR = -401; /**< Failed to checking tensor format. */
 
 /* InferShape error code, range: [-501,-600] */
-constexpr int RET_INFER_ERR = -501; /**< Failed to infer shape. */
+constexpr int RET_INFER_ERR = -501;     /**< Failed to infer shape. */
 constexpr int RET_INFER_INVALID = -502; /**< Invalid infer shape before runtime. */
 }  // namespace lite
 }  // namespace mindspore
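With this change, Context loses its constructors and becomes a plain aggregate, so call sites fill the fields directly. A minimal sketch of the adjusted caller (it assumes the existing LiteSession::CreateSession(lite::Context *) factory, which this patch does not touch, and CPU inference; names are taken from the headers above):

    #include "include/context.h"
    #include "include/lite_session.h"

    // Context is a plain struct now; there is no
    // Context(thread_num, allocator, device_ctx) constructor anymore.
    mindspore::lite::Context ctx;
    ctx.device_type_ = mindspore::lite::DT_CPU;  // replaces the removed DeviceContext member device_ctx_
    ctx.thread_num_ = 4;
    ctx.float16_priority = false;
    auto *session = mindspore::session::LiteSession::CreateSession(&ctx);
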
diff --git a/mindspore/lite/include/lite_session.h b/mindspore/lite/include/lite_session.h
index 7b26f3b524..8bed3996a4 100644
--- a/mindspore/lite/include/lite_session.h
+++ b/mindspore/lite/include/lite_session.h
@@ -86,22 +86,18 @@
   /// \return STATUS as an error code of running graph, STATUS is defined in errorcode.h.
   virtual int RunGraph(const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr) = 0;
 
-  /// \brief Get output MindSpore Lite MSTensors of model mapped by node name.
-  ///
-  /// \return The map of output node name and MindSpore Lite MSTensor.
-  virtual std::unordered_map<std::string, std::vector<mindspore::tensor::MSTensor *>> GetOutputMapByNode() const = 0;
-
   /// \brief Get output MindSpore Lite MSTensors of model by node name.
   ///
   /// \param[in] node_name Define node name.
   ///
   /// \return The vector of MindSpore Lite MSTensor.
+  /// deprecated, replace with GetOutputByTensorName
   virtual std::vector<tensor::MSTensor *> GetOutputsByNodeName(const std::string &node_name) const = 0;
 
   /// \brief Get output MindSpore Lite MSTensors of model mapped by tensor name.
   ///
   /// \return The map of output tensor name and MindSpore Lite MSTensor.
-  virtual std::unordered_map<std::string, mindspore::tensor::MSTensor *> GetOutputMapByTensor() const = 0;
+  virtual std::unordered_map<std::string, mindspore::tensor::MSTensor *> GetOutputs() const = 0;
 
   /// \brief Get name of output tensors of model compiled by this session.
   ///
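The renames above change how callers enumerate model outputs: GetOutputMapByNode() is removed, GetOutputsByNodeName() survives only as a deprecated compatibility path, and GetOutputMapByTensor() becomes GetOutputs(), keyed by tensor name. A sketch of the new lookup (assuming a session whose graph has been compiled and run; the float cast is illustrative and only valid for float32 outputs):

    // GetOutputs() returns std::unordered_map<std::string, mindspore::tensor::MSTensor *>
    auto outputs = session->GetOutputs();
    for (auto &kv : outputs) {
      mindspore::tensor::MSTensor *out = kv.second;
      auto *data = reinterpret_cast<float *>(out->MutableData());
      // consume out->ElementsNum() values for the output tensor named kv.first
    }
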
diff --git a/mindspore/lite/include/train_session.h b/mindspore/lite/include/train_session.h
index 4bde58dfcf..016cd41e84 100644
--- a/mindspore/lite/include/train_session.h
+++ b/mindspore/lite/include/train_session.h
@@ -25,9 +25,7 @@ namespace mindspore {
 namespace lite {
 struct Model;
 }
-namespace lite::tensor {
-class Tensor;
-}
+
 namespace session {

 class TrainSession : public lite::LiteSession {
@@ -40,9 +38,10 @@
   int CompileGraph(lite::Model *model) override;
   virtual void ReplaceOps();
-  virtual void* ExportToBuf(void* buf, size_t* len) const;
+  virtual void *ExportToBuf(void *buf, size_t *len) const;

-  std::unordered_map<std::string, std::vector<tensor::MSTensor *>> GetOutputs() const;
+  // TODO: return output tensors mapped by tensor name
+  std::unordered_map<std::string, std::vector<tensor::MSTensor *>> GetOutputMap() const;
   std::vector<tensor::MSTensor *> GetOutputsByName(const std::string &node_name) const;

   virtual void train();
@@ -51,11 +50,10 @@
   bool is_eval() { return train_mode_ == false; }

  protected:
-  bool train_mode_ = false;
-  lite::Model* model_ = nullptr;
+  bool train_mode_ = false;
+  lite::Model *model_ = nullptr;
   std::unordered_map<std::string, std::vector<tensor::MSTensor *>> ext_output_map_;

-  // private:
 };
 }  // namespace session
diff --git a/mindspore/lite/internal/CMakeLists.txt b/mindspore/lite/internal/CMakeLists.txt
new file mode 100644
index 0000000000..1f55bb53a5
--- /dev/null
+++ b/mindspore/lite/internal/CMakeLists.txt
@@ -0,0 +1,31 @@
+cmake_minimum_required(VERSION 3.14)
+project(Lite_Internal)
+set(TOP_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../)
+
+include_directories(${TOP_DIR})
+
+file(GLOB_RECURSE C_SRC ${CMAKE_CURRENT_SOURCE_DIR}/*.cc)
+file(GLOB KERNEL_SRC
+     ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/*.c
+     ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/fp32/*.c
+     ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/int8/*.c
+     ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/quantization/*.c
+     )
+list(REMOVE_ITEM KERNEL_SRC ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/opt_op_handler.c)
+
+set(CCSRC
+    ${TOP_DIR}/src/common/log_adapter.cc
+    ${TOP_DIR}/src/runtime/allocator.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/../../core/gvar/logging_level.cc
+    )
+
+if (PLATFORM_ARM64)
+    # assembly
+    file(GLOB ASSEMBLY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/assembly/arm64/*.s
+         ${CMAKE_CURRENT_SOURCE_DIR}/../nnacl/assembly/arm64/*.S)
+    set_property(SOURCE ${ASSEMBLY_SRC} PROPERTY LANGUAGE C)
+    set(KERNEL_SRC ${KERNEL_SRC} ${ASSEMBLY_SRC})
+    add_library(mslite_internal SHARED ${C_SRC} ${CCSRC} ${KERNEL_SRC})
+    target_link_libraries(mslite_internal log)
+endif()
+
diff --git a/mindspore/lite/internal/include/context.h b/mindspore/lite/internal/include/context.h
new file mode 100644
index 0000000000..df6b51c7bd
--- /dev/null
+++ b/mindspore/lite/internal/include/context.h
@@ -0,0 +1,40 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_CONTEXT_H_
+#define MINDSPORE_LITE_INTERNAL_INCLUDE_CONTEXT_H_
+
+/// \brief CpuBindMode defined for holding the bind-CPU strategy argument.
+typedef enum {
+  MID_CPU = -1,   /**< bind middle cpu first */
+  HIGHER_CPU = 1, /**< bind higher cpu first */
+  NO_BIND = 0     /**< no bind */
+} CpuBindMode;
+
+/// \brief DeviceType defined for holding user's preferred backend.
+typedef enum {
+  DT_CPU, /**< CPU device type */
+  DT_GPU, /**< GPU device type */
+  DT_NPU  /**< NPU device type, not supported yet */
+} DeviceType;
+
+/// \brief Context defined for holding environment variables during runtime.
+typedef struct {
+  bool float16_priority = false; /**< prefer enabling float16 inference */
+  DeviceType device_type_ = DT_CPU;
+  int thread_num_ = 2; /**< thread number config for thread pool */
+} Context;
+#endif  // MINDSPORE_LITE_INTERNAL_INCLUDE_CONTEXT_H_
diff --git a/mindspore/lite/internal/include/errorcode.h b/mindspore/lite/internal/include/errorcode.h
new file mode 100644
index 0000000000..6d0dc0969e
--- /dev/null
+++ b/mindspore/lite/internal/include/errorcode.h
@@ -0,0 +1,55 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_ERRORCODE_H_
+#define MINDSPORE_LITE_INTERNAL_INCLUDE_ERRORCODE_H_
+
+/// \brief STATUS defined for holding error code in MindSpore Lite.
+using STATUS = int;
+
+/* Success */
+constexpr int RET_OK = 0; /**< No error occurs. */
+
+/* Common error code, range: [-1, -100] */
+constexpr int RET_ERROR = -1;         /**< Common error code. */
+constexpr int RET_NULL_PTR = -2;      /**< NULL pointer returned. */
+constexpr int RET_PARAM_INVALID = -3; /**< Invalid parameter. */
+constexpr int RET_NO_CHANGE = -4;     /**< No change. */
+constexpr int RET_SUCCESS_EXIT = -5;  /**< No error but exit. */
+constexpr int RET_MEMORY_FAILED = -6; /**< Failed to allocate memory. */
+
+/* Executor error code, range: [-101,-200] */
+constexpr int RET_OUT_OF_TENSOR_RANGE = -101; /**< Failed to check range. */
+constexpr int RET_INPUT_TENSOR_ERROR = -102;  /**< Failed to check input tensor. */
+constexpr int RET_REENTRANT_ERROR = -103;     /**< Another executor is already running. */
+
+/* Graph error code, range: [-201,-300] */
+constexpr int RET_GRAPH_FILE_ERR = -201; /**< Failed to verify graph file. */
+
+/* Node error code, range: [-301,-400] */
+constexpr int RET_NOT_FIND_OP = -301;        /**< Failed to find operator. */
+constexpr int RET_INVALID_OP_NAME = -302;    /**< Invalid operator name. */
+constexpr int RET_INVALID_OP_ATTR = -303;    /**< Invalid operator attr. */
+constexpr int RET_OP_EXECUTE_FAILURE = -304; /**< Failed to execute operator. */
+
+/* Tensor error code, range: [-401,-500] */
+constexpr int RET_FORMAT_ERR = -401; /**< Failed to check tensor format. */
+
+/* InferShape error code, range: [-501,-600] */
+constexpr int RET_INFER_ERR = -501;     /**< Failed to infer shape. */
+constexpr int RET_INFER_INVALID = -502; /**< Invalid infer shape before runtime. */
+
+#endif  // MINDSPORE_LITE_INTERNAL_INCLUDE_ERRORCODE_H_
diff --git a/mindspore/lite/internal/include/lite_session.h b/mindspore/lite/internal/include/lite_session.h
new file mode 100644
index 0000000000..7d264e27fe
--- /dev/null
+++ b/mindspore/lite/internal/include/lite_session.h
@@ -0,0 +1,90 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_LITE_SESSION_H
+#define MINDSPORE_LITE_INTERNAL_INCLUDE_LITE_SESSION_H
+
+#include "internal/include/ms_tensor.h"
+#include "internal/include/model.h"
+#include "internal/include/context.h"
+#include "internal/include/lite_utils.h"
+
+/// \brief LiteSession defines a session in MindSpore Lite for compiling a Model and running the compiled graph.
+typedef struct LiteSession {
+  /// \brief Static method to create a LiteSession pointer.
+  ///
+  /// \param[in] context Define the context of session to be created.
+  ///
+  /// \return Pointer of MindSpore Lite LiteSession.
+  static LiteSession *CreateSession(Context *context);
+
+  /// \brief Compile MindSpore Lite model.
+  ///
+  /// \note CompileGraph should be called before RunGraph.
+  ///
+  /// \param[in] model Define the model to be compiled.
+  ///
+  /// \return STATUS as an error code of compiling graph, STATUS is defined in errorcode.h.
+  int CompileGraph(Model *model);
+
+  /// \brief Get input MindSpore Lite MSTensors of model.
+  ///
+  /// \return The vector of MindSpore Lite MSTensor.
+  TensorPtrVector GetInputs() const;
+
+  /// \brief Get input MindSpore Lite MSTensors of model by node name.
+  ///
+  /// \param[in] node_name Define node name.
+  ///
+  /// \return The vector of MindSpore Lite MSTensor.
+  TensorPtrVector GetInputsByName(const String &node_name) const;
+
+  /// \brief Get output MindSpore Lite MSTensors of model by node name.
+  ///
+  /// \param[in] node_name Define node name.
+  ///
+  /// \return The vector of MindSpore Lite MSTensor.
+  TensorPtrVector GetOutputsByNodeName(const String &node_name) const;
+
+  /// \brief Get all output MindSpore Lite MSTensors of model.
+  ///
+  /// \return The vector of MindSpore Lite MSTensor.
+  TensorPtrVector GetOutputs() const;
+
+  /// \brief Get name of output tensors of model compiled by this session.
+  ///
+  /// \return The vector of string as output tensor names in order.
+  StringVector GetOutputTensorNames() const;
+
+  /// \brief Get output MindSpore Lite MSTensors of model by tensor name.
+  ///
+  /// \param[in] tensor_name Define tensor name.
+  ///
+  /// \return Pointer of MindSpore Lite MSTensor.
+  MSTensor *GetOutputByTensorName(const String &tensor_name) const;
+
+  /// \brief Run the compiled graph.
+  ///
+  /// \note RunGraph should be called after CompileGraph.
+  ///
+  /// \return STATUS as an error code of running graph, STATUS is defined in errorcode.h.
+  int RunGraph();
+
+  /// \brief Resize inputs shape.
+  ///
+  /// \param[in] inputs Define the new inputs shape.
+  ///
+  /// \return STATUS as an error code of resize inputs, STATUS is defined in errorcode.h.
+  int Resize(const TensorPtrVector &inputs);
+} LiteSession;
+
+#endif  // MINDSPORE_LITE_INTERNAL_INCLUDE_LITE_SESSION_H
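Together with Model (declared in model.h below), the intended calling sequence of this internal API mirrors the public session: import, compile, fill inputs, run. A minimal sketch (hypothetical; error handling trimmed, and RunGraph() is still a stub at this point in the patch):

    #include "internal/include/context.h"
    #include "internal/include/errorcode.h"
    #include "internal/include/lite_session.h"
    #include "internal/include/model.h"

    int RunOnce(const char *model_buf, size_t size) {
      Context ctx;  // plain struct: CPU backend, two threads by default
      LiteSession *session = LiteSession::CreateSession(&ctx);
      Model *model = Model::Import(model_buf, size);
      if (session == NULL || model == NULL) {
        return RET_NULL_PTR;
      }
      int ret = session->CompileGraph(model);  // also allocates the graph input buffers
      if (ret != RET_OK) {
        return ret;
      }
      // ... copy user data into session->GetInputs() tensors here ...
      return session->RunGraph();
    }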
diff --git a/mindspore/lite/src/common/ms_tensor_utils.h b/mindspore/lite/internal/include/lite_utils.h
similarity index 57%
rename from mindspore/lite/src/common/ms_tensor_utils.h
rename to mindspore/lite/internal/include/lite_utils.h
index fc68d0e951..663fd2bb5a 100644
--- a/mindspore/lite/src/common/ms_tensor_utils.h
+++ b/mindspore/lite/internal/include/lite_utils.h
@@ -14,17 +14,18 @@
  * limitations under the License.
  */

-#ifndef LITE_MS_TENSOR_UTILS_H
-#define LITE_MS_TENSOR_UTILS_H
-
+#ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_LITE_UTILS_H_
+#define MINDSPORE_LITE_INTERNAL_INCLUDE_LITE_UTILS_H_
 #include <vector>
-#include "include/ms_tensor.h"
-#include "src/ir/tensor.h"
+#include <string>

-namespace mindspore {
-namespace tensor {
-std::vector<MSTensor *> PackToMSTensors(const std::vector<lite::tensor::Tensor *> &in_tensors);
-}
-}  // namespace mindspore
+struct MSTensor;
+struct Node;
+using TensorPtrVector = std::vector<MSTensor *>;
+using Uint32Vector = std::vector<uint32_t>;
+using String = std::string;
+using StringVector = std::vector<std::string>;
+using ShapeVector = std::vector<int>;
+using NodePtrVector = std::vector<Node *>;

-#endif  // LITE_MS_TENSOR_UTILS_H
+#endif  // MINDSPORE_LITE_INTERNAL_INCLUDE_LITE_UTILS_H_
diff --git a/mindspore/lite/internal/include/model.h b/mindspore/lite/internal/include/model.h
new file mode 100644
index 0000000000..eb1b791d36
--- /dev/null
+++ b/mindspore/lite/internal/include/model.h
@@ -0,0 +1,59 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_MODEL_H
+#define MINDSPORE_LITE_INTERNAL_INCLUDE_MODEL_H
+#include "internal/include/lite_utils.h"
+#include "nnacl/op_base.h"
+
+using PrimitiveC = OpParameter;
+enum NodeType {
+  NodeType_ValueNode = 0,
+  NodeType_Parameter = 1,
+  NodeType_CNode = 2,
+  NodeType_MIN = NodeType_ValueNode,
+  NodeType_MAX = NodeType_CNode
+};
+
+typedef struct Node {
+  String name_;
+  NodeType node_type_;
+  PrimitiveC *primitive_;
+  Uint32Vector input_indices_;
+  Uint32Vector output_indices_;
+} Node;
+
+typedef struct Model {
+  String name_;
+  String version_;
+  TensorPtrVector all_tensors_;
+  Uint32Vector input_indices_;
+  Uint32Vector output_indices_;
+  NodePtrVector nodes_;
+  char *buf;
+
+  /// \brief Static method to create a Model pointer.
+  ///
+  /// \param[in] model_buf Define the buffer read from a model file.
+  /// \param[in] size Define bytes number of model buffer.
+  ///
+  /// \return Pointer of MindSpore Lite Model.
+  static Model *Import(const char *model_buf, size_t size);
+
+  /// \brief Free all temporary buffers.
+  void Free();
+} Model;
+
+#endif  // MINDSPORE_LITE_INTERNAL_INCLUDE_MODEL_H
diff --git a/mindspore/lite/internal/include/ms_tensor.h b/mindspore/lite/internal/include/ms_tensor.h
new file mode 100644
index 0000000000..2a1612ff2e
--- /dev/null
+++ b/mindspore/lite/internal/include/ms_tensor.h
@@ -0,0 +1,142 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_MS_TENSOR_H_
+#define MINDSPORE_LITE_INTERNAL_INCLUDE_MS_TENSOR_H_
+
+#include "internal/include/lite_utils.h"
+
+enum TypeId : int {
+  kTypeUnknown = 0,
+  kMetaTypeBegin = kTypeUnknown,
+  kMetaTypeType,  // Type
+  kMetaTypeAnything,
+  kMetaTypeObject,
+  kMetaTypeTypeType,  // TypeType
+  kMetaTypeProblem,
+  kMetaTypeExternal,
+  kMetaTypeNone,
+  kMetaTypeNull,
+  kMetaTypeEllipsis,
+  kMetaTypeEnd,
+  //
+  // Object types
+  //
+  kObjectTypeBegin = kMetaTypeEnd,
+  kObjectTypeNumber,
+  kObjectTypeString,
+  kObjectTypeList,
+  kObjectTypeTuple,
+  kObjectTypeSlice,
+  kObjectTypeKeyword,
+  kObjectTypeTensorType,
+  kObjectTypeRowTensorType,
+  kObjectTypeSparseTensorType,
+  kObjectTypeUndeterminedType,
+  kObjectTypeClass,
+  kObjectTypeDictionary,
+  kObjectTypeFunction,
+  kObjectTypeJTagged,
+  kObjectTypeSymbolicKeyType,
+  kObjectTypeEnvType,
+  kObjectTypeRefKey,
+  kObjectTypeRef,
+  kObjectTypeEnd,
+  //
+  // Number Types
+  //
+  kNumberTypeBegin = kObjectTypeEnd,
+  kNumberTypeBool,
+  kNumberTypeInt,
+  kNumberTypeInt8,
+  kNumberTypeInt16,
+  kNumberTypeInt32,
+  kNumberTypeInt64,
+  kNumberTypeUInt,
+  kNumberTypeUInt8,
+  kNumberTypeUInt16,
+  kNumberTypeUInt32,
+  kNumberTypeUInt64,
+  kNumberTypeFloat,
+  kNumberTypeFloat16,
+  kNumberTypeFloat32,
+  kNumberTypeFloat64,
+  kNumberTypeEnd
+};
+
+enum Format {
+  Format_NCHW = 0,
+  Format_NHWC = 1,
+  Format_NHWC4 = 2,
+  Format_HWKC = 3,
+  Format_HWCK = 4,
+  Format_KCHW = 5,
+  Format_CKHW = 6,
+  Format_KHWC = 7,
+  Format_CHWK = 8,
+  Format_HW = 9,
+  Format_HW4 = 10,
+  Format_NC = 11,
+  Format_NC4 = 12,
+  Format_NC4HW4 = 100,
+  Format_NUM_OF_FORMAT = 101,
+  Format_MIN = Format_NCHW,
+  Format_MAX = Format_NUM_OF_FORMAT
+};
+
+typedef struct MSTensor {
+  enum Category {
+    CONST,  // weight tensor
+    VAR     // activation tensor
+  };
+  void *data_ = NULL;
+  void *device_data_ = NULL;
+  TypeId data_type_;
+  Format format_ = Format_NHWC;
+  Category category_ = VAR;
+  ShapeVector shape_ = {};
+  size_t refCount = 0;
+
+  int32_t Batch() const;
+
+  int32_t Channel() const;
+
+  int32_t Height() const;
+
+  int32_t Width() const;
+
+  /// \brief Get size of the dimension of the MindSpore Lite MSTensor indexed by the parameter index.
+  ///
+  /// \param[in] index Define index of dimension returned.
+  ///
+  /// \return Size of dimension of the MindSpore Lite MSTensor.
+  int DimensionSize(size_t index) const;
+
+  /// \brief Get number of elements in MSTensor.
+  ///
+  /// \return Number of elements in MSTensor.
+  int ElementsNum() const;
+
+  int ElementsC4Num() const;
+
+  /// \brief Get byte size of data in MSTensor.
+  ///
+  /// \return Byte size of data in MSTensor.
+  size_t Size() const;
+} MSTensor;
+
+MSTensor *CreateTensor(TypeId data_type, const ShapeVector &shape);
+#endif  // MINDSPORE_LITE_INTERNAL_INCLUDE_MS_TENSOR_H_
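Because the struct exposes its fields directly, Size() is driven by data_type_ and format_: the NHWC4/NC4HW4 layouts are padded to a multiple of four channels via ElementsC4Num(), as the implementation below shows. A small worked sketch (hypothetical values; CreateTensor() allocates only the descriptor, never the data buffer):

    #include "internal/include/ms_tensor.h"

    void TensorSizeExample() {
      // A 1x24x24x3 float32 tensor in the default NHWC layout.
      MSTensor *t = CreateTensor(kNumberTypeFloat32, {1, 24, 24, 3});
      // ElementsNum() = 1*24*24*3 = 1728, so Size() = 1728 * sizeof(float) = 6912 bytes.
      t->format_ = Format_NHWC4;
      // Now Size() uses ElementsC4Num(): 1*24*24*((3+3)/4*4) = 2304 elements = 9216 bytes.
      // t->data_ stays NULL until the caller attaches a buffer it owns.
      delete t;  // CreateTensor() news the descriptor; the caller releases it
    }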
diff --git a/mindspore/lite/internal/src/lite_session.cc b/mindspore/lite/internal/src/lite_session.cc
new file mode 100644
index 0000000000..7c8b928040
--- /dev/null
+++ b/mindspore/lite/internal/src/lite_session.cc
@@ -0,0 +1,68 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "internal/include/lite_session.h"
+#include "internal/include/model.h"
+#include "internal/include/ms_tensor.h"
+#include "src/runtime/allocator.h"
+
+static Context *g_Ctx;
+static Model *g_Model;
+static LiteSession g_Session;
+static mindspore::lite::DefaultAllocator allocator;
+
+LiteSession *LiteSession::CreateSession(Context *context) {
+  g_Ctx = context;
+  return &g_Session;
+}
+
+int LiteSession::CompileGraph(Model *model) {
+  g_Model = model;
+  for (auto in : g_Model->input_indices_) {
+    g_Model->all_tensors_[in]->data_ = allocator.Malloc(g_Model->all_tensors_[in]->Size());
+  }
+  return 0;
+}
+
+TensorPtrVector LiteSession::GetInputs() const {
+  TensorPtrVector in;
+  in.reserve(g_Model->input_indices_.size());
+  for (auto index : g_Model->input_indices_) {
+    in.emplace_back(g_Model->all_tensors_[index]);
+  }
+  return in;
+}
+
+TensorPtrVector LiteSession::GetInputsByName(const String &node_name) const { return TensorPtrVector(); }
+
+TensorPtrVector LiteSession::GetOutputsByNodeName(const String &node_name) const { return TensorPtrVector(); }
+
+TensorPtrVector LiteSession::GetOutputs() const {
+  TensorPtrVector out;
+  out.reserve(g_Model->output_indices_.size());
+  for (auto index : g_Model->output_indices_) {
+    out.emplace_back(g_Model->all_tensors_[index]);
+  }
+  return out;
+}
+
+int LiteSession::RunGraph() {
+  // TODO: invoke the corresponding nnacl kernels
+  return 0;
+}
+
+StringVector LiteSession::GetOutputTensorNames() const { return StringVector(); }
+
+MSTensor *LiteSession::GetOutputByTensorName(const String &tensor_name) const { return NULL; }
+
+int LiteSession::Resize(const TensorPtrVector &inputs) { return 0; }
diff --git a/mindspore/lite/internal/src/ms_tensor.cc b/mindspore/lite/internal/src/ms_tensor.cc
new file mode 100644
index 0000000000..cdff8119f5
--- /dev/null
+++ b/mindspore/lite/internal/src/ms_tensor.cc
@@ -0,0 +1,194 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include +#include +#include +#include "internal/include/ms_tensor.h" +MSTensor *CreateTensor(TypeId data_type, const ShapeVector &shape) { + MSTensor *tensor = new MSTensor(); + tensor->shape_ = shape; + tensor->data_type_ = data_type; + return tensor; +} +int MSTensor::ElementsNum() const { return std::accumulate(shape_.begin(), shape_.end(), 1LL, std::multiplies()); } + +size_t MSTensor::Size() const { + size_t size = 0; + switch (this->data_type_) { + case kNumberTypeFloat64: + size = sizeof(double); + break; + case kNumberTypeFloat: + case kNumberTypeFloat32: + size = sizeof(float); + break; + case kNumberTypeInt8: + size = sizeof(int8_t); + break; + case kNumberTypeUInt8: + size = sizeof(uint8_t); + break; + case kNumberTypeFloat16: + size = sizeof(int16_t); + break; + case kNumberTypeInt16: + size = sizeof(int16_t); + break; + case kNumberTypeInt32: + size = sizeof(int32_t); + break; + case kNumberTypeInt64: + size = sizeof(int64_t); + break; + case kNumberTypeUInt16: + size = sizeof(uint16_t); + break; + case kNumberTypeUInt32: + size = sizeof(uint32_t); + break; + case kNumberTypeUInt64: + size = sizeof(uint64_t); + break; + case kNumberTypeBool: + size = sizeof(bool); + break; + default: + std::cout << "Not support the type: " << this->data_type_; + return 0; + } + size *= (format_ == Format::Format_NC4HW4 || format_ == Format::Format_NHWC4) ? ElementsC4Num() : ElementsNum(); + + return size; +} +int32_t MSTensor::Batch() const { + if (this->shape_.size() != 4 && this->shape_.size() != 2) { + std::cout << "Unsupported tensor shape: " << this->shape_.size(); + return -1; + } + switch (this->format_) { + case Format::Format_NHWC: + case Format::Format_NHWC4: + case Format::Format_NCHW: + case Format::Format_NC4HW4: + case Format::Format_KCHW: + case Format::Format_KHWC: + case Format::Format_NC: + case Format::Format_NC4: + return this->shape_[0]; + case Format::Format_HWCK: + case Format::Format_CHWK: + return this->shape_[3]; + case Format::Format_HWKC: + return this->shape_[2]; + case Format::Format_CKHW: + return this->shape_[1]; + default: + // std::cout << "Unsupported format: " << EnumNameFormat(this->format_); + return -1; + } +} + +int32_t MSTensor::Channel() const { + if (this->shape_.size() != 4 && this->shape_.size() != 2) { + std::cout << "Unsupported tensor shape: " << this->shape_.size(); + return -1; + } + switch (this->format_) { + case Format::Format_NCHW: + case Format::Format_KCHW: + case Format::Format_NC: + case Format::Format_NC4: + return this->shape_[1]; + case Format::Format_HWCK: + return this->shape_[2]; + case Format::Format_HWKC: + case Format::Format_NHWC: + case Format::Format_NHWC4: + case Format::Format_NC4HW4: + case Format::Format_KHWC: + return this->shape_[3]; + case Format::Format_CKHW: + case Format::Format_CHWK: + return this->shape_[0]; + default: + return -1; + } +} + +int32_t MSTensor::Height() const { + if (this->shape_.size() != 4 && this->shape_.size() != 2) { + std::cout << "Unsupported tensor shape: " << this->shape_.size(); + return -1; + } + switch (this->format_) { + case Format::Format_NCHW: + case Format::Format_KCHW: + case Format::Format_CKHW: + return this->shape_[2]; + case Format::Format_NHWC: + case Format::Format_NHWC4: + case Format::Format_NC4HW4: + case Format::Format_KHWC: + case Format::Format_CHWK: + return this->shape_[1]; + case Format::Format_HWCK: + case Format::Format_HWKC: + case 
Format::Format_HW: + case Format::Format_HW4: + return this->shape_[0]; + default: + // std::cout << "Unsupported format: " << EnumNameFormat(this->format_); + return -1; + } +} + +int32_t MSTensor::Width() const { + if (this->shape_.size() != 4 && this->shape_.size() != 2) { + std::cout << "Unsupported tensor shape: " << this->shape_.size(); + return -1; + } + switch (this->format_) { + case Format::Format_NCHW: + case Format::Format_KCHW: + case Format::Format_CKHW: + return this->shape_[3]; + case Format::Format_KHWC: + case Format::Format_NHWC: + case Format::Format_NHWC4: + case Format::Format_NC4HW4: + case Format::Format_CHWK: + return this->shape_[2]; + case Format::Format_HWCK: + case Format::Format_HWKC: + case Format::Format_HW: + case Format::Format_HW4: + return this->shape_[1]; + default: + return -1; + } +} + +int MSTensor::ElementsC4Num() const { + int result = 0; + if (this->shape_.size() == 4) { + result = Batch() * Height() * Width() * ((Channel() + 3) / 4 * 4); + } else if (this->shape_.size() == 2) { + result = this->shape_[0] * ((this->shape_[1] + 3) / 4 * 4); + } + return result; +} diff --git a/mindspore/lite/java/java/app/src/main/native/common/jni_utils.h b/mindspore/lite/java/java/app/src/main/native/common/jni_utils.h index 215d7e40de..9f6fba9b08 100644 --- a/mindspore/lite/java/java/app/src/main/native/common/jni_utils.h +++ b/mindspore/lite/java/java/app/src/main/native/common/jni_utils.h @@ -14,7 +14,6 @@ * limitations under the License. */ - #ifndef MINDSPORE_LITE_JAVA_SRC_COMMON_JNI_UTILS_H #define MINDSPORE_LITE_JAVA_SRC_COMMON_JNI_UTILS_H diff --git a/mindspore/lite/java/java/app/src/main/native/common/ms_log.h b/mindspore/lite/java/java/app/src/main/native/common/ms_log.h index a41f4fa657..4558a732bc 100644 --- a/mindspore/lite/java/java/app/src/main/native/common/ms_log.h +++ b/mindspore/lite/java/java/app/src/main/native/common/ms_log.h @@ -14,7 +14,6 @@ * limitations under the License. 
*/ - #ifndef MINDSPORE_LITE_JAVA_SRC_COMMON_MS_LOG_H #define MINDSPORE_LITE_JAVA_SRC_COMMON_MS_LOG_H diff --git a/mindspore/lite/java/java/app/src/main/native/runtime/ms_config.cpp b/mindspore/lite/java/java/app/src/main/native/runtime/ms_config.cpp index 33be872b5c..68be956bd8 100644 --- a/mindspore/lite/java/java/app/src/main/native/runtime/ms_config.cpp +++ b/mindspore/lite/java/java/app/src/main/native/runtime/ms_config.cpp @@ -30,13 +30,13 @@ extern "C" JNIEXPORT jlong JNICALL Java_com_mindspore_lite_config_MSConfig_creat } switch (device_type) { case 0: - context->device_ctx_.type = mindspore::lite::DT_CPU; + context->device_type_ = mindspore::lite::DT_CPU; break; case 1: - context->device_ctx_.type = mindspore::lite::DT_GPU; + context->device_type_ = mindspore::lite::DT_GPU; break; case 2: - context->device_ctx_.type = mindspore::lite::DT_NPU; + context->device_type_ = mindspore::lite::DT_NPU; break; default: MS_LOGE("Invalid device_type : %d", device_type); diff --git a/mindspore/lite/minddata/example/jni-example.cc b/mindspore/lite/minddata/example/jni-example.cc index 2524d97552..356893ae69 100644 --- a/mindspore/lite/minddata/example/jni-example.cc +++ b/mindspore/lite/minddata/example/jni-example.cc @@ -26,70 +26,63 @@ #include #endif -extern "C" JNIEXPORT jstring JNICALL -Java_com_example_mindsporepredict_MainActivity_stringFromJNI( - JNIEnv* env, - jobject /* this */) { - std::string hello = "Hello World!"; - MS_LOG(DEBUG) << hello; - return env->NewStringUTF(hello.c_str()); +extern "C" JNIEXPORT jstring JNICALL Java_com_example_mindsporepredict_MainActivity_stringFromJNI(JNIEnv *env, + jobject /* this */) { + std::string hello = "Hello World!"; + MS_LOG(DEBUG) << hello; + return env->NewStringUTF(hello.c_str()); } using Dataset = mindspore::dataset::api::Dataset; using Iterator = mindspore::dataset::api::Iterator; -using mindspore::dataset::Tensor; using mindspore::dataset::Path; +using mindspore::dataset::Tensor; using mindspore::dataset::api::Cifar10; using mindspore::dataset::api::RandomSampler; -extern "C" JNIEXPORT void JNICALL -Java_com_example_mindsporepredict_MainActivity_pathTest( - JNIEnv* env, - jobject /* this */, - jstring path) { - MS_LOG(WARNING) << env->GetStringUTFChars(path, 0); - Path f(env->GetStringUTFChars(path, 0)); - MS_LOG(WARNING) << f.Exists() << f.IsDirectory() << f.ParentPath(); - // Print out the first few items in the directory - auto dir_it = Path::DirIterator::OpenDirectory(&f); - MS_LOG(WARNING) << dir_it.get(); - int i = 0; - while (dir_it->hasNext()) { - Path v = dir_it->next(); - MS_LOG(WARNING) << v.toString(); - i++; - if (i > 5) - break; - } +extern "C" JNIEXPORT void JNICALL Java_com_example_mindsporepredict_MainActivity_pathTest(JNIEnv *env, + jobject /* this */, + jstring path) { + MS_LOG(WARNING) << env->GetStringUTFChars(path, 0); + Path f(env->GetStringUTFChars(path, 0)); + MS_LOG(WARNING) << f.Exists() << f.IsDirectory() << f.ParentPath(); + // Print out the first few items in the directory + auto dir_it = Path::DirIterator::OpenDirectory(&f); + MS_LOG(WARNING) << dir_it.get(); + int i = 0; + while (dir_it->hasNext()) { + Path v = dir_it->next(); + MS_LOG(WARNING) << v.toString(); + i++; + if (i > 5) break; + } } -extern "C" JNIEXPORT void JNICALL -Java_com_example_mindsporepredict_MainActivity_TestCifar10Dataset( - JNIEnv* env, - jobject /* this */, - jstring path) { - MS_LOG(INFO) << "Doing MindDataTestPipeline-TestCifar10Dataset."; +extern "C" JNIEXPORT void JNICALL 
Java_com_example_mindsporepredict_MainActivity_TestCifar10Dataset(JNIEnv *env, + jobject /* this */, + jstring path) { + MS_LOG(INFO) << "Doing MindDataTestPipeline-TestCifar10Dataset."; - // Create a Cifar10 Dataset - std::string folder_path = env->GetStringUTFChars(path, 0); - std::shared_ptr ds = Cifar10(folder_path, RandomSampler(false, 10)); + // Create a Cifar10 Dataset + std::string folder_path = env->GetStringUTFChars(path, 0); + std::shared_ptr ds = Cifar10(folder_path, RandomSampler(false, 10)); - // Create an iterator over the result of the above dataset - // This will trigger the creation of the Execution Tree and launch it. - std::shared_ptr iter = ds->CreateIterator(); + // Create an iterator over the result of the above dataset + // This will trigger the creation of the Execution Tree and launch it. + std::shared_ptr iter = ds->CreateIterator(); - // Iterate the dataset and get each row - std::unordered_map> row; - iter->GetNextRow(&row); + // Iterate the dataset and get each row + std::unordered_map> row; + iter->GetNextRow(&row); - uint64_t i = 0; - while (row.size() != 0) { - i++; - auto image = row["image"]; - MS_LOG(INFO) << "Tensor image shape: " << image->shape(); - iter->GetNextRow(&row); - } + uint64_t i = 0; + while (row.size() != 0) { + i++; + auto image = row["image"]; + MS_LOG(INFO) << "Tensor image shape: " << image->shape(); + iter->GetNextRow(&row); + } - // Manually terminate the pipeline - iter->Stop(); + // Manually terminate the pipeline + iter->Stop(); } diff --git a/mindspore/lite/minddata/example/x86-example.cc b/mindspore/lite/minddata/example/x86-example.cc index 4aff13ed1d..4dfddb7c2b 100644 --- a/mindspore/lite/minddata/example/x86-example.cc +++ b/mindspore/lite/minddata/example/x86-example.cc @@ -28,28 +28,28 @@ using mindspore::dataset::api::Cifar10; using mindspore::dataset::api::RandomSampler; int main() { - MS_LOG(INFO) << "Doing MindDataTestPipeline-TestCifar10Dataset."; + MS_LOG(INFO) << "Doing MindDataTestPipeline-TestCifar10Dataset."; - // Create a Cifar10 Dataset - std::string folder_path = "./testCifar10Data/"; - std::shared_ptr ds = Cifar10(folder_path, RandomSampler(false, 10)); + // Create a Cifar10 Dataset + std::string folder_path = "./testCifar10Data/"; + std::shared_ptr ds = Cifar10(folder_path, RandomSampler(false, 10)); - // Create an iterator over the result of the above dataset - // This will trigger the creation of the Execution Tree and launch it. - std::shared_ptr iter = ds->CreateIterator(); + // Create an iterator over the result of the above dataset + // This will trigger the creation of the Execution Tree and launch it. 
+ std::shared_ptr iter = ds->CreateIterator(); - // Iterate the dataset and get each row - std::unordered_map> row; - iter->GetNextRow(&row); + // Iterate the dataset and get each row + std::unordered_map> row; + iter->GetNextRow(&row); - uint64_t i = 0; - while (row.size() != 0) { - i++; - auto image = row["image"]; - MS_LOG(INFO) << "Tensor image shape: " << image->shape(); - iter->GetNextRow(&row); - } + uint64_t i = 0; + while (row.size() != 0) { + i++; + auto image = row["image"]; + MS_LOG(INFO) << "Tensor image shape: " << image->shape(); + iter->GetNextRow(&row); + } - // Manually terminate the pipeline - iter->Stop(); + // Manually terminate the pipeline + iter->Stop(); } diff --git a/mindspore/lite/nnacl/fp16/arithmetic_fp16.h b/mindspore/lite/nnacl/fp16/arithmetic_fp16.h index 5c62221cca..17e712a7f8 100644 --- a/mindspore/lite/nnacl/fp16/arithmetic_fp16.h +++ b/mindspore/lite/nnacl/fp16/arithmetic_fp16.h @@ -112,7 +112,7 @@ int ElementGreaterFp16(float16_t *input0, float16_t *input1, float16_t *output, int ElementGreaterEqualFp16(float16_t *input0, float16_t *input1, float16_t *output, int element_size); void TileDimensionsFp16(float16_t *data0, float16_t *data1, float16_t *tile_data0, float16_t *tile_data1, - ArithmeticParameter *param); + ArithmeticParameter *param); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/nnacl/fp16/reduce_fp16.h b/mindspore/lite/nnacl/fp16/reduce_fp16.h index 4827c691dd..8a9655abb1 100644 --- a/mindspore/lite/nnacl/fp16/reduce_fp16.h +++ b/mindspore/lite/nnacl/fp16/reduce_fp16.h @@ -26,7 +26,7 @@ extern "C" { #endif int ReduceMeanFp16(const int outer_size, const int inner_size, const int axis_size, const float16_t *src_data, - const int *src_shape, float16_t *dst_data, const int tid, const int thread_num); + const int *src_shape, float16_t *dst_data, const int tid, const int thread_num); #ifdef __cplusplus } diff --git a/mindspore/lite/nnacl/fp32/reduce.h b/mindspore/lite/nnacl/fp32/reduce.h index 3a80658393..5844b23f32 100644 --- a/mindspore/lite/nnacl/fp32/reduce.h +++ b/mindspore/lite/nnacl/fp32/reduce.h @@ -19,7 +19,6 @@ #include "nnacl/op_base.h" #include "nnacl/reduce_parameter.h" - #ifdef __cplusplus extern "C" { #endif diff --git a/mindspore/lite/nnacl/fp32/space_to_batch.h b/mindspore/lite/nnacl/fp32/space_to_batch.h index 5406408022..19d941c42b 100644 --- a/mindspore/lite/nnacl/fp32/space_to_batch.h +++ b/mindspore/lite/nnacl/fp32/space_to_batch.h @@ -29,8 +29,7 @@ typedef struct SpaceToBatchParameter { #ifdef __cplusplus extern "C" { #endif -void DoSpaceToBatchNHWC(const float *input, float *output, SpaceToBatchParameter *param, int *in_shape, - int *out_shape); +void DoSpaceToBatchNHWC(const float *input, float *output, SpaceToBatchParameter *param, int *in_shape, int *out_shape); void DoSpaceToBatchPaddingNHWC(const float *input, float *output, int *in_shape, int *padding, int *out_shape, const float *pedding_h_data, const float *pedding_w_data); #ifdef __cplusplus diff --git a/mindspore/lite/nnacl/fp32_grad/batch_norm.h b/mindspore/lite/nnacl/fp32_grad/batch_norm.h index 1603aa0c07..f66e5508e0 100644 --- a/mindspore/lite/nnacl/fp32_grad/batch_norm.h +++ b/mindspore/lite/nnacl/fp32_grad/batch_norm.h @@ -29,13 +29,11 @@ typedef struct BNGradParameter { extern "C" { #endif - void sumSpatialBatch(const float *in, int size, int ch, float *out); void scaleBias(const float *scales, int batch, int n, int size, float *output); -void normalize(const float *x, const float *mean, const float *invar, int batch, int filters, int spatial, - 
float *out); -void backwardScale(const float *x, const float *mean, const float *invar, const float *delta, int batch, - int n, int size, float *scale_updates); +void normalize(const float *x, const float *mean, const float *invar, int batch, int filters, int spatial, float *out); +void backwardScale(const float *x, const float *mean, const float *invar, const float *delta, int batch, int n, + int size, float *scale_updates); void meanVar(const float *in, int batch, int size, int ch, float eps, float *mean, float *invar); void meanDelta(float *yt, int size, int ch, float *invar, float *mean_delta); void varianceDelta(const float *x, const float *delta, const float *mean, const float *invar, int batch, int ch, diff --git a/mindspore/lite/nnacl/int8/slice_int8.h b/mindspore/lite/nnacl/int8/slice_int8.h index 04b4fcce6e..5a67c083cc 100644 --- a/mindspore/lite/nnacl/int8/slice_int8.h +++ b/mindspore/lite/nnacl/int8/slice_int8.h @@ -29,4 +29,3 @@ int SliceInt8(const int8_t *input, int8_t *output, SliceParameter *param, int th #endif #endif // MINDSPORE_LITE_NNACL_INT8_SLICE_INT8_H_ - diff --git a/mindspore/lite/nnacl/l2_norm.h b/mindspore/lite/nnacl/l2_norm.h index 1c55648472..2895cc418f 100644 --- a/mindspore/lite/nnacl/l2_norm.h +++ b/mindspore/lite/nnacl/l2_norm.h @@ -21,8 +21,7 @@ #ifdef __cplusplus extern "C" { #endif -int L2NormFp32(const float *input_ptr, float *output_ptr, - L2NormParameter *param); +int L2NormFp32(const float *input_ptr, float *output_ptr, L2NormParameter *param); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/nnacl/winograd_transform.h b/mindspore/lite/nnacl/winograd_transform.h index 0cea9da41b..0b53e97ff1 100644 --- a/mindspore/lite/nnacl/winograd_transform.h +++ b/mindspore/lite/nnacl/winograd_transform.h @@ -59,16 +59,16 @@ void Conv3x3Fp32OutputTransform(const float *gemm_out, float *out_data, const fl void Conv3x3Int8InputUnit(int16_t *tmp_data, int16_t *trans_input_data, size_t step, int input_zp); void Conv3x3Int8InputTransform(const int16_t *input_data, int16_t *trans_input, int16_t *tmp_data, int start_index, - int real_cal_num, int out_w_block, ConvParameter *conv_param); + int real_cal_num, int out_w_block, ConvParameter *conv_param); void Conv3x3Int8FilterTransform(const int16_t *weight_data, int16_t *trans_weight, int iC8, int output_channel, int kernel_plane); void Conv3x3Int8OutputUnit(const int32_t *gemm_out, const int32_t *bias_data, int8_t *output_data, bool h_not_bound, - bool w_not_bound, int output_w, int real_num, int oc_start, ConvParameter *conv_param); + bool w_not_bound, int output_w, int real_num, int oc_start, ConvParameter *conv_param); void Conv3x3Int8OutputTransform(const int32_t *gemm_out, int8_t *out_data, const int32_t *bias_data, int start_index, - int real_cal_num, int out_w_block, ConvParameter *conv_param); + int real_cal_num, int out_w_block, ConvParameter *conv_param); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/src/CMakeLists.txt b/mindspore/lite/src/CMakeLists.txt index 036f0d756d..0b06df14b9 100644 --- a/mindspore/lite/src/CMakeLists.txt +++ b/mindspore/lite/src/CMakeLists.txt @@ -1,22 +1,21 @@ if (PLATFORM_ARM32 OR PLATFORM_ARM64) # for performance if ("${CMAKE_BUILD_TYPE}" STREQUAL "Release") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math") + #-fno-rtti -fno-exceptions + set(CMAKE_C_FLAGS 
"${CMAKE_C_FLAGS} -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math") endif() endif () set(LITE_SRC ${CMAKE_CURRENT_SOURCE_DIR}/common/graph_util.cc - ${CMAKE_CURRENT_SOURCE_DIR}/common/ms_tensor_utils.cc + ${CMAKE_CURRENT_SOURCE_DIR}/../../core/gvar/logging_level.cc ${CMAKE_CURRENT_SOURCE_DIR}/common/log_adapter.cc ${CMAKE_CURRENT_SOURCE_DIR}/runtime/allocator.cc ${CMAKE_CURRENT_SOURCE_DIR}/runtime/runtime_api.cc ${CMAKE_CURRENT_SOURCE_DIR}/runtime/thread_pool.c ${CMAKE_CURRENT_SOURCE_DIR}/runtime/workspace_pool.cc - ${CMAKE_CURRENT_SOURCE_DIR}/ir/tensor.cc - ${CMAKE_CURRENT_SOURCE_DIR}/ir/meta_tensor_extends.cc - ${CMAKE_CURRENT_SOURCE_DIR}/context.cc + ${CMAKE_CURRENT_SOURCE_DIR}/tensor.cc ${CMAKE_CURRENT_SOURCE_DIR}/executor.cc ${CMAKE_CURRENT_SOURCE_DIR}/kernel_registry.cc ${CMAKE_CURRENT_SOURCE_DIR}/lite_kernel.cc @@ -59,17 +58,14 @@ endif () file(GLOB_RECURSE C_OPS_SRC ${CMAKE_CURRENT_SOURCE_DIR}/ops/*.cc) add_library(mindspore-lite SHARED ${LITE_SRC} ${C_OPS_SRC}) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-private-field") -add_library(core_mid_ OBJECT ${CORE_SRC}) if (SUPPORT_GPU) add_subdirectory(runtime/kernel/opencl) target_link_libraries(mindspore-lite -core_mid_ cpu_kernel_mid_ opencl_kernel_lib_ ) else () target_link_libraries(mindspore-lite - core_mid_ cpu_kernel_mid_ ) endif () diff --git a/mindspore/lite/src/common/common.h b/mindspore/lite/src/common/common.h old mode 100755 new mode 100644 index 533fd8fc38..0dc90fe2cb --- a/mindspore/lite/src/common/common.h +++ b/mindspore/lite/src/common/common.h @@ -18,7 +18,7 @@ #define MINDSPORE_LITE_COMMON_COMMON_H_ #include -#include "schema/model_generated.h" +#include "src/tensor.h" namespace mindspore { namespace lite { @@ -51,9 +51,8 @@ static const char CALIB_NORM[] = "NORM"; // dims static const int32_t DIM_DEFAULT_SIZE = 4; -static const schema::Format DEFAULT_FORMAT = schema::Format_NCHW; +static const schema::Format DEFAULT_FORMAT = schema::Format::Format_NCHW; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_COMMON_COMMON_H_ - diff --git a/mindspore/lite/src/common/file_utils.cc b/mindspore/lite/src/common/file_utils.cc index 6cbb7b8dfc..3456df31ed 100644 --- a/mindspore/lite/src/common/file_utils.cc +++ b/mindspore/lite/src/common/file_utils.cc @@ -119,13 +119,13 @@ int CompareOutputData(float *output_data, float *correct_data, int data_size) { return 0; } -int CompareOutput(float *output_data, std::string file_path) { +int CompareOutput(float *output_data, std::string file_path) { size_t output_size; auto ground_truth = reinterpret_cast(mindspore::lite::ReadFile(file_path.c_str(), &output_size)); size_t output_num = output_size / sizeof(float); printf("output num : %zu\n", output_num); int res = CompareOutputData(output_data, ground_truth, output_num); - delete [] ground_truth; + delete[] ground_truth; return res; } } // namespace lite diff --git a/mindspore/lite/src/common/file_utils.h b/mindspore/lite/src/common/file_utils.h index 03fdf5360d..a2b479147a 100644 --- a/mindspore/lite/src/common/file_utils.h +++ b/mindspore/lite/src/common/file_utils.h @@ -34,7 +34,7 @@ char *ReadFile(const char *file, size_t *size); std::string RealPath(const char *path); template -void WriteToTxt(const std::string& file_path, void *data, size_t element_size) { +void WriteToTxt(const std::string &file_path, void *data, size_t element_size) { 
std::ofstream out_file; out_file.open(file_path, std::ios::out); auto real_data = reinterpret_cast(data); @@ -44,10 +44,10 @@ void WriteToTxt(const std::string& file_path, void *data, size_t element_size) { out_file.close(); } -int WriteToBin(const std::string& file_path, void *data, size_t size); +int WriteToBin(const std::string &file_path, void *data, size_t size); int CompareOutputData(float *output_data, float *correct_data, int data_size); -int CompareOutput(float *output_data, std::string file_path); +int CompareOutput(float *output_data, std::string file_path); std::string GetAndroidPackageName(); std::string GetAndroidPackagePath(); @@ -55,4 +55,3 @@ std::string GetAndroidPackagePath(); } // namespace mindspore #endif // MINDSPORE_LITE_COMMON_FILE_UTILS_H_ - diff --git a/mindspore/lite/src/common/file_utils_ext.cc b/mindspore/lite/src/common/file_utils_ext.cc index ade264d7b7..91851f8b90 100644 --- a/mindspore/lite/src/common/file_utils_ext.cc +++ b/mindspore/lite/src/common/file_utils_ext.cc @@ -48,7 +48,7 @@ int CompareRelativeOutput(float *output_data, std::string file_path) { size_t output_num = output_size / sizeof(float); std::cout << "output num : " << output_num << "\n"; int res = CompareOutputRelativeData(output_data, ground_truth, output_num); - delete [] ground_truth; + delete[] ground_truth; return res; } } // namespace lite diff --git a/mindspore/lite/src/common/file_utils_ext.h b/mindspore/lite/src/common/file_utils_ext.h index f5441cbdf9..6f4cb0a7ea 100644 --- a/mindspore/lite/src/common/file_utils_ext.h +++ b/mindspore/lite/src/common/file_utils_ext.h @@ -18,7 +18,6 @@ #define MINDSPORE_LITE_COMMON_FILE_UTILS_EXT_H_ #include - namespace mindspore { namespace lite { int CompareRelativeOutput(float *output_data, std::string file_path); diff --git a/mindspore/lite/src/common/graph_utils_extends.cc b/mindspore/lite/src/common/graph_utils_extends.cc index 2f741a98b8..f7652194fb 100644 --- a/mindspore/lite/src/common/graph_utils_extends.cc +++ b/mindspore/lite/src/common/graph_utils_extends.cc @@ -27,7 +27,7 @@ #include "ir/visitor.h" #include "ir/func_graph.h" - #include "utils/label.h" +#include "utils/label.h" #include "utils/log_adapter.h" #include "src/common/utils.h" @@ -147,4 +147,3 @@ std::vector DeepLinkedGraphSearch(const AnfNodePtr &root, const Incl return DeepLinkedGraphSearcher(include).Search(root); } } // namespace mindspore - diff --git a/mindspore/lite/src/common/log_adapter.cc b/mindspore/lite/src/common/log_adapter.cc index b23a737251..fbe4586d63 100644 --- a/mindspore/lite/src/common/log_adapter.cc +++ b/mindspore/lite/src/common/log_adapter.cc @@ -29,17 +29,12 @@ namespace mindspore { constexpr const char *ANDROID_LOG_TAG = "MS_LITE"; int EnvToInt(const char *env) { - if (env == nullptr) - return -1; - if (strcmp(env, "DEBUG") == 0) - return 0; - if (strcmp(env, "INFO") == 0) - return 1; - if (strcmp(env, "WARNING") == 0) - return 2; - if (strcmp(env, "ERROR") == 0) - return 3; - return -1; + if (env == nullptr) return -1; + if (strcmp(env, "DEBUG") == 0) return 0; + if (strcmp(env, "INFO") == 0) return 1; + if (strcmp(env, "WARNING") == 0) return 2; + if (strcmp(env, "ERROR") == 0) return 3; + return -1; } bool IsPrint(int level) { @@ -55,15 +50,15 @@ bool IsPrint(int level) { // convert MsLogLevel to corresponding android level static int GetAndroidLogLevel(MsLogLevel level) { switch (level) { - case DEBUG: - return ANDROID_LOG_DEBUG; - case INFO: - return ANDROID_LOG_INFO; - case WARNING: - return ANDROID_LOG_WARN; - case ERROR: - default: - 
return ANDROID_LOG_ERROR; + case DEBUG: + return ANDROID_LOG_DEBUG; + case INFO: + return ANDROID_LOG_INFO; + case WARNING: + return ANDROID_LOG_WARN; + case ERROR: + default: + return ANDROID_LOG_ERROR; } } #endif @@ -114,16 +109,20 @@ static std::string ExceptionTypeToString(ExceptionType type) { } void LogWriter::OutputLog(const std::ostringstream &msg) const { -if (IsPrint(log_level_)) { + if (IsPrint(log_level_)) { + std::string sm = ""; + if (submodule_ != SM_UNKNOWN) { + sm = std::to_string(submodule_) + " "; + } // #ifdef USE_ANDROID_LOG #ifdef ENABLE_ARM - __android_log_print(GetAndroidLogLevel(log_level_), ANDROID_LOG_TAG, "[%s:%d] %s] %s", location_.file_, - location_.line_, location_.func_, msg.str().c_str()); + __android_log_print(GetAndroidLogLevel(log_level_), ANDROID_LOG_TAG, "[%s:%d] %s] %s%s", location_.file_, + location_.line_, location_.func_, sm.c_str(), msg.str().c_str()); #else - printf("%s [%s:%d] %s] %s\n", EnumStrForMsLogLevel(log_level_), location_.file_, location_.line_, location_.func_, - msg.str().c_str()); + printf("%s [%s:%d] %s] %s%s\n", EnumStrForMsLogLevel(log_level_), location_.file_, location_.line_, location_.func_, + sm.c_str(), msg.str().c_str()); #endif -} + } } void LogWriter::operator<(const LogStream &stream) const noexcept { @@ -155,4 +154,3 @@ void LogWriter::operator^(const LogStream &stream) const { throw std::runtime_error(oss.str()); } } // namespace mindspore - diff --git a/mindspore/lite/src/common/ms_tensor_utils.cc b/mindspore/lite/src/common/ms_tensor_utils.cc deleted file mode 100644 index 44d04afbc8..0000000000 --- a/mindspore/lite/src/common/ms_tensor_utils.cc +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/common/ms_tensor_utils.h" - -#include -#include "utils/log_adapter.h" - -namespace mindspore { -namespace tensor { -using mindspore::lite::tensor::LiteTensor; -using mindspore::lite::tensor::Tensor; - -std::vector PackToMSTensors(const std::vector &in_tensors) { - std::vector ret; - for (auto *lite_tensor : in_tensors) { - MS_ASSERT(lite_tensor != nullptr); - auto *ms_tensor = new (std::nothrow) LiteTensor(lite_tensor); - if (ms_tensor == nullptr) { - MS_LOG(ERROR) << "new LiteTensor failed"; - return ret; - } - ret.emplace_back(ms_tensor); - } - return ret; -} -} // namespace tensor -} // namespace mindspore diff --git a/mindspore/lite/src/common/op_utils.h b/mindspore/lite/src/common/op_utils.h old mode 100755 new mode 100644 index 68a4217114..e81e5181bc --- a/mindspore/lite/src/common/op_utils.h +++ b/mindspore/lite/src/common/op_utils.h @@ -29,4 +29,3 @@ inline std::string GetOpTypeName(const schema::CNode &opDef) { return schema::En } // namespace mindspore #endif // MINDSPORE_LITE_COMMON_OP_UTILS_H_ - diff --git a/mindspore/lite/src/common/utils.cc b/mindspore/lite/src/common/utils.cc index bb2e1e9c2b..da8534422b 100644 --- a/mindspore/lite/src/common/utils.cc +++ b/mindspore/lite/src/common/utils.cc @@ -21,7 +21,7 @@ namespace mindspore { namespace lite { -std::vector StringSplit(std::string str, const std::string& pattern) { +std::vector StringSplit(std::string str, const std::string &pattern) { std::vector result; if (str.empty()) { return result; @@ -259,4 +259,3 @@ uint32_t getHwCap(int hwcap_type) { #endif } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/src/common/utils.h b/mindspore/lite/src/common/utils.h index b6d28d8992..156abcc692 100644 --- a/mindspore/lite/src/common/utils.h +++ b/mindspore/lite/src/common/utils.h @@ -32,7 +32,7 @@ namespace mindspore { namespace lite { const int USEC = 1000000; const int MSEC = 1000; -std::vector StringSplit(std::string str, const std::string& pattern); +std::vector StringSplit(std::string str, const std::string &pattern); uint64_t GetTimeUs(void); @@ -190,4 +190,3 @@ inline Option GenericParseValue(const std::string &value) { } // namespace mindspore #endif // MINDSPORE_LITE_COMMON_UTILS_H_ - diff --git a/mindspore/lite/src/context.cc b/mindspore/lite/src/context.cc deleted file mode 100644 index 5145a738b7..0000000000 --- a/mindspore/lite/src/context.cc +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "include/context.h" -#include "src/runtime/allocator.h" - -namespace mindspore::lite { -Context::Context() { allocator = Allocator::Create(); } - -Context::~Context() = default; - -Context::Context(int thread_num, std::shared_ptr allocator, DeviceContext device_ctx) { - this->allocator = std::move(allocator); - this->thread_num_ = thread_num; - this->device_ctx_ = device_ctx; -} -} // namespace mindspore::lite - diff --git a/mindspore/lite/src/executor.cc b/mindspore/lite/src/executor.cc index 3877f8980f..330ab443ea 100644 --- a/mindspore/lite/src/executor.cc +++ b/mindspore/lite/src/executor.cc @@ -17,10 +17,9 @@ #include "mindspore/lite/src/executor.h" #include "nnacl/pack.h" #include "include/errorcode.h" -#include "src/common/ms_tensor_utils.h" namespace mindspore::lite { -int Executor::Run(std::vector &in_tensors, std::vector &out_tensors, +int Executor::Run(std::vector &in_tensors, std::vector &out_tensors, std::vector &kernels, Allocator *allocator, const session::KernelCallBack &before, const session::KernelCallBack &after) { MS_ASSERT(nullptr != allocator); @@ -29,11 +28,11 @@ int Executor::Run(std::vector &in_tensors, std::vectorData() == nullptr) { + if (inTensor->MutableData() == nullptr) { MS_LOG(ERROR) << "Graph input tensor data is nullptr"; return RET_ERROR; } - if (inTensor->GetFormat() != schema::Format_NHWC) { + if (inTensor->GetFormat() != schema::Format::Format_NHWC) { MS_LOG(ERROR) << "Model input tensor should be NHWC"; return RET_ERROR; } @@ -47,7 +46,7 @@ int Executor::Run(std::vector &in_tensors, std::vectorin_tensors()), PackToMSTensors(kernel->out_tensors()), + if (!before(TensorVectorCast(kernel->in_tensors()), TensorVectorCast(kernel->out_tensors()), {kernel->name(), kernel->type_str()})) { MS_LOG(ERROR) << "run kernel before_callback failed, name: " << kernel->name(); } @@ -59,9 +58,8 @@ int Executor::Run(std::vector &in_tensors, std::vectorname(); return ret; } - if (after != nullptr) { - if (!after(PackToMSTensors(kernel->in_tensors()), PackToMSTensors(kernel->out_tensors()), + if (!after(TensorVectorCast(kernel->in_tensors()), TensorVectorCast(kernel->out_tensors()), {kernel->name(), kernel->type_str()})) { MS_LOG(ERROR) << "run kernel after_callback failed, name: " << kernel->name(); } @@ -80,7 +78,7 @@ int Executor::Run(std::vector &in_tensors, std::vectorshape().size()); @@ -96,13 +94,17 @@ int Executor::TransformTensorLayout(tensor::Tensor *tensor, schema::Format dst_f return RET_OK; } -int Executor::TransformTensorLayoutFp32(tensor::Tensor *tensor, schema::Format dst_format, Allocator *allocator) { +int Executor::TransformTensorLayoutFp32(Tensor *tensor, schema::Format dst_format, Allocator *allocator) { MS_ASSERT(nullptr != tensor); MS_ASSERT(nullptr != allocator); MS_ASSERT(4 == tensor->shape().size()); auto src_format = tensor->GetFormat(); - if (src_format == schema::Format_NC4HW4 && dst_format == schema::Format_NHWC) { - auto *src_data = tensor->Data(); + if (src_format == schema::Format::Format_NC4HW4 && dst_format == schema::Format::Format_NHWC) { + auto *src_data = tensor->MutableData(); + if (src_data == nullptr) { + MS_LOG(ERROR) << "MutableData return nullptr"; + return RET_ERROR; + } auto *dst_data = allocator->Malloc(tensor->Size()); if (dst_data == nullptr) { MS_LOG(ERROR) << "Malloc data failed"; @@ -114,18 +116,18 @@ int Executor::TransformTensorLayoutFp32(tensor::Tensor *tensor, schema::Format d allocator->Free(src_data); return RET_OK; } else { - MS_LOG(ERROR) << "Unsupported layout transform: " << 
schema::EnumNameFormat(tensor->GetFormat()) << " to " - << schema::EnumNameFormat(dst_format) << " in float32"; + MS_LOG(ERROR) << "Unsupported layout transform: " << EnumNameFormat(tensor->GetFormat()) << " to " + << EnumNameFormat(dst_format) << " in float32"; return RET_ERROR; } } -int Executor::TransformTensorLayoutUint8(tensor::Tensor *tensor, schema::Format dst_format, Allocator *allocator) { +int Executor::TransformTensorLayoutUint8(Tensor *tensor, schema::Format dst_format, Allocator *allocator) { MS_ASSERT(nullptr != tensor); MS_ASSERT(nullptr != allocator); MS_ASSERT(4 == tensor->shape().size()); - MS_LOG(ERROR) << "Unsupported layout transform: " << schema::EnumNameFormat(tensor->GetFormat()) << " to " - << schema::EnumNameFormat(dst_format) << " in uint8"; + MS_LOG(ERROR) << "Unsupported layout transform: " << EnumNameFormat(tensor->GetFormat()) << " to " + << EnumNameFormat(dst_format) << " in uint8"; return RET_ERROR; } } // namespace mindspore::lite diff --git a/mindspore/lite/src/executor.h b/mindspore/lite/src/executor.h index 54eee3dd9d..9f89f6d804 100644 --- a/mindspore/lite/src/executor.h +++ b/mindspore/lite/src/executor.h @@ -30,16 +30,16 @@ class Executor { virtual int Prepare(std::vector &kernels) { return 0; } - virtual int Run(std::vector &in_tensors, std::vector &out_tensors, - std::vector &kernels, Allocator *allocator = nullptr, - const session::KernelCallBack &before = nullptr, const session::KernelCallBack &after = nullptr); + virtual int Run(std::vector &in_tensors, std::vector &out_tensors, + std::vector &kernels, Allocator *allocator = nullptr, + const session::KernelCallBack &before = nullptr, const session::KernelCallBack &after = nullptr); protected: - int TransformTensorLayoutFp32(tensor::Tensor *tensor, schema::Format dst_format, Allocator *allocator = nullptr); + int TransformTensorLayoutFp32(Tensor *tensor, schema::Format dst_format, Allocator *allocator = nullptr); - int TransformTensorLayoutUint8(tensor::Tensor *tensor, schema::Format dst_format, Allocator *allocator = nullptr); + int TransformTensorLayoutUint8(Tensor *tensor, schema::Format dst_format, Allocator *allocator = nullptr); - int TransformTensorLayout(tensor::Tensor *tensor, schema::Format dst_format, Allocator *allocator = nullptr); + int TransformTensorLayout(Tensor *tensor, schema::Format dst_format, Allocator *allocator = nullptr); }; } // namespace mindspore::lite diff --git a/mindspore/lite/src/ir/meta_tensor_extends.cc b/mindspore/lite/src/ir/meta_tensor_extends.cc deleted file mode 100644 index 3e5851ba33..0000000000 --- a/mindspore/lite/src/ir/meta_tensor_extends.cc +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "ir/meta_tensor.h" - -namespace mindspore { -namespace tensor { -abstract::AbstractBasePtr MetaTensor::ToAbstract() { - MS_LOG(ERROR) << "MetaTensor ToAbstract is not implemented"; - return nullptr; -} -TypePtr MetaTensor::Dtype() const { return nullptr; } -} // namespace tensor -} // namespace mindspore - diff --git a/mindspore/lite/src/kernel_registry.cc b/mindspore/lite/src/kernel_registry.cc index d283877516..994867b076 100644 --- a/mindspore/lite/src/kernel_registry.cc +++ b/mindspore/lite/src/kernel_registry.cc @@ -96,10 +96,9 @@ bool KernelRegistry::Merge(const std::unordered_map &n const kernel::KernelCreator *KernelRegistry::GetCreatorArrays() { return creator_arrays_; } -kernel::LiteKernel *KernelRegistry::GetKernel(const std::vector &in_tensors, - const std::vector &out_tensors, - const PrimitiveC *primitive, const Context *ctx, - const kernel::KernelKey &key) { +kernel::LiteKernel *KernelRegistry::GetKernel(const std::vector &in_tensors, + const std::vector &out_tensors, const PrimitiveC *primitive, + const Context *ctx, const kernel::KernelKey &key) { MS_ASSERT(nullptr != primitive); MS_ASSERT(nullptr != ctx); auto parameter = kernel::PopulateParameter(primitive); diff --git a/mindspore/lite/src/kernel_registry.h b/mindspore/lite/src/kernel_registry.h index bee48dade7..c0511441ba 100644 --- a/mindspore/lite/src/kernel_registry.h +++ b/mindspore/lite/src/kernel_registry.h @@ -44,9 +44,8 @@ class KernelRegistry { void RegKernel(const kernel::KERNEL_ARCH arch, const TypeId data_type, const schema::PrimitiveType type, kernel::KernelCreator creator); bool Merge(const std::unordered_map &newCreators); - kernel::LiteKernel *GetKernel(const std::vector &in_tensors, - const std::vector &out_tensors, const PrimitiveC *primitive, - const Context *ctx, const kernel::KernelKey &key); + kernel::LiteKernel *GetKernel(const std::vector &in_tensors, const std::vector &out_tensors, + const PrimitiveC *primitive, const Context *ctx, const kernel::KernelKey &key); protected: static const int device_type_length_{kKernelArch_MAX - kKernelArch_MIN + 1}; diff --git a/mindspore/lite/src/lite_kernel.cc b/mindspore/lite/src/lite_kernel.cc index 7d3edf0dfd..efa70a61c7 100644 --- a/mindspore/lite/src/lite_kernel.cc +++ b/mindspore/lite/src/lite_kernel.cc @@ -101,10 +101,9 @@ std::vector LiteKernelUtil::SubgraphOutputKernels( return output_kernels; } -std::vector LiteKernelUtil::SubgraphInputTensors( - const std::vector &kernels) { - std::vector input_tensors; - std::vector all_output_tensors; +std::vector LiteKernelUtil::SubgraphInputTensors(const std::vector &kernels) { + std::vector input_tensors; + std::vector all_output_tensors; for (const auto &kernel : kernels) { all_output_tensors.insert(all_output_tensors.end(), kernel->out_tensors().begin(), kernel->out_tensors().end()); } @@ -112,7 +111,7 @@ std::vector LiteKernelUtil::SubgraphInputTensors( for (const auto &kernel : input_kernels) { for (const auto &tensor : kernel->in_tensors()) { auto iter = std::find(all_output_tensors.begin(), all_output_tensors.end(), tensor); - if (iter == all_output_tensors.end() && tensor->Data() == nullptr) { + if (iter == all_output_tensors.end() && tensor->data_c() == nullptr) { input_tensors.emplace_back(tensor); } } @@ -120,10 +119,9 @@ std::vector LiteKernelUtil::SubgraphInputTensors( return input_tensors; } -std::vector LiteKernelUtil::SubgraphOutputTensors( - const std::vector &kernels) { - std::vector output_tensors; - std::vector all_input_tensors; +std::vector 
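SubgraphInputTensors above selects graph inputs by set difference: a tensor feeds the subgraph from outside if some kernel reads it, no kernel in the set produces it, and it carries no constant data (the new data_c() == nullptr test). A simplified sketch of that rule with hypothetical types (it scans every kernel rather than only the subgraph-entry kernels):

#include <algorithm>
#include <vector>
struct Tensor { void *data = nullptr; };
struct Kernel { std::vector<Tensor *> ins, outs; };

std::vector<Tensor *> SubgraphInputs(const std::vector<Kernel *> &kernels) {
  std::vector<Tensor *> produced, inputs;
  for (auto *k : kernels) produced.insert(produced.end(), k->outs.begin(), k->outs.end());
  for (auto *k : kernels)
    for (auto *t : k->ins) {
      bool internal = std::find(produced.begin(), produced.end(), t) != produced.end();
      if (!internal && t->data == nullptr) inputs.push_back(t);  // no producer, no constant data
    }
  return inputs;
}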
LiteKernelUtil::SubgraphOutputTensors(const std::vector &kernels) { + std::vector output_tensors; + std::vector all_input_tensors; for (const auto &kernel : kernels) { all_input_tensors.insert(all_input_tensors.end(), kernel->in_tensors().begin(), kernel->in_tensors().end()); } @@ -165,5 +163,5 @@ void LiteKernelUtil::InitTensorRefCount(std::vector &kerne } } -int LiteKernelUtil::SetInput(LiteKernel &kernelMod, std::vector inputs) { return -1; } +int LiteKernelUtil::SetInput(LiteKernel &kernelMod, std::vector inputs) { return -1; } } // namespace mindspore::kernel diff --git a/mindspore/lite/src/lite_kernel.h b/mindspore/lite/src/lite_kernel.h index 6a166024cb..35bb4615bb 100644 --- a/mindspore/lite/src/lite_kernel.h +++ b/mindspore/lite/src/lite_kernel.h @@ -24,7 +24,7 @@ #include "src/ops/primitive_c.h" #include "nnacl/op_base.h" #include "include/context.h" -#include "src/ir/tensor.h" +#include "src/tensor.h" #include "include/errorcode.h" // using mindspore::kernel::AddressPtr; @@ -52,8 +52,8 @@ class LiteKernel { public: LiteKernel() = default; // parameter should be deleted or freed by caller, and should be deleted or freed after LiteKernel is deleted - LiteKernel(OpParameter *parameter, const std::vector &in_tensors, - const std::vector &out_tensors, const lite::Context *ctx, + LiteKernel(OpParameter *parameter, const std::vector &in_tensors, + const std::vector &out_tensors, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : op_parameter_(parameter), in_tensors_(in_tensors), @@ -105,13 +105,13 @@ class LiteKernel { std::string type_str() { return schema::EnumNamePrimitiveType(this->Type()); } - void set_in_tensors(const std::vector &in_tensors) { this->in_tensors_ = in_tensors; } + void set_in_tensors(const std::vector &in_tensors) { this->in_tensors_ = in_tensors; } - void set_out_tensors(const std::vector &out_tensors) { this->out_tensors_ = out_tensors; } + void set_out_tensors(const std::vector &out_tensors) { this->out_tensors_ = out_tensors; } - std::vector &in_tensors() { return this->in_tensors_; } + std::vector &in_tensors() { return this->in_tensors_; } - std::vector &out_tensors() { return this->out_tensors_; } + std::vector &out_tensors() { return this->out_tensors_; } void AddInKernel(LiteKernel *kernel) { this->in_kernels_.emplace_back(kernel); } @@ -142,8 +142,8 @@ class LiteKernel { std::string name_; OpParameter *op_parameter_ = nullptr; // tensor will free in ~lite_session() - std::vector in_tensors_; - std::vector out_tensors_; + std::vector in_tensors_; + std::vector out_tensors_; const mindspore::lite::PrimitiveC *primitive_ = nullptr; const lite::Context *context_ = nullptr; std::vector in_kernels_; @@ -154,8 +154,7 @@ class LiteKernel { class SubGraphKernel : public LiteKernel { public: - explicit SubGraphKernel(const std::vector &inputs, - const std::vector &outputs, + explicit SubGraphKernel(const std::vector &inputs, const std::vector &outputs, const std::vector &in_kernels, const std::vector &out_kernels, const std::vector &nodes, const lite::Context *ctx, @@ -174,8 +173,8 @@ class SubGraphKernel : public LiteKernel { std::vector nodes_; }; -typedef LiteKernel *(*KernelCreator)(const std::vector &inputs, - const std::vector &outputs, OpParameter *parameter, +typedef LiteKernel *(*KernelCreator)(const std::vector &inputs, + const std::vector &outputs, OpParameter *parameter, const lite::Context *ctx, const KernelKey &desc, const mindspore::lite::PrimitiveC *primitive); @@ -187,13 +186,13 @@ class LiteKernelUtil { static std::vector 
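InitTensorRefCount, referenced above, primes each tensor with the number of kernels that consume it, so a buffer can be released right after its last reader runs. The counting step, sketched with hypothetical types:

#include <unordered_map>
#include <vector>
struct Tensor {};
struct Kernel { std::vector<Tensor *> ins; };

std::unordered_map<Tensor *, int> CountConsumers(const std::vector<Kernel *> &kernels) {
  std::unordered_map<Tensor *, int> ref_count;
  for (auto *k : kernels)
    for (auto *t : k->ins) ++ref_count[t];  // one reference per consuming kernel
  return ref_count;
}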
SubgraphOutputKernels(const std::vector &kernels); - static std::vector SubgraphInputTensors(const std::vector &kernels); + static std::vector SubgraphInputTensors(const std::vector &kernels); - static std::vector SubgraphOutputTensors(const std::vector &kernels); + static std::vector SubgraphOutputTensors(const std::vector &kernels); static void InitTensorRefCount(std::vector &kernels); - static int SetInput(LiteKernel &kernelMod, std::vector inputs); + static int SetInput(LiteKernel &kernelMod, std::vector inputs); }; } // namespace mindspore::kernel diff --git a/mindspore/lite/src/lite_session.cc b/mindspore/lite/src/lite_session.cc index 55deaf0cdc..a2e8861f19 100644 --- a/mindspore/lite/src/lite_session.cc +++ b/mindspore/lite/src/lite_session.cc @@ -34,9 +34,8 @@ namespace mindspore { namespace lite { static std::vector packed_op = { - schema::PrimitiveType_Conv2D, schema::PrimitiveType_DeConv2D, - schema::PrimitiveType_DepthwiseConv2D, schema::PrimitiveType_DeDepthwiseConv2D, - schema::PrimitiveType_MatMul}; + schema::PrimitiveType_Conv2D, schema::PrimitiveType_DeConv2D, schema::PrimitiveType_DepthwiseConv2D, + schema::PrimitiveType_DeDepthwiseConv2D, schema::PrimitiveType_MatMul}; // this method will not check whether tensor_idx is a weight tensor index, caller should ensure this. static bool WeightTensorNeedCopy(const lite::Model *model, const uint32_t tensor_idx) { @@ -63,7 +62,7 @@ int LiteSession::ConvertTensors(const lite::Model *model) { if (srcTensor->dims() == nullptr) { MS_LOG(DEBUG) << "Dims of " << i << "th tensor is nullptr"; } else { - if (srcTensor->nodeType() == schema::NodeType_ValueNode) { + if (TensorCategory(srcTensor) == Tensor::Category::CONST) { for (size_t j = 0; j < srcTensor->dims()->size(); j++) { shape.push_back(srcTensor->dims()->data()[j]); } @@ -71,12 +70,12 @@ int LiteSession::ConvertTensors(const lite::Model *model) { } int dataType = srcTensor->dataType(); auto *dstTensor = - new (std::nothrow) tensor::Tensor(TypeId(dataType), shape, srcTensor->format(), srcTensor->nodeType()); + new (std::nothrow) Tensor(TypeId(dataType), shape, srcTensor->format(), TensorCategory(srcTensor)); if (dstTensor == nullptr) { MS_LOG(ERROR) << "new " << i << "th tensor failed"; return RET_NULL_PTR; } - if (srcTensor->nodeType() == schema::NodeType_ValueNode && srcTensor->data() != nullptr && + if (TensorCategory(srcTensor) == Tensor::Category::CONST && srcTensor->data() != nullptr && srcTensor->data()->size() > 0) { if (shape.empty()) { shape.push_back(1); @@ -84,12 +83,12 @@ int LiteSession::ConvertTensors(const lite::Model *model) { } MS_ASSERT(dstTensor->Size() == srcTensor->data()->size()); if (WeightTensorNeedCopy(model, i)) { - auto ret = dstTensor->MallocData(); - if (ret != RET_OK) { - MS_LOG(ERROR) << "Malloc data for " << i << "th tensor failed"; + auto dst_data = dstTensor->MutableData(); + if (dst_data == nullptr) { + MS_LOG(ERROR) << "MutableData from " << i << "th tensor is nullptr"; return RET_ERROR; } - memcpy(dstTensor->Data(), srcTensor->data()->data(), dstTensor->Size()); + memcpy(dst_data, srcTensor->data()->data(), dstTensor->Size()); copyed_tensor_idxes_.emplace_back(i); } else { dstTensor->SetData(const_cast(srcTensor->data()->data())); @@ -98,7 +97,7 @@ int LiteSession::ConvertTensors(const lite::Model *model) { auto quant_params = srcTensor->quantParams(); if (quant_params != nullptr) { for (size_t j = 0; j < quant_params->size(); j++) { - tensor::QuantArg quant_arg{}; + QuantArg quant_arg{}; quant_arg.scale = quant_params->Get(j)->scale(); 
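The ConvertTensors hunk above decides per weight whether to copy or alias: weights of packed ops (conv, matmul and friends) get a private buffer because their kernels repack the data in place, while other constants simply point into the flatbuffer, which outlives the tensor. A sketch of that decision under hypothetical types:

#include <cstdlib>
#include <cstring>
struct SrcTensor { const void *data; size_t size; };
struct DstTensor {
  void *data = nullptr;
  bool owns = false;
  void *MutableData(size_t size) {  // lazily allocates, like the checked call above
    if (data == nullptr) { data = std::malloc(size); owns = true; }
    return data;
  }
};

bool AttachConstData(const SrcTensor &src, DstTensor *dst, bool needs_copy) {
  if (needs_copy) {
    void *buf = dst->MutableData(src.size);
    if (buf == nullptr) return false;  // mirrors the MutableData null check
    std::memcpy(buf, src.data, src.size);
  } else {
    dst->data = const_cast<void *>(src.data);  // alias; freed with the model, not the tensor
  }
  return true;
}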
quant_arg.zeroPoint = quant_params->Get(j)->zeroPoint(); dstTensor->AddQuantParam(quant_arg); @@ -126,7 +125,7 @@ void LiteSession::InitGraphInputMSTensors() { MS_ASSERT(this->input_vec_.empty()); for (auto &input_tensor : this->inputs_) { MS_ASSERT(input_tensor != nullptr); - this->input_vec_.emplace_back(new lite::tensor::LiteTensor(input_tensor)); + this->input_vec_.emplace_back(input_tensor); } } @@ -168,13 +167,11 @@ void LiteSession::InitGraphInputMap(const lite::Model *model) { } MS_ASSERT(in_tensor_index < this->tensors_.size()); auto *in_tensor = this->tensors_.at(in_tensor_index); - MS_ASSERT(in_tensor != nullptr); - auto *ms_tensor = new (std::nothrow) tensor::LiteTensor(in_tensor); - if (ms_tensor == nullptr) { - MS_LOG(ERROR) << "new lite tensor fail!"; + if (in_tensor == nullptr) { + MS_LOG(ERROR) << "in_tensor is null!"; return; } - this->input_map_[in_node->name_].emplace_back(ms_tensor); + this->input_map_[in_node->name_].emplace_back(in_tensor); } } } @@ -202,13 +199,11 @@ void LiteSession::InitGraphOutputNodeMap(const lite::Model *model) { } MS_ASSERT(out_tensor_index < this->tensors_.size()); auto *out_tensor = this->tensors_.at(out_tensor_index); - MS_ASSERT(out_tensor != nullptr); - auto *ms_tensor = new (std::nothrow) tensor::LiteTensor(out_tensor); - if (ms_tensor == nullptr) { - MS_LOG(ERROR) << "new lite tensor fail!"; + if (out_tensor == nullptr) { + MS_LOG(ERROR) << "out_tensor is null!"; return; } - this->output_node_map_[out_node->name_].emplace_back(ms_tensor); + this->output_node_map_[out_node->name_].emplace_back(out_tensor); } } } @@ -230,13 +225,11 @@ void LiteSession::InitGraphOutputTensorMap(const lite::Model *model) { size_t graph_out_index = model->output_indices_[i]; MS_ASSERT(graph_out_index < this->tensors_.size()); auto *out_tensor = this->tensors_.at(graph_out_index); - MS_ASSERT(out_tensor != nullptr); - auto *ms_tensor = new (std::nothrow) tensor::LiteTensor(out_tensor); - if (ms_tensor == nullptr) { - MS_LOG(ERROR) << "new lite tensor fail!"; + if (out_tensor == nullptr) { + MS_LOG(ERROR) << "out_tensor is null!"; return; } - this->output_tensor_map_.insert(std::make_pair(std::to_string(graph_out_index), ms_tensor)); + this->output_tensor_map_.insert(std::make_pair(std::to_string(graph_out_index), out_tensor)); } } @@ -291,13 +284,20 @@ int LiteSession::RunGraph(const session::KernelCallBack &before, const session:: int LiteSession::Init(Context *context) { MS_ASSERT(nullptr != context); - this->context_ = new (std::nothrow) Context(context->thread_num_, context->allocator, context->device_ctx_); + this->context_ = new (std::nothrow) Context(); if (this->context_ == nullptr) { MS_LOG(ERROR) << "new context failed"; return RET_MEMORY_FAILED; } + // context->thread_num_, context->allocator, context->device_ctx + this->context_->thread_num_ = context->thread_num_; + this->context_->allocator = context->allocator; + this->context_->device_type_ = context->device_type_; this->context_->float16_priority = context->float16_priority; this->context_->cpu_bind_mode_ = context->cpu_bind_mode_; + if (context_->allocator == nullptr) { + context_->allocator = Allocator::Create(); + } ConfigThreadPool(THREAD_POOL_DEFAULT, context->thread_num_, context->cpu_bind_mode_); auto ret = KernelRegistry::GetInstance()->Init(); if (ret != RET_OK) { @@ -305,7 +305,7 @@ int LiteSession::Init(Context *context) { return ret; } #if SUPPORT_GPU - if (context_->device_ctx_.type == DT_GPU) { + if (context_->device_type_ == DT_GPU) { auto opencl_runtime = 
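LiteSession::Init above now copies the caller's settings field by field into a session-owned Context and creates a default allocator when none was supplied, instead of depending on the user object's lifetime. The pattern in miniature, with a hypothetical Context:

#include <memory>
struct Allocator { static std::shared_ptr<Allocator> Create() { return std::make_shared<Allocator>(); } };
struct Context { int thread_num = 2; std::shared_ptr<Allocator> allocator; };

Context AdoptContext(const Context &user) {
  Context owned = user;  // the session keeps its own copy
  if (owned.allocator == nullptr) owned.allocator = Allocator::Create();  // default fallback
  return owned;
}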
lite::opencl::OpenCLRuntime::GetInstance(); opencl_runtime->Init(); MS_LOG(INFO) << "Init OpenCL runtime."; @@ -330,46 +330,22 @@ LiteSession::~LiteSession() { auto *tensor = tensors_.at(i); MS_ASSERT(tensor != nullptr); // data of weight tensor of node in packed_op can not be to free, we will free weight data when freeing meta_graph - if (tensor->TensorType() == schema::NodeType_ValueNode && !IsContain(this->inputs_, tensor) && + if (tensor->category() == Tensor::Category::CONST && !IsContain(this->inputs_, tensor) && !IsContain(copyed_tensor_idxes_, i)) { tensor->SetData(nullptr); } delete tensor; } - // tensor::Tensor * in input_map output_map are freed in tensors - for (auto iter : this->input_map_) { - for (auto *ms_tensor : iter.second) { - ((tensor::LiteTensor *)ms_tensor)->SetTensorImpl(nullptr); - delete ms_tensor; - } - iter.second.clear(); - } + // Tensor * in input_map output_map are freed in tensors input_map_.clear(); - for (auto iter : this->output_node_map_) { - for (auto *ms_tensor : iter.second) { - ((tensor::LiteTensor *)ms_tensor)->SetTensorImpl(nullptr); - delete ms_tensor; - } - iter.second.clear(); - } output_node_map_.clear(); - for (auto iter : this->output_tensor_map_) { - ((tensor::LiteTensor *)(iter.second))->SetTensorImpl(nullptr); - delete (iter.second); - } output_tensor_map_.clear(); + input_vec_.clear(); for (auto *kernel : kernels_) { delete kernel; } - for (auto *ms_tensor : input_vec_) { - if (ms_tensor != nullptr) { - ((tensor::LiteTensor *)ms_tensor)->SetTensorImpl(nullptr); - delete ms_tensor; - } - } - input_vec_.clear(); #if SUPPORT_GPU - if (context_->device_ctx_.type == DT_GPU) { + if (context_->device_type_ == DT_GPU) { lite::opencl::OpenCLRuntime::DeleteInstance(); } #endif @@ -388,10 +364,6 @@ std::vector LiteSession::GetInputsByName(const st return ret->second; } -std::unordered_map> LiteSession::GetOutputMapByNode() const { - return this->output_node_map_; -} - std::vector LiteSession::GetOutputsByNodeName(const std::string &node_name) const { auto ret = output_node_map_.find(node_name); if (ret == output_node_map_.end()) { @@ -413,7 +385,7 @@ mindspore::tensor::MSTensor *LiteSession::GetOutputByTensorName(const std::strin return ret->second; } -std::unordered_map LiteSession::GetOutputMapByTensor() const { +std::unordered_map LiteSession::GetOutputs() const { return this->output_tensor_map_; } @@ -434,7 +406,7 @@ int LiteSession::ResizeInputs(const std::vector & } int LiteSession::Resize(const std::vector &inputs) { - std::vector inputs_old(inputs_); + std::vector inputs_old(inputs_); auto ret = ResizeInputs(inputs); if (ret != RET_OK) { inputs_ = inputs_old; diff --git a/mindspore/lite/src/lite_session.h b/mindspore/lite/src/lite_session.h index 53a205cbe5..95a3b98a0d 100644 --- a/mindspore/lite/src/lite_session.h +++ b/mindspore/lite/src/lite_session.h @@ -28,6 +28,7 @@ #include "include/context.h" #include "schema/model_generated.h" #include "src/executor.h" +#include "src/tensor.h" namespace mindspore { namespace lite { @@ -50,15 +51,13 @@ class LiteSession : public session::LiteSession { int RunGraph(const session::KernelCallBack &before = nullptr, const session::KernelCallBack &after = nullptr) override; - std::unordered_map> GetOutputMapByNode() const override; - std::vector GetOutputsByNodeName(const std::string &node_name) const override; std::vector GetOutputTensorNames() const override; mindspore::tensor::MSTensor *GetOutputByTensorName(const std::string &tensor_name) const override; - std::unordered_map GetOutputMapByTensor() 
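The destructor hunk above hinges on ownership: tensors that merely alias constant model storage are detached with SetData(nullptr) before deletion, so only buffers the session actually copied get freed. A sketch of that teardown rule (hypothetical types):

#include <cstdlib>
#include <vector>
struct Tensor {
  void *data = nullptr;
  bool owns = false;
  void SetData(void *d) { data = d; }
  ~Tensor() { if (owns && data != nullptr) std::free(data); }
};

void DestroyTensors(std::vector<Tensor *> &tensors) {
  for (auto *t : tensors) {
    if (!t->owns) t->SetData(nullptr);  // never free memory the model owns
    delete t;
  }
  tensors.clear();
}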
const override; + std::unordered_map GetOutputs() const override; int Resize(const std::vector &inputs) override; @@ -86,12 +85,12 @@ class LiteSession : public session::LiteSession { protected: Context *context_ = nullptr; std::vector kernels_; - std::vector tensors_; + std::vector tensors_; std::vector copyed_tensor_idxes_; // graph input tensors - std::vector inputs_; + std::vector inputs_; // graph output tensors - std::vector outputs_; + std::vector outputs_; // graph input MSTensors std::vector input_vec_; // graph input node name -- input tensors diff --git a/mindspore/lite/src/model.cc b/mindspore/lite/src/model.cc index b2af727b46..4cf441ea80 100644 --- a/mindspore/lite/src/model.cc +++ b/mindspore/lite/src/model.cc @@ -30,7 +30,7 @@ bool ConvertNodes(const schema::MetaGraph *meta_graph, Model *model) { } auto c_node = meta_graph->nodes()->GetAs(i); auto src_prim = c_node->primitive(); - node->primitive_ = PrimitiveC::UnPackFromSchemaPrimitive(const_cast(src_prim)); + node->primitive_ = PrimitiveC::Create(const_cast(src_prim)); if (node->primitive_ == nullptr) { MS_LOG(ERROR) << "unpack primitive == nullptr!"; return false; @@ -56,7 +56,7 @@ bool ConvertTensors(const schema::MetaGraph *meta_graph, Model *model) { for (uint32_t i = 0; i < tensor_count; ++i) { auto *tensor = meta_graph->allTensors()->GetAs(i); if (tensor == nullptr) { - MS_LOG(ERROR) << i << "th tensor in model is nullptr"; + MS_LOG(ERROR) << i << "th tensor in model is nullptr"; return false; } model->all_tensors_.push_back(const_cast(tensor)); diff --git a/mindspore/lite/src/ops/addn.cc b/mindspore/lite/src/ops/addn.cc index 9c82f8c271..aaf8ea29b5 100644 --- a/mindspore/lite/src/ops/addn.cc +++ b/mindspore/lite/src/ops/addn.cc @@ -44,7 +44,7 @@ int AddN::GetN() const { return this->primitive_->value_as_AddN()->N(); } namespace { constexpr int kLeastInputNum = 2; } -int AddN::InferShape(std::vector inputs, std::vector outputs) { +int AddN::InferShape(std::vector inputs, std::vector outputs) { MS_ASSERT(this->primitive_ != nullptr); auto input = inputs.front(); MS_ASSERT(input != nullptr); diff --git a/mindspore/lite/src/ops/addn.h b/mindspore/lite/src/ops/addn.h index 8b5c61d060..4236016a87 100644 --- a/mindspore/lite/src/ops/addn.h +++ b/mindspore/lite/src/ops/addn.h @@ -37,7 +37,7 @@ class AddN : public PrimitiveC { int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; #endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetN() const; }; } // namespace lite diff --git a/mindspore/lite/src/ops/apply_momentum.cc b/mindspore/lite/src/ops/apply_momentum.cc index b50716b1eb..fdff5cc766 100644 --- a/mindspore/lite/src/ops/apply_momentum.cc +++ b/mindspore/lite/src/ops/apply_momentum.cc @@ -17,7 +17,6 @@ namespace mindspore { namespace lite { - #ifdef PRIMITIVE_WRITEABLE #else @@ -36,7 +35,7 @@ int ApplyMomentum::UnPackToFlatBuilder(const schema::Primitive *primitive, flatb } #endif -int ApplyMomentum::InferShape(std::vector inputs, std::vector outputs) { +int ApplyMomentum::InferShape(std::vector inputs, std::vector outputs) { if (5 != inputs.size()) { MS_LOG(ERROR) << "ApplyMomentum should have at 5 input tensors"; return RET_ERROR; diff --git a/mindspore/lite/src/ops/apply_momentum.h b/mindspore/lite/src/ops/apply_momentum.h index 77ecf588d9..f24460c745 100644 --- a/mindspore/lite/src/ops/apply_momentum.h +++ b/mindspore/lite/src/ops/apply_momentum.h @@ -36,7 +36,7 @@ 
class ApplyMomentum : public PrimitiveC { int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; #endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/argmax.cc b/mindspore/lite/src/ops/argmax.cc index 3005409cc3..e44ed1615d 100644 --- a/mindspore/lite/src/ops/argmax.cc +++ b/mindspore/lite/src/ops/argmax.cc @@ -54,7 +54,7 @@ int ArgMax::GetAxisType() const { return this->primitive_->value_as_ArgMax()->ax #endif -int ArgMax::InferShape(std::vector inputs_, std::vector outputs_) { +int ArgMax::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive_ != nullptr); auto input = inputs_.front(); MS_ASSERT(input != nullptr); diff --git a/mindspore/lite/src/ops/argmax.h b/mindspore/lite/src/ops/argmax.h index 4b58916abc..9546642864 100644 --- a/mindspore/lite/src/ops/argmax.h +++ b/mindspore/lite/src/ops/argmax.h @@ -41,7 +41,7 @@ class ArgMax : public PrimitiveC { int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; #endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetAxis() const; bool GetOutMaxValue() const; int GetTopK() const; diff --git a/mindspore/lite/src/ops/argmin.cc b/mindspore/lite/src/ops/argmin.cc index c3e300130d..ce9e8f5ee2 100644 --- a/mindspore/lite/src/ops/argmin.cc +++ b/mindspore/lite/src/ops/argmin.cc @@ -54,7 +54,7 @@ int ArgMin::GetAxisType() const { return this->primitive_->value_as_ArgMin()->ax #endif -int ArgMin::InferShape(std::vector inputs_, std::vector outputs_) { +int ArgMin::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive_ != nullptr); auto input = inputs_.front(); MS_ASSERT(input != nullptr); diff --git a/mindspore/lite/src/ops/argmin.h b/mindspore/lite/src/ops/argmin.h index a62fff3917..a81bce8f37 100644 --- a/mindspore/lite/src/ops/argmin.h +++ b/mindspore/lite/src/ops/argmin.h @@ -41,7 +41,7 @@ class ArgMin : public PrimitiveC { int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; #endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetAxis() const; bool GetOutMaxValue() const; int GetTopK() const; diff --git a/mindspore/lite/src/ops/arithmetic.cc b/mindspore/lite/src/ops/arithmetic.cc index fa352ce08e..2cffa49e2a 100644 --- a/mindspore/lite/src/ops/arithmetic.cc +++ b/mindspore/lite/src/ops/arithmetic.cc @@ -17,11 +17,11 @@ #include "src/ops/arithmetic.h" #include "include/errorcode.h" #include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/tensor.h" namespace mindspore { namespace lite { -int Arithmetic::InferShape(std::vector inputs_, std::vector outputs_) { +int Arithmetic::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive_ != nullptr); if (inputs_.size() != kDoubleNum) { MS_LOG(ERROR) << "The number of input must be " << kDoubleNum; diff --git a/mindspore/lite/src/ops/arithmetic.h b/mindspore/lite/src/ops/arithmetic.h index bcc516c1fe..e9df6776f9 100644 --- a/mindspore/lite/src/ops/arithmetic.h +++ b/mindspore/lite/src/ops/arithmetic.h @@ -38,7 +38,7 @@ class Arithmetic : public PrimitiveC { return RET_ERROR; } #endif - 
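Arithmetic::InferShape above implements elementwise broadcasting. The underlying rule is the usual NumPy-style one, sketched here in isolation: align ranks from the right, stretch dimensions of 1, and reject anything else.

#include <algorithm>
#include <stdexcept>
#include <vector>

std::vector<int> BroadcastShape(std::vector<int> a, std::vector<int> b) {
  if (a.size() < b.size()) a.insert(a.begin(), b.size() - a.size(), 1);
  if (b.size() < a.size()) b.insert(b.begin(), a.size() - b.size(), 1);
  std::vector<int> out(a.size());
  for (size_t i = 0; i < a.size(); ++i) {
    if (a[i] != b[i] && a[i] != 1 && b[i] != 1) throw std::invalid_argument("shapes do not broadcast");
    out[i] = std::max(a[i], b[i]);
  }
  return out;
}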
int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; bool Broadcasting() { return this->broadcasting_; } int NDims() { return this->ndim_; } std::vector InShape0() { return this->in_shape0_; } diff --git a/mindspore/lite/src/ops/arithmetic_grad.cc b/mindspore/lite/src/ops/arithmetic_grad.cc index ee57bb6443..d5d234dc91 100644 --- a/mindspore/lite/src/ops/arithmetic_grad.cc +++ b/mindspore/lite/src/ops/arithmetic_grad.cc @@ -17,12 +17,11 @@ #include "src/ops/arithmetic_grad.h" #include "include/errorcode.h" #include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/tensor.h" namespace mindspore { namespace lite { -int ArithmeticGrad::InferShape(std::vector inputs_, - std::vector outputs_) { +int ArithmeticGrad::InferShape(std::vector inputs_, std::vector outputs_) { if (inputs_.size() != 3) { MS_LOG(ERROR) << "The number of input must be 3"; return RET_ERROR; diff --git a/mindspore/lite/src/ops/arithmetic_grad.h b/mindspore/lite/src/ops/arithmetic_grad.h index 9354c5ff99..f05ced2936 100644 --- a/mindspore/lite/src/ops/arithmetic_grad.h +++ b/mindspore/lite/src/ops/arithmetic_grad.h @@ -38,7 +38,7 @@ class ArithmeticGrad : public PrimitiveC { return RET_ERROR; } #endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; bool Broadcasting() { return this->broadcasting_; } int NDims() { return this->ndim_; } std::vector dyShape() { return this->dy_shape_; } diff --git a/mindspore/lite/src/ops/arithmetic_self.cc b/mindspore/lite/src/ops/arithmetic_self.cc index 9a4fa1546d..bee2a0a0d3 100644 --- a/mindspore/lite/src/ops/arithmetic_self.cc +++ b/mindspore/lite/src/ops/arithmetic_self.cc @@ -21,7 +21,7 @@ namespace mindspore { namespace lite { -int ArithmeticSelf::InferShape(std::vector inputs_, std::vector outputs_) { +int ArithmeticSelf::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive_ != nullptr); auto input = inputs_.front(); MS_ASSERT(input != nullptr); diff --git a/mindspore/lite/src/ops/arithmetic_self.h b/mindspore/lite/src/ops/arithmetic_self.h index 57e8a108ef..17bbfdc6d9 100644 --- a/mindspore/lite/src/ops/arithmetic_self.h +++ b/mindspore/lite/src/ops/arithmetic_self.h @@ -35,7 +35,7 @@ class ArithmeticSelf : public PrimitiveC { return RET_ERROR; } #endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/batch_to_space.cc b/mindspore/lite/src/ops/batch_to_space.cc index 683508b628..5f16aa9a41 100644 --- a/mindspore/lite/src/ops/batch_to_space.cc +++ b/mindspore/lite/src/ops/batch_to_space.cc @@ -18,7 +18,7 @@ #include "src/common/common.h" #include "include/errorcode.h" #include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/tensor.h" namespace mindspore { namespace lite { @@ -74,7 +74,7 @@ constexpr int kBlockShapeSize = 2; constexpr int kCropsSize = 4; } // namespace -int BatchToSpace::InferShape(std::vector inputs, std::vector outputs) { +int BatchToSpace::InferShape(std::vector inputs, std::vector outputs) { MS_ASSERT(this->primitive_ != nullptr); if (outputs.size() != kBatchToSpaceOutputNum || inputs.size() != kBatchToSpaceInputNum) { MS_LOG(ERROR) << "Invalid output/input size! 
output size: " << outputs.size() << ",input size: " << inputs.size(); @@ -82,7 +82,7 @@ int BatchToSpace::InferShape(std::vector inputs, std::ve } auto input = inputs.at(0); - if (input->GetFormat() != schema::Format_NHWC) { + if (input->GetFormat() != schema::Format::Format_NHWC) { MS_LOG(ERROR) << "batch_to_space only support NHWC now!"; return RET_FORMAT_ERR; } diff --git a/mindspore/lite/src/ops/batch_to_space.h b/mindspore/lite/src/ops/batch_to_space.h index 9c9632fc37..fad5f710e7 100644 --- a/mindspore/lite/src/ops/batch_to_space.h +++ b/mindspore/lite/src/ops/batch_to_space.h @@ -39,7 +39,7 @@ class BatchToSpace : public PrimitiveC { int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; #endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; std::vector GetBlockShape() const; std::vector GetCrops() const; }; diff --git a/mindspore/lite/src/ops/bias_grad.cc b/mindspore/lite/src/ops/bias_grad.cc index c3c4ac899b..73bd30b914 100644 --- a/mindspore/lite/src/ops/bias_grad.cc +++ b/mindspore/lite/src/ops/bias_grad.cc @@ -75,7 +75,7 @@ std::vector BiasGrad::GetAxis() const { return std::vector(fb_vector->begin(), fb_vector->end()); } -int BiasGrad::InferShape(std::vector inputs, std::vector outputs) { +int BiasGrad::InferShape(std::vector inputs, std::vector outputs) { if (1 != inputs.size()) { MS_LOG(ERROR) << "BiasGrad should have one input"; return RET_ERROR; @@ -100,7 +100,6 @@ int BiasGrad::InferShape(std::vector inputs, std::vector inputs, std::vector outputs) override; + int InferShape(std::vector inputs, std::vector outputs) override; #endif std::vector GetAxis() const; }; diff --git a/mindspore/lite/src/ops/broadcast_to.cc b/mindspore/lite/src/ops/broadcast_to.cc index 1c4e5875cd..5e483a8abe 100644 --- a/mindspore/lite/src/ops/broadcast_to.cc +++ b/mindspore/lite/src/ops/broadcast_to.cc @@ -56,7 +56,7 @@ constexpr int kBroadcastToInputNum = 1; constexpr int kBroadcastToOutputNum = 1; } // namespace -int BroadcastTo::InferShape(std::vector inputs, std::vector outputs) { +int BroadcastTo::InferShape(std::vector inputs, std::vector outputs) { if (inputs.size() != kBroadcastToInputNum || outputs.size() != kBroadcastToOutputNum) { MS_LOG(ERROR) << "input size:" << inputs.size() << ", output size:" << outputs.size(); return RET_PARAM_INVALID; diff --git a/mindspore/lite/src/ops/broadcast_to.h b/mindspore/lite/src/ops/broadcast_to.h index d0181da165..44a01dff36 100644 --- a/mindspore/lite/src/ops/broadcast_to.h +++ b/mindspore/lite/src/ops/broadcast_to.h @@ -39,7 +39,7 @@ class BroadcastTo : public PrimitiveC { int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; #endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; std::vector GetDstShape() const; }; } // namespace lite diff --git a/mindspore/lite/src/ops/cast.cc b/mindspore/lite/src/ops/cast.cc index 10cf0f63c2..48c1eab193 100644 --- a/mindspore/lite/src/ops/cast.cc +++ b/mindspore/lite/src/ops/cast.cc @@ -44,7 +44,7 @@ int Cast::GetDstT() const { return this->primitive_->value_as_Cast()->dstT(); } #endif -int Cast::InferShape(std::vector inputs_, std::vector outputs_) { +int Cast::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive_ != nullptr); auto input = inputs_.front(); MS_ASSERT(input != nullptr); diff --git 
a/mindspore/lite/src/ops/cast.h b/mindspore/lite/src/ops/cast.h index 973e405a18..55dcf7663a 100644 --- a/mindspore/lite/src/ops/cast.h +++ b/mindspore/lite/src/ops/cast.h @@ -38,7 +38,7 @@ class Cast : public PrimitiveC { int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; #endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetSrcT() const; int GetDstT() const; }; diff --git a/mindspore/lite/src/ops/concat.cc b/mindspore/lite/src/ops/concat.cc index 9742aab110..90f7b9d5a3 100644 --- a/mindspore/lite/src/ops/concat.cc +++ b/mindspore/lite/src/ops/concat.cc @@ -18,7 +18,7 @@ #include #include "include/errorcode.h" #include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/tensor.h" namespace mindspore { namespace lite { @@ -81,7 +81,7 @@ int Concat::GetN() const { return this->primitive_->value_as_Concat()->n(); } namespace { constexpr int kConcatOutputNum = 1; } -int Concat::InferShape(std::vector inputs_, std::vector outputs_) { +int Concat::InferShape(std::vector inputs_, std::vector outputs_) { if (this->primitive_ == nullptr) { MS_LOG(ERROR) << "primitive is nullptr!"; return RET_PARAM_INVALID; diff --git a/mindspore/lite/src/ops/concat.h b/mindspore/lite/src/ops/concat.h index c12d98fb0f..d444033e6a 100644 --- a/mindspore/lite/src/ops/concat.h +++ b/mindspore/lite/src/ops/concat.h @@ -39,7 +39,7 @@ class Concat : public PrimitiveC { int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; #endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetAxis() const; int GetN() const; }; diff --git a/mindspore/lite/src/ops/constant_of_shape.cc b/mindspore/lite/src/ops/constant_of_shape.cc index 587bc17278..938bcedcfc 100644 --- a/mindspore/lite/src/ops/constant_of_shape.cc +++ b/mindspore/lite/src/ops/constant_of_shape.cc @@ -17,7 +17,7 @@ #include "src/ops/constant_of_shape.h" #include "include/errorcode.h" #include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/tensor.h" namespace mindspore::lite { namespace { @@ -47,7 +47,7 @@ float ConstantOfShape::GetValue() const { return this->primitive_->value_as_Cons #endif -int ConstantOfShape::InferShape(std::vector inputs_, std::vector outputs_) { +int ConstantOfShape::InferShape(std::vector inputs_, std::vector outputs_) { if (inputs_.size() != kShapeInputNum) { MS_LOG(ERROR) << "inputs to ConstantOfShape operator should be 1, but " << inputs_.size() << " is given."; return RET_ERROR; @@ -67,7 +67,7 @@ int ConstantOfShape::InferShape(std::vector inputs_, std::vect if (!GetInferFlag()) { return RET_OK; } - auto in_data = reinterpret_cast(in_tensor->Data()); + auto in_data = reinterpret_cast(in_tensor->MutableData()); int size = in_tensor->ElementsNum(); std::vector out_shape(size); for (int i = 0; i < size; ++i) { diff --git a/mindspore/lite/src/ops/constant_of_shape.h b/mindspore/lite/src/ops/constant_of_shape.h index ab96e088b3..a14d020c49 100644 --- a/mindspore/lite/src/ops/constant_of_shape.h +++ b/mindspore/lite/src/ops/constant_of_shape.h @@ -37,7 +37,7 @@ class ConstantOfShape : public PrimitiveC { int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; #endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector 
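Concat::InferShape above validates that every input matches on all axes except the concat axis, which is summed into the output. That check in isolation (assumed semantics, non-negative axis):

#include <cassert>
#include <vector>

std::vector<int> ConcatShape(const std::vector<std::vector<int>> &shapes, size_t axis) {
  std::vector<int> out = shapes.front();
  for (size_t i = 1; i < shapes.size(); ++i) {
    assert(shapes[i].size() == out.size());
    for (size_t d = 0; d < out.size(); ++d) {
      if (d == axis) out[d] += shapes[i][d];
      else assert(shapes[i][d] == out[d]);  // non-concat axes must agree
    }
  }
  return out;
}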
inputs_, std::vector outputs_) override; float GetValue() const; }; } // namespace lite diff --git a/mindspore/lite/src/ops/conv2d.cc b/mindspore/lite/src/ops/conv2d.cc index e2b72536c7..a9c350ebc2 100644 --- a/mindspore/lite/src/ops/conv2d.cc +++ b/mindspore/lite/src/ops/conv2d.cc @@ -128,11 +128,11 @@ void Conv2D::PopulaterConv2DMultiGroup(const Primitive &prim, schema::PrimitiveT auto attr = std::make_unique(); auto format = GetValue(prim.GetAttr("data_format")); if (format == "NCHW") { - attr->format = schema::Format_NCHW; + attr->format = schema::Format::Format_NCHW; } else if (format == "NHWC") { - attr->format = schema::Format_NHWC; + attr->format = schema::Format::Format_NHWC; } else { - attr->format = schema::Format_NUM_OF_FORMAT; + attr->format = schema::Format::Format_NUM_OF_FORMAT; } auto pad_list = GetValue>(prim.GetAttr("pad_list")); attr->padUp = pad_list[0]; @@ -191,11 +191,11 @@ void Conv2D::PopulaterConv2DSingleGroup(const Primitive &prim, schema::Primitive attr->group = group; auto format = GetValue(prim.GetAttr("data_format")); if (format == "NCHW") { - attr->format = schema::Format_NCHW; + attr->format = schema::Format::Format_NCHW; } else if (format == "NHWC") { - attr->format = schema::Format_NHWC; + attr->format = schema::Format::Format_NHWC; } else { - attr->format = schema::Format_NUM_OF_FORMAT; + attr->format = schema::Format::Format_NUM_OF_FORMAT; } auto pad_list = GetValue>(prim.GetAttr("pad_list")); attr->padUp = pad_list[0]; @@ -346,7 +346,7 @@ void Conv2D::ConvInferShape(int input_h, int input_w, int *output_h, int *output } } -int Conv2D::InferShape(std::vector inputs_, std::vector outputs_) { +int Conv2D::InferShape(std::vector inputs_, std::vector outputs_) { if (inputs_.size() != 2 && inputs_.size() != 3) { MS_LOG(ERROR) << "Add should has two or three inputs"; return RET_ERROR; diff --git a/mindspore/lite/src/ops/conv2d.h b/mindspore/lite/src/ops/conv2d.h index 3b85a20e85..5a8e8b4e79 100644 --- a/mindspore/lite/src/ops/conv2d.h +++ b/mindspore/lite/src/ops/conv2d.h @@ -66,7 +66,7 @@ class Conv2D : public PrimitiveC { #endif public: - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; int PadUp() const; int PadDown() const; int PadLeft() const; diff --git a/mindspore/lite/src/ops/conv2d_grad_filter.cc b/mindspore/lite/src/ops/conv2d_grad_filter.cc index 37d05fe3f3..b1d2353418 100644 --- a/mindspore/lite/src/ops/conv2d_grad_filter.cc +++ b/mindspore/lite/src/ops/conv2d_grad_filter.cc @@ -232,7 +232,7 @@ int Conv2DGradFilter::GetActivationType() const { #endif -int Conv2DGradFilter::InferShape(std::vector inputs, std::vector outputs) { +int Conv2DGradFilter::InferShape(std::vector inputs, std::vector outputs) { if (3 != inputs.size()) { MS_LOG(ERROR) << "Conv2d Grad Filter should have 3 inputs"; return RET_ERROR; @@ -247,7 +247,7 @@ int Conv2DGradFilter::InferShape(std::vector inputs, std::vect MS_ASSERT(out != nullptr); std::vector output_shape; - int *out_shape = reinterpret_cast(in->Data()); + int *out_shape = reinterpret_cast(in->MutableData()); int new_size = in->ElementsNum(); if (in0->GetFormat() == in->GetFormat()) { for (int i = 0; i < new_size; i++) output_shape.push_back(out_shape[i]); diff --git a/mindspore/lite/src/ops/conv2d_grad_filter.h b/mindspore/lite/src/ops/conv2d_grad_filter.h index 46917b5413..e47848c453 100644 --- a/mindspore/lite/src/ops/conv2d_grad_filter.h +++ b/mindspore/lite/src/ops/conv2d_grad_filter.h @@ -59,7 +59,7 @@ class Conv2DGradFilter 
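ConvInferShape above reduces to the standard convolution output-size arithmetic. With pad_total the sum of the two paddings on an axis, the sketch below gives the formula (integer division truncates toward zero, which is floor on positive operands):

// out = floor((in + pad_total - dilation*(kernel - 1) - 1) / stride) + 1
int ConvOutDim(int in, int kernel, int stride, int pad_total, int dilation = 1) {
  return (in + pad_total - dilation * (kernel - 1) - 1) / stride + 1;
}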
: public PrimitiveC { int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; #endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetFormat() const; int GetGroup() const; int GetChannelIn() const; diff --git a/mindspore/lite/src/ops/conv2d_grad_input.cc b/mindspore/lite/src/ops/conv2d_grad_input.cc index 85a5156e97..dec4d43649 100644 --- a/mindspore/lite/src/ops/conv2d_grad_input.cc +++ b/mindspore/lite/src/ops/conv2d_grad_input.cc @@ -230,7 +230,7 @@ int Conv2DGradInput::GetActivationType() const { #endif -int Conv2DGradInput::InferShape(std::vector inputs, std::vector outputs) { +int Conv2DGradInput::InferShape(std::vector inputs, std::vector outputs) { if (3 != inputs.size()) { MS_LOG(ERROR) << "Conv2d Grad Input should have 3 inputs"; return RET_ERROR; @@ -245,7 +245,7 @@ int Conv2DGradInput::InferShape(std::vector inputs, std::vecto MS_ASSERT(out != nullptr); std::vector output_shape; - int *out_shape = reinterpret_cast(in->Data()); + int *out_shape = reinterpret_cast(in->MutableData()); int new_size = in->ElementsNum(); if (in0->GetFormat() == in->GetFormat()) { for (int i = 0; i < new_size; i++) output_shape.push_back(out_shape[i]); diff --git a/mindspore/lite/src/ops/conv2d_grad_input.h b/mindspore/lite/src/ops/conv2d_grad_input.h index 4656addee3..3997509ec9 100644 --- a/mindspore/lite/src/ops/conv2d_grad_input.h +++ b/mindspore/lite/src/ops/conv2d_grad_input.h @@ -59,7 +59,7 @@ class Conv2DGradInput : public PrimitiveC { int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; #endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetFormat() const; int GetGroup() const; int GetChannelIn() const; diff --git a/mindspore/lite/src/ops/crop.cc b/mindspore/lite/src/ops/crop.cc index 514a5975d2..4cca858775 100644 --- a/mindspore/lite/src/ops/crop.cc +++ b/mindspore/lite/src/ops/crop.cc @@ -56,7 +56,7 @@ namespace { constexpr int kCropOutputNum = 1; constexpr int kCropInputNum = 2; } // namespace -int Crop::InferShape(std::vector inputs, std::vector outputs) { +int Crop::InferShape(std::vector inputs, std::vector outputs) { if (outputs.size() != kCropOutputNum || inputs.size() != kCropInputNum) { MS_LOG(ERROR) << "Invalid output/input size! 
output size: " << outputs.size() << ",input size: " << inputs.size(); return RET_PARAM_INVALID; diff --git a/mindspore/lite/src/ops/crop.h b/mindspore/lite/src/ops/crop.h index 0650f7925f..8c8a81464c 100644 --- a/mindspore/lite/src/ops/crop.h +++ b/mindspore/lite/src/ops/crop.h @@ -39,7 +39,7 @@ class Crop : public PrimitiveC { int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; #endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; int64_t GetAxis() const; std::vector GetOffsets() const; }; diff --git a/mindspore/lite/src/ops/deconv2d.cc b/mindspore/lite/src/ops/deconv2d.cc index 892207c8cc..536e380238 100644 --- a/mindspore/lite/src/ops/deconv2d.cc +++ b/mindspore/lite/src/ops/deconv2d.cc @@ -93,7 +93,7 @@ bool DeConv2D::GetHasBias() const { return this->primitive_->value_as_DeConv2D() int DeConv2D::GetActivationType() const { return this->primitive_->value_as_DeConv2D()->activationType(); } #endif -int DeConv2D::InferShape(std::vector inputs_, std::vector outputs_) { +int DeConv2D::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive_ != nullptr); auto input = inputs_.front(); MS_ASSERT(input != nullptr); diff --git a/mindspore/lite/src/ops/deconv2d.h b/mindspore/lite/src/ops/deconv2d.h index 020b5a95d4..acde7c9527 100644 --- a/mindspore/lite/src/ops/deconv2d.h +++ b/mindspore/lite/src/ops/deconv2d.h @@ -53,7 +53,7 @@ class DeConv2D : public PrimitiveC { int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; #endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetFormat() const; int GetGroup() const; int GetChannelIn() const; diff --git a/mindspore/lite/src/ops/dedepthwise_conv2d.cc b/mindspore/lite/src/ops/dedepthwise_conv2d.cc index b2ac622907..c92666aae6 100644 --- a/mindspore/lite/src/ops/dedepthwise_conv2d.cc +++ b/mindspore/lite/src/ops/dedepthwise_conv2d.cc @@ -110,8 +110,7 @@ int DeDepthwiseConv2D::GetActivationType() const { } #endif -int DeDepthwiseConv2D::InferShape(std::vector inputs_, - std::vector outputs_) { +int DeDepthwiseConv2D::InferShape(std::vector inputs_, std::vector outputs_) { if (inputs_.size() != kDoubleNum && inputs_.size() != kMultiNum) { MS_LOG(ERROR) << "inputs number is invalid"; return 1; diff --git a/mindspore/lite/src/ops/dedepthwise_conv2d.h b/mindspore/lite/src/ops/dedepthwise_conv2d.h index 25e40421aa..6b0064979c 100644 --- a/mindspore/lite/src/ops/dedepthwise_conv2d.h +++ b/mindspore/lite/src/ops/dedepthwise_conv2d.h @@ -52,7 +52,7 @@ class DeDepthwiseConv2D : public PrimitiveC { int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; #endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetFormat() const; int GetChannelIn() const; int GetChannelMultiplier() const; diff --git a/mindspore/lite/src/ops/depth_to_space.cc b/mindspore/lite/src/ops/depth_to_space.cc index e090fe0dc5..e062c112ad 100644 --- a/mindspore/lite/src/ops/depth_to_space.cc +++ b/mindspore/lite/src/ops/depth_to_space.cc @@ -48,7 +48,7 @@ constexpr int kDepthToSpaceOutputNum = 1; constexpr int kDepthToSpaceInputNum = 1; } // namespace -int DepthToSpace::InferShape(std::vector inputs, std::vector outputs) { +int 
DepthToSpace::InferShape(std::vector inputs, std::vector outputs) { MS_ASSERT(this->primitive_ != nullptr); if (outputs.size() != kDepthToSpaceOutputNum || inputs.size() != kDepthToSpaceInputNum) { MS_LOG(ERROR) << "Invalid output/input size! output size: " << outputs.size() << ",input size: " << inputs.size(); @@ -56,7 +56,7 @@ int DepthToSpace::InferShape(std::vector inputs, std::ve } auto input = inputs.at(0); - if (input->GetFormat() != schema::Format_NHWC) { + if (input->GetFormat() != schema::Format::Format_NHWC) { MS_LOG(ERROR) << "depth_to_space only support NHWC now!"; return RET_FORMAT_ERR; } diff --git a/mindspore/lite/src/ops/depth_to_space.h b/mindspore/lite/src/ops/depth_to_space.h index 5320b27c4a..6457655f9c 100644 --- a/mindspore/lite/src/ops/depth_to_space.h +++ b/mindspore/lite/src/ops/depth_to_space.h @@ -38,7 +38,7 @@ class DepthToSpace : public PrimitiveC { int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; #endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetBlockSize() const; int GetFormat() const; }; diff --git a/mindspore/lite/src/ops/depthwise_conv2d.cc b/mindspore/lite/src/ops/depthwise_conv2d.cc index 99d78dc150..67cf1aef9e 100644 --- a/mindspore/lite/src/ops/depthwise_conv2d.cc +++ b/mindspore/lite/src/ops/depthwise_conv2d.cc @@ -76,11 +76,11 @@ int DepthwiseConv2D::UnPackAttr(const Primitive &prim, const std::vector(prim.GetAttr("data_format")); if (format == "NCHW") { - attr->format = schema::Format_NCHW; + attr->format = schema::Format::Format_NCHW; } else if (format == "NHWC") { - attr->format = schema::Format_NHWC; + attr->format = schema::Format::Format_NHWC; } else { - attr->format = schema::Format_NUM_OF_FORMAT; + attr->format = schema::Format::Format_NUM_OF_FORMAT; } auto pad_list = GetValue>(prim.GetAttr("pads")); attr->padUp = pad_list[0]; @@ -188,8 +188,7 @@ int DepthwiseConv2D::GetActivationType() const { } #endif -int DepthwiseConv2D::InferShape(std::vector inputs_, - std::vector outputs_) { +int DepthwiseConv2D::InferShape(std::vector inputs_, std::vector outputs_) { if (inputs_.size() != kDoubleNum && inputs_.size() != kMultiNum) { MS_LOG(ERROR) << "inputs number is invalid"; return 1; diff --git a/mindspore/lite/src/ops/depthwise_conv2d.h b/mindspore/lite/src/ops/depthwise_conv2d.h index e64d7a3262..36bc8f037f 100644 --- a/mindspore/lite/src/ops/depthwise_conv2d.h +++ b/mindspore/lite/src/ops/depthwise_conv2d.h @@ -60,7 +60,7 @@ class DepthwiseConv2D : public PrimitiveC { #endif public: - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetFormat() const; int GetChannelIn() const; int GetChannelMultiplier() const; diff --git a/mindspore/lite/src/ops/detection_post_process.cc b/mindspore/lite/src/ops/detection_post_process.cc index e901c9f07b..cf31065a5b 100644 --- a/mindspore/lite/src/ops/detection_post_process.cc +++ b/mindspore/lite/src/ops/detection_post_process.cc @@ -144,8 +144,7 @@ namespace { constexpr int kDetectionPostProcessOutputNum = 4; constexpr int kDetectionPostProcessInputNum = 3; } // namespace -int DetectionPostProcess::InferShape(std::vector inputs_, - std::vector outputs_) { +int DetectionPostProcess::InferShape(std::vector inputs_, std::vector outputs_) { if (outputs_.size() != kDetectionPostProcessOutputNum || inputs_.size() != kDetectionPostProcessInputNum) { MS_LOG(ERROR) << 
"Invalid output/input size! output size: " << outputs_.size() << ",input size: " << inputs_.size(); return RET_PARAM_INVALID; diff --git a/mindspore/lite/src/ops/detection_post_process.h b/mindspore/lite/src/ops/detection_post_process.h index c53f433c06..8fecba0027 100644 --- a/mindspore/lite/src/ops/detection_post_process.h +++ b/mindspore/lite/src/ops/detection_post_process.h @@ -50,7 +50,7 @@ class DetectionPostProcess : public PrimitiveC { int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; #endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetFormat() const; int GetInputSize() const; float GetHScale() const; diff --git a/mindspore/lite/src/ops/embedding_lookup.cc b/mindspore/lite/src/ops/embedding_lookup.cc index 270f22f310..38079af394 100644 --- a/mindspore/lite/src/ops/embedding_lookup.cc +++ b/mindspore/lite/src/ops/embedding_lookup.cc @@ -43,7 +43,7 @@ float EmbeddingLookup::GetMaxNorm() const { return this->primitive_->value_as_Em #endif -int EmbeddingLookup::InferShape(std::vector inputs_, std::vector outputs_) { +int EmbeddingLookup::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive_ != nullptr); if (inputs_.size() < kDoubleNum) { MS_LOG(ERROR) << "Embedding Lookup should have at least two inputs"; diff --git a/mindspore/lite/src/ops/embedding_lookup.h b/mindspore/lite/src/ops/embedding_lookup.h index c51441b9b0..0ed66f718a 100644 --- a/mindspore/lite/src/ops/embedding_lookup.h +++ b/mindspore/lite/src/ops/embedding_lookup.h @@ -38,7 +38,7 @@ class EmbeddingLookup : public PrimitiveC { int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; #endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; float GetMaxNorm() const; }; } // namespace lite diff --git a/mindspore/lite/src/ops/expand_dims.cc b/mindspore/lite/src/ops/expand_dims.cc index 7961a6385b..9952793931 100644 --- a/mindspore/lite/src/ops/expand_dims.cc +++ b/mindspore/lite/src/ops/expand_dims.cc @@ -42,7 +42,7 @@ int ExpandDims::GetDim() const { return this->primitive_->value_as_ExpandDims()- #endif -int ExpandDims::InferShape(std::vector inputs_, std::vector outputs_) { +int ExpandDims::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive_ != nullptr); auto input = inputs_.front(); MS_ASSERT(input != nullptr); diff --git a/mindspore/lite/src/ops/expand_dims.h b/mindspore/lite/src/ops/expand_dims.h index 52007b874d..78899485a0 100644 --- a/mindspore/lite/src/ops/expand_dims.h +++ b/mindspore/lite/src/ops/expand_dims.h @@ -38,7 +38,7 @@ class ExpandDims : public PrimitiveC { int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; #endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetDim() const; }; } // namespace lite diff --git a/mindspore/lite/src/ops/fill.cc b/mindspore/lite/src/ops/fill.cc index 9ae1fb4305..9b1c8da7f7 100644 --- a/mindspore/lite/src/ops/fill.cc +++ b/mindspore/lite/src/ops/fill.cc @@ -50,7 +50,7 @@ std::vector Fill::GetDims() const { #endif -int Fill::InferShape(std::vector inputs_, std::vector outputs_) { +int Fill::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive_ 
!= nullptr); auto input = inputs_.front(); auto output = outputs_.front(); diff --git a/mindspore/lite/src/ops/fill.h b/mindspore/lite/src/ops/fill.h index f95d22542c..372be6a0ce 100644 --- a/mindspore/lite/src/ops/fill.h +++ b/mindspore/lite/src/ops/fill.h @@ -39,7 +39,7 @@ class Fill : public PrimitiveC { int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; #endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; std::vector GetDims() const; }; } // namespace lite diff --git a/mindspore/lite/src/ops/flatten.cc b/mindspore/lite/src/ops/flatten.cc index 949bf3d92d..b77df96150 100644 --- a/mindspore/lite/src/ops/flatten.cc +++ b/mindspore/lite/src/ops/flatten.cc @@ -20,7 +20,7 @@ namespace mindspore { namespace lite { -int Flatten::InferShape(std::vector inputs_, std::vector outputs_) { +int Flatten::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive_ != nullptr); auto input = inputs_.front(); auto output = outputs_.front(); diff --git a/mindspore/lite/src/ops/flatten.h b/mindspore/lite/src/ops/flatten.h index ce60608cff..3a237b501f 100644 --- a/mindspore/lite/src/ops/flatten.h +++ b/mindspore/lite/src/ops/flatten.h @@ -37,7 +37,7 @@ class Flatten : public PrimitiveC { int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; #endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/full_connection.cc b/mindspore/lite/src/ops/full_connection.cc index 4be4e45b4b..d0661869fd 100644 --- a/mindspore/lite/src/ops/full_connection.cc +++ b/mindspore/lite/src/ops/full_connection.cc @@ -52,8 +52,7 @@ bool FullConnection::GetUseAxis() const { return this->primitive_->value_as_Full int FullConnection::GetActivationType() const { return this->primitive_->value_as_FullConnection()->activationType(); } #endif -int FullConnection::InferShape(std::vector inputs_, - std::vector outputs_) { +int FullConnection::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive_ != nullptr); auto input0 = inputs_.front(); MS_ASSERT(input0 != nullptr); diff --git a/mindspore/lite/src/ops/full_connection.h b/mindspore/lite/src/ops/full_connection.h index c4d5e980ff..02c2f0a542 100644 --- a/mindspore/lite/src/ops/full_connection.h +++ b/mindspore/lite/src/ops/full_connection.h @@ -40,7 +40,7 @@ class FullConnection : public PrimitiveC { int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; #endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; bool GetHasBias() const; int GetAxis() const; bool GetUseAxis() const; diff --git a/mindspore/lite/src/ops/gather.cc b/mindspore/lite/src/ops/gather.cc index 25f0e9d861..ede7cc3ce9 100644 --- a/mindspore/lite/src/ops/gather.cc +++ b/mindspore/lite/src/ops/gather.cc @@ -17,7 +17,7 @@ #include "src/ops/gather.h" #include "include/errorcode.h" #include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/tensor.h" namespace mindspore { namespace lite { @@ -48,7 +48,7 @@ int Gather::GetBatchDims() const { return this->primitive_->value_as_Gather()->b #endif -int Gather::InferShape(std::vector inputs_, std::vector 
outputs_) { +int Gather::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive_ != nullptr); if (inputs_.size() != kDoubleNum) { MS_LOG(ERROR) << "Gather should have two inputs"; diff --git a/mindspore/lite/src/ops/gather.h b/mindspore/lite/src/ops/gather.h index 088a736efa..73882f2879 100644 --- a/mindspore/lite/src/ops/gather.h +++ b/mindspore/lite/src/ops/gather.h @@ -38,7 +38,7 @@ class Gather : public PrimitiveC { int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; #endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetAxis() const; int GetBatchDims() const; }; diff --git a/mindspore/lite/src/ops/gather_nd.cc b/mindspore/lite/src/ops/gather_nd.cc index e88e913339..0456b02d0a 100644 --- a/mindspore/lite/src/ops/gather_nd.cc +++ b/mindspore/lite/src/ops/gather_nd.cc @@ -42,7 +42,7 @@ int GatherNd::GetBatchDims() const { return this->primitive_->value_as_GatherNd( #endif -int GatherNd::InferShape(std::vector inputs_, std::vector outputs_) { +int GatherNd::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive_ != nullptr); if (inputs_.size() != kDoubleNum) { MS_LOG(ERROR) << "GatherNd should have two inputs"; diff --git a/mindspore/lite/src/ops/gather_nd.h b/mindspore/lite/src/ops/gather_nd.h index f578b55ae4..e2946d8aa7 100644 --- a/mindspore/lite/src/ops/gather_nd.h +++ b/mindspore/lite/src/ops/gather_nd.h @@ -38,7 +38,7 @@ class GatherNd : public PrimitiveC { int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; #endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetBatchDims() const; }; } // namespace lite diff --git a/mindspore/lite/src/ops/lstm.cc b/mindspore/lite/src/ops/lstm.cc index 8548bc846e..af9df6c1df 100644 --- a/mindspore/lite/src/ops/lstm.cc +++ b/mindspore/lite/src/ops/lstm.cc @@ -43,7 +43,7 @@ int Lstm::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::F const int kLstmInputNum = 6; const int kLstmOutputNum = 3; -int Lstm::InferShape(std::vector inputs_, std::vector outputs_) { +int Lstm::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive_ != nullptr); if (inputs_.size() != kLstmInputNum || outputs_.size() != kLstmOutputNum) { MS_LOG(ERROR) << "OpLstm inputs or outputs size error."; diff --git a/mindspore/lite/src/ops/lstm.h b/mindspore/lite/src/ops/lstm.h index f30dbd0aed..f64538f79a 100644 --- a/mindspore/lite/src/ops/lstm.h +++ b/mindspore/lite/src/ops/lstm.h @@ -38,7 +38,7 @@ class Lstm : public PrimitiveC { int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; #endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + int InferShape(std::vector inputs_, std::vector outputs_) override; bool GetBidirection() const; }; } // namespace lite diff --git a/mindspore/lite/src/ops/matmul.cc b/mindspore/lite/src/ops/matmul.cc index 25e437366c..65fb9a4e4f 100644 --- a/mindspore/lite/src/ops/matmul.cc +++ b/mindspore/lite/src/ops/matmul.cc @@ -88,7 +88,7 @@ int MatMul::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers: #endif -int MatMul::InferShape(std::vector inputs_, std::vector outputs_) { +int MatMul::InferShape(std::vector inputs_, std::vector outputs_) { 
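MatMul::InferShape here applies the usual rule: batch dimensions pass through, and the two inner dimensions come from A and B after the optional transposes. A standalone sketch (assumed semantics; the inner dimensions a.back() and b[b.size()-2] must already agree):

#include <utility>
#include <vector>

std::vector<int> MatMulShape(std::vector<int> a, std::vector<int> b, bool transpose_a, bool transpose_b) {
  if (transpose_a) std::swap(a[a.size() - 1], a[a.size() - 2]);
  if (transpose_b) std::swap(b[b.size() - 1], b[b.size() - 2]);
  std::vector<int> out(a.begin(), a.end() - 1);  // batch dims plus rows of A
  out.push_back(b.back());                       // columns of B
  return out;
}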
   MS_ASSERT(this->primitive_ != nullptr);
   auto input0 = inputs_.front();
   MS_ASSERT(input0 != nullptr);
diff --git a/mindspore/lite/src/ops/matmul.h b/mindspore/lite/src/ops/matmul.h
index d943b4c419..94faa9abfc 100644
--- a/mindspore/lite/src/ops/matmul.h
+++ b/mindspore/lite/src/ops/matmul.h
@@ -45,7 +45,7 @@ class MatMul : public PrimitiveC {
 #endif
 
  public:
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   bool GetTransposeA() const;
   bool GetTransposeB() const;
 };
diff --git a/mindspore/lite/src/ops/mean.cc b/mindspore/lite/src/ops/mean.cc
index 2e7324796b..fa51105c52 100644
--- a/mindspore/lite/src/ops/mean.cc
+++ b/mindspore/lite/src/ops/mean.cc
@@ -59,7 +59,7 @@ namespace {
 constexpr size_t kInputSize = 1;
 constexpr size_t kOutputSize = 1;
 }  // namespace
-int Mean::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int Mean::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   if (inputs_.size() != kInputSize || outputs_.size() != kOutputSize) {
     return RET_ERROR;
   }
diff --git a/mindspore/lite/src/ops/mean.h b/mindspore/lite/src/ops/mean.h
index fd9a42e8a3..4ec2ddbba6 100644
--- a/mindspore/lite/src/ops/mean.h
+++ b/mindspore/lite/src/ops/mean.h
@@ -39,7 +39,7 @@ class Mean : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   std::vector<int> GetAxis() const;
   bool GetKeepDims() const;
 };
diff --git a/mindspore/lite/src/ops/nchw2nhwc.cc b/mindspore/lite/src/ops/nchw2nhwc.cc
index 0ac5c25639..e573fbf9f7 100644
--- a/mindspore/lite/src/ops/nchw2nhwc.cc
+++ b/mindspore/lite/src/ops/nchw2nhwc.cc
@@ -31,13 +31,13 @@ int Nchw2Nhwc::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffe
 }
 #endif
 
-int Nchw2Nhwc::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int Nchw2Nhwc::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   MS_ASSERT(this->primitive_ != nullptr);
   auto input = inputs_.front();
   MS_ASSERT(input != nullptr);
   auto output = outputs_.front();
   MS_ASSERT(output != nullptr);
-  output->SetFormat(schema::Format_NHWC);
+  output->SetFormat(schema::Format::Format_NHWC);
   output->set_data_type(input->data_type());
   if (!GetInferFlag()) {
     return RET_OK;
diff --git a/mindspore/lite/src/ops/nchw2nhwc.h b/mindspore/lite/src/ops/nchw2nhwc.h
index 8f7ddd0ef1..bec8b04aa7 100644
--- a/mindspore/lite/src/ops/nchw2nhwc.h
+++ b/mindspore/lite/src/ops/nchw2nhwc.h
@@ -36,7 +36,7 @@ class Nchw2Nhwc : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
 };
 }  // namespace lite
 }  // namespace mindspore
diff --git a/mindspore/lite/src/ops/nhwc2nchw.cc b/mindspore/lite/src/ops/nhwc2nchw.cc
index a5f73bcfe0..c3a03fc453 100644
--- a/mindspore/lite/src/ops/nhwc2nchw.cc
+++ b/mindspore/lite/src/ops/nhwc2nchw.cc
@@ -32,13 +32,13 @@ int Nhwc2Nchw::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffe
 }
 #endif
 
-int Nhwc2Nchw::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int Nhwc2Nchw::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   MS_ASSERT(this->primitive_ != nullptr);
   auto input = inputs_.front();
   MS_ASSERT(input != nullptr);
   auto output = outputs_.front();
   MS_ASSERT(output != nullptr);
-  output->SetFormat(schema::Format_NCHW);
+  output->SetFormat(schema::Format::Format_NCHW);
   output->set_data_type(input->data_type());
   if (!GetInferFlag()) {
     return RET_OK;
diff --git a/mindspore/lite/src/ops/nhwc2nchw.h b/mindspore/lite/src/ops/nhwc2nchw.h
index 479769cc19..8a8e9fab7d 100644
--- a/mindspore/lite/src/ops/nhwc2nchw.h
+++ b/mindspore/lite/src/ops/nhwc2nchw.h
@@ -36,7 +36,7 @@ class Nhwc2Nchw : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
 };
 }  // namespace lite
 }  // namespace mindspore
diff --git a/mindspore/lite/src/ops/one_hot.cc b/mindspore/lite/src/ops/one_hot.cc
index 41b2040088..5b47480054 100644
--- a/mindspore/lite/src/ops/one_hot.cc
+++ b/mindspore/lite/src/ops/one_hot.cc
@@ -45,7 +45,7 @@ int OneHot::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers:
 namespace {
 constexpr size_t kOneHotInputNum = 4;
 }
-int OneHot::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) {
+int OneHot::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) {
   if (this->primitive_ == nullptr) {
     return RET_NULL_PTR;
   }
@@ -60,7 +60,7 @@ int OneHot::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor
-  const int *depth = static_cast<int *>(depth_tensor->Data());
+  const int *depth = static_cast<int *>(depth_tensor->MutableData());
   auto input = inputs.front();
   if (input == nullptr) {
     return RET_NULL_PTR;
diff --git a/mindspore/lite/src/ops/one_hot.h b/mindspore/lite/src/ops/one_hot.h
index deaa9ab1f1..ebbe730ff6 100644
--- a/mindspore/lite/src/ops/one_hot.h
+++ b/mindspore/lite/src/ops/one_hot.h
@@ -38,7 +38,7 @@ class OneHot : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   int GetAxis() const;
 };
 }  // namespace lite
diff --git a/mindspore/lite/src/ops/pad.cc b/mindspore/lite/src/ops/pad.cc
index 852686972c..0f0a5e6f53 100644
--- a/mindspore/lite/src/ops/pad.cc
+++ b/mindspore/lite/src/ops/pad.cc
@@ -61,7 +61,7 @@ int Pad::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::Fl
 namespace {
 const size_t kInputRank = 4;
 }  // namespace
-int Pad::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) {
+int Pad::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) {
   MS_ASSERT(this->primitive_ != nullptr);
   if (this->primitive_ == nullptr) {
     return RET_NULL_PTR;
diff --git a/mindspore/lite/src/ops/pad.h b/mindspore/lite/src/ops/pad.h
index 695a1c1742..3694464e26 100644
--- a/mindspore/lite/src/ops/pad.h
+++ b/mindspore/lite/src/ops/pad.h
@@ -40,7 +40,7 @@ class Pad : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   std::vector<int> GetPaddings() const;
   int GetPaddingMode() const;
   float GetConstantValue() const;
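// Illustrative sketch (not part of the patch): the format enum is now referenced through its
// generated scope, so format propagation in InferShape reads as:
//
//   output->SetFormat(schema::Format::Format_NHWC);  // previously schema::Format_NHWC
//   output->set_data_type(input->data_type());
//
// The qualified spelling stays valid whether flatbuffers emits the enum as a plain enum or
// as an enum class.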
diff --git a/mindspore/lite/src/ops/pooling.cc b/mindspore/lite/src/ops/pooling.cc
index 2b409622ca..5c94a6cd7f 100644
--- a/mindspore/lite/src/ops/pooling.cc
+++ b/mindspore/lite/src/ops/pooling.cc
@@ -86,11 +86,11 @@ int Pooling::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &in
   auto format = GetValue<std::string>(prim.GetAttr("data_format"));
   if (format == "NCHW") {
-    attr->format = schema::Format_NCHW;
+    attr->format = schema::Format::Format_NCHW;
   } else if (format == "NHWC") {
-    attr->format = schema::Format_NHWC;
+    attr->format = schema::Format::Format_NHWC;
   } else {
-    attr->format = schema::Format_NUM_OF_FORMAT;
+    attr->format = schema::Format::Format_NUM_OF_FORMAT;
   }
   auto pad_mode = GetValue<std::string>(prim.GetAttr("padding"));
@@ -160,14 +160,14 @@ int Pooling::PadDown() const { return this->pad_d_; }
 int Pooling::PadLeft() const { return this->pad_l_; }
 int Pooling::PadRight() const { return this->pad_r_; }
 
-int Pooling::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int Pooling::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   MS_ASSERT(this->primitive_ != nullptr);
   auto input = inputs_.front();
   MS_ASSERT(input != nullptr);
   auto output = outputs_.front();
   MS_ASSERT(output != nullptr);
   output->set_data_type(input->data_type());
-  output->SetFormat(schema::Format_NHWC);
+  output->SetFormat(schema::Format::Format_NHWC);
   if (!GetInferFlag()) {
     return RET_OK;
   }
diff --git a/mindspore/lite/src/ops/pooling.h b/mindspore/lite/src/ops/pooling.h
index 6892d5b95e..8dd73c6c9d 100644
--- a/mindspore/lite/src/ops/pooling.h
+++ b/mindspore/lite/src/ops/pooling.h
@@ -51,7 +51,7 @@ class Pooling : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   int GetFormat() const;
   int GetPoolingMode() const;
   bool GetGlobal() const;
diff --git a/mindspore/lite/src/ops/pooling_grad.cc b/mindspore/lite/src/ops/pooling_grad.cc
index 7eba1875a1..2e687db019 100644
--- a/mindspore/lite/src/ops/pooling_grad.cc
+++ b/mindspore/lite/src/ops/pooling_grad.cc
@@ -144,7 +144,7 @@ int PoolingGrad::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuf
 }
 #endif
 
-int PoolingGrad::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int PoolingGrad::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   MS_ASSERT(this->primitive != nullptr);
   auto input = inputs_.at(0);
   MS_ASSERT(input != nullptr);
diff --git a/mindspore/lite/src/ops/pooling_grad.h b/mindspore/lite/src/ops/pooling_grad.h
index 1fe47eb327..ff9a5cad4f 100644
--- a/mindspore/lite/src/ops/pooling_grad.h
+++ b/mindspore/lite/src/ops/pooling_grad.h
@@ -51,7 +51,7 @@ class PoolingGrad : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   int GetFormat() const;
   int GetPoolingMode() const;
   bool GetGlobal() const;
diff --git a/mindspore/lite/src/ops/power.cc b/mindspore/lite/src/ops/power.cc
index aca5e1c8c6..afc01bd57b 100644
--- a/mindspore/lite/src/ops/power.cc
+++ b/mindspore/lite/src/ops/power.cc
@@ -47,11 +47,11 @@ int Power::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::
 }
 #endif
 
-int Power::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) {
+int Power::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) {
   MS_ASSERT(this->primitive_ != nullptr);
   auto x_tensor = inputs[0];
   MS_ASSERT(x_tensor != nullptr);
-  tensor::Tensor *exp_tensor = nullptr;
+  Tensor *exp_tensor = nullptr;
   if (inputs.size() == 2) {
     exp_tensor = inputs[1];
     MS_ASSERT(exp_tensor != nullptr);
diff --git a/mindspore/lite/src/ops/power.h b/mindspore/lite/src/ops/power.h
index b38dce1bdd..5797c2e8ee 100644
--- a/mindspore/lite/src/ops/power.h
+++ b/mindspore/lite/src/ops/power.h
@@ -39,7 +39,7 @@ class Power : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   float GetPower() const;
   float GetScale() const;
   float GetShift() const;
diff --git a/mindspore/lite/src/ops/primitive_c.cc b/mindspore/lite/src/ops/primitive_c.cc
index fa52d02c68..385864b9d4 100644
--- a/mindspore/lite/src/ops/primitive_c.cc
+++ b/mindspore/lite/src/ops/primitive_c.cc
@@ -139,7 +139,6 @@
 #include "src/ops/arithmetic_grad.h"
 #endif
 
-
 namespace mindspore {
 namespace lite {
 #ifdef PRIMITIVE_WRITEABLE
@@ -173,10 +172,10 @@ void PrimitiveC::PopulaterQuantParam(const Primitive &prim,
   } else {
     auto inputMin = prim.GetAttr("input_minq");
     auto inputMax = prim.GetAttr("input_maxq");
-    auto inputMinPtr = inputMin->cast<tensor::TensorPtr>();
-    auto inputMaxPtr = inputMax->cast<tensor::TensorPtr>();
-    float *minBuf = static_cast<float *>(inputMinPtr->Data());
-    float *maxBuf = static_cast<float *>(inputMaxPtr->Data());
+    auto inputMinPtr = inputMin->cast<TensorPtr>();
+    auto inputMaxPtr = inputMax->cast<TensorPtr>();
+    float *minBuf = static_cast<float *>(inputMinPtr->data_c());
+    float *maxBuf = static_cast<float *>(inputMaxPtr->data_c());
     quantParam.min = *minBuf;
     quantParam.max = *maxBuf;
   }
@@ -189,13 +188,13 @@ void PrimitiveC::PopulaterQuantParam(const Primitive &prim,
   auto filterMin = prim.GetAttr("filter_minq");
   auto filterMax = prim.GetAttr("filter_maxq");
   if (filterMin != nullptr && filterMax != nullptr) {
-    auto filterMinPtr = filterMin->cast<tensor::TensorPtr>();
-    auto filterMaxPtr = filterMax->cast<tensor::TensorPtr>();
-    float *minBuf = static_cast<float *>(filterMinPtr->Data());
-    float *maxBuf = static_cast<float *>(filterMaxPtr->Data());
+    auto filterMinPtr = filterMin->cast<TensorPtr>();
+    auto filterMaxPtr = filterMax->cast<TensorPtr>();
+    float *minBuf = static_cast<float *>(filterMinPtr->data_c());
+    float *maxBuf = static_cast<float *>(filterMaxPtr->data_c());
     quantParam.min = FLT_MAX;
     quantParam.max = FLT_MIN;
-    for (int i = 0; i < filterMinPtr->DataSize(); ++i) {
+    for (int i = 0; i < filterMinPtr->ElementsNum(); ++i) {
       quantParam.min = (*(minBuf) < quantParam.min) ? (*minBuf) : quantParam.min;
       quantParam.max = (*(maxBuf) > quantParam.max) ? (*maxBuf) : quantParam.max;
       minBuf++;
@@ -218,10 +217,10 @@ void PrimitiveC::PopulaterQuantParam(const Primitive &prim,
   auto outputMin = prim.GetAttr("output_minq");
   auto outputMax = prim.GetAttr("output_maxq");
   if (outputMin != nullptr && outputMax != nullptr) {
-    auto outputMinPtr = outputMin->cast<tensor::TensorPtr>();
-    auto outputMaxPtr = outputMax->cast<tensor::TensorPtr>();
-    float *minBuf = static_cast<float *>(outputMinPtr->Data());
-    float *maxBuf = static_cast<float *>(outputMaxPtr->Data());
+    auto outputMinPtr = outputMin->cast<TensorPtr>();
+    auto outputMaxPtr = outputMax->cast<TensorPtr>();
+    float *minBuf = static_cast<float *>(outputMinPtr->data_c());
+    float *maxBuf = static_cast<float *>(outputMaxPtr->data_c());
     quantParam.min = *minBuf;
     quantParam.max = *maxBuf;
     quant::CalQuantizationParams(&quantParam, quantParam.min, quantParam.max, narrowRangeQuantParam,
@@ -326,9 +325,8 @@ std::shared_ptr<PrimitiveC> NewPrimitiveC(const Primitive &prim, const std::vect
   return primc;
 }
 
-std::shared_ptr<PrimitiveC> PrimitiveC::UnPackFromPrimitive(const Primitive &prim,
-                                                            const std::vector<AnfNodePtr> &inputs,
-                                                            const schema::QuantType &quantType) {
+std::shared_ptr<PrimitiveC> PrimitiveC::Create(const Primitive &prim, const std::vector<AnfNodePtr> &inputs,
+                                               const schema::QuantType &quantType) {
   const auto &op_type = prim.name();
   if (op_type == "ReLU" || op_type == "ReLU6" || op_type == "Sigmoid") {
     return NewPrimitiveC<Activation>(prim, inputs, quantType);
@@ -399,12 +397,12 @@ std::shared_ptr<PrimitiveC> PrimitiveC::UnPackFromPrimitive(const Primitive &pri
     return NewPrimitiveC<ArithmeticGrad>(prim, inputs, quantType);
 #endif
   } else {
-    MS_LOG(ERROR) << "Unsupported primitive type in UnPackFromPrimitive : " << op_type;
+    MS_LOG(ERROR) << "Unsupported primitive type in Create : " << op_type;
     return nullptr;
   }
 }
 
-PrimitiveC *PrimitiveC::UnPackFromSchemaPrimitiveT(mindspore::schema::PrimitiveT *primitive) {
+PrimitiveC *PrimitiveC::Create(mindspore::schema::PrimitiveT *primitive) {
   MS_ASSERT(primitive != nullptr);
   auto op_type = primitive->value.type;
   switch (op_type) {
@@ -641,14 +639,13 @@
 #endif
     default:
-      MS_LOG(ERROR) << "Unsupported primitive type in UnPackFromSchemaPrimitiveT : "
-                    << schema::EnumNamePrimitiveType(op_type);
+      MS_LOG(ERROR) << "Unsupported primitive type in Create : " << schema::EnumNamePrimitiveType(op_type);
       break;
   }
   return nullptr;
 }
 #else
-PrimitiveC *PrimitiveC::UnPackFromSchemaPrimitive(const schema::Primitive *primitive) {
+PrimitiveC *PrimitiveC::Create(const schema::Primitive *primitive) {
   MS_ASSERT(primitive);
   auto op_type = primitive->value_type();
   switch (op_type) {
@@ -877,19 +874,16 @@ PrimitiveC *PrimitiveC::UnPackFromSchemaPrimitive(const schema::Primitive *primi
     case schema::PrimitiveType_MulGrad:
       return NewPrimitiveC<MulGrad>(primitive);
     case schema::PrimitiveType_DivGrad:
-      return NewPrimitiveC<MulGrad>(primitive);
+      return NewPrimitiveC<DivGrad>(primitive);
 #endif
     default:
-      MS_LOG(ERROR) << "Unsupported primitive type in UnPackFromSchemaPrimitive : "
-                    << schema::EnumNamePrimitiveType(op_type);
+      MS_LOG(ERROR) << "Unsupported primitive type in Create : " << schema::EnumNamePrimitiveType(op_type);
       break;
   }
   return nullptr;
 }
-void PrimitiveC::SetQuantType(schema::QuantType quant_type) {
-  this->quant_type_ = quant_type;
-}
-schema::QuantType PrimitiveC::GetQuantType() const { return quant_type_;}
+void PrimitiveC::SetQuantType(schema::QuantType quant_type) { this->quant_type_ = quant_type; }
+schema::QuantType PrimitiveC::GetQuantType() const { return quant_type_; }
 #endif
 
 int PrimitiveC::Type() const {
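// Illustrative sketch (not part of the patch): with the TensorPtr alias this file now relies
// on, the per-element min/max scan above can be read as roughly (attribute name taken from
// this file, everything else hypothetical):
//
//   auto min_tensor = prim.GetAttr("filter_minq")->cast<TensorPtr>();
//   auto *min_buf = static_cast<float *>(min_tensor->data_c());
//   for (int i = 0; i < min_tensor->ElementsNum(); ++i) {
//     if (min_buf[i] < quant_param.min) quant_param.min = min_buf[i];
//   }
//
// ElementsNum() explicitly counts elements, where the old DataSize() name left the unit
// (elements vs. bytes) ambiguous at call sites.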
@@ -906,7 +900,7 @@ bool PrimitiveC::GetInferFlag() const { return this->infer_flag_; }
 
 void PrimitiveC::SetInferFlag(bool flag) { this->infer_flag_ = flag; }
 
-int PrimitiveC::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int PrimitiveC::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   auto input = inputs_.front();
   MS_ASSERT(input != nullptr);
   auto output = outputs_.front();
diff --git a/mindspore/lite/src/ops/primitive_c.h b/mindspore/lite/src/ops/primitive_c.h
index bb19204a56..2163d1f057 100644
--- a/mindspore/lite/src/ops/primitive_c.h
+++ b/mindspore/lite/src/ops/primitive_c.h
@@ -28,7 +28,7 @@
 #include "schema/model_generated.h"
 #endif
 
-#include "src/ir/tensor.h"
+#include "src/tensor.h"
 #include "include/errorcode.h"
 #include "utils/log_adapter.h"
 
@@ -42,6 +42,7 @@ constexpr uint32_t kDimension_4d = 4;
 const std::set<TypeId> kSupportDataType = {kNumberTypeUInt8, kNumberTypeInt32, kNumberTypeFloat32, kNumberTypeFloat16};
 
 #ifdef PRIMITIVE_WRITEABLE
+using TensorPtr = std::shared_ptr<tensor::Tensor>;
 constexpr int kAnfPopulaterOne = 1;
 constexpr int kAnfPopulaterTwo = 2;
 constexpr int kAnfPopulaterThree = 3;
@@ -102,20 +103,18 @@ class PrimitiveC : public mindspore::Primitive {
 
   schema::QuantType GetQuantType() const;
 
-  virtual int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_);
+  virtual int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_);
 
   bool GetInferFlag() const;
 
   void SetInferFlag(bool flag);
 
-  static PrimitiveC *UnPackFromSchemaPrimitive(mindspore::schema::Primitive *primitive) {
-    return UnPackFromSchemaPrimitiveT(primitive->UnPack());
-  }
+  static PrimitiveC *Create(mindspore::schema::Primitive *primitive) { return Create(primitive->UnPack()); }
 
-  static PrimitiveC *UnPackFromSchemaPrimitiveT(mindspore::schema::PrimitiveT *primitive);
+  static PrimitiveC *Create(mindspore::schema::PrimitiveT *primitive);
 
-  static std::shared_ptr<PrimitiveC> UnPackFromPrimitive(const Primitive &prim, const std::vector<AnfNodePtr> &inputs,
-                                                         const schema::QuantType &quantType);
+  static std::shared_ptr<PrimitiveC> Create(const Primitive &prim, const std::vector<AnfNodePtr> &inputs,
+                                            const schema::QuantType &quantType);
 
   void PopulaterQuantParam(const Primitive &prim, std::vector<std::vector<schema::QuantParamT>> *vecInputQuantParam,
                            std::vector<std::vector<schema::QuantParamT>> *vecOutputQuantParam);
   void CalQuantParam(const double &mean, const double &stdDev, float *mMin, float *mMax);
@@ -143,13 +142,13 @@ class PrimitiveC {
 
   virtual ~PrimitiveC() { free(this->primitive_buf_); }
 
-  static PrimitiveC *UnPackFromSchemaPrimitive(const schema::Primitive *primitive);
+  static PrimitiveC *Create(const schema::Primitive *primitive);
 
   bool GetInferFlag() const;
 
   void SetInferFlag(bool flag);
 
-  virtual int InferShape(std::vector<lite::tensor::Tensor *> inputs, std::vector<lite::tensor::Tensor *> outputs);
+  virtual int InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs);
 
   int Type() const;
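// Illustrative sketch (not part of the patch): the three UnPackFrom* entry points collapse
// into a single Create overload set, so call sites select the factory by argument type alone.
// A hypothetical caller (which overloads exist still depends on PRIMITIVE_WRITEABLE):
//
//   PrimitiveC *a = PrimitiveC::Create(schema_primitive);       // const schema::Primitive *
//   PrimitiveC *b = PrimitiveC::Create(primitive_t);            // schema::PrimitiveT *
//   auto c = PrimitiveC::Create(prim, anf_inputs, quant_type);  // converter-side overload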
diff --git a/mindspore/lite/src/ops/prior_box.cc b/mindspore/lite/src/ops/prior_box.cc
index 1bd60c151e..3f9d53abac 100644
--- a/mindspore/lite/src/ops/prior_box.cc
+++ b/mindspore/lite/src/ops/prior_box.cc
@@ -122,7 +122,7 @@ constexpr int kPriorBoxN = 1;
 constexpr int kPriorBoxW = 1;
 constexpr int kPriorBoxC = 2;
 }  // namespace
-int PriorBox::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int PriorBox::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   MS_ASSERT(param != nullptr);
   auto input = inputs_.at(0);
   MS_ASSERT(input != nullptr);
diff --git a/mindspore/lite/src/ops/prior_box.h b/mindspore/lite/src/ops/prior_box.h
index d6f105a31c..cad8fe2f47 100644
--- a/mindspore/lite/src/ops/prior_box.h
+++ b/mindspore/lite/src/ops/prior_box.h
@@ -48,7 +48,7 @@ class PriorBox : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   std::vector<int> GetMinSizes() const;
   std::vector<int> GetMaxSizes() const;
   std::vector<float> GetAspectRatios() const;
diff --git a/mindspore/lite/src/ops/quant_dtype_cast.cc b/mindspore/lite/src/ops/quant_dtype_cast.cc
index a3adb9a5c4..6f2270ac84 100644
--- a/mindspore/lite/src/ops/quant_dtype_cast.cc
+++ b/mindspore/lite/src/ops/quant_dtype_cast.cc
@@ -44,7 +44,7 @@ int QuantDTypeCast::UnPackToFlatBuilder(const schema::Primitive *primitive, flat
 }
 #endif
 
-int QuantDTypeCast::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int QuantDTypeCast::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   MS_ASSERT(this->primitive_ != nullptr);
   auto input = inputs_.front();
   MS_ASSERT(input != nullptr);
diff --git a/mindspore/lite/src/ops/quant_dtype_cast.h b/mindspore/lite/src/ops/quant_dtype_cast.h
index 0523272982..001a5c8908 100644
--- a/mindspore/lite/src/ops/quant_dtype_cast.h
+++ b/mindspore/lite/src/ops/quant_dtype_cast.h
@@ -38,7 +38,7 @@ class QuantDTypeCast : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   int GetSrcT() const;
   int GetDstT() const;
 };
diff --git a/mindspore/lite/src/ops/range.cc b/mindspore/lite/src/ops/range.cc
index 75afe4efb2..65b594bf42 100644
--- a/mindspore/lite/src/ops/range.cc
+++ b/mindspore/lite/src/ops/range.cc
@@ -50,7 +50,7 @@ int Range::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::
 }
 #endif
 
-int Range::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int Range::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   MS_ASSERT(this->primitive_ != nullptr);
   auto input = inputs_.front();
   MS_ASSERT(input != nullptr);
diff --git a/mindspore/lite/src/ops/range.h b/mindspore/lite/src/ops/range.h
index 4f0a432462..b0601fa5c3 100644
--- a/mindspore/lite/src/ops/range.h
+++ b/mindspore/lite/src/ops/range.h
@@ -40,7 +40,7 @@ class Range : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   int GetDType() const;
   int GetStart() const;
   int GetLimit() const;
diff --git a/mindspore/lite/src/ops/rank.cc b/mindspore/lite/src/ops/rank.cc
index 1c95012d95..a95efb2006 100644
--- a/mindspore/lite/src/ops/rank.cc
+++ b/mindspore/lite/src/ops/rank.cc
@@ -29,7 +29,7 @@ int Rank::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::F
   return RET_OK;
 }
 #endif
-int Rank::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int Rank::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   MS_ASSERT(this->primitive_ != nullptr);
   auto input = inputs_.front();
   MS_ASSERT(input != nullptr);
diff --git a/mindspore/lite/src/ops/rank.h b/mindspore/lite/src/ops/rank.h
index 5251247e1f..5b48ffd399 100644
--- a/mindspore/lite/src/ops/rank.h
+++ b/mindspore/lite/src/ops/rank.h
@@ -36,7 +36,7 @@ class Rank : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
 };
 }  // namespace lite
 }  // namespace mindspore
diff --git a/mindspore/lite/src/ops/reduce.cc b/mindspore/lite/src/ops/reduce.cc
index 7b7af97b5b..29855d082d 100644
--- a/mindspore/lite/src/ops/reduce.cc
+++ b/mindspore/lite/src/ops/reduce.cc
@@ -114,7 +114,7 @@ namespace {
 constexpr size_t kInputSize = 1;
 constexpr size_t kOutputSize = 1;
 }  // namespace
-int Reduce::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int Reduce::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   if (inputs_.size() < kInputSize || outputs_.size() != kOutputSize) {
     return RET_ERROR;
   }
diff --git a/mindspore/lite/src/ops/reduce.h b/mindspore/lite/src/ops/reduce.h
index f29c7c8779..e5d50a2641 100644
--- a/mindspore/lite/src/ops/reduce.h
+++ b/mindspore/lite/src/ops/reduce.h
@@ -43,7 +43,7 @@ class Reduce : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   std::vector<int> GetAxes() const;
   int GetKeepDims() const;
   int GetMode() const;
diff --git a/mindspore/lite/src/ops/reshape.cc b/mindspore/lite/src/ops/reshape.cc
index 8928a89b94..0e17c2d262 100644
--- a/mindspore/lite/src/ops/reshape.cc
+++ b/mindspore/lite/src/ops/reshape.cc
@@ -19,7 +19,7 @@
 #include <memory>
 #include "include/errorcode.h"
 #include "utils/log_adapter.h"
-#include "src/ir/tensor.h"
+#include "src/tensor.h"
 
 namespace mindspore {
 namespace lite {
@@ -102,7 +102,7 @@ int Reshape::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers
 }
 #endif
 
-int Reshape::CalNewShape(const tensor::Tensor *in_tensor, std::vector<int> *out_shape) const {
+int Reshape::CalNewShape(const Tensor *in_tensor, std::vector<int> *out_shape) const {
   size_t in_shape_size = 1;
   for (size_t i = 0; i < in_tensor->shape().size(); i++) {
     in_shape_size *= in_tensor->shape()[i];
@@ -137,7 +137,7 @@ int Reshape::CalNewShape(const tensor::Tensor *in_tensor, std::vector<int> *out_
   return RET_OK;
 }
 template <typename T>
-void CalShape(const T *data, const std::vector<tensor::Tensor *> &inputs, std::vector<int> *out_shape, int shape_size) {
+void CalShape(const T *data, const std::vector<Tensor *> &inputs, std::vector<int> *out_shape, int shape_size) {
   int input_count = inputs[0]->ElementsNum();
   int index = 0;
   int size = 1;
@@ -153,7 +153,7 @@ void CalShape(const T *data, const std::vector<tensor::Tensor *> &inputs, std::v
     (*out_shape)[index] = input_count / size;
   }
 }
-int Reshape::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int Reshape::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   MS_ASSERT(this->primitive_ != nullptr);
   auto input = inputs_.front();
   MS_ASSERT(input != nullptr);
@@ -169,30 +169,30 @@ int Reshape::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tenso
   std::vector<int> out_shape;
   if (inputs_.size() == kDoubleNum) {
     auto shape_tensor = inputs_.at(1);
-    if (shape_tensor->Data() == nullptr) {
+    if (shape_tensor->MutableData() == nullptr) {
       MS_LOG(INFO) << "Do infer shape in runtime.";
       return RET_INFER_INVALID;
     }
     size_t shape_size = shape_tensor->ElementsNum();
     switch (shape_tensor->data_type()) {
       case kNumberTypeInt8: {
-        auto data = reinterpret_cast<int8_t *>(shape_tensor->Data());
+        auto data = reinterpret_cast<int8_t *>(shape_tensor->MutableData());
         CalShape(data, inputs_, &out_shape, shape_size);
       } break;
       case kNumberTypeInt32: {
-        auto data = reinterpret_cast<int32_t *>(shape_tensor->Data());
+        auto data = reinterpret_cast<int32_t *>(shape_tensor->MutableData());
         CalShape(data, inputs_, &out_shape, shape_size);
       } break;
       case kNumberTypeInt64: {
-        auto data = reinterpret_cast<int64_t *>(shape_tensor->Data());
+        auto data = reinterpret_cast<int64_t *>(shape_tensor->MutableData());
         CalShape(data, inputs_, &out_shape, shape_size);
       } break;
       case kNumberTypeFloat: {
-        auto data = reinterpret_cast<float *>(shape_tensor->Data());
+        auto data = reinterpret_cast<float *>(shape_tensor->MutableData());
         CalShape(data, inputs_, &out_shape, shape_size);
       } break;
       case kNumberTypeUInt32: {
-        auto data = reinterpret_cast<uint32_t *>(shape_tensor->Data());
+        auto data = reinterpret_cast<uint32_t *>(shape_tensor->MutableData());
         CalShape(data, inputs_, &out_shape, shape_size);
       } break;
       default: {
diff --git a/mindspore/lite/src/ops/reshape.h b/mindspore/lite/src/ops/reshape.h
index b7b3760946..3ae61825b2 100644
--- a/mindspore/lite/src/ops/reshape.h
+++ b/mindspore/lite/src/ops/reshape.h
@@ -40,12 +40,12 @@ class Reshape : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   int GetFormat() const;
   std::vector<int64_t> GetShape() const;
 
  private:
-  int CalNewShape(const lite::tensor::Tensor *in_tensor, std::vector<int> *out_shape) const;
+  int CalNewShape(const lite::Tensor *in_tensor, std::vector<int> *out_shape) const;
 };
 }  // namespace lite
 }  // namespace mindspore
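// Illustrative sketch (not part of the patch): CalShape above resolves an explicit -1 in the
// shape tensor from the remaining element count. For a 2x6 input and shape data {3, -1}:
//
//   int input_count = 12;     // inputs[0]->ElementsNum()
//   // walking {3, -1}: size accumulates to 3, index of the -1 entry is 1
//   // (*out_shape)[1] = input_count / size = 4  ->  out_shape = {3, 4}
//
// If the shape tensor carries no data yet, InferShape returns RET_INFER_INVALID and the real
// shape is computed again at run time.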
diff --git a/mindspore/lite/src/ops/resize.cc b/mindspore/lite/src/ops/resize.cc
index aa0dd10648..6f1c4912d2 100644
--- a/mindspore/lite/src/ops/resize.cc
+++ b/mindspore/lite/src/ops/resize.cc
@@ -61,7 +61,7 @@ int Resize::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers:
 namespace {
 constexpr int kInputRank = 4;
 }  // namespace
-int Resize::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int Resize::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   MS_ASSERT(this->primitive_ != nullptr);
   auto input = inputs_.front();
   if (input == nullptr) {
diff --git a/mindspore/lite/src/ops/resize.h b/mindspore/lite/src/ops/resize.h
index 3e1d71a484..84b22debac 100644
--- a/mindspore/lite/src/ops/resize.h
+++ b/mindspore/lite/src/ops/resize.h
@@ -42,7 +42,7 @@ class Resize : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   int GetFormat() const;
   int GetMethod() const;
   int64_t GetNewHeight() const;
diff --git a/mindspore/lite/src/ops/return.cc b/mindspore/lite/src/ops/return.cc
index c4b2b4b7ec..43dd4349e7 100644
--- a/mindspore/lite/src/ops/return.cc
+++ b/mindspore/lite/src/ops/return.cc
@@ -53,7 +53,7 @@ namespace {
 constexpr size_t kInputSize = 1;
 constexpr size_t kOutputSize = 1;
 }  // namespace
-int Return::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int Return::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   if (inputs_.size() != kInputSize || outputs_.size() != kOutputSize) {
     return RET_ERROR;
   }
diff --git a/mindspore/lite/src/ops/return.h b/mindspore/lite/src/ops/return.h
index ec6af73b67..695781131b 100644
--- a/mindspore/lite/src/ops/return.h
+++ b/mindspore/lite/src/ops/return.h
@@ -35,7 +35,7 @@ class Return : public PrimitiveC {
 #else
   Return() = default;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
 };
 }  // namespace lite
 }  // namespace mindspore
diff --git a/mindspore/lite/src/ops/reverse_sequence.cc b/mindspore/lite/src/ops/reverse_sequence.cc
index 01155b69b2..6fceb8b1cb 100644
--- a/mindspore/lite/src/ops/reverse_sequence.cc
+++ b/mindspore/lite/src/ops/reverse_sequence.cc
@@ -47,7 +47,7 @@ int ReverseSequence::UnPackToFlatBuilder(const schema::Primitive *primitive, fla
 }
 #endif
 
-int ReverseSequence::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) {
+int ReverseSequence::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) {
   auto input = inputs.front();
   auto output = outputs.front();
   MS_ASSERT(input != nullptr);
diff --git a/mindspore/lite/src/ops/reverse_sequence.h b/mindspore/lite/src/ops/reverse_sequence.h
index a51be8e644..7b2b7e525a 100644
--- a/mindspore/lite/src/ops/reverse_sequence.h
+++ b/mindspore/lite/src/ops/reverse_sequence.h
@@ -39,7 +39,7 @@ class ReverseSequence : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   int GetSeqAxis() const;
   int GetBatchAxis() const;
 };
diff --git a/mindspore/lite/src/ops/roi_pooling.cc b/mindspore/lite/src/ops/roi_pooling.cc
index 4d1270e35a..90608afd85 100644
--- a/mindspore/lite/src/ops/roi_pooling.cc
+++ b/mindspore/lite/src/ops/roi_pooling.cc
@@ -49,7 +49,7 @@ int ROIPooling::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuff
 }
 #endif
 
-int ROIPooling::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int ROIPooling::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   MS_ASSERT(this->primitive_ != nullptr);
   if (inputs_.size() != kDoubleNum) {
     MS_LOG(ERROR) << "inputs number is not equal to " << kDoubleNum;
diff --git a/mindspore/lite/src/ops/roi_pooling.h b/mindspore/lite/src/ops/roi_pooling.h
index 4dabb95f90..0150847d91 100644
--- a/mindspore/lite/src/ops/roi_pooling.h
+++ b/mindspore/lite/src/ops/roi_pooling.h
@@ -39,7 +39,7 @@ class ROIPooling : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   int GetPooledH() const;
   int GetPooledW() const;
   float GetScale() const;
diff --git a/mindspore/lite/src/ops/scatter_nd.cc b/mindspore/lite/src/ops/scatter_nd.cc
index 6cd425ac75..121ae4608f 100644
--- a/mindspore/lite/src/ops/scatter_nd.cc
+++ b/mindspore/lite/src/ops/scatter_nd.cc
@@ -26,7 +26,7 @@ constexpr int kScatterShapeIndex = 0;
 constexpr int kScatterIndicesIndex = 1;
 constexpr int kScatterUpdateIndex = 2;
 }  // namespace
-int ScatterND::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int ScatterND::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   if (inputs_.size() != kScatterNDInputNum) {
     MS_LOG(ERROR) << "inputs number is not equal to " << kScatterNDInputNum;
     return RET_ERROR;
@@ -56,8 +56,8 @@ int ScatterND::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<ten
-  auto shape_data = reinterpret_cast<int *>(shape->Data());
-  std::vector<int> out_shape(shape_data, shape_data + shape->DataSize());
+  auto shape_data = reinterpret_cast<int *>(shape->MutableData());
+  std::vector<int> out_shape(shape_data, shape_data + shape->ElementsNum());
   output->set_shape(out_shape);
   return RET_OK;
 }
diff --git a/mindspore/lite/src/ops/scatter_nd.h b/mindspore/lite/src/ops/scatter_nd.h
index ad7bc2c887..988e277f35 100644
--- a/mindspore/lite/src/ops/scatter_nd.h
+++ b/mindspore/lite/src/ops/scatter_nd.h
@@ -36,7 +36,7 @@ class ScatterND : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
 };
 }  // namespace lite
 }  // namespace mindspore
diff --git a/mindspore/lite/src/ops/shape.cc b/mindspore/lite/src/ops/shape.cc
index 349fb8cb0d..cd63042973 100644
--- a/mindspore/lite/src/ops/shape.cc
+++ b/mindspore/lite/src/ops/shape.cc
@@ -17,7 +17,7 @@
 #include "src/ops/shape.h"
 #include "include/errorcode.h"
 #include "utils/log_adapter.h"
-#include "src/ir/tensor.h"
+#include "src/tensor.h"
 
 namespace mindspore {
 namespace lite {
@@ -26,7 +26,7 @@ namespace {
 constexpr int kShapeInputNum = 1;
 constexpr int kShapeOutputNum = 1;
 }  // namespace
-int Shape::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int Shape::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   if (inputs_.size() != kShapeInputNum) {
     MS_LOG(ERROR) << "inputs to Shape operator should be 1, but " << inputs_.size() << " is given.";
     return RET_ERROR;
@@ -38,17 +38,13 @@ int Shape::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor:
   out_tensor->set_data_type(kNumberTypeInt32);
-  out_tensor->SetFormat(schema::Format_NHWC);
+  out_tensor->SetFormat(schema::Format::Format_NHWC);
   if (!GetInferFlag()) {
     return RET_OK;
   }
   std::vector<int> out_shape;
   out_shape.push_back(static_cast<int>(in_tensor->shape().size()));
-  auto ret_shape = out_tensor->set_shape(out_shape);
-  if (ret_shape != 1 || size_t(out_tensor->shape()[0]) != in_tensor->shape().size()) {
-    MS_LOG(ERROR) << "Set shape fails.";
-    return RET_ERROR;
-  }
+  out_tensor->set_shape(out_shape);
   return RET_OK;
 }
 #ifdef PRIMITIVE_WRITEABLE
diff --git a/mindspore/lite/src/ops/shape.h b/mindspore/lite/src/ops/shape.h
index 7dc856eca5..7e6feb8217 100644
--- a/mindspore/lite/src/ops/shape.h
+++ b/mindspore/lite/src/ops/shape.h
@@ -36,7 +36,7 @@ class Shape : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
 };
 }  // namespace lite
 }  // namespace mindspore
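// Illustrative sketch (not part of the patch): with set_shape() no longer reporting a size,
// Shape::InferShape reduces to describing its single output, a 1-D Int32 tensor with one
// entry per input dimension:
//
//   // input shape {1, 224, 224, 3}  ->  output: Int32 tensor of shape {4}
//   out_tensor->set_data_type(kNumberTypeInt32);
//   out_tensor->set_shape({static_cast<int>(in_tensor->shape().size())});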
diff --git a/mindspore/lite/src/ops/slice.cc b/mindspore/lite/src/ops/slice.cc
index b421a4c885..c4d3a4c75c 100644
--- a/mindspore/lite/src/ops/slice.cc
+++ b/mindspore/lite/src/ops/slice.cc
@@ -17,7 +17,7 @@
 #include "src/ops/slice.h"
 #include "include/errorcode.h"
 #include "utils/log_adapter.h"
-#include "src/ir/tensor.h"
+#include "src/tensor.h"
 
 namespace mindspore {
 namespace lite {
@@ -88,7 +88,7 @@ int Slice::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::
 std::vector<int> Slice::GetPostProcessBegin() const { return this->begin; }
 std::vector<int> Slice::GetPostProcessSize() const { return this->size; }
-int Slice::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) {
+int Slice::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) {
   MS_ASSERT(this->primitive_ != nullptr);
   if (inputs.size() != kSliceInputNum || outputs.size() != kSliceOutputNum) {
     MS_LOG(ERROR) << "input size:" << inputs.size() << ",output size:" << outputs.size();
diff --git a/mindspore/lite/src/ops/slice.h b/mindspore/lite/src/ops/slice.h
index b5fa281e4b..e420e161fe 100644
--- a/mindspore/lite/src/ops/slice.h
+++ b/mindspore/lite/src/ops/slice.h
@@ -40,7 +40,7 @@ class Slice : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   int GetFormat() const;
   std::vector<int> GetBegin() const;
   std::vector<int> GetSize() const;
diff --git a/mindspore/lite/src/ops/softmax.cc b/mindspore/lite/src/ops/softmax.cc
index af518125d0..0e97d1ec87 100644
--- a/mindspore/lite/src/ops/softmax.cc
+++ b/mindspore/lite/src/ops/softmax.cc
@@ -71,7 +71,7 @@ int SoftMax::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers
 }
 #endif
 
-int SoftMax::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int SoftMax::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   MS_ASSERT(this->primitive_ != nullptr);
   auto input = inputs_.front();
   MS_ASSERT(input != nullptr);
diff --git a/mindspore/lite/src/ops/softmax.h b/mindspore/lite/src/ops/softmax.h
index 3659d88820..249b62c84d 100644
--- a/mindspore/lite/src/ops/softmax.h
+++ b/mindspore/lite/src/ops/softmax.h
@@ -39,7 +39,7 @@ class SoftMax : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   int GetAxis() const;
 };
 }  // namespace lite
diff --git a/mindspore/lite/src/ops/softmax_cross_entropy.cc b/mindspore/lite/src/ops/softmax_cross_entropy.cc
index 41e6c1e22f..5b5a5c7f24 100644
--- a/mindspore/lite/src/ops/softmax_cross_entropy.cc
+++ b/mindspore/lite/src/ops/softmax_cross_entropy.cc
@@ -52,7 +52,7 @@ int SoftmaxCrossEntropy::UnPackToFlatBuilder(const schema::Primitive *primitive,
 }
 #endif
 
-int SoftmaxCrossEntropy::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) {
+int SoftmaxCrossEntropy::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) {
   if (1 > outputs.size()) {
     MS_LOG(ERROR) << "SoftmaxCrossEntropy should have at least one output";
     return RET_ERROR;
diff --git a/mindspore/lite/src/ops/softmax_cross_entropy.h b/mindspore/lite/src/ops/softmax_cross_entropy.h
index b81a435abe..4f74ab44cd 100644
--- a/mindspore/lite/src/ops/softmax_cross_entropy.h
+++ b/mindspore/lite/src/ops/softmax_cross_entropy.h
@@ -39,7 +39,7 @@ class SoftmaxCrossEntropy : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
 
   std::vector<int> GetAxis() const;
 };
diff --git a/mindspore/lite/src/ops/space_to_batch.cc b/mindspore/lite/src/ops/space_to_batch.cc
index ac2902a307..bc0ea6e6de 100644
--- a/mindspore/lite/src/ops/space_to_batch.cc
+++ b/mindspore/lite/src/ops/space_to_batch.cc
@@ -73,7 +73,7 @@ constexpr int kBlockSizesSize = 2;
 constexpr int kPaddingsSize = 4;
 }  // namespace
 
-int SpaceToBatch::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) {
+int SpaceToBatch::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) {
   MS_ASSERT(this->primitive_ != nullptr);
   if (outputs.size() != kSpaceToBatchNDOutputNum || inputs.size() != kSpaceToBatchNDInputNum) {
     MS_LOG(ERROR) << "Invalid output/input size! output size: " << outputs.size() << ",input size: " << inputs.size();
@@ -81,7 +81,7 @@ int SpaceToBatch::InferShape(std::vector<tensor::Tensor *> inputs, std::ve
   }
   auto input = inputs.at(0);
-  if (input->GetFormat() != schema::Format_NHWC) {
+  if (input->GetFormat() != schema::Format::Format_NHWC) {
     MS_LOG(ERROR) << "space_to_batch only support NHWC now!";
     return 1;
   }
diff --git a/mindspore/lite/src/ops/space_to_batch.h b/mindspore/lite/src/ops/space_to_batch.h
index 3c3888bcb7..834c529eeb 100644
--- a/mindspore/lite/src/ops/space_to_batch.h
+++ b/mindspore/lite/src/ops/space_to_batch.h
@@ -39,7 +39,7 @@ class SpaceToBatch : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs, std::vector<lite::tensor::Tensor *> outputs) override;
+  int InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs) override;
 
   std::vector<int> GetBlockShape() const;
   std::vector<int> GetPaddings() const;
diff --git a/mindspore/lite/src/ops/space_to_batch_nd.cc b/mindspore/lite/src/ops/space_to_batch_nd.cc
index 2c2aadd39e..91a5a4366a 100644
--- a/mindspore/lite/src/ops/space_to_batch_nd.cc
+++ b/mindspore/lite/src/ops/space_to_batch_nd.cc
@@ -78,15 +78,14 @@ int SpaceToBatchND::UnPackToFlatBuilder(const schema::Primitive *primitive, flat
 #endif  // PRIMITIVE_WRITEABLE
 
-int SpaceToBatchND::InferShape(std::vector<tensor::Tensor *> inputs,
-                               std::vector<tensor::Tensor *> outputs) {
+int SpaceToBatchND::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) {
   if (outputs.size() != kSpaceToBatchNDOutputNum || inputs.size() != kSpaceToBatchNDInputNum) {
     MS_LOG(ERROR) << "Invalid output/input size! output size: " << outputs.size() << ",input size: " << inputs.size();
     return 1;
   }
   auto input = inputs.at(0);
-  if (input->GetFormat() != schema::Format_NHWC) {
+  if (input->GetFormat() != schema::Format::Format_NHWC) {
     MS_LOG(ERROR) << "space_to_batch_nd only support NHWC now!";
     return RET_ERROR;
   }
diff --git a/mindspore/lite/src/ops/space_to_batch_nd.h b/mindspore/lite/src/ops/space_to_batch_nd.h
index f308efd509..fd9719c0c3 100644
--- a/mindspore/lite/src/ops/space_to_batch_nd.h
+++ b/mindspore/lite/src/ops/space_to_batch_nd.h
@@ -39,7 +39,7 @@ class SpaceToBatchND : public PrimitiveC {
 #endif
   std::vector<int> GetBlockShape() const;
   std::vector<int> GetPaddings() const;
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs, std::vector<lite::tensor::Tensor *> outputs) override;
+  int InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs) override;
 };
 }  // namespace lite
 }  // namespace mindspore
diff --git a/mindspore/lite/src/ops/space_to_depth.cc b/mindspore/lite/src/ops/space_to_depth.cc
index f98956d089..f095b6d45e 100644
--- a/mindspore/lite/src/ops/space_to_depth.cc
+++ b/mindspore/lite/src/ops/space_to_depth.cc
@@ -49,7 +49,7 @@ constexpr int kSpaceToDepthOutputNum = 1;
 constexpr int kSpaceToDepthInputNum = 1;
 }  // namespace
 
-int SpaceToDepth::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) {
+int SpaceToDepth::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) {
   MS_ASSERT(this->primitive_ != nullptr);
   if (outputs.size() != kSpaceToDepthOutputNum || inputs.size() != kSpaceToDepthInputNum) {
     MS_LOG(ERROR) << "Invalid output/input size! output size: " << outputs.size() << ",input size: " << inputs.size();
@@ -57,7 +57,7 @@ int SpaceToDepth::InferShape(std::vector<tensor::Tensor *> inputs, std::ve
   }
   auto input = inputs.at(0);
-  if (input->GetFormat() != schema::Format_NHWC) {
+  if (input->GetFormat() != schema::Format::Format_NHWC) {
     MS_LOG(ERROR) << "space_to_depth only support NHWC now!";
     return 1;
   }
diff --git a/mindspore/lite/src/ops/space_to_depth.h b/mindspore/lite/src/ops/space_to_depth.h
index 8edeb3ea0f..4c40c55aa8 100644
--- a/mindspore/lite/src/ops/space_to_depth.h
+++ b/mindspore/lite/src/ops/space_to_depth.h
@@ -38,7 +38,7 @@ class SpaceToDepth : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   int GetBlockSize() const;
   int GetFormat() const;
 };
diff --git a/mindspore/lite/src/ops/split.cc b/mindspore/lite/src/ops/split.cc
index 4798f54d88..02941142fa 100644
--- a/mindspore/lite/src/ops/split.cc
+++ b/mindspore/lite/src/ops/split.cc
@@ -62,7 +62,7 @@ int Split::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::
 namespace {
 constexpr int kSplitInputNum = 1;
 }  // namespace
-int Split::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int Split::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   MS_ASSERT(this->primitive_ != nullptr);
   auto input = inputs_.front();
   MS_ASSERT(input != nullptr);
diff --git a/mindspore/lite/src/ops/split.h b/mindspore/lite/src/ops/split.h
index 86c9fe3594..dc33c7141c 100644
--- a/mindspore/lite/src/ops/split.h
+++ b/mindspore/lite/src/ops/split.h
@@ -40,7 +40,7 @@ class Split : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   int GetNumberSplit() const;
   std::vector<int> GetSizeSplits() const;
   int GetSplitDim() const;
diff --git a/mindspore/lite/src/ops/squeeze.cc b/mindspore/lite/src/ops/squeeze.cc
index d9dbd6c734..37b044a005 100644
--- a/mindspore/lite/src/ops/squeeze.cc
+++ b/mindspore/lite/src/ops/squeeze.cc
@@ -54,7 +54,7 @@ namespace {
 constexpr int kSqueezeInputNum = 1;
 constexpr int kSqueezeOutputNum = 1;
 }  // namespace
-int Squeeze::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int Squeeze::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   MS_ASSERT(this->primitive_ != nullptr);
   if (kSqueezeInputNum != inputs_.size()) {
     MS_LOG(ERROR) << "Add should has " << kSqueezeInputNum << " inputs";
diff --git a/mindspore/lite/src/ops/squeeze.h b/mindspore/lite/src/ops/squeeze.h
index aec38d7d68..58013eb913 100644
--- a/mindspore/lite/src/ops/squeeze.h
+++ b/mindspore/lite/src/ops/squeeze.h
@@ -39,7 +39,7 @@ class Squeeze : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   std::vector<int> GetAxis() const;
 };
 }  // namespace lite
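// Illustrative sketch (not part of the patch): the InferShape implementations above share one
// early-exit convention. For a generic op it reads roughly:
//
//   output->set_data_type(input->data_type());  // type and format are always propagated
//   output->SetFormat(input->GetFormat());
//   if (!GetInferFlag()) {
//     return RET_OK;  // shape arithmetic deferred until tensors carry data
//   }
//   // ... actual shape computation only runs when inference is enabled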
diff --git a/mindspore/lite/src/ops/stack.cc b/mindspore/lite/src/ops/stack.cc
index 219ab72fd3..208c79a1b0 100644
--- a/mindspore/lite/src/ops/stack.cc
+++ b/mindspore/lite/src/ops/stack.cc
@@ -60,7 +60,7 @@ namespace {
 constexpr int kStackOutputNum = 1;
 constexpr int kStackMinInputNum = 1;
 }  // namespace
-int Stack::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) {
+int Stack::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) {
   MS_ASSERT(this->primitive_ != nullptr);
   if (outputs.size() != kStackOutputNum) {
     MS_LOG(ERROR) << "Invalid output size:" << outputs.size();
diff --git a/mindspore/lite/src/ops/stack.h b/mindspore/lite/src/ops/stack.h
index b1e480349d..a6b12956d1 100644
--- a/mindspore/lite/src/ops/stack.h
+++ b/mindspore/lite/src/ops/stack.h
@@ -40,7 +40,7 @@ class Stack : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   int GetAxis() const;
   int GetN() const;
   std::vector<int> GetIsScale() const;
diff --git a/mindspore/lite/src/ops/strided_slice.cc b/mindspore/lite/src/ops/strided_slice.cc
index 892229b866..bd56848c12 100644
--- a/mindspore/lite/src/ops/strided_slice.cc
+++ b/mindspore/lite/src/ops/strided_slice.cc
@@ -182,7 +182,7 @@ void StridedSlice::ApplyEndMask() {
   }
 }
 
-int StridedSlice::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) {
+int StridedSlice::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) {
   MS_ASSERT(this->primitive_ != nullptr);
   if (outputs.size() != kStridedSliceOutputNum) {
     MS_LOG(ERROR) << "Invalid output size:" << outputs.size();
diff --git a/mindspore/lite/src/ops/strided_slice.h b/mindspore/lite/src/ops/strided_slice.h
index 13d8d9151a..06b1f6e311 100644
--- a/mindspore/lite/src/ops/strided_slice.h
+++ b/mindspore/lite/src/ops/strided_slice.h
@@ -46,7 +46,7 @@ class StridedSlice : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   int GetBeginMask() const;
   int GetEndMask() const;
   int GetEllipsisMask() const;
diff --git a/mindspore/lite/src/ops/tile.cc b/mindspore/lite/src/ops/tile.cc
index cf7058ceaf..f4870de2f8 100644
--- a/mindspore/lite/src/ops/tile.cc
+++ b/mindspore/lite/src/ops/tile.cc
@@ -66,7 +66,7 @@ int Tile::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::F
 }
 #endif
 
-int Tile::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int Tile::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   MS_ASSERT(this->primitive_ != nullptr);
   auto input = inputs_.front();
   MS_ASSERT(input != nullptr);
diff --git a/mindspore/lite/src/ops/tile.h b/mindspore/lite/src/ops/tile.h
index f46685a605..78245f0dac 100644
--- a/mindspore/lite/src/ops/tile.h
+++ b/mindspore/lite/src/ops/tile.h
@@ -40,7 +40,7 @@ class Tile : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   std::vector<int> GetMultiples() const;
   std::vector<int> GetDims() const;
 };
diff --git a/mindspore/lite/src/ops/topk.cc b/mindspore/lite/src/ops/topk.cc
index fb3a8a47e9..7ed1782574 100644
--- a/mindspore/lite/src/ops/topk.cc
+++ b/mindspore/lite/src/ops/topk.cc
@@ -44,7 +44,7 @@ int TopK::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::F
 }
 #endif
 
-int TopK::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int TopK::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   MS_ASSERT(this->primitive_ != nullptr);
   if (inputs_.size() != kSingleNum || outputs_.size() != kDoubleNum) {
     MS_LOG(ERROR) << "input size: " << inputs_.size() << ", output size: " << outputs_.size();
diff --git a/mindspore/lite/src/ops/topk.h b/mindspore/lite/src/ops/topk.h
index 082f83fd7e..bf4140566c 100644
--- a/mindspore/lite/src/ops/topk.h
+++ b/mindspore/lite/src/ops/topk.h
@@ -38,7 +38,7 @@ class TopK : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   int GetK() const;
   bool GetSorted() const;
 };
diff --git a/mindspore/lite/src/ops/transpose.cc b/mindspore/lite/src/ops/transpose.cc
index 11c057a6e1..b6d7b6d275 100644
--- a/mindspore/lite/src/ops/transpose.cc
+++ b/mindspore/lite/src/ops/transpose.cc
@@ -99,7 +99,7 @@ int Transpose::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffe
 }
 #endif
 
-int Transpose::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int Transpose::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   MS_ASSERT(this->primitive_ != nullptr);
   auto input = inputs_.front();
   MS_ASSERT(input != nullptr);
diff --git a/mindspore/lite/src/ops/transpose.h b/mindspore/lite/src/ops/transpose.h
index b12507993d..c6c71ffef4 100644
--- a/mindspore/lite/src/ops/transpose.h
+++ b/mindspore/lite/src/ops/transpose.h
@@ -40,7 +40,7 @@ class Transpose : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   std::vector<int> GetPerm() const;
   bool GetConjugate() const;
 };
diff --git a/mindspore/lite/src/ops/unique.cc b/mindspore/lite/src/ops/unique.cc
index 8f0a626bcc..073a8771a9 100644
--- a/mindspore/lite/src/ops/unique.cc
+++ b/mindspore/lite/src/ops/unique.cc
@@ -41,7 +41,7 @@ int Unique::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers:
 }
 #endif
 
-int Unique::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int Unique::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   MS_ASSERT(this->primitive_ != nullptr);
   if (inputs_.size() != kSingleNum || outputs_.size() != kDoubleNum) {
     MS_LOG(ERROR) << "input size: " << inputs_.size() << ", output size: " << outputs_.size();
diff --git a/mindspore/lite/src/ops/unique.h b/mindspore/lite/src/ops/unique.h
index 4904a1e813..eb634a1278 100644
--- a/mindspore/lite/src/ops/unique.h
+++ b/mindspore/lite/src/ops/unique.h
@@ -38,7 +38,7 @@ class Unique : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   int GetOutType() const;
 };
 }  // namespace lite
diff --git a/mindspore/lite/src/ops/unsqueeze.cc b/mindspore/lite/src/ops/unsqueeze.cc
index 5d55cd19f2..80beb0e387 100644
--- a/mindspore/lite/src/ops/unsqueeze.cc
+++ b/mindspore/lite/src/ops/unsqueeze.cc
@@ -17,7 +17,7 @@
 #include "src/ops/unsqueeze.h"
 #include "include/errorcode.h"
 #include "utils/log_adapter.h"
-#include "src/ir/tensor.h"
+#include "src/tensor.h"
 
 namespace mindspore {
 namespace lite {
@@ -53,7 +53,7 @@ int Unsqueeze::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffe
 }
 #endif
 
-int Unsqueeze::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int Unsqueeze::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   MS_ASSERT(this->primitive_ != nullptr);
   auto input = inputs_.front();
   MS_ASSERT(input != nullptr);
diff --git a/mindspore/lite/src/ops/unsqueeze.h b/mindspore/lite/src/ops/unsqueeze.h
index 36bc2b261c..81011561b0 100644
--- a/mindspore/lite/src/ops/unsqueeze.h
+++ b/mindspore/lite/src/ops/unsqueeze.h
@@ -39,7 +39,7 @@ class Unsqueeze : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   std::vector<int> GetAxis() const;
 };
 }  // namespace lite
diff --git a/mindspore/lite/src/ops/unstack.cc b/mindspore/lite/src/ops/unstack.cc
index 24da3faab6..1e892b3651 100644
--- a/mindspore/lite/src/ops/unstack.cc
+++ b/mindspore/lite/src/ops/unstack.cc
@@ -44,7 +44,7 @@ int Unstack::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers
 }
 #endif
 
-int Unstack::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) {
+int Unstack::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) {
   auto input = inputs.at(0);
   MS_ASSERT(input != nullptr);
   auto input_shape = input->shape();
diff --git a/mindspore/lite/src/ops/unstack.h b/mindspore/lite/src/ops/unstack.h
index b0a24b672e..ecadc56252 100644
--- a/mindspore/lite/src/ops/unstack.h
+++ b/mindspore/lite/src/ops/unstack.h
@@ -38,7 +38,7 @@ class Unstack : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   int GetNum() const;
   int GetAxis() const;
 };
diff --git a/mindspore/lite/src/ops/where.cc b/mindspore/lite/src/ops/where.cc
index 39ca552670..39155899c7 100644
--- a/mindspore/lite/src/ops/where.cc
+++ b/mindspore/lite/src/ops/where.cc
@@ -52,7 +52,7 @@ int Where::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::
 }
 #endif
 
-int Where::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int Where::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   MS_ASSERT(this->primitive_ != nullptr);
   auto input = inputs_.front();
   MS_ASSERT(input != nullptr);
diff --git a/mindspore/lite/src/ops/where.h b/mindspore/lite/src/ops/where.h
index 9597c813e2..4a6be28f23 100644
--- a/mindspore/lite/src/ops/where.h
+++ b/mindspore/lite/src/ops/where.h
@@ -39,7 +39,7 @@ class Where : public PrimitiveC {
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
-  int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
   std::vector<bool> GetCondition() const;
 };
 }  // namespace lite
diff --git a/mindspore/lite/src/ops/zeros_like.cc b/mindspore/lite/src/ops/zeros_like.cc
index f4562e38fc..b57384432a 100644
--- a/mindspore/lite/src/ops/zeros_like.cc
+++ b/mindspore/lite/src/ops/zeros_like.cc
@@ -32,7 +32,7 @@ int ZerosLike::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffe
 }
 #endif
 
-int ZerosLike::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
+int ZerosLike::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   MS_ASSERT(this->primitive_ != nullptr);
   auto input = inputs_.front();
MS_ASSERT(input != nullptr); diff --git a/mindspore/lite/src/ops/zeros_like.h b/mindspore/lite/src/ops/zeros_like.h index 08a0325d11..5c9eedf828 100644 --- a/mindspore/lite/src/ops/zeros_like.h +++ b/mindspore/lite/src/ops/zeros_like.h @@ -35,7 +35,7 @@ class ZerosLike : public PrimitiveC { ZerosLike() = default; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; #endif - int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; + int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/param_value_lite.h b/mindspore/lite/src/param_value_lite.h index a57c4b8140..fcf812b587 100644 --- a/mindspore/lite/src/param_value_lite.h +++ b/mindspore/lite/src/param_value_lite.h @@ -21,9 +21,8 @@ #include #include #include - +#include "src/tensor.h" #include "ir/dtype/type_id.h" -#include "schema/inner/model_generated.h" namespace mindspore { class ParamValueLite : public Value { @@ -66,7 +65,7 @@ class ParamValueLite : public Value { private: void *tensor_addr_ = nullptr; size_t tensor_size_ = 0; - int format_ = schema::Format_KCHW; + int format_ = schema::Format::Format_KCHW; std::vector<int> tensor_shape_{}; TypeId type_id_ = TypeId::kNumberTypeFloat32; }; diff --git a/mindspore/lite/src/populate_parameter.cc b/mindspore/lite/src/populate_parameter.cc index ede3f91bef..57efab75b3 100644 --- a/mindspore/lite/src/populate_parameter.cc +++ b/mindspore/lite/src/populate_parameter.cc @@ -1210,7 +1210,7 @@ OpParameter *PopulateSpaceToDepthParameter(const mindspore::lite::PrimitiveC *pr auto param = reinterpret_cast<mindspore::lite::SpaceToDepth *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); space_depth_param->op_parameter_.type_ = primitive->Type(); space_depth_param->block_size_ = param->GetBlockSize(); - if (param->GetFormat() != schema::Format_NHWC) { + if (param->GetFormat() != schema::Format::Format_NHWC) { MS_LOG(ERROR) << "Currently only NHWC format is supported."; free(space_depth_param); return nullptr; @@ -1699,9 +1699,8 @@ PopulateParameterFunc PopulateParameterRegistry::GetParameterFunc(int type) { } int PopulateParameterRegistry::AddPopulateParameterFunc(const schema::PrimitiveType &type, PopulateParameterFunc func) { - if ((type < schema::PrimitiveType_MIN)|| (type > schema::PrimitiveType_MAX)) - return -1; - populate_parameter_funcs_[type] = func; + if ((type < schema::PrimitiveType_MIN) || (type > schema::PrimitiveType_MAX)) return -1; + populate_parameter_funcs_[type] = func; return 0; } diff --git a/mindspore/lite/src/runtime/allocator.cc b/mindspore/lite/src/runtime/allocator.cc index a190e97072..772b5a8461 100644 --- a/mindspore/lite/src/runtime/allocator.cc +++ b/mindspore/lite/src/runtime/allocator.cc @@ -122,4 +122,3 @@ void DefaultAllocator::Clear() { UnLock(); } } // namespace mindspore::lite - diff --git a/mindspore/lite/src/runtime/allocator.h b/mindspore/lite/src/runtime/allocator.h index 4c868c1e22..fe16bfed8c 100644 --- a/mindspore/lite/src/runtime/allocator.h +++ b/mindspore/lite/src/runtime/allocator.h @@ -77,4 +77,3 @@ class DefaultAllocator : public Allocator { } // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_ALLOCATOR_H_ - diff --git a/mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.cc index 38bb63005d..e9c727902f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.cc @@ -68,10 +68,13 @@ int
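The AddPopulateParameterFunc change above only folds the bounds check onto one line; the behavior stays the same: a primitive type outside [PrimitiveType_MIN, PrimitiveType_MAX] is rejected before it can index the function table. A self-contained sketch of that guard, with made-up enum bounds and table size:

    #include <array>

    enum PrimitiveType { PrimitiveType_MIN = 0, PrimitiveType_MAX = 255 };  // bounds are made up
    struct OpParameter;
    using PopulateParameterFunc = OpParameter *(*)(const void *primitive);

    static std::array<PopulateParameterFunc, PrimitiveType_MAX + 1> populate_parameter_funcs_{};

    int AddPopulateParameterFunc(int type, PopulateParameterFunc func) {
      // Reject ids outside the schema range before they can index the table.
      if ((type < PrimitiveType_MIN) || (type > PrimitiveType_MAX)) return -1;
      populate_parameter_funcs_[type] = func;
      return 0;
    }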
ArgMinMaxBaseCPUKernel::ReSize() { } int ArgMinMaxBaseCPUKernel::Run() { - auto input_data = in_tensors_.at(0)->Data(); - auto output_data = out_tensors_.at(0)->Data(); + auto input_data = in_tensors_.at(0)->MutableData(); + auto output_data = out_tensors_.at(0)->MutableData(); + + auto in_tensor = in_tensors_.at(0)->shape(); + auto shape = reinterpret_cast<int *>(malloc(in_tensor.size() * sizeof(int))); + memcpy(shape, in_tensor.data(), in_tensor.size() * sizeof(int)); - auto shape = in_tensors_.at(0)->shape().data(); auto param = reinterpret_cast<ArgMinMaxParameter *>(op_parameter_); MS_ASSERT(context_->allocator != nullptr); if (param->topk_ > 1 || param->keep_dims_) { @@ -88,10 +91,9 @@ int ArgMinMaxBaseCPUKernel::Run() { return RET_OK; } -kernel::LiteKernel *CpuArgMinMaxInt8KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs, - OpParameter *op_parameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuArgMinMaxInt8KernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (op_parameter == nullptr) { MS_LOG(ERROR) << "Input op_parameter is nullptr!"; @@ -113,10 +115,9 @@ kernel::LiteKernel *CpuArgMinMaxInt8KernelCreator(const std::vector<lite::tenso -kernel::LiteKernel *CpuArgMinMaxFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs, - OpParameter *op_parameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuArgMinMaxFp32KernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (op_parameter == nullptr) { MS_LOG(ERROR) << "Input op_parameter is nullptr!"; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.h b/mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.h index 0605e405e3..636c66c2ae 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.h @@ -23,8 +23,8 @@ namespace mindspore::kernel { class ArgMinMaxBaseCPUKernel : public LiteKernel { public: - ArgMinMaxBaseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx, + ArgMinMaxBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} diff --git a/mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.cc index 5def8f0186..96fd47cb5e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.cc @@ -40,15 +40,15 @@ int BatchToSpaceBaseCPUKernel::Init() { } int BatchToSpaceBaseCPUKernel::ReSize() { - if (in_tensors_[0]->GetFormat() != schema::Format_NHWC) { + if (in_tensors_[0]->GetFormat() != schema::Format::Format_NHWC) { MS_LOG(ERROR) << "batch_to_space only support NHWC now!"; return RET_FORMAT_ERR; } return RET_OK; } -kernel::LiteKernel *CpuBatchToSpaceInt8KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs, +kernel::LiteKernel *CpuBatchToSpaceInt8KernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter, const lite::Context *ctx, const kernel::KernelKey &desc, const
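One fix in the ArgMinMax hunk above is easy to miss: the old code kept the raw pointer returned by in_tensors_.at(0)->shape().data(). If shape() returns its vector by value, which the added copy strongly suggests, that pointer dangles as soon as the temporary dies; the patch instead snapshots the dims into an owned buffer. A toy reproduction of the hazard and the fix, with a stand-in Tensor:

    #include <cstdlib>
    #include <cstring>
    #include <vector>

    struct Tensor {
      std::vector<int> dims{1, 3, 224, 224};
      std::vector<int> shape() const { return dims; }  // returns by value
    };

    int main() {
      Tensor t;
      // Buggy: const int *shape = t.shape().data();  // temporary vector dies here
      auto in_shape = t.shape();
      auto *shape = reinterpret_cast<int *>(malloc(in_shape.size() * sizeof(int)));
      if (shape == nullptr) return 1;
      memcpy(shape, in_shape.data(), in_shape.size() * sizeof(int));  // owned snapshot
      // ... use shape ...
      free(shape);  // whoever made the copy must release it after use
      return 0;
    }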
mindspore::lite::PrimitiveC *primitive) { @@ -73,8 +73,8 @@ kernel::LiteKernel *CpuBatchToSpaceInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, +kernel::LiteKernel *CpuBatchToSpaceFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *op_parameter, const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { diff --git a/mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.h b/mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.h index 6bd3ff8af3..2d454df1f6 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.h @@ -24,8 +24,8 @@ namespace mindspore::kernel { class BatchToSpaceBaseCPUKernel : public LiteKernel { public: - BatchToSpaceBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + BatchToSpaceBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} diff --git a/mindspore/lite/src/runtime/kernel/arm/base/concat_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/concat_base.cc index 0d8392f269..e6404b400c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/concat_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/concat_base.cc @@ -36,10 +36,9 @@ int ConcatBaseCPUKernel::ReSize() { return RET_OK; } -kernel::LiteKernel *CpuConcatInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuConcatInt8KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; @@ -61,10 +60,9 @@ kernel::LiteKernel *CpuConcatInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuConcatInt32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; @@ -86,10 +84,9 @@ kernel::LiteKernel *CpuConcatInt32KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuConcatFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/concat_base.h b/mindspore/lite/src/runtime/kernel/arm/base/concat_base.h index 8a3a531d6e..f13aa2fc35 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/concat_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/concat_base.h @@ -27,8 +27,8 @@ using mindspore::lite::Context; namespace mindspore::kernel { class ConcatBaseCPUKernel : public LiteKernel { public: - 
ConcatBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const Context *ctx, + ConcatBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { concat_param_ = reinterpret_cast(op_parameter_); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.cc index 88a1ac4818..1d305d2ac1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.cc @@ -105,10 +105,10 @@ int ConvolutionBaseCPUKernel::CheckResizeValid() { return RET_OK; } -int ConvolutionBaseCPUKernel::CheckLayout(lite::tensor::Tensor *input_tensor) { +int ConvolutionBaseCPUKernel::CheckLayout(lite::Tensor *input_tensor) { auto data_type = input_tensor->data_type(); auto input_format = input_tensor->GetFormat(); - schema::Format execute_format = schema::Format_NHWC4; + schema::Format execute_format = schema::Format::Format_NHWC4; convert_func_ = LayoutTransform(data_type, input_format, execute_format); if (convert_func_ == nullptr) { MS_LOG(ERROR) << "layout convert func is nullptr."; @@ -154,7 +154,7 @@ int ConvolutionBaseCPUKernel::SetIfAsymmetric() { uint8_t asymmetric = 0b0; auto filter_tensor = in_tensors_.at(kWeightIndex); auto filter_ele_num = filter_tensor->ElementsNum(); - auto filter_data = reinterpret_cast(filter_tensor->Data()); + auto filter_data = reinterpret_cast(filter_tensor->MutableData()); int min_value = INT8_MAX; int max_value = INT8_MIN; for (int i = 0; i < filter_ele_num; ++i) { @@ -326,7 +326,7 @@ int ConvolutionBaseCPUKernel::SetQuantParam() { return RET_OK; } -int ConvolutionBaseCPUKernel::RestoreFilter(lite::tensor::Tensor *input_tensor) { +int ConvolutionBaseCPUKernel::RestoreFilter(lite::Tensor *input_tensor) { MS_ASSERT(input_tensor != nullptr); if (input_tensor->data_type() != kNumberTypeUInt8) { MS_LOG(ERROR) << "conv weight input type error" << input_tensor->data_type(); @@ -336,8 +336,8 @@ int ConvolutionBaseCPUKernel::RestoreFilter(lite::tensor::Tensor *input_tensor) MS_LOG(ERROR) << "no quant param"; return RET_ERROR; } - const auto* quant_data = static_cast(input_tensor->Data()); - auto* dequant_data = static_cast(malloc(input_tensor->DataSize() * sizeof(float))); + const auto *quant_data = static_cast(input_tensor->MutableData()); + auto *dequant_data = static_cast(malloc(input_tensor->ElementsNum() * sizeof(float))); if (dequant_data == nullptr) { MS_LOG(ERROR) << "malloc faile"; return RET_ERROR; @@ -350,15 +350,15 @@ int ConvolutionBaseCPUKernel::RestoreFilter(lite::tensor::Tensor *input_tensor) free(dequant_data); return RET_ERROR; } - size_t per_channel_size = input_tensor->DataSize() / channels; + size_t per_channel_size = input_tensor->ElementsNum() / channels; auto quant_param = input_tensor->GetQuantParams(); for (size_t i = 0; i < channels; i++) { auto param = quant_param.at(i); auto scale = param.scale; auto zero_point = param.zeroPoint; for (size_t j = 0; j < per_channel_size; j++) { - dequant_data[per_channel_size * i + j] = static_cast( - (quant_data[per_channel_size * i + j] - zero_point) * scale); + dequant_data[per_channel_size * i + j] = + static_cast((quant_data[per_channel_size * i + j] - zero_point) * scale); } } } else { @@ -366,7 +366,7 @@ int 
ConvolutionBaseCPUKernel::RestoreFilter(lite::tensor::Tensor *input_tensor) auto param = quant_param.front(); auto scale = param.scale; auto zero_point = param.zeroPoint; - for (int64_t j = 0; j < input_tensor->DataSize(); j++) { + for (int64_t j = 0; j < input_tensor->ElementsNum(); j++) { dequant_data[j] = static_cast((quant_data[j] - zero_point) * scale); } } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.h b/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.h index 9dd5ba5966..447e61533d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.h @@ -37,8 +37,8 @@ static constexpr int kPerTensor = 1; namespace mindspore::kernel { class ConvolutionBaseCPUKernel : public LiteKernel { public: - ConvolutionBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const Context *ctx, + ConvolutionBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { op_parameter_->thread_num_ = ctx->thread_num_; @@ -49,7 +49,7 @@ class ConvolutionBaseCPUKernel : public LiteKernel { int Init() override; int ReSize() override { return 0; } int Run() override { return 0; } - virtual int CheckLayout(lite::tensor::Tensor *input_tensor); + virtual int CheckLayout(lite::Tensor *input_tensor); int SetIfAsymmetric(); int SetIfPerChannel(); int MallocQuantParam(); @@ -60,7 +60,7 @@ class ConvolutionBaseCPUKernel : public LiteKernel { int SetQuantMultiplier(); int CheckResizeValid(); void FreeQuantParam(); - static int RestoreFilter(lite::tensor::Tensor *input_tensor); + static int RestoreFilter(lite::Tensor *input_tensor); protected: int tile_num_; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/crop_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/crop_base.cc index 2322083c11..283d483022 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/crop_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/crop_base.cc @@ -30,10 +30,9 @@ using mindspore::schema::PrimitiveType_Crop; namespace mindspore::kernel { int CropBaseCPUKernel::Init() { return RET_OK; } -kernel::LiteKernel *CpuCropInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuCropInt8KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; @@ -55,10 +54,9 @@ kernel::LiteKernel *CpuCropInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuCropInt32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; @@ -80,10 +78,9 @@ kernel::LiteKernel *CpuCropInt32KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, 
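RestoreFilter above re-expands weight-quantized filters before the float kernels run. The loop the patch reindents, and re-bounds with ElementsNum(), is standard per-channel affine dequantization: each channel slice applies its own scale and zero point via dequant = (quant - zero_point) * scale. A standalone sketch, where QuantArg stands in for the tensor's quant params:

    #include <cstdint>
    #include <vector>

    struct QuantArg { float scale; int zeroPoint; };

    // Element i of channel c maps to (q - zeroPoint_c) * scale_c, mirroring
    // the per-channel branch of RestoreFilter.
    std::vector<float> DequantPerChannel(const std::vector<uint8_t> &quant,
                                         const std::vector<QuantArg> &params) {
      size_t channels = params.size();
      size_t per_channel_size = quant.size() / channels;  // ElementsNum() / channels
      std::vector<float> dequant(quant.size());
      for (size_t i = 0; i < channels; i++) {
        for (size_t j = 0; j < per_channel_size; j++) {
          size_t idx = per_channel_size * i + j;
          dequant[idx] = static_cast<float>((quant[idx] - params[i].zeroPoint) * params[i].scale);
        }
      }
      return dequant;
    }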
+kernel::LiteKernel *CpuCropFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/crop_base.h b/mindspore/lite/src/runtime/kernel/arm/base/crop_base.h index bd9338cf3a..a123a80e9d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/crop_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/crop_base.h @@ -26,8 +26,8 @@ using mindspore::lite::Context; namespace mindspore::kernel { class CropBaseCPUKernel : public LiteKernel { public: - CropBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const Context *ctx, + CropBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {} ~CropBaseCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.cc index b68e45b366..a803bb8141 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.cc @@ -34,7 +34,7 @@ namespace mindspore::kernel { int DepthToSpaceBaseCPUKernel::Init() { return RET_OK; } int DepthToSpaceBaseCPUKernel::ReSize() { - if (in_tensors_[0]->GetFormat() != schema::Format_NHWC) { + if (in_tensors_[0]->GetFormat() != schema::Format::Format_NHWC) { MS_LOG(ERROR) << "depth_to_space only support NHWC now!"; return RET_FORMAT_ERR; } @@ -61,8 +61,8 @@ int DepthToSpaceBaseCPUKernel::ReSize() { return RET_OK; } -kernel::LiteKernel *CpuDepthToSpaceInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, +kernel::LiteKernel *CpuDepthToSpaceInt8KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *op_parameter, const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { @@ -87,8 +87,8 @@ kernel::LiteKernel *CpuDepthToSpaceInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, +kernel::LiteKernel *CpuDepthToSpaceFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *op_parameter, const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { diff --git a/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.h b/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.h index b8c8ea8b4a..e90532ac49 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.h @@ -24,8 +24,8 @@ namespace mindspore::kernel { class DepthToSpaceBaseCPUKernel : public LiteKernel { public: - DepthToSpaceBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + DepthToSpaceBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} diff --git a/mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.cc 
b/mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.cc index e7d8eb3eeb..dfd5d88125 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.cc @@ -32,8 +32,8 @@ int FullconnectionBaseCPUKernel::Init() { return RET_OK; } -kernel::LiteKernel *CpuFullConnectionInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, +kernel::LiteKernel *CpuFullConnectionInt8KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { @@ -53,7 +53,7 @@ kernel::LiteKernel *CpuFullConnectionInt8KernelCreator(const std::vectordata_type() != kNumberTypeInt8) { MS_LOG(ERROR) << "full connect input type error" << input_tensor->data_type(); @@ -63,8 +63,12 @@ int RestoreFullconnectWeight(lite::tensor::Tensor *input_tensor) { MS_LOG(ERROR) << "no quant param"; return RET_ERROR; } - const auto* quant_data = static_cast(input_tensor->Data()); - auto* dequant_data = static_cast(malloc(input_tensor->DataSize() * sizeof(float))); + const auto *quant_data = static_cast(input_tensor->MutableData()); + if (quant_data == nullptr) { + MS_LOG(ERROR) << "input_tensor MutableData is nullptr."; + return RET_ERROR; + } + auto *dequant_data = static_cast(malloc(input_tensor->ElementsNum() * sizeof(float))); if (dequant_data == nullptr) { MS_LOG(ERROR) << "malloc faile"; return RET_ERROR; @@ -76,15 +80,15 @@ int RestoreFullconnectWeight(lite::tensor::Tensor *input_tensor) { MS_LOG(ERROR) << "Quant param not equal channel num " << input_tensor->GetQuantParams().size() << channels; return RET_ERROR; } - size_t per_channel_size = input_tensor->DataSize() / channels; + size_t per_channel_size = input_tensor->ElementsNum() / channels; auto quant_param = input_tensor->GetQuantParams(); for (size_t i = 0; i < channels; i++) { auto param = quant_param.at(i); auto scale = param.scale; auto zero_point = param.zeroPoint; for (size_t j = 0; j < per_channel_size; j++) { - dequant_data[per_channel_size * i + j] = static_cast( - (quant_data[per_channel_size * i + j] - zero_point) * scale); + dequant_data[per_channel_size * i + j] = + static_cast((quant_data[per_channel_size * i + j] - zero_point) * scale); } } } else { @@ -92,22 +96,23 @@ int RestoreFullconnectWeight(lite::tensor::Tensor *input_tensor) { auto param = quant_param.front(); auto scale = param.scale; auto zero_point = param.zeroPoint; - for (int64_t j = 0; j < input_tensor->DataSize(); j++) { + for (int64_t j = 0; j < input_tensor->ElementsNum(); j++) { dequant_data[j] = static_cast((quant_data[j] - zero_point) * scale); } } input_tensor->SetData(dequant_data); return RET_OK; } -kernel::LiteKernel *CpuFullConnectionFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, +kernel::LiteKernel *CpuFullConnectionFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Concat); auto *weight_tensor = inputs.at(kWeightIndex); - auto *restore_data = weight_tensor->Data(); + // data of second tensor of fc may be nullptr + auto *restore_data = weight_tensor->data_c(); if (!weight_tensor->GetQuantParams().empty()) { RestoreFullconnectWeight(inputs.at(kWeightIndex)); } diff --git 
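The fullconnection hunk above adds a guard the convolution path does not have: MutableData() on the weight tensor is checked against nullptr before the cast, since, per the new comment in the diff, the second tensor of a fully connected op may carry no data yet. The guard pattern, reduced to its essentials with a stand-in Tensor:

    #include <cstdint>
    #include <cstdio>

    struct Tensor {
      void *data_ = nullptr;
      void *MutableData() { return data_; }  // stand-in; may be null
    };

    int RestoreWeight(Tensor *input_tensor) {
      const auto *quant_data = static_cast<const uint8_t *>(input_tensor->MutableData());
      if (quant_data == nullptr) {
        fprintf(stderr, "input_tensor MutableData is nullptr.\n");
        return -1;  // maps to RET_ERROR in the patch
      }
      // ... dequantize quant_data as in DequantPerChannel above ...
      return 0;
    }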
a/mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.h b/mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.h index 9707f19d0a..c9e6b42f61 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.h @@ -28,8 +28,8 @@ static constexpr int kPerTensor = 1; namespace mindspore::kernel { class FullconnectionBaseCPUKernel : public LiteKernel { public: - FullconnectionBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const Context *ctx, + FullconnectionBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { fc_param_ = reinterpret_cast(op_parameter_); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/layout_transform.cc b/mindspore/lite/src/runtime/kernel/arm/base/layout_transform.cc index c7899b7bf7..cbdcfe8f53 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/layout_transform.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/layout_transform.cc @@ -20,25 +20,25 @@ using mindspore::schema::Format; namespace mindspore::kernel { LayoutConvertor LayoutTransformFp32(schema::Format src_format, schema::Format dst_format) { - if (src_format == schema::Format_NHWC && dst_format == schema::Format_NC4HW4) { + if (src_format == schema::Format::Format_NHWC && dst_format == schema::Format::Format_NC4HW4) { return PackNHWCToNC4HW4Fp32; - } else if (src_format == schema::Format_NHWC && dst_format == schema::Format_NHWC4) { + } else if (src_format == schema::Format::Format_NHWC && dst_format == schema::Format::Format_NHWC4) { return PackNHWCToNHWC4Fp32; - } else if (src_format == schema::Format_NC4HW4 && dst_format == schema::Format_NHWC4) { + } else if (src_format == schema::Format::Format_NC4HW4 && dst_format == schema::Format::Format_NHWC4) { return PackNC4HW4ToNHWC4Fp32; - } else if (src_format == schema::Format_NCHW && dst_format == schema::Format_NC4HW4) { + } else if (src_format == schema::Format::Format_NCHW && dst_format == schema::Format::Format_NC4HW4) { return PackNCHWToNC4HW4Fp32; - } else if (src_format == schema::Format_NC4HW4 && dst_format == schema::Format_NHWC) { + } else if (src_format == schema::Format::Format_NC4HW4 && dst_format == schema::Format::Format_NHWC) { return PackNC4HW4ToNHWCFp32; } else { - MS_LOG(ERROR) << "Unsupported transform from " << schema::EnumNameFormat(src_format) << " to " - << schema::EnumNameFormat(dst_format); + MS_LOG(ERROR) << "Unsupported transform from " << EnumNameFormat(src_format) << " to " + << EnumNameFormat(dst_format); return nullptr; } } LayoutConvertor LayoutTransformInt8(schema::Format src_format, schema::Format dst_format) { - if (src_format == schema::Format_NHWC && dst_format == schema::Format_NHWC4) { + if (src_format == schema::Format::Format_NHWC && dst_format == schema::Format::Format_NHWC4) { return PackNHWCToNHWC4Int8; } else { return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/layout_transform.h b/mindspore/lite/src/runtime/kernel/arm/base/layout_transform.h index a1c9fb2c2e..efa5da1e48 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/layout_transform.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/layout_transform.h @@ -23,6 +23,7 @@ #include "nnacl/pack.h" #include "ir/dtype/type_id.h" #include "schema/ops_generated.h" +#include 
"src/tensor.h" namespace mindspore::kernel { typedef void (*LayoutConvertor)(const void *src, void *dst, int batch, int plane, int channel); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/leaky_relu_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/leaky_relu_base.cc index a13739bc6d..1658f581a7 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/leaky_relu_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/leaky_relu_base.cc @@ -29,11 +29,10 @@ using mindspore::schema::PrimitiveType_LeakyReLU; namespace mindspore::kernel { int LeakyReluBaseCPUKernel::Init() { return RET_OK; } -kernel::LiteKernel *CpuLeakyReluInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { +kernel::LiteKernel *CpuLeakyReluInt8KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const Context *ctx, const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/leaky_relu_base.h b/mindspore/lite/src/runtime/kernel/arm/base/leaky_relu_base.h index 85dd45c7ef..e61a5633ba 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/leaky_relu_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/leaky_relu_base.h @@ -27,9 +27,9 @@ using mindspore::lite::Context; namespace mindspore::kernel { class LeakyReluBaseCPUKernel : public LiteKernel { public: - LeakyReluBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const Context *ctx, - const mindspore::lite::PrimitiveC *primitive) + LeakyReluBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const Context *ctx, + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~LeakyReluBaseCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/matmul_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/matmul_base.cc index 7ab281959e..b10a3e5212 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/matmul_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/matmul_base.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_MatMul; namespace mindspore::kernel { -int RestoreMatmulWeight(lite::tensor::Tensor *input_tensor) { +int RestoreMatmulWeight(lite::Tensor *input_tensor) { MS_ASSERT(input_tensor != nullptr); if (input_tensor->data_type() != kNumberTypeUInt8) { MS_LOG(ERROR) << "mat mul input type error" << input_tensor->data_type(); @@ -36,8 +36,12 @@ int RestoreMatmulWeight(lite::tensor::Tensor *input_tensor) { MS_LOG(ERROR) << "no quant param"; return RET_ERROR; } - const auto* quant_data = static_cast(input_tensor->Data()); - auto* dequant_data = static_cast(malloc(input_tensor->DataSize() * sizeof(float))); + const auto *quant_data = static_cast(input_tensor->MutableData()); + if (quant_data == nullptr) { + MS_LOG(ERROR) << "input_tensor MutableData is nullptr."; + return RET_ERROR; + } + auto *dequant_data = static_cast(malloc(input_tensor->ElementsNum() * sizeof(float))); if (dequant_data == nullptr) { MS_LOG(ERROR) << "malloc faile"; return RET_ERROR; @@ -49,15 +53,15 @@ int RestoreMatmulWeight(lite::tensor::Tensor *input_tensor) { MS_LOG(ERROR) << "Quant param not equal channel num " << 
input_tensor->GetQuantParams().size() << channels; return RET_ERROR; } - size_t per_channel_size = input_tensor->DataSize() / channels; + size_t per_channel_size = input_tensor->ElementsNum() / channels; auto quant_param = input_tensor->GetQuantParams(); for (size_t i = 0; i < channels; i++) { auto param = quant_param.at(i); auto scale = param.scale; auto zero_point = param.zeroPoint; for (size_t j = 0; j < per_channel_size; j++) { - dequant_data[per_channel_size * i + j] = static_cast( - (quant_data[per_channel_size * i + j] - zero_point) * scale); + dequant_data[per_channel_size * i + j] = + static_cast((quant_data[per_channel_size * i + j] - zero_point) * scale); } } } else { @@ -65,22 +69,26 @@ int RestoreMatmulWeight(lite::tensor::Tensor *input_tensor) { auto param = quant_param.front(); auto scale = param.scale; auto zero_point = param.zeroPoint; - for (int64_t j = 0; j < input_tensor->DataSize(); j++) { + for (int64_t j = 0; j < input_tensor->ElementsNum(); j++) { dequant_data[j] = static_cast((quant_data[j] - zero_point) * scale); } } input_tensor->SetData(dequant_data); return RET_OK; } -kernel::LiteKernel *CpuMatmulKernelCreator(const std::vector &inputs, - const std::vector &outputs, OpParameter *opParameter, +kernel::LiteKernel *CpuMatmulKernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Concat); auto *weight_tensor = inputs.at(kWeightIndex); - auto *restore_data = weight_tensor->Data(); + auto *restore_data = weight_tensor->MutableData(); + if (restore_data == nullptr) { + MS_LOG(ERROR) << "weight_tensor MutableData is nullptr."; + return nullptr; + } if (primitive->GetQuantType() == schema::QuantType_WeightQuant) { RestoreMatmulWeight(inputs.at(kWeightIndex)); } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/matmul_base.h b/mindspore/lite/src/runtime/kernel/arm/base/matmul_base.h index dd33c90131..f9c07c38ea 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/matmul_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/matmul_base.h @@ -28,8 +28,8 @@ static constexpr int kPerTensor = 1; namespace mindspore::kernel { class MatmulBaseCPUKernel : public LiteKernel { public: - MatmulBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const Context *ctx, + MatmulBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { params_ = reinterpret_cast(op_parameter_); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/pad.cc b/mindspore/lite/src/runtime/kernel/arm/base/pad.cc index b1971db1db..ea9e075549 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/pad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/pad.cc @@ -28,10 +28,9 @@ using mindspore::schema::PrimitiveType_Pad; namespace mindspore::kernel { -kernel::LiteKernel *CpuPadInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuPadInt8KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const 
mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Pad); @@ -50,10 +49,9 @@ kernel::LiteKernel *CpuPadInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuPadFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Pad); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.cc index 98891024b9..ee93ea5a99 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.cc @@ -90,10 +90,9 @@ int PoolingBaseCPUKernel::ReSize() { return RET_OK; } -kernel::LiteKernel *CpuPoolingInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuPoolingInt8KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; @@ -115,10 +114,9 @@ kernel::LiteKernel *CpuPoolingInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuPoolingFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.h b/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.h index afaae8b8a3..f02fb668f4 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.h @@ -28,8 +28,8 @@ using mindspore::lite::RET_OK; namespace mindspore::kernel { class PoolingBaseCPUKernel : public LiteKernel { public: - PoolingBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const Context *ctx, + PoolingBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { pooling_param_ = reinterpret_cast(op_parameter_); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/power_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/power_base.cc index 04789e32a2..8e1d6700ad 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/power_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/power_base.cc @@ -31,10 +31,9 @@ int PowerBaseCPUKernel::Init() { return RET_OK; } int PowerBaseCPUKernel::ReSize() { return RET_OK; } -kernel::LiteKernel *CpuPowerInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel 
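Every Cpu*KernelCreator touched in this patch shares the same skeleton, which is why the reindented signature repeats so often: validate the parameter, construct the kernel, run Init(), and hand the pointer back to the registry. A compressed sketch of that flow; LiteKernel and the surrounding types are stand-ins:

    #include <cstdio>
    #include <new>
    #include <vector>

    namespace lite { class Tensor; struct Context; }
    struct OpParameter;

    struct LiteKernel {
      virtual ~LiteKernel() = default;
      virtual int Init() { return 0; }
    };

    LiteKernel *CpuExampleKernelCreator(const std::vector<lite::Tensor *> &inputs,
                                        const std::vector<lite::Tensor *> &outputs,
                                        OpParameter *opParameter, const lite::Context *ctx) {
      (void)inputs; (void)outputs; (void)ctx;
      if (opParameter == nullptr) {
        fprintf(stderr, "Input opParameter is nullptr!\n");
        return nullptr;
      }
      auto *kernel = new (std::nothrow) LiteKernel();  // a concrete *CPUKernel in the real code
      if (kernel == nullptr || kernel->Init() != 0) {
        delete kernel;
        return nullptr;
      }
      return kernel;
    }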
*CpuPowerInt8KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; @@ -56,10 +55,9 @@ kernel::LiteKernel *CpuPowerInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuPowerFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Power); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/power_base.h b/mindspore/lite/src/runtime/kernel/arm/base/power_base.h index 044579b803..eb57deaebf 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/power_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/power_base.h @@ -24,8 +24,8 @@ namespace mindspore::kernel { class PowerBaseCPUKernel : public LiteKernel { public: - PowerBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + PowerBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) { param_ = reinterpret_cast(op_parameter_); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/prior_box.cc b/mindspore/lite/src/runtime/kernel/arm/base/prior_box.cc index 9cf795c862..874bc69c8b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/prior_box.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/prior_box.cc @@ -55,9 +55,7 @@ int PriorBoxCPUKernel::Init() { return ReSize(); } -int PriorBoxCPUKernel::ReSize() { - return GeneratePriorBox(); -} +int PriorBoxCPUKernel::ReSize() { return GeneratePriorBox(); } int PriorBoxCPUKernel::GeneratePriorBox() { const int fmap_w = in_tensors_[0]->Width(); @@ -149,7 +147,7 @@ int PriorBoxCPUKernel::PriorBoxImpl(int task_id) { if (output == nullptr) { return RET_NULL_PTR; } - auto ret = PriorBox(src, reinterpret_cast(output->Data()), output_.size(), task_id, thread_count_); + auto ret = PriorBox(src, reinterpret_cast(output->MutableData()), output_.size(), task_id, thread_count_); return ret; } @@ -178,10 +176,9 @@ int PriorBoxCPUKernel::Run() { return RET_OK; } -kernel::LiteKernel *CpuPriorBoxKernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *op_parameter, const Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuPriorBoxKernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *op_parameter, + const Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (op_parameter == nullptr) { MS_LOG(ERROR) << "Input op_parameter is nullptr!"; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/prior_box.h b/mindspore/lite/src/runtime/kernel/arm/base/prior_box.h index 5f26ce00c0..2781374ea0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/prior_box.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/prior_box.h @@ -27,8 +27,8 @@ using mindspore::lite::Context; namespace mindspore::kernel { class PriorBoxCPUKernel : public LiteKernel { public: - 
PriorBoxCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const Context *ctx, + PriorBoxCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { prior_box_param_ = reinterpret_cast(op_parameter_); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.cc b/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.cc index a1ba123cf5..2f381d64ee 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.cc @@ -67,7 +67,7 @@ int QuantDTypeCastCPUKernel::Init() { int QuantDTypeCastCPUKernel::ReSize() { auto in_tensor = in_tensors_.front(); - num_unit_ = static_cast(in_tensor->DataSize()); + num_unit_ = static_cast(in_tensor->ElementsNum()); thread_n_num_ = MSMIN(thread_num_, num_unit_); thread_n_stride_ = UP_DIV(num_unit_, thread_n_num_); return RET_OK; @@ -112,11 +112,11 @@ int QuantDTypeCastCPUKernel::Run() { return prepare_ret; } if (inverse_) { - int8_ptr_ = reinterpret_cast(in_tensors_[0]->Data()); - float32_ptr_ = reinterpret_cast(out_tensors_[0]->Data()); + int8_ptr_ = reinterpret_cast(in_tensors_[0]->MutableData()); + float32_ptr_ = reinterpret_cast(out_tensors_[0]->MutableData()); } else { - float32_ptr_ = reinterpret_cast(in_tensors_[0]->Data()); - int8_ptr_ = reinterpret_cast(out_tensors_[0]->Data()); + float32_ptr_ = reinterpret_cast(in_tensors_[0]->MutableData()); + int8_ptr_ = reinterpret_cast(out_tensors_[0]->MutableData()); } auto ret = ParallelLaunch(THREAD_POOL_DEFAULT, QuantDTypeCastRun, this, thread_n_num_); @@ -128,8 +128,8 @@ int QuantDTypeCastCPUKernel::Run() { return RET_OK; } -kernel::LiteKernel *CpuQuantDTypeCastFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, +kernel::LiteKernel *CpuQuantDTypeCastFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { diff --git a/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.h b/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.h index 73510c6825..703c9ee774 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.h @@ -23,8 +23,8 @@ namespace mindspore::kernel { class QuantDTypeCastCPUKernel : public LiteKernel { public: - QuantDTypeCastCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + QuantDTypeCastCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_num_(ctx->thread_num_) {} ~QuantDTypeCastCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/reduce_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/reduce_base.cc index 9f80f402a5..4024f3afe3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/reduce_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/reduce_base.cc @@ -102,7 +102,7 @@ int ReduceBaseCPUKernel::Init() { if (in_tensors_.size() > 1) { auto axes_ptr = in_tensors_.at(1); num_axes_ = axes_ptr->ElementsNum(); - memcpy(axes_, 
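In the QuantDTypeCast hunk above, the inverse_ flag decides which tensor holds int8 and which holds float32 before the parallel cast runs; the patch only renames the accessors, but the direction switch is worth spelling out:

    #include <cstdint>

    // Direction switch as in QuantDTypeCastCPUKernel::Run(): inverse_ means
    // int8 -> float32 (dequantize); otherwise float32 -> int8 (quantize).
    struct QuantDTypeCast {
      bool inverse_ = false;
      int8_t *int8_ptr_ = nullptr;
      float *float32_ptr_ = nullptr;

      void BindBuffers(void *in, void *out) {
        if (inverse_) {
          int8_ptr_ = static_cast<int8_t *>(in);     // input is quantized
          float32_ptr_ = static_cast<float *>(out);  // output is fp32
        } else {
          float32_ptr_ = static_cast<float *>(in);   // input is fp32
          int8_ptr_ = static_cast<int8_t *>(out);    // output is quantized
        }
      }
    };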
axes_ptr->Data(), axes_ptr->Size()); + memcpy(axes_, axes_ptr->MutableData(), axes_ptr->Size()); } else { num_axes_ = reduce_param->num_axes_; memcpy(axes_, reduce_param->axes_, sizeof(reduce_param->axes_)); @@ -122,10 +122,9 @@ int ReduceBaseCPUKernel::Init() { int ReduceBaseCPUKernel::ReSize() { return CheckParameters(); } -kernel::LiteKernel *CpuReduceFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuReduceFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Reduce); @@ -152,10 +151,9 @@ kernel::LiteKernel *CpuReduceFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuMeanFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Mean); @@ -182,10 +180,9 @@ kernel::LiteKernel *CpuMeanFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuReduceInt8KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Reduce); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/reduce_base.h b/mindspore/lite/src/runtime/kernel/arm/base/reduce_base.h index cb84429144..9a60f9a250 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/reduce_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/reduce_base.h @@ -25,8 +25,8 @@ namespace mindspore::kernel { class ReduceBaseCPUKernel : public LiteKernel { public: - ReduceBaseCPUKernel(OpParameter *param, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + ReduceBaseCPUKernel(OpParameter *param, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(param, inputs, outputs, ctx, primitive) {} virtual ~ReduceBaseCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.cc index 8232f34149..637fc38100 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.cc @@ -30,10 +30,9 @@ using mindspore::schema::PrimitiveType_Reshape; namespace mindspore::kernel { int ReshapeBaseCPUKernel::Init() { return RET_OK; } -kernel::LiteKernel *CpuReshapeInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuReshapeInt8KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const Context *ctx, const kernel::KernelKey &desc, const 
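ReduceBaseCPUKernel::Init above reads its reduction axes from two possible places: a second input tensor when one is provided (now fetched via MutableData()), otherwise the axes baked into the parameter struct. A sketch of that fallback, with a made-up capacity constant:

    #include <cstring>
    #include <vector>

    constexpr int kMaxAxes = 8;  // stand-in for the real capacity

    struct ReduceParameter { int num_axes_; int axes_[kMaxAxes]; };

    void LoadAxes(const std::vector<int> *axes_tensor, const ReduceParameter &param,
                  int axes[kMaxAxes], int *num_axes) {
      if (axes_tensor != nullptr) {  // axes supplied as an input tensor
        *num_axes = static_cast<int>(axes_tensor->size());
        memcpy(axes, axes_tensor->data(), axes_tensor->size() * sizeof(int));
      } else {                       // axes baked into the parameter
        *num_axes = param.num_axes_;
        memcpy(axes, param.axes_, sizeof(param.axes_));
      }
    }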
mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; @@ -55,10 +54,9 @@ kernel::LiteKernel *CpuReshapeInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuReshapeInt32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; @@ -80,10 +78,9 @@ kernel::LiteKernel *CpuReshapeInt32KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuReshapeFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.h b/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.h index f4b27c88e2..7ff2835521 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.h @@ -26,8 +26,8 @@ using mindspore::lite::Context; namespace mindspore::kernel { class ReshapeBaseCPUKernel : public LiteKernel { public: - ReshapeBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const Context *ctx, + ReshapeBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx) { reshape_param_ = reinterpret_cast(op_parameter_); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/resize_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/resize_base.cc index 62a17607ef..1aec98e9d8 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/resize_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/resize_base.cc @@ -105,10 +105,9 @@ int ResizeBaseCPUKernel::Init() { return RET_OK; } -kernel::LiteKernel *CpuResizeFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuResizeFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; @@ -131,10 +130,9 @@ kernel::LiteKernel *CpuResizeFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuResizeInt8KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/resize_base.h b/mindspore/lite/src/runtime/kernel/arm/base/resize_base.h index 
bf9662cb72..59b0afc24d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/resize_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/resize_base.h @@ -26,8 +26,8 @@ using mindspore::schema::ResizeMethod; namespace mindspore::kernel { class ResizeBaseCPUKernel : public LiteKernel { public: - ResizeBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + ResizeBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), context_(ctx) {} diff --git a/mindspore/lite/src/runtime/kernel/arm/base/slice_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/slice_base.cc index 3422547dc7..77a77978e6 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/slice_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/slice_base.cc @@ -61,10 +61,9 @@ int SliceBaseCPUKernel::ReSize() { return RET_OK; } -kernel::LiteKernel *CpuSliceInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuSliceInt8KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; @@ -86,10 +85,9 @@ kernel::LiteKernel *CpuSliceInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuSliceFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/slice_base.h b/mindspore/lite/src/runtime/kernel/arm/base/slice_base.h index 14e9e51b66..51d585d723 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/slice_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/slice_base.h @@ -24,8 +24,8 @@ namespace mindspore::kernel { class SliceBaseCPUKernel : public LiteKernel { public: - SliceBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + SliceBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) { param_ = reinterpret_cast(op_parameter_); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.cc index 87c00fe745..b06e26c73a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.cc @@ -56,10 +56,9 @@ int SoftmaxBaseCPUKernel::ReSize() { return RET_OK; } -kernel::LiteKernel *CpuSoftmaxInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuSoftmaxInt8KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter 
*opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; @@ -81,10 +80,9 @@ kernel::LiteKernel *CpuSoftmaxInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuSoftmaxFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.h b/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.h index a8febaf783..0c7bbda0a2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.h @@ -24,8 +24,8 @@ namespace mindspore::kernel { class SoftmaxBaseCPUKernel : public LiteKernel { public: - SoftmaxBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + SoftmaxBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { softmax_param_ = reinterpret_cast(op_parameter_); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/split_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/split_base.cc index 5c0ad059cf..7d4612dae4 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/split_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/split_base.cc @@ -64,10 +64,9 @@ int SplitBaseCPUKernel::ReSize() { return RET_OK; } -kernel::LiteKernel *CpuSplitInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuSplitInt8KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; @@ -89,10 +88,9 @@ kernel::LiteKernel *CpuSplitInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuSplitInt32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; @@ -114,10 +112,9 @@ kernel::LiteKernel *CpuSplitInt32KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuSplitFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/split_base.h 
diff --git a/mindspore/lite/src/runtime/kernel/arm/base/split_base.h b/mindspore/lite/src/runtime/kernel/arm/base/split_base.h
index e77e4fd0a1..45bcf96afc 100644
--- a/mindspore/lite/src/runtime/kernel/arm/base/split_base.h
+++ b/mindspore/lite/src/runtime/kernel/arm/base/split_base.h
@@ -26,8 +26,8 @@ using mindspore::lite::Context;
 namespace mindspore::kernel {
 class SplitBaseCPUKernel : public LiteKernel {
  public:
-  SplitBaseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                     const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
+  SplitBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                     const std::vector<lite::Tensor *> &outputs, const Context *ctx,
                      const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {
     param = reinterpret_cast<SplitParameter *>(op_parameter_);
diff --git a/mindspore/lite/src/runtime/kernel/arm/base/squeeze_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/squeeze_base.cc
index ff1d65ebfc..513dc390e4 100644
--- a/mindspore/lite/src/runtime/kernel/arm/base/squeeze_base.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/base/squeeze_base.cc
@@ -29,10 +29,9 @@ using mindspore::schema::PrimitiveType_Squeeze;
 namespace mindspore::kernel {
 int SqueezeBaseCPUKernel::Init() { return RET_OK; }
 
-kernel::LiteKernel *CpuSqueezeInt8KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                const std::vector<lite::tensor::Tensor *> &outputs,
-                                                OpParameter *opParameter, const Context *ctx,
-                                                const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuSqueezeInt8KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                                const Context *ctx, const kernel::KernelKey &desc,
                                                 const mindspore::lite::PrimitiveC *primitive) {
   if (opParameter == nullptr) {
     MS_LOG(ERROR) << "Input opParameter is nullptr!";
diff --git a/mindspore/lite/src/runtime/kernel/arm/base/squeeze_base.h b/mindspore/lite/src/runtime/kernel/arm/base/squeeze_base.h
index 27cf796265..6df6dc36f1 100644
--- a/mindspore/lite/src/runtime/kernel/arm/base/squeeze_base.h
+++ b/mindspore/lite/src/runtime/kernel/arm/base/squeeze_base.h
@@ -27,8 +27,8 @@ using mindspore::lite::Context;
 namespace mindspore::kernel {
 class SqueezeBaseCPUKernel : public LiteKernel {
  public:
-  SqueezeBaseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                       const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
+  SqueezeBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                       const std::vector<lite::Tensor *> &outputs, const Context *ctx,
                        const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {}
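The hunks that follow also rename the tensor buffer accessor from Data() to MutableData(), a change this patch applies across every kernel. A minimal call-site sketch, assuming a lite::Tensor *tensor holding fp32 data (the variable names are illustrative):

  // Old accessor: auto *buf = reinterpret_cast<float *>(tensor->Data());
  // New accessor after this patch:
  auto *buf = reinterpret_cast<float *>(tensor->MutableData());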
diff --git a/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.cc b/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.cc
index 7e1470e4c9..ef66899567 100644
--- a/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.cc
@@ -59,7 +59,8 @@ int StridedSliceCPUKernel::Run() {
   MS_ASSERT(input);
   MS_ASSERT(output);
-  ret = DoStridedSlice(input->Data(), output->Data(), reinterpret_cast<StridedSliceParameter *>(op_parameter_));
+  ret = DoStridedSlice(input->MutableData(), output->MutableData(),
+                       reinterpret_cast<StridedSliceParameter *>(op_parameter_));
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "StridedSlice error error_code[" << ret << "]";
     return RET_ERROR;
@@ -67,10 +68,9 @@ int StridedSliceCPUKernel::Run() {
   return RET_OK;
 }
 
-kernel::LiteKernel *CpuStridedSliceKernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                 const std::vector<lite::tensor::Tensor *> &outputs,
-                                                 OpParameter *opParameter, const lite::Context *ctx,
-                                                 const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuStridedSliceKernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                 const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                                 const lite::Context *ctx, const kernel::KernelKey &desc,
                                                  const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(desc.type == schema::PrimitiveType_StridedSlice);
   if (opParameter == nullptr) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.h b/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.h
index 9e3d8d693f..dc8349a5e3 100644
--- a/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.h
+++ b/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.h
@@ -24,8 +24,8 @@ namespace mindspore::kernel {
 class StridedSliceCPUKernel : public LiteKernel {
  public:
-  StridedSliceCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                        const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  StridedSliceCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                        const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                         const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~StridedSliceCPUKernel() override = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/activation_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/activation_fp16.cc
index 46dc6dc63e..0786c17bda 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/activation_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/activation_fp16.cc
@@ -38,7 +38,7 @@ int ActivationFp16CPUKernel::Init() { return RET_OK; }
 int ActivationFp16CPUKernel::ReSize() { return RET_OK; }
 
 int ActivationFp16CPUKernel::MallocTmpBuffer() {
-  fp16_input_ = ConvertInputFp32toFp16(in_tensors_.at(0), context_);
+  fp16_input_ = ConvertInputFp32toFp16(in_tensors_.at(0), context_);
   if (fp16_input_ == nullptr) {
     MS_LOG(ERROR) << "malloc data failed";
     return RET_ERROR;
@@ -124,16 +124,15 @@ int ActivationFp16CPUKernel::Run() {
   auto out_tensor = out_tensors_.at(0);
   if (out_tensor->data_type() == kNumberTypeFloat32) {
-    Float16ToFloat32(fp16_output_, reinterpret_cast<float *>(out_tensor->Data()), out_tensor->ElementsNum());
+    Float16ToFloat32(fp16_output_, reinterpret_cast<float *>(out_tensor->MutableData()), out_tensor->ElementsNum());
   }
   FreeTmpBuffer();
   return RET_OK;
 }
 
-kernel::LiteKernel *CpuActivationFp16KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                   const std::vector<lite::tensor::Tensor *> &outputs,
-                                                   OpParameter *opParameter, const lite::Context *ctx,
-                                                   const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuActivationFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                   const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                                   const lite::Context *ctx, const kernel::KernelKey &desc,
                                                    const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(opParameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_Activation);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/activation_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/activation_fp16.h
index 8cdfe18ef4..2374126959 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/activation_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/activation_fp16.h
@@ -24,8 +24,8 @@ namespace mindspore::kernel {
 class ActivationFp16CPUKernel : public LiteKernel {
  public:
-  ActivationFp16CPUKernel(OpParameter *param, const std::vector<lite::tensor::Tensor *> &inputs,
-                          const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  ActivationFp16CPUKernel(OpParameter *param, const std::vector<lite::Tensor *> &inputs,
+                          const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                           const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(param, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {
     type_ = (reinterpret_cast<ActivationParameter *>(param))->type_;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.cc
index 34a13724a5..cfc696d7ba 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.cc
@@ -30,6 +30,7 @@ using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_Add;
 using mindspore::schema::PrimitiveType_Div;
+using mindspore::schema::PrimitiveType_Eltwise;
 using mindspore::schema::PrimitiveType_Equal;
 using mindspore::schema::PrimitiveType_FloorDiv;
 using mindspore::schema::PrimitiveType_FloorMod;
@@ -45,7 +46,6 @@ using mindspore::schema::PrimitiveType_Mul;
 using mindspore::schema::PrimitiveType_NotEqual;
 using mindspore::schema::PrimitiveType_SquaredDifference;
 using mindspore::schema::PrimitiveType_Sub;
-using mindspore::schema::PrimitiveType_Eltwise;
 
 namespace mindspore::kernel {
 ARITHMETIC_FUNC_INFO_FP16 arithmetic_fun_table_fp16[] = {
@@ -207,7 +207,8 @@ int ArithmeticFP16CPUKernel::Run() {
     MS_LOG(ERROR) << "ArithmeticsRunFp16 run error error_code[" << ret << "]";
   }
   if (is_output_fp32_) {
-    Float16ToFloat32(output_fp16_, reinterpret_cast<float *>(output_tensor->Data()), output_tensor->ElementsNum());
+    Float16ToFloat32(output_fp16_, reinterpret_cast<float *>(output_tensor->MutableData()),
+                     output_tensor->ElementsNum());
   }
   FreeTmpBuffer();
   return ret;
@@ -228,10 +229,9 @@ void ArithmeticFP16CPUKernel::FreeTmpBuffer() {
   }
 }
 
-kernel::LiteKernel *CpuArithmeticFp16KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                   const std::vector<lite::tensor::Tensor *> &outputs,
-                                                   OpParameter *parameter, const lite::Context *ctx,
-                                                   const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuArithmeticFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                   const std::vector<lite::Tensor *> &outputs, OpParameter *parameter,
+                                                   const lite::Context *ctx, const kernel::KernelKey &desc,
                                                    const mindspore::lite::PrimitiveC *primitive) {
   if (parameter == nullptr) {
     MS_LOG(ERROR) << "input parameter is null!";
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.h
index d3ec77e461..2eca1833a2 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.h
@@ -35,8 +35,8 @@ typedef struct {
 class ArithmeticFP16CPUKernel : public LiteKernel {
  public:
-  ArithmeticFP16CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                          const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  ArithmeticFP16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                          const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                           const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {
     param_ = reinterpret_cast<ArithmeticParameter *>(parameter);
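Several fp16 kernels in this patch (arithmetic above, batchnorm below) keep an fp32 graph interface and convert on the way out. A sketch of that output path under the same assumptions (out_tensor is a lite::Tensor *, fp16_buf is the kernel's fp16 result buffer; both names are illustrative):

  if (out_tensor->data_type() == kNumberTypeFloat32) {
    // Kernel computed in fp16; widen into the caller-visible fp32 buffer.
    Float16ToFloat32(fp16_buf, reinterpret_cast<float *>(out_tensor->MutableData()), out_tensor->ElementsNum());
  }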
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/batchnorm_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/batchnorm_fp16.cc
index c3735673d3..472b021481 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/batchnorm_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/batchnorm_fp16.cc
@@ -36,10 +36,10 @@ int BatchnormFp16CPUKernel::InitConstTensor() {
       FreeMeanAndVariance();
       return RET_ERROR;
     }
-    Float32ToFloat16(reinterpret_cast<float *>(mean_fp32->Data()),
-                     reinterpret_cast<float16_t *>(mean_), mean_fp32->ElementsNum());
-    Float32ToFloat16(reinterpret_cast<float *>(variance_fp32->Data()),
-                     reinterpret_cast<float16_t *>(variance_), variance_fp32->ElementsNum());
+    Float32ToFloat16(reinterpret_cast<float *>(mean_fp32->MutableData()), reinterpret_cast<float16_t *>(mean_),
+                     mean_fp32->ElementsNum());
+    Float32ToFloat16(reinterpret_cast<float *>(variance_fp32->MutableData()), reinterpret_cast<float16_t *>(variance_),
+                     variance_fp32->ElementsNum());
   } else {
     BatchnormCPUKernel::InitConstTensor();
   }
@@ -67,7 +67,7 @@ int BatchnormFp16CPUKernel::Run() {
     MS_LOG(ERROR) << "BatchnormRun error error_code[" << ret << "]";
   }
   if (is_output_fp32_) {
-    Float16ToFloat32(output_, reinterpret_cast<float *>(output_tensor->Data()), output_tensor->ElementsNum());
+    Float16ToFloat32(output_, reinterpret_cast<float *>(output_tensor->MutableData()), output_tensor->ElementsNum());
   }
   FreeInputAndOutput();
   return ret;
@@ -90,10 +90,9 @@ void BatchnormFp16CPUKernel::FreeInputAndOutput() {
   }
 }
 
-kernel::LiteKernel *CpuBatchnormFp16KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                  const std::vector<lite::tensor::Tensor *> &outputs,
-                                                  OpParameter *opParameter, const lite::Context *ctx,
-                                                  const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuBatchnormFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                  const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                                  const lite::Context *ctx, const kernel::KernelKey &desc,
                                                   const mindspore::lite::PrimitiveC *primitive) {
   auto *kernel = new (std::nothrow) BatchnormFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
   if (kernel == nullptr) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/batchnorm_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/batchnorm_fp16.h
index eeec184169..c4d586f3a0 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/batchnorm_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/batchnorm_fp16.h
@@ -23,8 +23,8 @@ namespace mindspore::kernel {
 class BatchnormFp16CPUKernel : public BatchnormCPUKernel {
  public:
-  BatchnormFp16CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                         const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
+  BatchnormFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                         const std::vector<lite::Tensor *> &outputs, const Context *ctx,
                          const mindspore::lite::PrimitiveC *primitive)
       : BatchnormCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   virtual ~BatchnormFp16CPUKernel() {}
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.cc
index da776d2cc9..43112f8eaa 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.cc
@@ -65,14 +65,14 @@ int CastFp16CPUKernel::DoCast(int thread_id) {
   }
   auto offset = thread_id * stride_;
-  auto output_data = out_tensors_.at(0)->Data();
+  auto output_data = out_tensors_.at(0)->MutableData();
   switch (input->data_type()) {
     case kNumberTypeFloat32:
-      Float32ToFloat16(reinterpret_cast<float *>(input->Data()) + offset,
+      Float32ToFloat16(reinterpret_cast<float *>(input->MutableData()) + offset,
                        reinterpret_cast<float16_t *>(output_data) + offset, data_num);
       break;
     case kNumberTypeFloat16:
-      Float16ToFloat32(reinterpret_cast<float16_t *>(input->Data()) + offset,
+      Float16ToFloat32(reinterpret_cast<float16_t *>(input->MutableData()) + offset,
                        reinterpret_cast<float *>(output_data) + offset, data_num);
       break;
     default:
@@ -94,10 +94,9 @@ int CastFp16CPUKernel::Run() {
   return ParallelLaunch(THREAD_POOL_DEFAULT, CastRun, this, op_parameter_->thread_num_);
 }
 
-kernel::LiteKernel *CpuCastFp16KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                             const std::vector<lite::tensor::Tensor *> &outputs,
-                                             OpParameter *opParameter, const lite::Context *ctx,
-                                             const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuCastFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                             const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                             const lite::Context *ctx, const kernel::KernelKey &desc,
                                              const mindspore::lite::PrimitiveC *primitive) {
   if (opParameter == nullptr) {
     MS_LOG(ERROR) << "Input opParameter is nullptr!";
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.h
index f8a32d1965..3249aedebc 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.h
@@ -22,8 +22,8 @@ namespace mindspore::kernel {
 class CastFp16CPUKernel : public LiteKernel {
  public:
-  CastFp16CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                    const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  CastFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                    const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                     const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/common_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/common_fp16.cc
index c1e2ed56aa..6e733b77df 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/common_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/common_fp16.cc
@@ -18,28 +18,28 @@
 #include "nnacl/fp16/cast_fp16.h"
 
 namespace mindspore::kernel {
-float16_t *ConvertInputFp32toFp16(lite::tensor::Tensor *input, const lite::Context *ctx) {
+float16_t *ConvertInputFp32toFp16(lite::Tensor *input, const lite::Context *ctx) {
   float16_t *fp16_data = nullptr;
   auto data_type = input->data_type();
   if (data_type == kNumberTypeFloat32) {
     auto ele_num = input->ElementsNum();
     fp16_data = reinterpret_cast<float16_t *>(ctx->allocator->Malloc(ele_num * sizeof(float16_t)));
-    auto ori_data = reinterpret_cast<float *>(input->Data());
+    auto ori_data = reinterpret_cast<float *>(input->MutableData());
     Float32ToFloat16(ori_data, fp16_data, ele_num);
   } else {
-    fp16_data = reinterpret_cast<float16_t *>(input->Data());
+    fp16_data = reinterpret_cast<float16_t *>(input->MutableData());
   }
   return fp16_data;
 }
 
-float16_t *MallocOutputFp16(lite::tensor::Tensor *output, const lite::Context *ctx) {
+float16_t *MallocOutputFp16(lite::Tensor *output, const lite::Context *ctx) {
   float16_t *fp16_data = nullptr;
   auto data_type = output->data_type();
   if (data_type == kNumberTypeFloat32) {
     auto ele_num = output->ElementsNum();
     fp16_data = reinterpret_cast<float16_t *>(ctx->allocator->Malloc(ele_num * sizeof(float16_t)));
   } else {
-    fp16_data = reinterpret_cast<float16_t *>(output->Data());
+    fp16_data = reinterpret_cast<float16_t *>(output->MutableData());
   }
   return fp16_data;
 }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/common_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/common_fp16.h
index 9a177d0924..1d056f0517 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/common_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/common_fp16.h
@@ -19,9 +19,9 @@
 #include "src/lite_kernel.h"
 
 namespace mindspore::kernel {
-float16_t *ConvertInputFp32toFp16(lite::tensor::Tensor *input, const lite::Context *ctx);
+float16_t *ConvertInputFp32toFp16(lite::Tensor *input, const lite::Context *ctx);
 
-float16_t *MallocOutputFp16(lite::tensor::Tensor *output, const lite::Context *ctx);
+float16_t *MallocOutputFp16(lite::Tensor *output, const lite::Context *ctx);
 }  // namespace mindspore::kernel
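A hedged usage sketch for the two helpers declared above, as the activation kernel earlier in this patch appears to use them (error handling and allocator frees omitted; in_tensor, out_tensor, and ctx are illustrative names):

  // Converts to a scratch fp16 buffer only when the tensor actually holds fp32 data,
  // otherwise returns the tensor's own fp16 buffer.
  float16_t *in16 = ConvertInputFp32toFp16(in_tensor, ctx);
  // Allocates a scratch fp16 buffer only when the output tensor is fp32.
  float16_t *out16 = MallocOutputFp16(out_tensor, ctx);
  // ... run the fp16 kernel on in16/out16, then Float16ToFloat32 back if the output is fp32 ...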
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.cc
index d4137c09b6..d288df203f 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.cc
@@ -111,14 +111,14 @@ int ConcatFp16CPUKernel::Run() {
   for (size_t i = 0; i < input_num; ++i) {
     const auto in_tensor = in_tensors_[i];
     if (in_tensor->data_type() == kNumberTypeFloat || in_tensor->data_type() == kNumberTypeFloat32) {
-      auto in_tensor_data = reinterpret_cast<float *>(in_tensor->Data());
+      auto in_tensor_data = reinterpret_cast<float *>(in_tensor->MutableData());
       if (in_tensor_data == nullptr) {
         MS_LOG(ERROR) << "got nullptr when cast in_tensor to float ptr";
         return RET_ERROR;
       }
       Float32ToFloat16(in_tensor_data, fp16_inputs_[i], in_tensor->ElementsNum());
     } else {
-      fp16_inputs_[i] = reinterpret_cast<float16_t *>(in_tensor->Data());
+      fp16_inputs_[i] = reinterpret_cast<float16_t *>(in_tensor->MutableData());
     }
 
     shapes.push_back(in_tensors_[i]->shape());
@@ -126,9 +126,9 @@ int ConcatFp16CPUKernel::Run() {
   }
   auto output_shape = out_tensors_.at(0)->shape();
   inputs_output_shape[input_num] = output_shape.data();
-  auto output_addr = out_tensors_.at(0)->Data();
+  auto output_addr = out_tensors_.at(0)->MutableData();
   if (out_tensors_.at(0)->data_type() == kNumberTypeFloat16) {
-    fp16_output_ = reinterpret_cast<float16_t *>(out_tensors_.at(0)->Data());
+    fp16_output_ = reinterpret_cast<float16_t *>(out_tensors_.at(0)->MutableData());
   }
 
   ConcatFp16(reinterpret_cast<void **>(fp16_inputs_.data()), input_num, axis_, inputs_output_shape.data(),
@@ -141,10 +141,9 @@ int ConcatFp16CPUKernel::Run() {
   return RET_OK;
 }
 
-kernel::LiteKernel *CpuConcatFp16KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                               const std::vector<lite::tensor::Tensor *> &outputs,
-                                               OpParameter *opParameter, const Context *ctx,
-                                               const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuConcatFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                               const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                               const Context *ctx, const kernel::KernelKey &desc,
                                                const mindspore::lite::PrimitiveC *primitive) {
   if (opParameter == nullptr) {
     MS_LOG(ERROR) << "Input opParameter is nullptr!";
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.h
index 9af31e92b1..9b47ffc184 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.h
@@ -27,8 +27,8 @@ using mindspore::lite::Context;
 namespace mindspore::kernel {
 class ConcatFp16CPUKernel : public ConcatBaseCPUKernel {
  public:
-  ConcatFp16CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                      const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  ConcatFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                      const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                       const mindspore::lite::PrimitiveC *primitive)
       : ConcatBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_1x1_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_1x1_fp16.cc
index 888c2f8e1a..e78cc4c6c8 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_1x1_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_1x1_fp16.cc
@@ -96,9 +96,9 @@ int Convolution1x1FP16CPUKernel::InitWeightBias() {
   if (in_tensors_.size() == 3) {
     auto bias_tensor = in_tensors_.at(kBiasIndex);
     if (bias_tensor->data_type() == kNumberTypeFloat16) {
-      memcpy(bias_data_, bias_tensor->Data(), output_channel * sizeof(float16_t));
+      memcpy(bias_data_, bias_tensor->MutableData(), output_channel * sizeof(float16_t));
     } else {
-      Float32ToFloat16(reinterpret_cast<float *>(bias_tensor->Data()), reinterpret_cast<float16_t *>(bias_data_),
+      Float32ToFloat16(reinterpret_cast<float *>(bias_tensor->MutableData()), reinterpret_cast<float16_t *>(bias_data_),
                        output_channel);
     }
   }
@@ -110,7 +110,7 @@ int Convolution1x1FP16CPUKernel::InitWeightBias() {
     return RET_ERROR;
   }
   memset(weight_ptr_, 0, size);
-  ColMajor2Row8MajorFp16(weight_tensor->Data(), weight_ptr_, input_channel, output_channel,
+  ColMajor2Row8MajorFp16(weight_tensor->MutableData(), weight_ptr_, input_channel, output_channel,
                          weight_tensor->data_type() == kNumberTypeFloat16);
   return RET_OK;
 }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_1x1_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_1x1_fp16.h
index 61133c0486..551dd9a609 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_1x1_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_1x1_fp16.h
@@ -28,8 +28,8 @@ namespace mindspore::kernel {
 class Convolution1x1FP16CPUKernel : public ConvolutionBaseFP16CPUKernel {
  public:
-  Convolution1x1FP16CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                              const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
+  Convolution1x1FP16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                              const std::vector<lite::Tensor *> &outputs, const Context *ctx,
                               const mindspore::lite::PrimitiveC *primitive)
       : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~Convolution1x1FP16CPUKernel() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_3x3_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_3x3_fp16.cc
index 99eef1760e..6ffa2e9fa0 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_3x3_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_3x3_fp16.cc
@@ -81,7 +81,7 @@ int Convolution3x3FP16CPUKernel::InitWeightBias() {
   memset(bias_data_, 0, new_bias_size);
   auto fp16_bias_data = reinterpret_cast<float16_t *>(bias_data_);
   if (in_tensors_.size() == kInputSize2) {
-    auto ori_bias_addr = reinterpret_cast<float *>(in_tensors_.at(kBiasIndex)->Data());
+    auto ori_bias_addr = reinterpret_cast<float *>(in_tensors_.at(kBiasIndex)->MutableData());
     for (int i = 0; i < output_channel; ++i) {
       fp16_bias_data[i] = (float16_t)ori_bias_addr[i];
     }
@@ -133,7 +133,7 @@ int Convolution3x3FP16CPUKernel::InitTmpBuffer() {
 void Convolution3x3FP16CPUKernel::ConfigInputOutput() {
   auto input_tensor = in_tensors_.at(kInputIndex);
   auto input_format = input_tensor->GetFormat();
-  schema::Format execute_format = schema::Format_NHWC4;
+  schema::Format execute_format = schema::Format::Format_NHWC4;
   convert_func_ = LayoutTransformFp16(input_format, execute_format);
   if (convert_func_ == nullptr) {
     MS_LOG(ERROR) << "layout convert func is nullptr.";
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_3x3_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_3x3_fp16.h
index 507c6ae206..613e2a5931 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_3x3_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_3x3_fp16.h
@@ -26,8 +26,8 @@ namespace mindspore::kernel {
 class Convolution3x3FP16CPUKernel : public ConvolutionBaseFP16CPUKernel {
  public:
-  Convolution3x3FP16CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                              const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
+  Convolution3x3FP16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                              const std::vector<lite::Tensor *> &outputs, const Context *ctx,
                               const mindspore::lite::PrimitiveC *primitive)
       : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~Convolution3x3FP16CPUKernel() override {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_base_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_base_fp16.cc
index fc264c1737..22f7143046 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_base_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_base_fp16.cc
@@ -59,7 +59,7 @@ int ConvolutionBaseFP16CPUKernel::GetExecuteFilter() {
   MS_ASSERT(weight_data_type == kNumberTypeFloat32 || weight_data_type == kNumberTypeFloat16);
 
   if (weight_data_type == kNumberTypeFloat32) {
-    float *origin_weight = reinterpret_cast<float *>(in_tensors_.at(kWeightIndex)->Data());
+    float *origin_weight = reinterpret_cast<float *>(in_tensors_.at(kWeightIndex)->MutableData());
     size_t fp16_weight_size = input_channel * output_channel * kernel_h * kernel_w * sizeof(float16_t);
     fp16_weight_ = reinterpret_cast<float16_t *>(malloc(fp16_weight_size));
     if (fp16_weight_ == nullptr) {
@@ -71,7 +71,7 @@ int ConvolutionBaseFP16CPUKernel::GetExecuteFilter() {
     }
     execute_weight_ = fp16_weight_;
   } else {
-    auto *origin_weight = reinterpret_cast<float16_t *>(in_tensors_.at(kWeightIndex)->Data());
+    auto *origin_weight = reinterpret_cast<float16_t *>(in_tensors_.at(kWeightIndex)->MutableData());
     execute_weight_ = origin_weight;
     fp16_weight_ = nullptr;
   }
@@ -82,7 +82,7 @@ void ConvolutionBaseFP16CPUKernel::IfCastOutput() {
   if (out_data_type_ == kNumberTypeFloat32) {
     auto out_tensor = out_tensors_.at(kOutputIndex);
     auto out_ele_num = out_tensor->ElementsNum();
-    auto output_addr = reinterpret_cast<float *>(out_tensor->Data());
+    auto output_addr = reinterpret_cast<float *>(out_tensor->MutableData());
     Float16ToFloat32(execute_output_, output_addr, out_ele_num);
   }
 }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_base_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_base_fp16.h
index 5029c342c8..5d4c82bc18 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_base_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_base_fp16.h
@@ -26,8 +26,8 @@ namespace mindspore::kernel {
 class ConvolutionBaseFP16CPUKernel : public ConvolutionBaseCPUKernel {
  public:
-  ConvolutionBaseFP16CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                               const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
+  ConvolutionBaseFP16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                               const std::vector<lite::Tensor *> &outputs, const Context *ctx,
                                const mindspore::lite::PrimitiveC *primitive)
       : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~ConvolutionBaseFP16CPUKernel() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc
index fadaa906a5..4581daee20 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc
@@ -62,7 +62,7 @@ int ConvolutionDepthwiseFp16CPUKernel::InitWeightBias() {
   auto bias_fp16 = reinterpret_cast<float16_t *>(bias_data_);
   if (in_tensors_.size() == kInputSize2) {
     auto bias_tensor = in_tensors_.at(kBiasIndex);
-    auto ori_bias = reinterpret_cast<float *>(bias_tensor->Data());
+    auto ori_bias = reinterpret_cast<float *>(bias_tensor->MutableData());
     for (int i = 0; i < bias_tensor->ElementsNum(); i++) {
       bias_fp16[i] = (float16_t)ori_bias[i];
     }
@@ -136,10 +136,9 @@ int ConvolutionDepthwiseFp16CPUKernel::Run() {
   return RET_OK;
 }
 
-kernel::LiteKernel *CpuConvDwFp16KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                               const std::vector<lite::tensor::Tensor *> &outputs,
-                                               OpParameter *opParameter, const Context *ctx,
-                                               const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuConvDwFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                               const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                               const Context *ctx, const kernel::KernelKey &desc,
                                                const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(opParameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_DepthwiseConv2D);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.h
index ff0a3d0314..32687e7a53 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.h
@@ -34,8 +34,8 @@ void ConvDwFp16(float16_t *output_data, const float16_t *input_data, const float
 namespace mindspore::kernel {
 class ConvolutionDepthwiseFp16CPUKernel : public ConvolutionBaseFP16CPUKernel {
  public:
-  ConvolutionDepthwiseFp16CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                                    const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
+  ConvolutionDepthwiseFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                                    const std::vector<lite::Tensor *> &outputs, const Context *ctx,
                                     const mindspore::lite::PrimitiveC *primitive)
       : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~ConvolutionDepthwiseFp16CPUKernel() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.cc
index 4e8aa956f8..8d3a3a2fa5 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.cc
@@ -65,7 +65,7 @@ int ConvolutionDepthwiseSWFp16CPUKernel::InitWeightBias() {
   // init weight: o, h, w, i; o == group, i == 1
   auto weight_tensor = in_tensors_[kWeightIndex];
   int OC8 = UP_DIV(weight_tensor->Batch(), C8NUM);
-  auto origin_weight = reinterpret_cast<float16_t *>(weight_tensor->Data());
+  auto origin_weight = reinterpret_cast<float16_t *>(weight_tensor->MutableData());
   int pack_weight_size = C8NUM * OC8 * weight_tensor->Height() * weight_tensor->Width();
 
   packed_weight_ = reinterpret_cast<float16_t *>(malloc(pack_weight_size * sizeof(float16_t)));
@@ -85,7 +85,7 @@ int ConvolutionDepthwiseSWFp16CPUKernel::InitWeightBias() {
   auto bias_fp16 = reinterpret_cast<float16_t *>(bias_data_);
   if (in_tensors_.size() == kInputSize2) {
     auto bias_tensor = in_tensors_.at(kBiasIndex);
-    auto ori_bias = reinterpret_cast<float *>(bias_tensor->Data());
+    auto ori_bias = reinterpret_cast<float *>(bias_tensor->MutableData());
     for (int i = 0; i < bias_tensor->ElementsNum(); i++) {
       bias_fp16[i] = (float16_t)ori_bias[i];
     }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.h
index 582fde8eec..9b94ad8742 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.h
@@ -35,8 +35,8 @@ void ConvDwC8Fp16(float16_t *output_data, const float16_t *input_data, const flo
 namespace mindspore::kernel {
 class ConvolutionDepthwiseSWFp16CPUKernel : public ConvolutionBaseFP16CPUKernel {
  public:
-  ConvolutionDepthwiseSWFp16CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                                      const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
+  ConvolutionDepthwiseSWFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                                      const std::vector<lite::Tensor *> &outputs, const Context *ctx,
                                       const mindspore::lite::PrimitiveC *primitive)
       : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~ConvolutionDepthwiseSWFp16CPUKernel() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc
index 0b2ce749a2..c3bcff2847 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc
@@ -73,7 +73,7 @@ int ConvolutionFP16CPUKernel::InitWeightBias() {
   memset(bias_data_, 0, oc8 * C8NUM * sizeof(float16_t));
   auto fp16_bias_data = reinterpret_cast<float16_t *>(bias_data_);
   if (in_tensors_.size() == kInputSize2) {
-    auto ori_bias = reinterpret_cast<float *>(in_tensors_.at(kBiasIndex)->Data());
+    auto ori_bias = reinterpret_cast<float *>(in_tensors_.at(kBiasIndex)->MutableData());
     for (int i = 0; i < out_channel; ++i) {
       fp16_bias_data[i] = (float16_t)ori_bias[i];
     }
@@ -123,7 +123,7 @@ int ConvolutionFP16CPUKernel::InitTmpBuffer() {
 void ConvolutionFP16CPUKernel::ConfigInputOutput() {
   auto input_tensor = in_tensors_.at(kInputIndex);
   auto input_format = input_tensor->GetFormat();
-  schema::Format execute_format = schema::Format_NHWC4;
+  schema::Format execute_format = schema::Format::Format_NHWC4;
   convert_func_ = LayoutTransformFp16(input_format, execute_format);
   if (convert_func_ == nullptr) {
     MS_LOG(ERROR) << "layout convert func is nullptr.";
@@ -212,10 +212,9 @@ int ConvolutionFP16CPUKernel::Run() {
   return RET_OK;
 }
 
-kernel::LiteKernel *CpuConvFp16KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                             const std::vector<lite::tensor::Tensor *> &outputs,
-                                             OpParameter *opParameter, const Context *ctx,
-                                             const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuConvFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                             const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                             const Context *ctx, const kernel::KernelKey &desc,
                                              const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(opParameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_Conv2D);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.h
index 41c4aada5c..7c9a35ede7 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.h
@@ -25,8 +25,8 @@ namespace mindspore::kernel {
 class ConvolutionFP16CPUKernel : public ConvolutionBaseFP16CPUKernel {
  public:
-  ConvolutionFP16CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                           const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
+  ConvolutionFP16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                           const std::vector<lite::Tensor *> &outputs, const Context *ctx,
                            const mindspore::lite::PrimitiveC *primitive)
       : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~ConvolutionFP16CPUKernel() override {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_sw_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_sw_fp16.cc
index eb06a05c56..f8203a7951 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_sw_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_sw_fp16.cc
@@ -91,7 +91,7 @@ int ConvolutionSWFP16CPUKernel::InitWeightBias() {
   memset(bias_data_, 0, oc4 * C4NUM * sizeof(float16_t));
   auto fp16_bias_data = reinterpret_cast<float16_t *>(bias_data_);
   if (in_tensors_.size() == kInputSize2) {
-    auto ori_bias = reinterpret_cast<float *>(in_tensors_.at(kBiasIndex)->Data());
+    auto ori_bias = reinterpret_cast<float *>(in_tensors_.at(kBiasIndex)->MutableData());
     for (int i = 0; i < out_channel; ++i) {
       fp16_bias_data[i] = (float16_t)ori_bias[i];
     }
@@ -117,7 +117,7 @@ int ConvolutionSWFP16CPUKernel::InitTmpBuffer() {
 void ConvolutionSWFP16CPUKernel::ConfigInputOutput() {
   auto input_tensor = in_tensors_.at(kInputIndex);
   auto input_format = input_tensor->GetFormat();
-  schema::Format execute_format = schema::Format_NHWC4;
+  schema::Format execute_format = schema::Format::Format_NHWC4;
   convert_func_ = LayoutTransformFp16(input_format, execute_format);
   if (convert_func_ == nullptr) {
     MS_LOG(ERROR) << "layout convert func is nullptr.";
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_sw_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_sw_fp16.h
index 079c1fab10..d1ed0f6300 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_sw_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_sw_fp16.h
@@ -24,8 +24,8 @@ namespace mindspore::kernel {
 class ConvolutionSWFP16CPUKernel : public ConvolutionBaseFP16CPUKernel {
  public:
-  ConvolutionSWFP16CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                             const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
+  ConvolutionSWFP16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                             const std::vector<lite::Tensor *> &outputs, const Context *ctx,
                              const mindspore::lite::PrimitiveC *primitive)
       : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~ConvolutionSWFP16CPUKernel() override {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_winograd_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_winograd_fp16.cc
index 6ec07a0b7f..a61063508c 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_winograd_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_winograd_fp16.cc
@@ -159,7 +159,7 @@ int ConvolutionWinogradFP16CPUKernel::InitWeightBias() {
   memset(bias_data_, 0, oc_block_num * oc_block * sizeof(float16_t));
   auto fp16_bias_data = reinterpret_cast<float16_t *>(bias_data_);
   if (in_tensors_.size() == kInputSize2) {
-    auto ori_bias = reinterpret_cast<float *>(in_tensors_.at(kBiasIndex)->Data());
+    auto ori_bias = reinterpret_cast<float *>(in_tensors_.at(kBiasIndex)->MutableData());
     for (int i = 0; i < out_channel; ++i) {
       fp16_bias_data[i] = (float16_t)ori_bias[i];
     }
@@ -260,7 +260,7 @@ int ConvolutionWinogradFP16CPUKernel::InitTmpBuffer() {
 int ConvolutionWinogradFP16CPUKernel::ConfigInputOutput() {
   auto output_tensor = out_tensors_.at(kOutputIndex);
-  output_tensor->SetFormat(schema::Format_NHWC);
+  output_tensor->SetFormat(schema::Format::Format_NHWC);
 
   // choose input transformer function (4x4 unit or 8x8 unit)
   input_trans_func_ = GetInputTransFuncFp16(input_unit_);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_winograd_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_winograd_fp16.h
index f8a63de2d4..2440062b43 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_winograd_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_winograd_fp16.h
@@ -29,8 +29,8 @@ namespace mindspore::kernel {
 class ConvolutionWinogradFP16CPUKernel : public ConvolutionBaseFP16CPUKernel {
  public:
-  ConvolutionWinogradFP16CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                                   const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
+  ConvolutionWinogradFP16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                                   const std::vector<lite::Tensor *> &outputs, const Context *ctx,
                                    const mindspore::lite::PrimitiveC *primitive, int out_unit)
       : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive), output_unit_(out_unit) {}
   ~ConvolutionWinogradFP16CPUKernel() override {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc
index 8018f43f63..5b3f5f2936 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc
@@ -75,7 +75,7 @@ int DeconvolutionDepthwiseFp16CPUKernel::InitWeightBias() {
   // init weight: o, h, w, i; o == group, i == 1
   auto weight_tensor = in_tensors_[kWeightIndex];
   int OC8 = UP_DIV(weight_tensor->Batch(), C8NUM);
-  auto origin_weight = reinterpret_cast<float16_t *>(weight_tensor->Data());
+  auto origin_weight = reinterpret_cast<float16_t *>(weight_tensor->MutableData());
   int pack_weight_size = C8NUM * OC8 * weight_tensor->Height() * weight_tensor->Width();
 
   packed_weight_ = reinterpret_cast<float16_t *>(malloc(pack_weight_size * sizeof(float16_t)));
@@ -94,7 +94,7 @@ int DeconvolutionDepthwiseFp16CPUKernel::InitWeightBias() {
   memset(bias_data_, 0, C8NUM * OC8 * sizeof(float16_t));
   if (in_tensors_.size() == kInputSize2) {
     auto bias_tensor = in_tensors_.at(kBiasIndex);
-    auto ori_bias = reinterpret_cast<float *>(bias_tensor->Data());
+    auto ori_bias = reinterpret_cast<float *>(bias_tensor->MutableData());
     for (int i = 0; i < bias_tensor->ElementsNum(); i++) {
       reinterpret_cast<float16_t *>(bias_data_)[i] = (float16_t)ori_bias[i];
     }
@@ -195,10 +195,9 @@ int DeconvolutionDepthwiseFp16CPUKernel::Run() {
   return RET_OK;
 }
 
-kernel::LiteKernel *CpuDeconvDwFp16KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                 const std::vector<lite::tensor::Tensor *> &outputs,
-                                                 OpParameter *opParameter, const lite::Context *ctx,
-                                                 const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuDeconvDwFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                 const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                                 const lite::Context *ctx, const kernel::KernelKey &desc,
                                                  const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(opParameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_DeDepthwiseConv2D);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.h
index 539d129664..aa8392c8cb 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.h
@@ -36,8 +36,8 @@ void ComputeStrides(int *shape, int *strides, int ndim);
 namespace mindspore::kernel {
 class DeconvolutionDepthwiseFp16CPUKernel : public ConvolutionBaseFP16CPUKernel {
  public:
-  DeconvolutionDepthwiseFp16CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                                      const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
+  DeconvolutionDepthwiseFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                                      const std::vector<lite::Tensor *> &outputs, const Context *ctx,
                                       const mindspore::lite::PrimitiveC *primitive)
       : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~DeconvolutionDepthwiseFp16CPUKernel() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.cc
index 718ce95895..a4791925d8 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.cc
@@ -68,8 +68,8 @@ int DeConvolutionFp16CPUKernel::InitWeightBias() {
   }
   memset(bias_data_, 0, UP_ROUND(conv_param_->output_channel_, C4NUM) * sizeof(float16_t));
   if (in_tensors_.size() == 3) {
-    Float32ToFloat16(reinterpret_cast<float *>(in_tensors_[2]->Data()), reinterpret_cast<float16_t *>(bias_data_),
-                     conv_param_->output_channel_);
+    Float32ToFloat16(reinterpret_cast<float *>(in_tensors_[2]->MutableData()),
+                     reinterpret_cast<float16_t *>(bias_data_), conv_param_->output_channel_);
   }
 
   size_t weight_pack_size = conv_param_->input_channel_ * conv_param_->kernel_w_ * conv_param_->kernel_h_ *
@@ -80,7 +80,7 @@ int DeConvolutionFp16CPUKernel::InitWeightBias() {
     return RET_ERROR;
   }
   memset(execute_weight_, 0, weight_pack_size);
-  PackNHWCFp32ToC8HWN8Fp16(reinterpret_cast<float *>(in_tensors_[1]->Data()), execute_weight_,
+  PackNHWCFp32ToC8HWN8Fp16(reinterpret_cast<float *>(in_tensors_[1]->MutableData()), execute_weight_,
                            conv_param_->input_channel_, kernel_plane_, conv_param_->output_channel_);
   return RET_OK;
 }
@@ -204,10 +204,9 @@ int DeConvolutionFp16CPUKernel::Run() {
   return RET_OK;
 }
 
-kernel::LiteKernel *CpuDeConvFp16KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                               const std::vector<lite::tensor::Tensor *> &outputs,
-                                               OpParameter *opParameter, const lite::Context *ctx,
-                                               const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuDeConvFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                               const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                               const lite::Context *ctx, const kernel::KernelKey &desc,
                                                const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(opParameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_DeConv2D);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.h
index 68b125358b..130eaea936 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.h
@@ -32,8 +32,8 @@ namespace mindspore::kernel {
 class DeConvolutionFp16CPUKernel : public ConvolutionBaseFP16CPUKernel {
  public:
-  DeConvolutionFp16CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                             const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  DeConvolutionFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                             const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                              const mindspore::lite::PrimitiveC *primitive)
       : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive) {
     matmul_param_ = new (std::nothrow) MatMulParameter();
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/fullconnection_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/fullconnection_fp16.cc
index 58f62550a3..689c46b8e0 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/fullconnection_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/fullconnection_fp16.cc
@@ -76,7 +76,7 @@ int FullconnectionFP16CPUKernel::ReSize() {
   }
   memset(b_pack_ptr_, 0, fc_param_->col_8_ * fc_param_->deep_ * sizeof(float16_t));
-  InitMatrixB(reinterpret_cast<float *>(in_tensors_[1]->Data()), b_pack_ptr_);
+  InitMatrixB(reinterpret_cast<float *>(in_tensors_[1]->MutableData()), b_pack_ptr_);
   if (in_tensors_.size() == 3) {
     bias_ptr_ = reinterpret_cast<float16_t *>(ctx_->allocator->Malloc(fc_param_->col_8_ * sizeof(float16_t)));
     if (bias_ptr_ == nullptr) {
@@ -84,7 +84,7 @@ int FullconnectionFP16CPUKernel::ReSize() {
     return RET_MEMORY_FAILED;
   }
   memset(bias_ptr_, 0, fc_param_->col_8_ * sizeof(float16_t));
-  Float32ToFloat16(reinterpret_cast<float *>(in_tensors_[2]->Data()), bias_ptr_, fc_param_->col_);
+  Float32ToFloat16(reinterpret_cast<float *>(in_tensors_[2]->MutableData()), bias_ptr_, fc_param_->col_);
 }
   if (out_tensors_[0]->data_type() == kNumberTypeFloat32) {
@@ -147,24 +147,24 @@ int FullconnectionFP16CPUKernel::Run() {
   if (out_tensor->data_type() == kNumberTypeFloat32) {
     output_ptr_ = output_fp16_;
   } else {
-    output_ptr_ = reinterpret_cast<float16_t *>(out_tensor->Data());
+    output_ptr_ = reinterpret_cast<float16_t *>(out_tensor->MutableData());
   }
 
   if (in_tensors_[0]->data_type() == kNumberTypeFloat32) {
-    InitMatrixA(reinterpret_cast<float *>(in_tensors_[0]->Data()), a_pack_ptr_);
+    InitMatrixA(reinterpret_cast<float *>(in_tensors_[0]->MutableData()), a_pack_ptr_);
   } else {
-    InitMatrixA(reinterpret_cast<float16_t *>(in_tensors_[0]->Data()), a_pack_ptr_);
+    InitMatrixA(reinterpret_cast<float16_t *>(in_tensors_[0]->MutableData()), a_pack_ptr_);
   }
 
   ParallelLaunch(THREAD_POOL_DEFAULT, FcFP16Run, this, thread_count_);
 
   if (out_tensor->data_type() == kNumberTypeFloat32) {
     auto size = out_tensor->ElementsNum();
-    auto out_tensor_data = reinterpret_cast<float *>(out_tensor->Data());
+    auto out_tensor_data = reinterpret_cast<float *>(out_tensor->MutableData());
     Float16ToFloat32(output_fp16_, out_tensor_data, size);
   }
   return RET_OK;
 }
 
-kernel::LiteKernel *CpuFullConnectionFp16KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                       const std::vector<lite::tensor::Tensor *> &outputs,
+kernel::LiteKernel *CpuFullConnectionFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                       const std::vector<lite::Tensor *> &outputs,
                                                        OpParameter *opParameter, const lite::Context *ctx,
                                                        const kernel::KernelKey &desc,
                                                        const mindspore::lite::PrimitiveC *primitive) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/fullconnection_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/fullconnection_fp16.h
index 964a0b017a..a14017348e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/fullconnection_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/fullconnection_fp16.h
@@ -28,8 +28,8 @@ namespace mindspore::kernel {
 class FullconnectionFP16CPUKernel : public FullconnectionBaseCPUKernel {
  public:
-  explicit FullconnectionFP16CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                                       const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  explicit FullconnectionFP16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                                       const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                                        const mindspore::lite::PrimitiveC *primitive)
       : FullconnectionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~FullconnectionFP16CPUKernel() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/fused_batchnorm_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/fused_batchnorm_fp16.cc
index 4e097b8aa4..6ff5a25d48 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/fused_batchnorm_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/fused_batchnorm_fp16.cc
@@ -40,8 +40,8 @@ int FusedBatchnormFp16CPUKernel::DoExecute(int task_id) {
     auto mean_fp16 = context_->allocator->Malloc(mean->ElementsNum() * sizeof(float16_t));
     auto variance_fp16 = context_->allocator->Malloc(variance->ElementsNum() * sizeof(float16_t));
     auto output_fp16 = context_->allocator->Malloc(output->ElementsNum() * sizeof(float16_t));
-    if (input_fp16 == nullptr || scale_fp16 == nullptr || offset_fp16 == nullptr ||
-        mean_fp16 == nullptr || variance_fp16 == nullptr || output_fp16 == nullptr) {
+    if (input_fp16 == nullptr || scale_fp16 == nullptr || offset_fp16 == nullptr || mean_fp16 == nullptr ||
+        variance_fp16 == nullptr || output_fp16 == nullptr) {
       context_->allocator->Free(input_fp16);
      context_->allocator->Free(scale_fp16);
      context_->allocator->Free(offset_fp16);
@@ -49,19 +49,18 @@ int FusedBatchnormFp16CPUKernel::DoExecute(int task_id) {
       context_->allocator->Free(variance_fp16);
       context_->allocator->Free(output_fp16);
     }
-    Float32ToFloat16(reinterpret_cast<float *>(input->Data()),
-                     reinterpret_cast<float16_t *>(input_fp16), input->ElementsNum());
-    Float32ToFloat16(reinterpret_cast<float *>(scale->Data()),
-                     reinterpret_cast<float16_t *>(scale_fp16), scale->ElementsNum());
-    Float32ToFloat16(reinterpret_cast<float *>(offset->Data()),
-                     reinterpret_cast<float16_t *>(offset_fp16), offset->ElementsNum());
-    Float32ToFloat16(reinterpret_cast<float *>(mean->Data()),
-                     reinterpret_cast<float16_t *>(mean_fp16), mean->ElementsNum());
-    Float32ToFloat16(reinterpret_cast<float *>(variance->Data()),
-                     reinterpret_cast<float16_t *>(variance_fp16), variance->ElementsNum());
+    Float32ToFloat16(reinterpret_cast<float *>(input->MutableData()), reinterpret_cast<float16_t *>(input_fp16),
+                     input->ElementsNum());
+    Float32ToFloat16(reinterpret_cast<float *>(scale->MutableData()), reinterpret_cast<float16_t *>(scale_fp16),
+                     scale->ElementsNum());
+    Float32ToFloat16(reinterpret_cast<float *>(offset->MutableData()), reinterpret_cast<float16_t *>(offset_fp16),
+                     offset->ElementsNum());
+    Float32ToFloat16(reinterpret_cast<float *>(mean->MutableData()), reinterpret_cast<float16_t *>(mean_fp16),
+                     mean->ElementsNum());
+    Float32ToFloat16(reinterpret_cast<float *>(variance->MutableData()), reinterpret_cast<float16_t *>(variance_fp16),
+                     variance->ElementsNum());
 
-    FusedBatchNormFp16(input_fp16, scale_fp16, offset_fp16, mean_fp16, variance_fp16, param, task_id,
-                       output_fp16);
+    FusedBatchNormFp16(input_fp16, scale_fp16, offset_fp16, mean_fp16, variance_fp16, param, task_id, output_fp16);
 
     Float16ToFloat32(reinterpret_cast<float16_t *>(output_fp16), reinterpret_cast<float *>(output),
                      output->ElementsNum());
@@ -73,13 +72,13 @@ int FusedBatchnormFp16CPUKernel::DoExecute(int task_id) {
     context_->allocator->Free(output_fp16);
     return mindspore::lite::RET_OK;
   }
-  FusedBatchNormFp16(in_tensors_.at(0)->Data(), scale_, offset_, mean_, variance_, param, task_id,
-                     out_tensors_.at(0)->Data());
+  FusedBatchNormFp16(in_tensors_.at(0)->MutableData(), scale_, offset_, mean_, variance_, param, task_id,
+                     out_tensors_.at(0)->MutableData());
   return mindspore::lite::RET_OK;
 }
 
-kernel::LiteKernel *CpuFusedBatchnormFp16KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                       const std::vector<lite::tensor::Tensor *> &outputs,
+kernel::LiteKernel *CpuFusedBatchnormFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                       const std::vector<lite::Tensor *> &outputs,
                                                        OpParameter *op_parameter, const lite::Context *ctx,
                                                        const kernel::KernelKey &desc,
                                                        const mindspore::lite::PrimitiveC *primitive) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/fused_batchnorm_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/fused_batchnorm_fp16.h
index ab12c1eb12..27e14927fc 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/fused_batchnorm_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/fused_batchnorm_fp16.h
@@ -23,8 +23,8 @@ namespace mindspore::kernel {
 class FusedBatchnormFp16CPUKernel : public FusedBatchnormCPUKernel {
  public:
-  FusedBatchnormFp16CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                              const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
+  FusedBatchnormFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                              const std::vector<lite::Tensor *> &outputs, const Context *ctx,
                               const mindspore::lite::PrimitiveC *primitive)
       : FusedBatchnormCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   virtual ~FusedBatchnormFp16CPUKernel() {}
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/layout_transform_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/layout_transform_fp16.cc
index 32b05c44d9..7decc6ce5e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/layout_transform_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/layout_transform_fp16.cc
@@ -20,19 +20,19 @@
 namespace mindspore::kernel {
 LayoutConvertor LayoutTransformFp16(schema::Format src_format, schema::Format dst_format) {
-  if (src_format == schema::Format_NHWC && dst_format == schema::Format_NC4HW4) {
+  if (src_format == schema::Format::Format_NHWC && dst_format == schema::Format::Format_NC4HW4) {
     return PackNHWCToNC4HW4Fp16;
-  } else if (src_format == schema::Format_NHWC && dst_format == schema::Format_NHWC4) {
+  } else if (src_format == schema::Format::Format_NHWC && dst_format == schema::Format::Format_NHWC4) {
     return PackNHWCToNHWC4Fp16;
-  } else if (src_format == schema::Format_NC4HW4 && dst_format == schema::Format_NHWC4) {
+  } else if (src_format == schema::Format::Format_NC4HW4 && dst_format == schema::Format::Format_NHWC4) {
    return PackNC4HW4ToNHWC4Fp16;
-  } else if (src_format == schema::Format_NCHW && dst_format == schema::Format_NC4HW4) {
+  } else if (src_format == schema::Format::Format_NCHW && dst_format == schema::Format::Format_NC4HW4) {
     return PackNCHWToNC4HW4Fp16;
-  } else if (src_format == schema::Format_NC4HW4 && dst_format == schema::Format_NHWC) {
+  } else if (src_format == schema::Format::Format_NC4HW4 && dst_format == schema::Format::Format_NHWC) {
     return PackNC4HW4ToNHWCFp16;
   } else {
-    MS_LOG(ERROR) << "Unsupported transform from " << schema::EnumNameFormat(src_format) << " to "
-                  << schema::EnumNameFormat(dst_format);
+    MS_LOG(ERROR) << "Unsupported transform from " << EnumNameFormat(src_format) << " to "
+                  << EnumNameFormat(dst_format);
     return nullptr;
   }
 }
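The extra qualifier added throughout the hunk above suggests schema::Format became a scoped enum in the regenerated flatbuffers schema, so every bare Format_* constant now needs the enum-class prefix:

  schema::Format fmt = schema::Format::Format_NHWC4;  // was: schema::Format_NHWC4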
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/matmul_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/matmul_fp16.cc
index e1b4fcd7d8..c8cf94d8be 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/matmul_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/matmul_fp16.cc
@@ -91,17 +91,17 @@ int MatmulFP16CPUKernel::ReSize() {
   }
   memset(b_pack_ptr_, 0, params_->batch * params_->col_8_ * params_->deep_ * sizeof(float16_t));
-  params_->a_const_ = (in_tensors_[0]->Data() != nullptr);
-  params_->b_const_ = (in_tensors_[1]->Data() != nullptr);
+  params_->a_const_ = (in_tensors_[0]->MutableData() != nullptr);
+  params_->b_const_ = (in_tensors_[1]->MutableData() != nullptr);
   if (params_->a_const_ == true) {
     if (in_tensors_[0]->data_type() == kNumberTypeFloat32) {
-      InitMatrixA(reinterpret_cast<float *>(in_tensors_[0]->Data()), a_pack_ptr_);
+      InitMatrixA(reinterpret_cast<float *>(in_tensors_[0]->MutableData()), a_pack_ptr_);
     } else {
-      InitMatrixA(reinterpret_cast<float16_t *>(in_tensors_[0]->Data()), a_pack_ptr_);
+      InitMatrixA(reinterpret_cast<float16_t *>(in_tensors_[0]->MutableData()), a_pack_ptr_);
     }
   }
   if (params_->b_const_ == true) {
-    InitMatrixB(reinterpret_cast<float *>(in_tensors_[1]->Data()), b_pack_ptr_);
+    InitMatrixB(reinterpret_cast<float *>(in_tensors_[1]->MutableData()), b_pack_ptr_);
   }
 
   if (in_tensors_.size() == 3) {
@@ -111,7 +111,7 @@ int MatmulFP16CPUKernel::ReSize() {
     return RET_MEMORY_FAILED;
   }
   memset(bias_ptr_, 0, params_->col_8_ * sizeof(float16_t));
-  Float32ToFloat16(reinterpret_cast<float *>(in_tensors_[2]->Data()), bias_ptr_, params_->col_);
+  Float32ToFloat16(reinterpret_cast<float *>(in_tensors_[2]->MutableData()), bias_ptr_, params_->col_);
 }
 
   if (out_tensors_[0]->data_type() == kNumberTypeFloat32) {
@@ -194,19 +194,19 @@ int MatmulFP16CPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret;
     return prepare_ret;
   }
-  auto b = reinterpret_cast<float *>(in_tensors_[1]->Data());
+  auto b = reinterpret_cast<float *>(in_tensors_[1]->MutableData());
   auto out_tensor = out_tensors_[0];
   float16_t *c_ptr;
   if (out_tensor->data_type() == kNumberTypeFloat32) {
     c_ptr = output_ptr_;
   } else {
-    c_ptr = reinterpret_cast<float16_t *>(out_tensor->Data());
+    c_ptr = reinterpret_cast<float16_t *>(out_tensor->MutableData());
   }
   if (params_->a_const_ == false) {
     if (in_tensors_[0]->data_type() == kNumberTypeFloat32) {
-      InitMatrixA(reinterpret_cast<float *>(in_tensors_[0]->Data()), a_pack_ptr_);
+      InitMatrixA(reinterpret_cast<float *>(in_tensors_[0]->MutableData()), a_pack_ptr_);
     } else {
-      InitMatrixA(reinterpret_cast<float16_t *>(in_tensors_[0]->Data()), a_pack_ptr_);
+      InitMatrixA(reinterpret_cast<float16_t *>(in_tensors_[0]->MutableData()), a_pack_ptr_);
     }
   }
   if (params_->b_const_ == false) {
@@ -220,16 +220,15 @@ int MatmulFP16CPUKernel::Run() {
   }
   if (out_tensor->data_type() == kNumberTypeFloat32) {
     auto size = out_tensor->ElementsNum();
-    auto out_tensor_data = reinterpret_cast<float *>(out_tensor->Data());
+    auto out_tensor_data = reinterpret_cast<float *>(out_tensor->MutableData());
     Float16ToFloat32(output_ptr_, out_tensor_data, size);
   }
   return RET_OK;
 }
 
-kernel::LiteKernel *CpuMatmulFp16KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                               const std::vector<lite::tensor::Tensor *> &outputs,
-                                               OpParameter *opParameter, const lite::Context *ctx,
-                                               const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuMatmulFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                               const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                               const lite::Context *ctx, const kernel::KernelKey &desc,
                                                const mindspore::lite::PrimitiveC *primitive) {
   auto *kernel = new (std::nothrow) MatmulFP16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
   if (kernel == nullptr) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/matmul_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/matmul_fp16.h
index 2600f5d6ef..a13dc93ae4 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/matmul_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/matmul_fp16.h
@@ -28,8 +28,8 @@ namespace mindspore::kernel {
 class MatmulFP16CPUKernel : public MatmulBaseCPUKernel {
  public:
-  explicit MatmulFP16CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                               const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  explicit MatmulFP16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                               const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                                const mindspore::lite::PrimitiveC *primitive)
       : MatmulBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~MatmulFP16CPUKernel() override;
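The matmul ReSize/Run hunks above appear to use a null data pointer as a const-ness probe: an input whose buffer already exists at ReSize time is treated as constant and packed once there, instead of on every Run. A sketch of that fast path, reusing the names from the hunks:

  params_->b_const_ = (in_tensors_[1]->MutableData() != nullptr);
  if (params_->b_const_) {
    // Constant weights: pack once at ReSize, skip repacking in Run().
    InitMatrixB(reinterpret_cast<float *>(in_tensors_[1]->MutableData()), b_pack_ptr_);
  }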
MS_LOG(ERROR) << "Input opParameter is nullptr!"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/pooling_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/pooling_fp16.h index adf2145571..d0cad4d1cb 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/pooling_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/pooling_fp16.h @@ -24,8 +24,8 @@ namespace mindspore::kernel { class PoolingFp16CPUKernel : public PoolingBaseCPUKernel { public: - PoolingFp16CPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const Context *ctx, + PoolingFp16CPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const Context *ctx, const mindspore::lite::PrimitiveC *primitive) : PoolingBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~PoolingFp16CPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc index 5b689b0595..cae5b1c415 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc @@ -57,9 +57,7 @@ int ReduceFp16CPUKernel::Init() { return ReSize(); } -int ReduceFp16CPUKernel::ReSize() { - return ReduceBaseCPUKernel::ReSize(); -} +int ReduceFp16CPUKernel::ReSize() { return ReduceBaseCPUKernel::ReSize(); } int ReduceFp16CPUKernel::CallReduceUnit(int task_id) { auto ret = reducer_(outer_size_, inner_size_, axis_size_, fp16_src_data_, tmp_shape_.data(), fp16_dst_data_, task_id, @@ -93,10 +91,10 @@ int ReduceFp16CPUKernel::Run() { tmp_shape_ = in_tensors_.at(0)->shape(); auto in_tensor = in_tensors_.at(0); if (in_tensor->data_type() == kNumberTypeFloat32 || in_tensor->data_type() == kNumberTypeFloat) { - auto input_data = reinterpret_cast(in_tensor->Data()); + auto input_data = reinterpret_cast(in_tensor->MutableData()); Float32ToFloat16(input_data, fp16_input_, in_tensor->ElementsNum()); } else { - fp16_input_ = reinterpret_cast(in_tensor->Data()); + fp16_input_ = reinterpret_cast(in_tensor->MutableData()); } fp16_src_data_ = fp16_input_; @@ -124,10 +122,10 @@ int ReduceFp16CPUKernel::Run() { auto out_tensor = out_tensors_.at(0); if (out_tensor->data_type() == kNumberTypeFloat32 || out_tensor->data_type() == kNumberTypeFloat) { - dst_data_ = reinterpret_cast(out_tensor->Data()); + dst_data_ = reinterpret_cast(out_tensor->MutableData()); Float16ToFloat32(fp16_dst_data_, dst_data_, out_tensor->ElementsNum()); } else { - memcpy(out_tensor->Data(), fp16_dst_data_, out_tensor->ElementsNum() * sizeof(float16_t)); + memcpy(out_tensor->MutableData(), fp16_dst_data_, out_tensor->ElementsNum() * sizeof(float16_t)); } FreeTmpBuffer(); @@ -183,10 +181,9 @@ int ReduceFp16CPUKernel::MallocTmpBuffer() { return RET_OK; } -kernel::LiteKernel *CpuReduceFp16KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuReduceFp16KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Reduce); @@ -213,10 +210,9 @@ kernel::LiteKernel *CpuReduceFp16KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, 
+kernel::LiteKernel *CpuMeanFp16KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Mean); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.h index f94102f5ae..cb076c9375 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.h @@ -30,8 +30,8 @@ class ReduceFp16CPUKernel : public ReduceBaseCPUKernel { const int *src_shape, float16_t *dst_data, const int tid, const int thread_num); public: - ReduceFp16CPUKernel(OpParameter *param, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + ReduceFp16CPUKernel(OpParameter *param, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : ReduceBaseCPUKernel(param, inputs, outputs, ctx, primitive) {} ~ReduceFp16CPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/reshape_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/reshape_fp16.cc index 140779d5ad..cd3288fdee 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/reshape_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/reshape_fp16.cc @@ -38,8 +38,8 @@ int ReshapeFp16CPUKernel::Run() { } auto in_tensor = in_tensors_.at(kInputIndex); auto out_tensor = out_tensors_.at(kOutputIndex); - auto input_ptr = in_tensor->Data(); - auto output_ptr = out_tensor->Data(); + auto input_ptr = in_tensor->MutableData(); + auto output_ptr = out_tensor->MutableData(); size_t data_size = out_tensor->Size(); auto in_datatype = in_tensor->data_type(); @@ -51,7 +51,7 @@ int ReshapeFp16CPUKernel::Run() { MS_LOG(ERROR) << "malloc in tensor fail!"; return mindspore::lite::RET_MEMORY_FAILED; } - Float32ToFloat16(reinterpret_cast(in_tensor->Data()), reinterpret_cast(input_ptr), + Float32ToFloat16(reinterpret_cast(in_tensor->MutableData()), reinterpret_cast(input_ptr), in_tensor->ElementsNum()); } else if ((in_datatype == kNumberTypeFloat16 && out_datatype == kNumberTypeFloat32)) { input_ptr = context_->allocator->Malloc(in_tensor->ElementsNum() * sizeof(float)); @@ -59,7 +59,7 @@ int ReshapeFp16CPUKernel::Run() { MS_LOG(ERROR) << "malloc in tensor fail!"; return mindspore::lite::RET_MEMORY_FAILED; } - Float16ToFloat32(reinterpret_cast(in_tensor->Data()), reinterpret_cast(input_ptr), + Float16ToFloat32(reinterpret_cast(in_tensor->MutableData()), reinterpret_cast(input_ptr), in_tensor->ElementsNum()); } else { MS_LOG(ERROR) << "unsupported data type, in_datatype: " << in_datatype << ",out_datatype: " << out_datatype; @@ -74,10 +74,9 @@ int ReshapeFp16CPUKernel::Run() { return RET_OK; } -kernel::LiteKernel *CpuReshapeFp16KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuReshapeFp16KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/reshape_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/reshape_fp16.h 
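The reduce/reshape hunks above all share one pattern: when a bound tensor is fp32, the kernel stages it through a half-precision scratch buffer with nnacl's Float32ToFloat16/Float16ToFloat32 and runs the math in fp16. A self-contained sketch of that convert-at-the-edges flow follows; to avoid NEON types, the "half" here is bfloat16 (the float's top 16 bits) rather than the IEEE fp16 the real kernels use:

#include <cstdint>
#include <cstring>
#include <vector>

// Hedged sketch, assuming only the staging pattern; the conversion itself is
// bfloat16 truncation for portability, not the real nnacl fp16 routine.
static uint16_t ToHalf(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  return static_cast<uint16_t>(bits >> 16);
}
static float FromHalf(uint16_t h) {
  uint32_t bits = static_cast<uint32_t>(h) << 16;
  float f;
  std::memcpy(&f, &bits, sizeof(f));
  return f;
}

void RunInHalfPrecision(const std::vector<float> &in, std::vector<float> *out) {
  std::vector<uint16_t> staged(in.size());
  for (size_t i = 0; i < in.size(); ++i) staged[i] = ToHalf(in[i]);  // fp32 -> half
  // ... the half-precision kernel body would run here on `staged` ...
  out->resize(staged.size());
  for (size_t i = 0; i < staged.size(); ++i) (*out)[i] = FromHalf(staged[i]);  // half -> fp32
}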
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/reshape_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/reshape_fp16.h
index b501f554e8..50f50da85a 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/reshape_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/reshape_fp16.h
@@ -28,8 +28,8 @@ using mindspore::lite::Context;
 namespace mindspore::kernel {
 class ReshapeFp16CPUKernel : public ReshapeCPUKernel {
  public:
-  ReshapeFp16CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                       const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
+  ReshapeFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                       const std::vector<lite::Tensor *> &outputs, const Context *ctx,
                        const mindspore::lite::PrimitiveC *primitive)
       : ReshapeCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~ReshapeFp16CPUKernel() = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/softmax_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/softmax_fp16.cc
index edfe40b321..2bf28e91b3 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/softmax_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/softmax_fp16.cc
@@ -43,9 +43,7 @@ int SoftmaxFp16CPUKernel::Init() {
   return ReSize();
 }
 
-int SoftmaxFp16CPUKernel::ReSize() {
-  return SoftmaxBaseCPUKernel::ReSize();
-}
+int SoftmaxFp16CPUKernel::ReSize() { return SoftmaxBaseCPUKernel::ReSize(); }
 
 int SoftmaxFp16CPUKernel::MallocTmpBuffer() {
   auto n_dim = softmax_param_->n_dim_;
@@ -120,16 +118,15 @@ int SoftmaxFp16CPUKernel::Run() {
   SoftmaxFp16(input_fp16_, output_fp16_, sum_data_, softmax_param_);
   auto out_tensor = out_tensors_.at(kOutputIndex);
   if (out_tensor->data_type() == kNumberTypeFloat32) {
-    Float16ToFloat32(output_fp16_, reinterpret_cast<float *>(out_tensor->Data()), out_tensor->ElementsNum());
+    Float16ToFloat32(output_fp16_, reinterpret_cast<float *>(out_tensor->MutableData()), out_tensor->ElementsNum());
   }
   FreeTmpBuffer();
   return RET_OK;
 }
 
-kernel::LiteKernel *CpuSoftmaxFp16KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                const std::vector<lite::tensor::Tensor *> &outputs,
-                                                OpParameter *opParameter, const lite::Context *ctx,
-                                                const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuSoftmaxFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                                const lite::Context *ctx, const kernel::KernelKey &desc,
                                                 const mindspore::lite::PrimitiveC *primitive) {
   if (opParameter == nullptr) {
     MS_LOG(ERROR) << "Input opParameter is nullptr!";
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/softmax_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/softmax_fp16.h
index 669a595c2d..2bd2c03fc2 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/softmax_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/softmax_fp16.h
@@ -25,8 +25,8 @@
 namespace mindspore::kernel {
 class SoftmaxFp16CPUKernel : public SoftmaxBaseCPUKernel {
  public:
-  SoftmaxFp16CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                       const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  SoftmaxFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                       const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                        const mindspore::lite::PrimitiveC *primitive)
       : SoftmaxBaseCPUKernel(parameter, inputs, outputs, ctx, primitive), sum_data_(nullptr) {}
   ~SoftmaxFp16CPUKernel() = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/split_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/split_fp16.cc
index 3a4e9f41cc..53235b1465 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/split_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/split_fp16.cc
@@ -83,18 +83,18 @@ int SplitFp16CPUKernel::Run() {
   if (in_tensor->data_type() == kNumberTypeFloat32) {
     input_ptr_ =
       reinterpret_cast<float16_t *>(context_->allocator->Malloc(in_tensor->ElementsNum() * sizeof(float16_t)));
-    Float32ToFloat16(reinterpret_cast<float *>(in_tensor->Data()), input_ptr_, in_tensor->ElementsNum());
+    Float32ToFloat16(reinterpret_cast<float *>(in_tensor->MutableData()), input_ptr_, in_tensor->ElementsNum());
   } else {
-    input_ptr_ = reinterpret_cast<float16_t *>(in_tensor->Data());
+    input_ptr_ = reinterpret_cast<float16_t *>(in_tensor->MutableData());
   }
   for (int i = 0; i < param->num_split_; i++) {
     if (in_tensor->data_type() == kNumberTypeFloat32) {
       output_ptr_[i] = reinterpret_cast<float16_t *>(
         context_->allocator->Malloc(out_tensors_.at(i)->ElementsNum() * sizeof(float16_t)));
-      Float32ToFloat16(reinterpret_cast<float *>(out_tensors_.at(i)->Data()), output_ptr_[i],
+      Float32ToFloat16(reinterpret_cast<float *>(out_tensors_.at(i)->MutableData()), output_ptr_[i],
                        out_tensors_.at(i)->ElementsNum());
     } else {
-      output_ptr_[i] = reinterpret_cast<float16_t *>(out_tensors_.at(i)->Data());
+      output_ptr_[i] = reinterpret_cast<float16_t *>(out_tensors_.at(i)->MutableData());
     }
   }
   ret = ParallelLaunch(THREAD_POOL_DEFAULT, SplitRun, this, thread_n_num_);
@@ -115,10 +115,9 @@ int SplitFp16CPUKernel::Run() {
   return RET_OK;
 }
 
-kernel::LiteKernel *CpuSplitFp16KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                              const std::vector<lite::tensor::Tensor *> &outputs,
-                                              OpParameter *opParameter, const Context *ctx,
-                                              const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuSplitFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                              const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                              const Context *ctx, const kernel::KernelKey &desc,
                                              const mindspore::lite::PrimitiveC *primitive) {
   if (opParameter == nullptr) {
     MS_LOG(ERROR) << "Input opParameter is nullptr!";
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/split_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/split_fp16.h
index 9be678f747..f1be314b88 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/split_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/split_fp16.h
@@ -25,8 +25,8 @@
 namespace mindspore::kernel {
 class SplitFp16CPUKernel : public SplitBaseCPUKernel {
  public:
-  SplitFp16CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                     const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  SplitFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                     const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                      const mindspore::lite::PrimitiveC *primitive)
       : SplitBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~SplitFp16CPUKernel() override = default;
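Every creator and constructor in this patch gets the same two-line signature reflow because the tensor parameter type shrank from `lite::tensor::Tensor *` to `lite::Tensor *`; the bodies are untouched. For reference, a self-contained sketch of the post-patch creator shape, using stand-in declarations (they are not the real MindSpore Lite headers, and FooKernel is hypothetical):

#include <new>
#include <vector>

// Stand-ins so the sketch compiles on its own; not the real declarations.
namespace lite {
class Tensor {};
class Context {};
class PrimitiveC {};
}  // namespace lite
struct OpParameter {};
struct KernelKey {};

class FooKernel {
 public:
  FooKernel(OpParameter *, const std::vector<lite::Tensor *> &, const std::vector<lite::Tensor *> &,
            const lite::Context *, const lite::PrimitiveC *) {}
};

// After this patch the tensor vectors are std::vector<lite::Tensor *>; the
// null check and nothrow-new body are unchanged boilerplate in every creator.
FooKernel *CpuFooKernelCreator(const std::vector<lite::Tensor *> &inputs,
                               const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter,
                               const lite::Context *ctx, const KernelKey &desc,
                               const lite::PrimitiveC *primitive) {
  if (op_parameter == nullptr) {
    return nullptr;  // the real creators log via MS_LOG(ERROR) first
  }
  return new (std::nothrow) FooKernel(op_parameter, inputs, outputs, ctx, primitive);
}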
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.cc
index eca0714e18..c3c2f9a60b 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.cc
@@ -150,13 +150,13 @@ int TransposeFp16CPUKernel::Run() {
   }
 
   if (in_tensor->data_type() == kNumberTypeFloat || in_tensor->data_type() == kNumberTypeFloat32) {
-    in_data_ = reinterpret_cast<float *>(in_tensor->Data());
+    in_data_ = reinterpret_cast<float *>(in_tensor->MutableData());
     Float32ToFloat16(in_data_, fp16_in_data_, in_tensor->ElementsNum());
   } else {
-    fp16_in_data_ = reinterpret_cast<float16_t *>(in_tensor->Data());
+    fp16_in_data_ = reinterpret_cast<float16_t *>(in_tensor->MutableData());
   }
   if (out_tensor->data_type() == kNumberTypeFloat16) {
-    fp16_out_data_ = reinterpret_cast<float16_t *>(out_tensor->Data());
+    fp16_out_data_ = reinterpret_cast<float16_t *>(out_tensor->MutableData());
   }
 
   in_shape_ = const_cast<int *>(in_tensor->shape().data());
@@ -170,7 +170,7 @@ int TransposeFp16CPUKernel::Run() {
   }
 
   if (out_tensor->data_type() == kNumberTypeFloat || out_tensor->data_type() == kNumberTypeFloat32) {
-    out_data_ = reinterpret_cast<float *>(out_tensor->Data());
+    out_data_ = reinterpret_cast<float *>(out_tensor->MutableData());
     Float16ToFloat32(fp16_out_data_, out_data_, out_tensor->ElementsNum());
   }
   FreeFp16Buffer();
@@ -178,10 +178,9 @@ int TransposeFp16CPUKernel::Run() {
   return ret;
 }
 
-kernel::LiteKernel *CpuTransposeFp16KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                  const std::vector<lite::tensor::Tensor *> &outputs,
-                                                  OpParameter *opParameter, const lite::Context *ctx,
-                                                  const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuTransposeFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                  const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                                  const lite::Context *ctx, const kernel::KernelKey &desc,
                                                   const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(desc.type == schema::PrimitiveType_Transpose);
   if (opParameter == nullptr) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.h
index 8b819db606..36f4c58e9d 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.h
@@ -26,8 +26,8 @@
 namespace mindspore::kernel {
 class TransposeFp16CPUKernel : public LiteKernel {
  public:
-  explicit TransposeFp16CPUKernel(OpParameter *param, const std::vector<lite::tensor::Tensor *> &inputs,
-                                  const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  explicit TransposeFp16CPUKernel(OpParameter *param, const std::vector<lite::Tensor *> &inputs,
+                                  const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                                   const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(param, inputs, outputs, ctx, primitive), thread_num_(ctx->thread_num_) {}
   ~TransposeFp16CPUKernel() = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/activation.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/activation.cc
index 3b61a0c7ca..98885f5ebe 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/activation.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/activation.cc
@@ -36,8 +36,8 @@ int ActivationCPUKernel::Init() { return RET_OK; }
 int ActivationCPUKernel::ReSize() { return RET_OK; }
 
 int ActivationCPUKernel::DoActivation(int task_id) {
-  auto input_addr = reinterpret_cast<float *>(in_tensors_.at(0)->Data());
-  auto output_addr = reinterpret_cast<float *>(out_tensors_.at(0)->Data());
+  auto input_addr = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
+  auto output_addr = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
   auto length = in_tensors_.at(0)->ElementsNum();
 
   int stride = UP_DIV(length, thread_count_);
@@ -91,10 +91,9 @@ int ActivationCPUKernel::Run() {
   return RET_OK;
 }
 
-kernel::LiteKernel *CpuActivationFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                   const std::vector<lite::tensor::Tensor *> &outputs,
-                                                   OpParameter *opParameter, const lite::Context *ctx,
-                                                   const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuActivationFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                   const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                                   const lite::Context *ctx, const kernel::KernelKey &desc,
                                                    const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(opParameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_Activation);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/activation.h b/mindspore/lite/src/runtime/kernel/arm/fp32/activation.h
index d5e7a050f2..fb8692d212 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/activation.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/activation.h
@@ -24,8 +24,8 @@
 namespace mindspore::kernel {
 class ActivationCPUKernel : public LiteKernel {
  public:
-  ActivationCPUKernel(OpParameter *param, const std::vector<lite::tensor::Tensor *> &inputs,
-                      const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  ActivationCPUKernel(OpParameter *param, const std::vector<lite::Tensor *> &inputs,
+                      const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                       const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(param, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {
     type_ = (reinterpret_cast<ActivationParameter *>(param))->type_;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/addn.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/addn.cc
index c5cb1b6d07..25d0d8bf4f 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/addn.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/addn.cc
@@ -61,13 +61,13 @@ int AddNCPUKernel::Run() {
     return ret;
   }
   elements_num_ = in_tensors_[0]->ElementsNum();
-  auto input0_data = reinterpret_cast<float *>(in_tensors_[0]->Data());
-  auto input1_data = reinterpret_cast<float *>(in_tensors_[1]->Data());
-  auto output_data = reinterpret_cast<float *>(out_tensors_[0]->Data());
+  auto input0_data = reinterpret_cast<float *>(in_tensors_[0]->MutableData());
+  auto input1_data = reinterpret_cast<float *>(in_tensors_[1]->MutableData());
+  auto output_data = reinterpret_cast<float *>(out_tensors_[0]->MutableData());
   if (static_cast<int>(elements_num_) < op_parameter_->thread_num_) {
     ElementAdd(input0_data, input1_data, output_data, elements_num_);
     for (size_t i = 2; i < in_tensors_.size(); ++i) {
-      ElementAdd(reinterpret_cast<float *>(in_tensors_[i]->Data()), output_data, output_data, elements_num_);
+      ElementAdd(reinterpret_cast<float *>(in_tensors_[i]->MutableData()), output_data, output_data, elements_num_);
     }
     return RET_OK;
   }
@@ -80,7 +80,7 @@ int AddNCPUKernel::Run() {
     return RET_ERROR;
   }
   for (size_t i = 2; i < in_tensors_.size(); ++i) {
-    in1_addr_ = reinterpret_cast<float *>(in_tensors_[i]->Data());
+    in1_addr_ = reinterpret_cast<float *>(in_tensors_[i]->MutableData());
     in2_addr_ = output_data;
     ret = ParallelLaunch(THREAD_POOL_DEFAULT, AddNLaunch, this, op_parameter_->thread_num_);
     if (ret != RET_OK) {
@@ -91,10 +91,9 @@ int AddNCPUKernel::Run() {
   return RET_OK;
 }
 
-kernel::LiteKernel *CpuAddNFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                             const std::vector<lite::tensor::Tensor *> &outputs,
-                                             OpParameter *op_parameter, const lite::Context *ctx,
-                                             const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuAddNFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                             const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter,
+                                             const lite::Context *ctx, const kernel::KernelKey &desc,
                                              const mindspore::lite::PrimitiveC *primitive) {
   if (op_parameter == nullptr) {
     MS_LOG(ERROR) << "Input op_parameter is nullptr!";
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/addn.h b/mindspore/lite/src/runtime/kernel/arm/fp32/addn.h
index 51b25d4934..091529553e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/addn.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/addn.h
@@ -24,8 +24,8 @@
 namespace mindspore::kernel {
 class AddNCPUKernel : public LiteKernel {
  public:
-  AddNCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  AddNCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                 const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~AddNCPUKernel() = default;
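AddNCPUKernel::Run above folds its N inputs pairwise: out = in0 + in1 first, then out += in_i for each remaining input, either single-threaded for small tensors or via ParallelLaunch. A scalar reference of that fold (ElementAddRef stands in for nnacl's vectorized ElementAdd):

#include <cassert>
#include <vector>

// Hedged sketch of AddN's accumulation order, assuming equal-length inputs.
static void ElementAddRef(const float *a, const float *b, float *out, int n) {
  for (int i = 0; i < n; ++i) out[i] = a[i] + b[i];
}

void AddNRef(const std::vector<const float *> &ins, float *out, int n) {
  assert(ins.size() >= 2);
  ElementAddRef(ins[0], ins[1], out, n);  // out = in0 + in1
  for (size_t i = 2; i < ins.size(); ++i) {
    ElementAddRef(ins[i], out, out, n);   // out += in_i, accumulated in place
  }
}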
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/argminmax.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/argminmax.cc
index 8a72522e1e..725e303635 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/argminmax.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/argminmax.cc
@@ -42,9 +42,7 @@ int ArgMinMaxCPUKernel::Init() {
   return ReSize();
 }
 
-int ArgMinMaxCPUKernel::ReSize() {
-  return ArgMinMaxBaseCPUKernel::ReSize();
-}
+int ArgMinMaxCPUKernel::ReSize() { return ArgMinMaxBaseCPUKernel::ReSize(); }
 
 int ArgMinMaxCPUKernel::Run() {
   auto ret = Prepare();
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/argminmax.h b/mindspore/lite/src/runtime/kernel/arm/fp32/argminmax.h
index be2e4a16a1..c91ede1168 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/argminmax.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/argminmax.h
@@ -22,8 +22,8 @@
 namespace mindspore::kernel {
 class ArgMinMaxCPUKernel : public ArgMinMaxBaseCPUKernel {
  public:
-  ArgMinMaxCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                     const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  ArgMinMaxCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                     const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                      const mindspore::lite::PrimitiveC *primitive)
       : ArgMinMaxBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.cc
index 9b9ea75fb8..727a063c5c 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.cc
@@ -94,7 +94,7 @@ int ArithmeticCPUKernel::ReSize() {
           break;
       }
       break;
-      case PrimitiveType_Div:
+    case PrimitiveType_Div:
       switch (arithmeticParameter_->activation_type_) {
         case schema::ActivationType_RELU:
           arithmeticParameter_->broadcasting_ = false;
@@ -118,7 +118,7 @@ int ArithmeticCPUKernel::ReSize() {
 }
 
 int ArithmeticCPUKernel::BroadcastRun(float *input0, float *input1, float *output, int dim, int out_count,
-                                       int out_thread_stride) {
+                                      int out_thread_stride) {
   if (dim > break_pos_) {
     return arithmetic_run_(input0 + out_thread_stride, input1 + out_thread_stride, output + out_thread_stride,
                            out_count);
@@ -128,8 +128,8 @@ int ArithmeticCPUKernel::BroadcastRun(float *input0, float *input1, float *output,
     int pos1_ = arithmeticParameter_->in_shape1_[dim] == 1 ? 0 : i;
     int error_code = BroadcastRun(input0 + pos0_ * arithmeticParameter_->in_strides0_[dim],
-                                   input1 + pos1_ * arithmeticParameter_->in_strides1_[dim],
-                                   output + i * arithmeticParameter_->out_strides_[dim], dim + 1, out_count, out_thread_stride);
+                                  input1 + pos1_ * arithmeticParameter_->in_strides1_[dim],
+                                  output + i * arithmeticParameter_->out_strides_[dim], dim + 1, out_count, out_thread_stride);
     if (error_code != RET_OK) {
       return error_code;
     }
@@ -138,9 +138,9 @@ int ArithmeticCPUKernel::BroadcastRun(float *input0, float *input1, float *output,
 }
 
 int ArithmeticCPUKernel::DoArithmetic(int task_id) {
-  auto input0_data = reinterpret_cast<float *>(in_tensors_[0]->Data());
-  auto input1_data1 = reinterpret_cast<float *>(in_tensors_[1]->Data());
-  auto output_data = reinterpret_cast<float *>(out_tensors_[0]->Data());
+  auto input0_data = reinterpret_cast<float *>(in_tensors_[0]->MutableData());
+  auto input1_data1 = reinterpret_cast<float *>(in_tensors_[1]->MutableData());
+  auto output_data = reinterpret_cast<float *>(out_tensors_[0]->MutableData());
   auto element_num = out_tensors_[0]->ElementsNum();
 
   MS_ASSERT(thread_count_ != 0);
@@ -218,10 +218,9 @@ int ArithmeticCPUKernel::Run() {
   return RET_OK;
 }
 
-kernel::LiteKernel *CpuArithmeticFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                   const std::vector<lite::tensor::Tensor *> &outputs,
-                                                   OpParameter *parameter, const lite::Context *ctx,
-                                                   const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuArithmeticFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                   const std::vector<lite::Tensor *> &outputs, OpParameter *parameter,
+                                                   const lite::Context *ctx, const kernel::KernelKey &desc,
                                                    const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(parameter != nullptr);
   auto kernel = new (std::nothrow) ArithmeticCPUKernel(parameter, inputs, outputs, ctx, primitive);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.h b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.h
index c55bf35bfa..f979653fb6 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.h
@@ -47,8 +47,8 @@ class ArithmeticCPUKernel : public LiteKernel {
                             ArithmeticParameter *param);
 
  public:
-  ArithmeticCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                      const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  ArithmeticCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                      const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                       const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {
     arithmeticParameter_ = reinterpret_cast<ArithmeticParameter *>(parameter);
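BroadcastRun above implements stride-based broadcasting: it recurses over the outer dimensions, pinning an input's index to 0 wherever that input's extent is 1, and hands the innermost contiguous run to the element-wise kernel once past break_pos_. A simplified standalone sketch of the same idea (full-depth recursion, addition as the op):

#include <vector>

// Hedged sketch of the BroadcastRun recursion. Unlike the real kernel, this
// recurses all the way to scalars instead of batching the contiguous tail.
void BroadcastAdd(const float *a, const float *b, float *out, const std::vector<int> &shape_a,
                  const std::vector<int> &shape_b, const std::vector<int> &out_shape,
                  const std::vector<int> &stride_a, const std::vector<int> &stride_b,
                  const std::vector<int> &stride_out, size_t dim) {
  if (dim == out_shape.size()) {
    *out = *a + *b;
    return;
  }
  for (int i = 0; i < out_shape[dim]; ++i) {
    int ia = shape_a[dim] == 1 ? 0 : i;  // extent 1: reuse index 0 (broadcast)
    int ib = shape_b[dim] == 1 ? 0 : i;
    BroadcastAdd(a + ia * stride_a[dim], b + ib * stride_b[dim], out + i * stride_out[dim],
                 shape_a, shape_b, out_shape, stride_a, stride_b, stride_out, dim + 1);
  }
}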
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.cc
index db76c4e4ee..a7e413e981 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.cc
@@ -69,7 +69,7 @@ int ArithmeticSelfCPUKernel::DoArithmeticSelf(int task_id) {
   }
   return RET_OK;
 }
-int RestoreMulWeight(lite::tensor::Tensor *input_tensor) {
+int RestoreMulWeight(lite::Tensor *input_tensor) {
   MS_ASSERT(input_tensor != nullptr);
   if (input_tensor->data_type() != kNumberTypeUInt8) {
     MS_LOG(ERROR) << "full connect input type error" << input_tensor->data_type();
@@ -79,8 +79,8 @@ int RestoreMulWeight(lite::tensor::Tensor *input_tensor) {
     MS_LOG(ERROR) << "no quant param";
     return RET_ERROR;
   }
-  const auto* quant_data = static_cast<const uint8_t*>(input_tensor->Data());
-  auto* dequant_data = static_cast<float*>(malloc(input_tensor->DataSize() * sizeof(float)));
+  const auto *quant_data = static_cast<const uint8_t *>(input_tensor->MutableData());
+  auto *dequant_data = static_cast<float *>(malloc(input_tensor->ElementsNum() * sizeof(float)));
   if (dequant_data == nullptr) {
     MS_LOG(ERROR) << "malloc faile";
     return RET_ERROR;
@@ -92,15 +92,15 @@ int RestoreMulWeight(lite::tensor::Tensor *input_tensor) {
       MS_LOG(ERROR) << "Quant param not equal channel num " << input_tensor->GetQuantParams().size() << channels;
       return RET_ERROR;
     }
-    size_t per_channel_size = input_tensor->DataSize() / channels;
+    size_t per_channel_size = input_tensor->ElementsNum() / channels;
     auto quant_param = input_tensor->GetQuantParams();
     for (size_t i = 0; i < channels; i++) {
       auto param = quant_param.at(i);
       auto scale = param.scale;
       auto zero_point = param.zeroPoint;
       for (size_t j = 0; j < per_channel_size; j++) {
-        dequant_data[per_channel_size * i + j] = static_cast<float>(
-          (quant_data[per_channel_size * i + j] - zero_point) * scale);
+        dequant_data[per_channel_size * i + j] =
+          static_cast<float>((quant_data[per_channel_size * i + j] - zero_point) * scale);
       }
     }
   } else {
@@ -108,7 +108,7 @@ int RestoreMulWeight(lite::tensor::Tensor *input_tensor) {
     auto param = quant_param.front();
     auto scale = param.scale;
     auto zero_point = param.zeroPoint;
-    for (int64_t j = 0; j < input_tensor->DataSize(); j++) {
+    for (int64_t j = 0; j < input_tensor->ElementsNum(); j++) {
       dequant_data[j] = static_cast<float>((quant_data[j] - zero_point) * scale);
     }
   }
@@ -123,8 +123,8 @@ int ArithmeticSelfCPUKernel::Run() {
   }
   auto input_tensor = in_tensors_.at(0);
   auto out_tensor = out_tensors_.at(0);
-  in_ptr_ = reinterpret_cast<float *>(input_tensor->Data());
-  out_ptr_ = reinterpret_cast<float *>(out_tensor->Data());
+  in_ptr_ = reinterpret_cast<float *>(input_tensor->MutableData());
+  out_ptr_ = reinterpret_cast<float *>(out_tensor->MutableData());
   ret = ParallelLaunch(THREAD_POOL_DEFAULT, ArithmeticSelfRuns, this, thread_sz_count_);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "ArithmeticSelfRun error error_code[" << ret << "]";
@@ -133,8 +133,8 @@ int ArithmeticSelfCPUKernel::Run() {
   return RET_OK;
 }
 
-kernel::LiteKernel *CpuArithmeticSelfFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                       const std::vector<lite::tensor::Tensor *> &outputs,
+kernel::LiteKernel *CpuArithmeticSelfFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                       const std::vector<lite::Tensor *> &outputs,
                                                        OpParameter *opParameter, const lite::Context *ctx,
                                                        const kernel::KernelKey &desc,
                                                        const mindspore::lite::PrimitiveC *primitive) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.h b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.h
index 02539c3e64..2483f4d7e3 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.h
@@ -43,8 +43,8 @@ class ArithmeticSelfCPUKernel : public LiteKernel {
   typedef int (*ArithmeticSelfRun)(float *input, float *output, int element_size);
 
  public:
-  explicit ArithmeticSelfCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                                   const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  explicit ArithmeticSelfCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                                   const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                                    const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {
     switch (parameter->type_) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space.cc
index d41cfb1752..a3cf8eebe5 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space.cc
@@ -45,8 +45,8 @@ int BatchToSpaceCPUKernel::Run() {
   }
   auto input = in_tensors_[0];
   auto output = out_tensors_[0];
-  const float *input_data = reinterpret_cast<const float *>(input->Data());
-  float *output_data = reinterpret_cast<float *>(output->Data());
+  const float *input_data = reinterpret_cast<const float *>(input->MutableData());
+  float *output_data = reinterpret_cast<float *>(output->MutableData());
   auto in_shape = input->shape();
   auto out_shape = output->shape();
   BatchToSpaceParameter *param = reinterpret_cast<BatchToSpaceParameter *>(this->op_parameter_);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space.h b/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space.h
index 938224ff5a..4853599c01 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space.h
@@ -21,8 +21,8 @@
 namespace mindspore::kernel {
 class BatchToSpaceCPUKernel : public BatchToSpaceBaseCPUKernel {
  public:
-  BatchToSpaceCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                        const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  BatchToSpaceCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                        const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                         const mindspore::lite::PrimitiveC *primitive)
       : BatchToSpaceBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm.cc
index 050b868d63..0c375c0c47 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm.cc
@@ -64,8 +64,8 @@ int BatchnormCPUKernel::InitConstTensor() {
     FreeMeanAndVariance();
     return RET_ERROR;
   }
-  memcpy(mean_, in_tensors_[1]->Data(), in_tensors_[1]->Size());
-  memcpy(variance_, in_tensors_[2]->Data(), in_tensors_[2]->Size());
+  memcpy(mean_, in_tensors_[1]->MutableData(), in_tensors_[1]->Size());
+  memcpy(variance_, in_tensors_[2]->MutableData(), in_tensors_[2]->Size());
   return RET_OK;
 }
 
@@ -84,7 +84,7 @@ int BatchnormCPUKernel::Run() {
 
 int BatchnormCPUKernel::DoExecute(int task_id) {
   auto param = reinterpret_cast<BatchNormParameter *>(op_parameter_);
-  BatchNormFp32(in_tensors_.at(0)->Data(), mean_, variance_, param, task_id, out_tensors_.at(0)->Data());
+  BatchNormFp32(in_tensors_.at(0)->MutableData(), mean_, variance_, param, task_id, out_tensors_.at(0)->MutableData());
   return mindspore::lite::RET_OK;
 }
 
@@ -97,10 +97,9 @@ int BatchNormRun(void *cdata, int task_id) {
   return ret;
 }
 
-kernel::LiteKernel *CpuBatchnormKernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                              const std::vector<lite::tensor::Tensor *> &outputs,
-                                              OpParameter *opParameter, const lite::Context *ctx,
-                                              const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuBatchnormKernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                              const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                              const lite::Context *ctx, const kernel::KernelKey &desc,
                                              const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(opParameter != nullptr);
   auto *kernel = new (std::nothrow) BatchnormCPUKernel(opParameter, inputs, outputs, ctx, primitive);
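Besides the renames, RestoreMulWeight above is the substantive piece in the arithmetic_self hunks: it dequantizes a uint8 weight tensor with value = (q - zero_point) * scale, per channel when one quant param is recorded per channel and per tensor otherwise. A standalone sketch of that logic (QuantParam here is a hypothetical stand-in for the tensor's quant params):

#include <cstdint>
#include <vector>

struct QuantParam {
  double scale;
  int zero_point;
};

// Hedged sketch of RestoreMulWeight's dequantization. Precondition: params is
// non-empty, and q.size() is divisible by channels in the per-channel case.
std::vector<float> Dequant(const std::vector<uint8_t> &q, const std::vector<QuantParam> &params, size_t channels) {
  std::vector<float> out(q.size());
  if (channels > 0 && params.size() == channels) {
    size_t per_channel = q.size() / channels;  // contiguous block per channel
    for (size_t c = 0; c < channels; ++c) {
      for (size_t j = 0; j < per_channel; ++j) {
        size_t idx = c * per_channel + j;
        out[idx] = static_cast<float>((q[idx] - params[c].zero_point) * params[c].scale);
      }
    }
  } else {
    const QuantParam &p = params.front();  // single param for the whole tensor
    for (size_t j = 0; j < q.size(); ++j) {
      out[j] = static_cast<float>((q[j] - p.zero_point) * p.scale);
    }
  }
  return out;
}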
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm.h b/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm.h
index e759058618..13a9672f1e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm.h
@@ -29,8 +29,8 @@ using mindspore::lite::Context;
 namespace mindspore::kernel {
 class BatchnormCPUKernel : public LiteKernel {
  public:
-  BatchnormCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                     const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
+  BatchnormCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                     const std::vector<lite::Tensor *> &outputs, const Context *ctx,
                      const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
   virtual ~BatchnormCPUKernel() { FreeMeanAndVariance(); }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/bias.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/bias.cc
index 9be5440b1e..bd5603bb9a 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/bias.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/bias.cc
@@ -47,9 +47,9 @@ int BiasCPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret;
     return prepare_ret;
   }
-  auto in = reinterpret_cast<float *>(in_tensors_.at(0)->Data());
-  auto bias = reinterpret_cast<float *>(in_tensors_.at(1)->Data());
-  auto out = reinterpret_cast<float *>(out_tensors_.at(0)->Data());
+  auto in = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
+  auto bias = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());
+  auto out = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
   size_t data_size = in_tensors_.at(0)->ElementsNum();
   MS_ASSERT(context_->allocator != nullptr);
   float *tile_in = reinterpret_cast<float *>(context_->allocator->Malloc(data_size * sizeof(float)));
@@ -74,8 +74,8 @@ int BiasCPUKernel::Init() {
   return ReSize();
 }
 
-kernel::LiteKernel *CpuBiasFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                             const std::vector<lite::tensor::Tensor *> &outputs, OpParameter *parameter,
+kernel::LiteKernel *CpuBiasFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                             const std::vector<lite::Tensor *> &outputs, OpParameter *parameter,
                                              const lite::Context *ctx, const kernel::KernelKey &desc,
                                              const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(parameter != nullptr);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/bias.h b/mindspore/lite/src/runtime/kernel/arm/fp32/bias.h
index 10dab30752..6da898229b 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/bias.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/bias.h
@@ -23,8 +23,8 @@
 namespace mindspore::kernel {
 class BiasCPUKernel : public LiteKernel {
  public:
-  BiasCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  BiasCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                 const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {
     bias_param_ = reinterpret_cast<ArithmeticParameter *>(parameter);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/broadcast_to.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/broadcast_to.cc
index afcac73026..32762561ec 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/broadcast_to.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/broadcast_to.cc
@@ -54,14 +54,14 @@ int BroadcastToCPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret;
     return prepare_ret;
   }
-  auto input_data = reinterpret_cast<float *>(in_tensors_.at(0)->Data());
-  auto output_data = reinterpret_cast<float *>(out_tensors_.at(0)->Data());
+  auto input_data = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
+  auto output_data = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
 
   return BroadcastTo(input_data, &shape_info_, output_data);
 }
 
-kernel::LiteKernel *CpuBroadcastToFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                    const std::vector<lite::tensor::Tensor *> &outputs,
+kernel::LiteKernel *CpuBroadcastToFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                    const std::vector<lite::Tensor *> &outputs,
                                                     OpParameter *op_parameter, const lite::Context *ctx,
                                                     const kernel::KernelKey &desc,
                                                     const mindspore::lite::PrimitiveC *primitive) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/broadcast_to.h b/mindspore/lite/src/runtime/kernel/arm/fp32/broadcast_to.h
index 3c80af451b..28a9cd0199 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/broadcast_to.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/broadcast_to.h
@@ -24,8 +24,8 @@
 namespace mindspore::kernel {
 class BroadcastToCPUKernel : public LiteKernel {
  public:
-  BroadcastToCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                       const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  BroadcastToCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                       const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                        const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~BroadcastToCPUKernel() = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/cast.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/cast.cc
index 639986f337..ac45e4a306 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/cast.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/cast.cc
@@ -66,15 +66,15 @@ int CastCPUKernel::DoCast(int thread_id) {
 
   auto offset = thread_id * stride_;
   auto output = out_tensors_.at(0);
-  auto output_data = output->Data();
+  auto output_data = output->MutableData();
   auto input_data_type = input->data_type();
   auto output_data_type = output->data_type();
   if (output_data_type != kNumberTypeFloat32) {
     if (input_data_type == kNumberTypeFloat32 && output_data_type == kNumberTypeInt32) {
-      Float32ToInt32(reinterpret_cast<float *>(input->Data()) + offset,
+      Float32ToInt32(reinterpret_cast<float *>(input->MutableData()) + offset,
                      reinterpret_cast<int32_t *>(output_data) + offset, data_num);
     } else if (input_data_type == kNumberTypeFloat32 && output_data_type == kNumberTypeFloat16) {
-      Float32ToFp16(reinterpret_cast<float *>(input->Data()) + offset,
+      Float32ToFp16(reinterpret_cast<float *>(input->MutableData()) + offset,
                     reinterpret_cast<uint16_t *>(output_data) + offset, data_num);
     } else {
       MS_LOG(ERROR) << "Unsupported datatype from " << input_data_type << " to " << output_data_type;
@@ -83,15 +83,15 @@ int CastCPUKernel::DoCast(int thread_id) {
   } else {
     switch (input_data_type) {
       case kNumberTypeUInt8:
-        Uint8ToFloat32(reinterpret_cast<uint8_t *>(input->Data()) + offset,
+        Uint8ToFloat32(reinterpret_cast<uint8_t *>(input->MutableData()) + offset,
                        reinterpret_cast<float *>(output_data) + offset, data_num);
         break;
       case kNumberTypeInt32:
-        Int32ToFloat32(reinterpret_cast<int32_t *>(input->Data()) + offset,
+        Int32ToFloat32(reinterpret_cast<int32_t *>(input->MutableData()) + offset,
                        reinterpret_cast<float *>(output_data) + offset, data_num);
         break;
       case kNumberTypeFloat16:
-        Fp16ToFloat32(reinterpret_cast<uint16_t *>(input->Data()) + offset,
+        Fp16ToFloat32(reinterpret_cast<uint16_t *>(input->MutableData()) + offset,
                       reinterpret_cast<float *>(output_data) + offset, data_num);
         break;
       default:
@@ -114,10 +114,9 @@ int CastCPUKernel::Run() {
   return ParallelLaunch(THREAD_POOL_DEFAULT, CastRun, this, op_parameter_->thread_num_);
 }
 
-kernel::LiteKernel *CpuCastFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                             const std::vector<lite::tensor::Tensor *> &outputs,
-                                             OpParameter *opParameter, const lite::Context *ctx,
-                                             const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuCastFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                             const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                             const lite::Context *ctx, const kernel::KernelKey &desc,
                                             const mindspore::lite::PrimitiveC *primitive) {
   if (opParameter == nullptr) {
     MS_LOG(ERROR) << "Input opParameter is nullptr!";
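CastCPUKernel::DoCast above also shows how these element-wise kernels split work across ParallelLaunch tasks: each task handles `stride_` elements starting at `offset = thread_id * stride_`, with the last chunk clamped to the element count. A compact sketch of that partitioning:

#include <algorithm>

// Hedged sketch of the UP_DIV-based task partitioning used by DoCast-style
// kernels: round the per-task stride up, then clamp the final chunk.
inline int UpDiv(int a, int b) { return (a + b - 1) / b; }

void CastTask(const float *in, int *out, int total, int thread_num, int task_id) {
  int stride = UpDiv(total, thread_num);
  int offset = task_id * stride;
  int count = std::min(stride, total - offset);
  if (count <= 0) return;  // more tasks than chunks: nothing to do
  for (int i = 0; i < count; ++i) out[offset + i] = static_cast<int>(in[offset + i]);
}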
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/cast.h b/mindspore/lite/src/runtime/kernel/arm/fp32/cast.h
index ef5347420e..ed327df714 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/cast.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/cast.h
@@ -22,8 +22,8 @@
 namespace mindspore::kernel {
 class CastCPUKernel : public LiteKernel {
  public:
-  CastCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  CastCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                 const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/concat.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/concat.cc
index ad42fd95f5..13d5c3cc56 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/concat.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/concat.cc
@@ -54,13 +54,13 @@ int ConcatCPUKernel::Run() {
   std::vector<std::vector<int>> shapes;
   for (size_t i = 0; i < input_num; ++i) {
-    inputs_addr[i] = in_tensors_[i]->Data();
+    inputs_addr[i] = in_tensors_[i]->MutableData();
     shapes.push_back(in_tensors_[i]->shape());
     inputs_output_shape[i] = shapes[i].data();
   }
   auto output_shape = out_tensors_.at(0)->shape();
   inputs_output_shape[input_num] = output_shape.data();
-  auto output_addr = out_tensors_.at(0)->Data();
+  auto output_addr = out_tensors_.at(0)->MutableData();
 
   Concat(reinterpret_cast<void **>(inputs_addr.data()), input_num, axis_, inputs_output_shape.data(),
          output_shape.size(), output_addr);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/concat.h b/mindspore/lite/src/runtime/kernel/arm/fp32/concat.h
index b1ddaa333d..ec8310e9d7 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/concat.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/concat.h
@@ -27,8 +27,8 @@ using mindspore::lite::Context;
 namespace mindspore::kernel {
 class ConcatCPUKernel : public ConcatBaseCPUKernel {
  public:
-  ConcatCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                  const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  ConcatCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                  const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                   const mindspore::lite::PrimitiveC *primitive)
       : ConcatBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/constant_of_shape.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/constant_of_shape.cc
index b3330d9479..6d5d5f9a73 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/constant_of_shape.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/constant_of_shape.cc
@@ -61,7 +61,7 @@ int ConstantOfShapeCPUKernel::Run() {
   int thread_num = MSMIN(param_->op_parameter_.thread_num_, param_->element_sz_);
   param_->unit_ = UP_DIV(param_->element_sz_, thread_num);
   param_->op_parameter_.thread_num_ = thread_num;
-  out_ptr_ = reinterpret_cast<float *>(out_tensors_.front()->Data());
+  out_ptr_ = reinterpret_cast<float *>(out_tensors_.front()->MutableData());
   auto ret = ParallelLaunch(THREAD_POOL_DEFAULT, ConstantOfShapeRun, this, thread_num);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "ConstantOfShapeRun error error_code[" << ret << "]";
@@ -70,8 +70,8 @@ int ConstantOfShapeCPUKernel::Run() {
   return ret;
 }
 
-kernel::LiteKernel *CpuConstantOfShapeFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                        const std::vector<lite::tensor::Tensor *> &outputs,
+kernel::LiteKernel *CpuConstantOfShapeFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                        const std::vector<lite::Tensor *> &outputs,
                                                         OpParameter *opParameter, const lite::Context *ctx,
                                                         const kernel::KernelKey &desc,
                                                         const mindspore::lite::PrimitiveC *primitive) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/constant_of_shape.h b/mindspore/lite/src/runtime/kernel/arm/fp32/constant_of_shape.h
index 3ef626e54a..51e932afcd 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/constant_of_shape.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/constant_of_shape.h
@@ -26,8 +26,8 @@ using mindspore::lite::Context;
 namespace mindspore::kernel {
 class ConstantOfShapeCPUKernel : public LiteKernel {
  public:
-  ConstantOfShapeCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                           const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  ConstantOfShapeCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                           const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                            const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {
     param_ = reinterpret_cast<ConstantOfShapeParameter *>(parameter);
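The convolution hunks that follow keep recomputing quantities like `ic4 = UP_DIV(input_channel, C4NUM)` and NHWC4 buffer sizes. The idea behind the C4 tiling is to round the channel count up to a multiple of 4 and zero-fill the padding lanes so SIMD code never reads past real data. A sketch of the packing direction, loosely mirroring PackNHWCToNHWC4Fp32 (layout assumption: NHWC4 is NHWC with the channel axis padded to a multiple of 4):

#include <cstring>
#include <vector>

// Hedged sketch of NHWC -> NHWC4 packing with zero-padded channel lanes.
constexpr int kC4 = 4;
inline int UpDiv(int a, int b) { return (a + b - 1) / b; }

std::vector<float> PackNHWCToNHWC4(const float *src, int batch, int plane, int channel) {
  int c4 = UpDiv(channel, kC4) * kC4;
  std::vector<float> dst(static_cast<size_t>(batch) * plane * c4, 0.0f);  // padding lanes stay zero
  for (int b = 0; b < batch; ++b) {
    for (int p = 0; p < plane; ++p) {
      const float *s = src + (static_cast<size_t>(b) * plane + p) * channel;
      float *d = dst.data() + (static_cast<size_t>(b) * plane + p) * c4;
      std::memcpy(d, s, channel * sizeof(float));  // copy the real channels
    }
  }
  return dst;
}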
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution.cc
index a9c4705619..04cae95820 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution.cc
@@ -49,7 +49,7 @@ int ConvolutionCPUKernel::InitWeightBias() {
   oc_block_num = UP_DIV(out_channel, C8NUM);
   int pack_weight_size = oc_block_num * oc_block * ic4 * C4NUM * kernel_plane;
 
-  auto origin_weight = reinterpret_cast<float *>(filter_tensor->Data());
+  auto origin_weight = reinterpret_cast<float *>(filter_tensor->MutableData());
   packed_weight_ = reinterpret_cast<float *>(malloc(pack_weight_size * sizeof(float)));
   if (packed_weight_ == nullptr) {
     MS_LOG(ERROR) << "malloc packed weight failed.";
@@ -66,7 +66,7 @@ int ConvolutionCPUKernel::InitWeightBias() {
   memset(bias_data_, 0, oc_block_num * oc_block * sizeof(float));
 
   if (in_tensors_.size() == kInputSize2) {
-    auto ori_bias = reinterpret_cast<float *>(in_tensors_.at(kBiasIndex)->Data());
+    auto ori_bias = reinterpret_cast<float *>(in_tensors_.at(kBiasIndex)->MutableData());
     memcpy(bias_data_, ori_bias, out_channel * sizeof(float));
   } else {
     MS_ASSERT(in_tensors_.size() == kInputSize1);
@@ -81,6 +81,7 @@ int ConvolutionCPUKernel::InitTmpBuffer() {
   int ic4 = UP_DIV(conv_param_->input_channel_, C4NUM);
   size_t nhwc4_input_size =
     ic4 * C4NUM * conv_param_->input_batch_ * conv_param_->input_h_ * conv_param_->input_w_ * sizeof(float);
+  MS_ASSERT(nullptr != ctx_->allocator);
   nhwc4_input_ = ctx_->allocator->Malloc(nhwc4_input_size);
   if (nhwc4_input_ == nullptr) {
     MS_LOG(ERROR) << "malloc nhwc4 input failed.";
@@ -110,7 +111,7 @@ int ConvolutionCPUKernel::InitTmpBuffer() {
 
 void ConvolutionCPUKernel::ConfigInputOutput() {
   // set output format
   auto output_tensor = out_tensors_.at(kOutputIndex);
-  output_tensor->SetFormat(schema::Format_NHWC);
+  output_tensor->SetFormat(schema::Format::Format_NHWC);
 
   // #ifdef ENABLE_ARM32
   //   gemm_func_ = IndirectGemmFp32_8x4;
@@ -152,7 +153,7 @@ int ConvolutionCPUKernel::RunImpl(int task_id) {
     MS_LOG(ERROR) << "gemm_func is nullptr.";
     return RET_ERROR;
   }
-  auto output_addr = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->Data());
+  auto output_addr = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->MutableData());
   ConvFp32(reinterpret_cast<float *>(nhwc4_input_), packed_input_, packed_weight_,
            reinterpret_cast<float *>(bias_data_), tmp_output_block_, output_addr, task_id, conv_param_, gemm_func_);
   return RET_OK;
@@ -182,7 +183,7 @@ int ConvolutionCPUKernel::Run() {
   }
   auto input_tensor = in_tensors_.at(kInputIndex);
-  auto ori_input_data = input_tensor->Data();
+  auto ori_input_data = input_tensor->MutableData();
   PackNHWCToNHWC4Fp32(ori_input_data, nhwc4_input_, conv_param_->input_batch_,
                       conv_param_->input_h_ * conv_param_->input_w_, conv_param_->input_channel_);
 
@@ -209,10 +210,9 @@ bool CheckIfUseSlideWindow(ConvParameter *conv_param) {
   return false;
 }
 
-kernel::LiteKernel *CpuConvFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                             const std::vector<lite::tensor::Tensor *> &outputs,
-                                             OpParameter *op_parameter, const Context *ctx,
-                                             const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuConvFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                             const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter,
+                                             const Context *ctx, const kernel::KernelKey &desc,
                                              const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(op_parameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_Conv2D);
@@ -235,7 +235,7 @@ kernel::LiteKernel *CpuConvFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
   auto *weight_tensor = inputs.at(kWeightIndex);
-  auto *restore_data = weight_tensor->Data();
+  auto *restore_data = weight_tensor->MutableData();
   if (primitive->GetQuantType() == schema::QuantType_WeightQuant) {
     ConvolutionBaseCPUKernel::RestoreFilter(inputs.at(kWeightIndex));
   }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution.h b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution.h
index 58af23b2c6..7d7ee30ecc 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution.h
@@ -26,8 +26,8 @@
 namespace mindspore::kernel {
 class ConvolutionCPUKernel : public ConvolutionBaseCPUKernel {
  public:
-  ConvolutionCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                       const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  ConvolutionCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                       const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                        const mindspore::lite::PrimitiveC *primitive)
       : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~ConvolutionCPUKernel() override {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_1x1.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_1x1.cc
index 66accb1684..49b25edc8f 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_1x1.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_1x1.cc
@@ -78,7 +78,7 @@ int Convolution1x1CPUKernel::InitConv1x1BiasWeight() {
   }
   memset(bias_data_, 0, size);
   if (in_tensors_.size() == 3) {
-    memcpy(bias_data_, in_tensors_[kBiasIndex]->Data(), output_channel * sizeof(float));
+    memcpy(bias_data_, in_tensors_[kBiasIndex]->MutableData(), output_channel * sizeof(float));
   }
 
   size = input_channel * UP_ROUND(output_channel, C8NUM) * sizeof(float);
@@ -88,7 +88,8 @@ int Convolution1x1CPUKernel::InitConv1x1BiasWeight() {
     return RET_ERROR;
   }
   memset(weight_ptr_, 0, size);
-  RowMajor2Col8Major(reinterpret_cast<float *>(filter_tensor->Data()), weight_ptr_, output_channel, input_channel);
+  RowMajor2Col8Major(reinterpret_cast<float *>(filter_tensor->MutableData()), weight_ptr_, output_channel,
+                     input_channel);
   return RET_OK;
 }
 
@@ -165,8 +166,8 @@ int Convolution1x1CPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret;
     return prepare_ret;
   }
-  auto src_in = reinterpret_cast<float *>(in_tensors_[0]->Data());
-  auto src_out = reinterpret_cast<float *>(out_tensors_[0]->Data());
+  auto src_in = reinterpret_cast<float *>(in_tensors_[0]->MutableData());
+  auto src_out = reinterpret_cast<float *>(out_tensors_[0]->MutableData());
 
   pack_input_ =
     reinterpret_cast<float *>(ctx_->allocator->Malloc(matmul_param_->row_12_ * matmul_param_->deep_ * sizeof(float)));
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_1x1.h b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_1x1.h
index bd7b0e589a..dc04394628 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_1x1.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_1x1.h
@@ -33,8 +33,8 @@
 namespace mindspore::kernel {
 class Convolution1x1CPUKernel : public ConvolutionBaseCPUKernel {
  public:
-  Convolution1x1CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                          const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  Convolution1x1CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                          const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                           const mindspore::lite::PrimitiveC *primitive)
       : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {
     matmul_param_ = new (std::nothrow) MatMulParameter();
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_3x3.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_3x3.cc
index 5d456fe501..2b612a5ab7 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_3x3.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_3x3.cc
@@ -68,7 +68,7 @@ int Convolution3x3CPUKernel::InitWeightBias() {
     return RET_ERROR;
   }
   memset(transformed_filter_addr_, 0, transformed_size);
-  auto weight_data = reinterpret_cast<float *>(in_tensors_.at(kWeightIndex)->Data());
+  auto weight_data = reinterpret_cast<float *>(in_tensors_.at(kWeightIndex)->MutableData());
   ProcessFilter(weight_data, transformed_filter_addr_, conv_param_, oc_block, oc_block_num);
 
   // init bias
@@ -80,7 +80,7 @@ int Convolution3x3CPUKernel::InitWeightBias() {
   }
   memset(bias_data_, 0, new_bias_size);
   if (in_tensors_.size() == kInputSize2) {
-    auto ori_bias_addr = reinterpret_cast<float *>(in_tensors_.at(kBiasIndex)->Data());
+    auto ori_bias_addr = reinterpret_cast<float *>(in_tensors_.at(kBiasIndex)->MutableData());
     memcpy(bias_data_, ori_bias_addr, output_channel * sizeof(float));
   } else {
     MS_ASSERT(in_tensors_.size() == kInputSize1);
@@ -141,7 +141,7 @@ int Convolution3x3CPUKernel::InitTmpBuffer() {
 
 void Convolution3x3CPUKernel::ConfigInputOutput() {
   auto output_tensor = out_tensors_.at(kOutputIndex);
-  output_tensor->SetFormat(schema::Format_NHWC);
+  output_tensor->SetFormat(schema::Format::Format_NHWC);
   // #ifdef ENABLE_ARM32
   //   gemm_func_ = IndirectGemmFp32_8x4;
   // #else
@@ -214,7 +214,7 @@ int Convolution3x3Impl(void *cdata, int task_id) {
 }
 
 int Convolution3x3CPUKernel::PostProcess() {
-  auto output_addr = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->Data());
+  auto output_addr = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->MutableData());
   auto act_type = conv_param_->act_type_;
   switch (act_type) {
     case ActType_No:
@@ -249,7 +249,7 @@ int Convolution3x3CPUKernel::Run() {
     return RET_ERROR;
   }
   auto input_tensor = in_tensors_.at(kInputIndex);
-  auto ori_input_data = input_tensor->Data();
+  auto ori_input_data = input_tensor->MutableData();
   PackNHWCToNHWC4Fp32(ori_input_data, nhwc4_input_, conv_param_->input_batch_,
                       conv_param_->input_h_ * conv_param_->input_w_, conv_param_->input_channel_);
 
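Convolution1x1CPUKernel above packs its weight with RowMajor2Col8Major and then treats the whole op as a matmul: for a 1x1 kernel with stride 1, output[p, oc] = sum over ic of input[p, ic] * weight[oc, ic] + bias[oc], where p runs over the flattened spatial plane. A naive reference of that equivalence, without the Col8 packing or row tiling the real kernel uses:

#include <vector>

// Hedged sketch: a 1x1/stride-1 convolution over NHWC data is a plain GEMM on
// the flattened spatial axis. This is the unpacked, unoptimized reference.
void Conv1x1Reference(const float *input,   // [plane, ic] for one batch, NHWC
                      const float *weight,  // [oc, ic], row major
                      const float *bias,    // [oc], may be nullptr
                      float *output,        // [plane, oc]
                      int plane, int ic, int oc) {
  for (int p = 0; p < plane; ++p) {
    for (int o = 0; o < oc; ++o) {
      float acc = bias != nullptr ? bias[o] : 0.0f;
      for (int i = 0; i < ic; ++i) {
        acc += input[p * ic + i] * weight[o * ic + i];
      }
      output[p * oc + o] = acc;
    }
  }
}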
Convolution3x3CPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + Convolution3x3CPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~Convolution3x3CPUKernel() override { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.cc index 7a09430880..e9ac09a5b1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.cc @@ -39,7 +39,7 @@ ConvolutionDepthwiseCPUKernel::~ConvolutionDepthwiseCPUKernel() { int ConvolutionDepthwiseCPUKernel::InitWeightBias() { // init weight: k, h, w, c; k == group == output_channel, c == 1 auto weight_tensor = in_tensors_[kWeightIndex]; - auto origin_weight = reinterpret_cast(weight_tensor->Data()); + auto origin_weight = reinterpret_cast(weight_tensor->MutableData()); int channel = weight_tensor->Batch(); int pack_weight_size = weight_tensor->Batch() * weight_tensor->Height() * weight_tensor->Width(); @@ -59,7 +59,7 @@ int ConvolutionDepthwiseCPUKernel::InitWeightBias() { memset(bias_data_, 0, channel * sizeof(float)); if (in_tensors_.size() == kInputSize2) { - auto ori_bias = reinterpret_cast(bias_tensor->Data()); + auto ori_bias = reinterpret_cast(bias_tensor->MutableData()); memcpy(bias_data_, ori_bias, bias_tensor->ElementsNum() * sizeof(float)); } @@ -111,10 +111,10 @@ int ConvolutionDepthwiseCPUKernel::Run() { } auto input_tensor = in_tensors_.at(kInputIndex); - input_ptr_ = reinterpret_cast(input_tensor->Data()); + input_ptr_ = reinterpret_cast(input_tensor->MutableData()); auto output_tensor = out_tensors_.at(kOutputIndex); - output_ptr_ = reinterpret_cast(output_tensor->Data()); + output_ptr_ = reinterpret_cast(output_tensor->MutableData()); ret = ParallelLaunch(THREAD_POOL_DEFAULT, ConvDwRun, this, conv_param_->thread_num_); if (ret != RET_OK) { @@ -124,16 +124,15 @@ int ConvolutionDepthwiseCPUKernel::Run() { return RET_OK; } -kernel::LiteKernel *CpuConvDwFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuConvDwFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_DepthwiseConv2D); auto *weight_tensor = inputs.at(kWeightIndex); - auto *restore_data = weight_tensor->Data(); + auto *restore_data = weight_tensor->MutableData(); if (primitive->GetQuantType() == schema::QuantType_WeightQuant) { ConvolutionBaseCPUKernel::RestoreFilter(inputs.at(kWeightIndex)); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.h b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.h index 890fac43d6..8139c995dd 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.h @@ -25,8 +25,8 @@ namespace mindspore::kernel { class ConvolutionDepthwiseCPUKernel : public ConvolutionBaseCPUKernel { public: - ConvolutionDepthwiseCPUKernel(OpParameter *parameter, const 
std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + ConvolutionDepthwiseCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~ConvolutionDepthwiseCPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow.cc index 10ed18bb03..1df3711891 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow.cc @@ -41,7 +41,7 @@ ConvolutionDepthwiseSWCPUKernel::~ConvolutionDepthwiseSWCPUKernel() { int ConvolutionDepthwiseSWCPUKernel::InitWeightBias() { // init weight: o, h, w, i; o == group, i == 1 auto weight_tensor = in_tensors_[kWeightIndex]; - auto origin_weight = reinterpret_cast(weight_tensor->Data()); + auto origin_weight = reinterpret_cast(weight_tensor->MutableData()); int OC4 = UP_DIV(weight_tensor->Batch(), C4NUM); int pack_weight_size = C4NUM * OC4 * weight_tensor->Height() * weight_tensor->Width(); @@ -62,7 +62,7 @@ int ConvolutionDepthwiseSWCPUKernel::InitWeightBias() { memset(bias_data_, 0, C4NUM * OC4 * sizeof(float)); if (in_tensors_.size() == kInputSize2) { - auto ori_bias = reinterpret_cast(bias_tensor->Data()); + auto ori_bias = reinterpret_cast(bias_tensor->MutableData()); memcpy(bias_data_, ori_bias, bias_tensor->ElementsNum() * sizeof(float)); } @@ -151,7 +151,7 @@ int ConvolutionDepthwiseSWCPUKernel::Run() { return RET_ERROR; } auto input_tensor = in_tensors_.at(kInputIndex); - auto input_ptr = reinterpret_cast(input_tensor->Data()); + auto input_ptr = reinterpret_cast(input_tensor->MutableData()); if (need_align_) { PackNHWCToNHWC4Fp32(input_ptr, packed_input_, conv_param_->input_batch_, @@ -161,7 +161,7 @@ int ConvolutionDepthwiseSWCPUKernel::Run() { } auto output_tensor = out_tensors_.at(kOutputIndex); - auto output_ptr = reinterpret_cast(output_tensor->Data()); + auto output_ptr = reinterpret_cast(output_tensor->MutableData()); if (!need_align_) { packed_output_ = output_ptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow.h b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow.h index 58e236efe8..d1e66efd30 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow.h @@ -25,8 +25,8 @@ namespace mindspore::kernel { class ConvolutionDepthwiseSWCPUKernel : public ConvolutionBaseCPUKernel { public: - ConvolutionDepthwiseSWCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + ConvolutionDepthwiseSWCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~ConvolutionDepthwiseSWCPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_slidewindow.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_slidewindow.cc index 38a86cd63a..0e037c4b6c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_slidewindow.cc +++ 
b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_slidewindow.cc @@ -43,7 +43,7 @@ int ConvolutionSWCPUKernel::InitWeightBias() { int oc_block_num = UP_DIV(output_channel, C4NUM); int pack_weight_size = oc_block_num * oc_block * ic4 * C4NUM * kernel_plane; - auto origin_weight = reinterpret_cast(in_tensors_.at(kWeightIndex)->Data()); + auto origin_weight = reinterpret_cast(in_tensors_.at(kWeightIndex)->MutableData()); packed_weight_ = reinterpret_cast(malloc(pack_weight_size * sizeof(float))); if (packed_weight_ == nullptr) { MS_LOG(ERROR) << "malloc packed weight failed."; @@ -67,7 +67,7 @@ int ConvolutionSWCPUKernel::InitWeightBias() { } memset(bias_data_, 0, oc_block_num * oc_block * sizeof(float)); if (in_tensors_.size() == kInputSize2) { - auto ori_bias = reinterpret_cast(in_tensors_.at(kBiasIndex)->Data()); + auto ori_bias = reinterpret_cast(in_tensors_.at(kBiasIndex)->MutableData()); memcpy(bias_data_, ori_bias, output_channel * sizeof(float)); } else { MS_ASSERT(in_tensors_.size() == kInputSize1); @@ -92,7 +92,7 @@ int ConvolutionSWCPUKernel::InitTmpBuffer() { void ConvolutionSWCPUKernel::ConfigInputOutput() { // set output format auto output_tensor = out_tensors_.at(kOutputIndex); - output_tensor->SetFormat(schema::Format_NHWC); + output_tensor->SetFormat(schema::Format::Format_NHWC); } int ConvolutionSWCPUKernel::Init() { @@ -153,7 +153,7 @@ int ConvolutionSWCPUKernel::ReSize() { } int ConvolutionSWCPUKernel::RunImpl(int task_id) { - auto output_addr = reinterpret_cast(out_tensors_.at(kOutputIndex)->Data()); + auto output_addr = reinterpret_cast(out_tensors_.at(kOutputIndex)->MutableData()); ConvSWFp32(reinterpret_cast(nhwc4_input_), packed_weight_, reinterpret_cast(bias_data_), tmp_output_block_, output_addr, task_id, conv_param_, slidingWindow_param_); return RET_OK; @@ -183,7 +183,7 @@ int ConvolutionSWCPUKernel::Run() { return RET_ERROR; } auto input_tensor = in_tensors_.at(kInputIndex); - auto ori_input_data = input_tensor->Data(); + auto ori_input_data = input_tensor->MutableData(); PackNHWCToNHWC4Fp32(ori_input_data, nhwc4_input_, conv_param_->input_batch_, conv_param_->input_h_ * conv_param_->input_w_, conv_param_->input_channel_); @@ -195,7 +195,7 @@ int ConvolutionSWCPUKernel::Run() { } auto out_tensor = out_tensors_.front(); - auto out_data = reinterpret_cast(out_tensor->Data()); + auto out_data = reinterpret_cast(out_tensor->MutableData()); int oc4_res = conv_param_->output_channel_ % C4NUM; if (oc4_res != 0) { PackNHWC4ToNHWCFp32(tmp_output_block_, out_data, conv_param_->output_batch_, diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_slidewindow.h b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_slidewindow.h index 40fa9eb285..d2421f224d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_slidewindow.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_slidewindow.h @@ -27,8 +27,8 @@ namespace mindspore::kernel { class ConvolutionSWCPUKernel : public ConvolutionBaseCPUKernel { public: - ConvolutionSWCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + ConvolutionSWCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.cc 
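Note on the PackNHWCToNHWC4Fp32 / PackNHWC4ToNHWCFp32 calls in the slide-window and winograd kernels around this point: inputs are padded so the channel dimension becomes a multiple of four, and when output_channel_ % C4NUM != 0 the result is unpacked back to tight NHWC. The alignment arithmetic as a self-contained sketch (C4NUM and UpDiv mirror the nnacl macros; the channel count is a made-up example):

    #include <cstdio>

    // Channel-alignment arithmetic behind the NHWC <-> NHWC4 packing calls.
    constexpr int C4NUM = 4;
    constexpr int UpDiv(int x, int y) { return (x + y - 1) / y; }

    int main() {
      int channel = 10;                       // hypothetical channel count
      int c4_blocks = UpDiv(channel, C4NUM);  // 3 blocks of 4 channels
      int padded = c4_blocks * C4NUM;         // 12 channels after zero padding
      int tail = channel % C4NUM;             // 2: non-zero, so output needs unpacking
      // The kernel computes on the padded NHWC4 layout; when tail != 0 (as with
      // oc4_res in ConvolutionSWCPUKernel::Run) the result is repacked to NHWC.
      printf("blocks=%d padded=%d tail=%d\n", c4_blocks, padded, tail);
      return 0;
    }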
index e9ea6d40d1..5117085011 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.cc
@@ -124,7 +124,7 @@ int ConvolutionWinogradCPUKernel::InitWeightBias() {
     MS_LOG(ERROR) << "Malloc filter matrix failed.";
     return RET_ERROR;
   }
-  auto weight_data = reinterpret_cast<float *>(filter_tensor->Data());
+  auto weight_data = reinterpret_cast<float *>(filter_tensor->MutableData());
   ret = WinogradFilterTransform(weight_data, trans_weight_, kernel_unit_, input_unit_, conv_param_, oc_block);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "winograd filter transform failed.";
@@ -136,7 +136,7 @@ int ConvolutionWinogradCPUKernel::InitWeightBias() {
   bias_data_ = reinterpret_cast<float *>(malloc(new_bias_size));
   memset(bias_data_, 0, new_bias_size);
   if (in_tensors_.size() == kInputSize2) {
-    auto ori_bias_addr = reinterpret_cast<float *>(in_tensors_.at(kBiasIndex)->Data());
+    auto ori_bias_addr = reinterpret_cast<float *>(in_tensors_.at(kBiasIndex)->MutableData());
     memcpy(bias_data_, ori_bias_addr, out_channel * sizeof(float));
   } else {
     MS_ASSERT(in_tensors_.size() == kInputSize1);
@@ -243,7 +243,7 @@ int ConvolutionWinogradCPUKernel::InitTmpBuffer() {
 
 int ConvolutionWinogradCPUKernel::ConfigInputOutput() {
   auto output_tensor = out_tensors_.at(kOutputIndex);
-  output_tensor->SetFormat(schema::Format_NHWC);
+  output_tensor->SetFormat(schema::Format::Format_NHWC);
 
   // choose input transformer function (4x4 unit or 8x8 unit)
   input_trans_func_ = GetInputTransFunc(input_unit_);
@@ -344,7 +344,7 @@ int ConvolutionWinogradImpl(void *cdata, int task_id) {
 
 int ConvolutionWinogradCPUKernel::PostProcess() {
   auto out_tensor = out_tensors_.front();
-  auto out_data = reinterpret_cast<float *>(out_tensor->Data());
+  auto out_data = reinterpret_cast<float *>(out_tensor->MutableData());
   auto act_type = conv_param_->act_type_;
   switch (act_type) {
     case ActType_No:
@@ -380,7 +380,7 @@ int ConvolutionWinogradCPUKernel::Run() {
   }
   auto input_tensor = in_tensors_.at(kInputIndex);
-  auto ori_input_data = input_tensor->Data();
+  auto ori_input_data = input_tensor->MutableData();
   PackNHWCToNHWC4Fp32(ori_input_data, nhwc4_input_, conv_param_->input_batch_,
                       conv_param_->input_h_ * conv_param_->input_w_, conv_param_->input_channel_);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.h b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.h
index 45e6f3f7b1..73ea6b0b8f 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.h
@@ -27,8 +27,8 @@ namespace mindspore::kernel {
 class ConvolutionWinogradCPUKernel : public ConvolutionBaseCPUKernel {
  public:
-  ConvolutionWinogradCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                               const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  ConvolutionWinogradCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                               const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                                const mindspore::lite::PrimitiveC *primitive, int output_unit)
       : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive),
         output_unit_(output_unit),
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/crop.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/crop.cc
index 711db31678..e9ec8e206a 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/crop.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/crop.cc
@@ -45,8 +45,8 @@ int CropCPUKernel::Init() { return RET_OK; }
 
 int CropCPUKernel::CropParallelRun(int thread_id) {
   auto input = in_tensors_[0];
  auto output = out_tensors_[0];
-  float *input_data = reinterpret_cast<float *>(input->Data());
-  float *output_data = reinterpret_cast<float *>(output->Data());
+  float *input_data = reinterpret_cast<float *>(input->MutableData());
+  float *output_data = reinterpret_cast<float *>(output->MutableData());
   auto param = reinterpret_cast<CropParameter *>(op_parameter_);
   Crop4D(input_data, output_data, input->shape().data(), output->shape().data(), param, thread_id);
   return RET_OK;
@@ -62,8 +62,8 @@ int CropCPUKernel::Run() {
   auto output = out_tensors_[0];
   auto param = reinterpret_cast<CropParameter *>(op_parameter_);
   if (output->shape()[1] < param->op_parameter_.thread_num_) {
-    float *input_data = reinterpret_cast<float *>(input->Data());
-    float *output_data = reinterpret_cast<float *>(output->Data());
+    float *input_data = reinterpret_cast<float *>(input->MutableData());
+    float *output_data = reinterpret_cast<float *>(output->MutableData());
     Crop4DNoParallel(input_data, output_data, input->shape().data(), output->shape().data(), param);
     return RET_OK;
   }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/crop.h b/mindspore/lite/src/runtime/kernel/arm/fp32/crop.h
index e15b20aaea..278620df76 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/crop.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/crop.h
@@ -23,8 +23,8 @@ namespace mindspore::kernel {
 class CropCPUKernel : public CropBaseCPUKernel {
  public:
-  CropCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  CropCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                 const mindspore::lite::PrimitiveC *primitive)
       : CropBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~CropCPUKernel() = default;
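Note on the CropCPUKernel::Run hunk above: when the output's second dimension is smaller than thread_num_, splitting rows across threads would leave some workers idle, so the kernel takes the serial Crop4DNoParallel path. A standalone sketch of that dispatch rule (std::thread stands in for ParallelLaunch; all names here are illustrative, not the kernel's API):

    #include <algorithm>
    #include <thread>
    #include <vector>

    // If the split dimension has fewer rows than worker threads, run serially,
    // otherwise hand each thread a contiguous slice of rows.
    void CropRows(const std::vector<float> &in, std::vector<float> &out, int rows, int thread_num) {
      auto work = [&](int begin, int end) {
        for (int r = begin; r < end; ++r) out[r] = in[r];  // stand-in for Crop4D on one slice
      };
      if (rows < thread_num) {  // mirrors: output->shape()[1] < thread_num_
        work(0, rows);          // serial path, like Crop4DNoParallel
        return;
      }
      std::vector<std::thread> pool;
      int step = (rows + thread_num - 1) / thread_num;
      for (int t = 0; t < thread_num; ++t) {
        int begin = t * step;
        int end = std::min(rows, begin + step);
        if (begin < end) pool.emplace_back(work, begin, end);
      }
      for (auto &th : pool) th.join();
    }

    int main() {
      std::vector<float> in(8, 1.0f), out(8, 0.0f);
      CropRows(in, out, 8, 4);  // 8 rows, 4 threads: parallel path
      CropRows(in, out, 2, 4);  // 2 rows, 4 threads: serial fallback
      return 0;
    }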
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.cc
index 7cac6982ac..aec06b7b82 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.cc
@@ -61,7 +61,7 @@ int DeConvolutionCPUKernel::InitWeightBias() {
   }
   memset(bias_data_, 0, UP_ROUND(output_channel, C4NUM) * sizeof(float));
   if (in_tensors_.size() == 3) {
-    memcpy(bias_data_, in_tensors_[2]->Data(), output_channel * sizeof(float));
+    memcpy(bias_data_, in_tensors_[2]->MutableData(), output_channel * sizeof(float));
   }
 
   size_t weight_pack_size = input_channel * kernel_w_ * kernel_h_ * UP_ROUND(output_channel, C8NUM) * sizeof(float);
@@ -71,7 +71,7 @@ int DeConvolutionCPUKernel::InitWeightBias() {
     return RET_ERROR;
   }
   memset(weight_ptr_, 0, weight_pack_size);
-  PackNHWCToC8HWN8Fp32(reinterpret_cast<float *>(in_tensors_[1]->Data()), weight_ptr_, input_channel,
+  PackNHWCToC8HWN8Fp32(reinterpret_cast<float *>(in_tensors_[1]->MutableData()), weight_ptr_, input_channel,
                        kernel_w_ * kernel_h_, output_channel);
   return RET_OK;
 }
@@ -181,8 +181,8 @@ int DeConvolutionCPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret;
     return prepare_ret;
   }
-  float *src_in = reinterpret_cast<float *>(in_tensors_[0]->Data());
-  float *src_out = reinterpret_cast<float *>(out_tensors_[0]->Data());
+  float *src_in = reinterpret_cast<float *>(in_tensors_[0]->MutableData());
+  float *src_out = reinterpret_cast<float *>(out_tensors_[0]->MutableData());
 
   int error_code = InitRunBuf();
   if (error_code != RET_OK) {
@@ -207,10 +207,9 @@ int DeConvolutionCPUKernel::Run() {
   return RET_OK;
 }
 
-kernel::LiteKernel *CpuDeConvFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                               const std::vector<lite::tensor::Tensor *> &outputs,
-                                               OpParameter *opParameter, const lite::Context *ctx,
-                                               const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuDeConvFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                               const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                               const lite::Context *ctx, const kernel::KernelKey &desc,
                                                const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(opParameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_DeConv2D);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.h b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.h
index 3cbfac3869..1f206ab7de 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.h
@@ -30,8 +30,8 @@ namespace mindspore::kernel {
 class DeConvolutionCPUKernel : public ConvolutionBaseCPUKernel {
  public:
-  DeConvolutionCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                         const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  DeConvolutionCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                         const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                          const mindspore::lite::PrimitiveC *primitive)
       : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {
     matmul_param_ = new MatMulParameter();
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise.cc
index 10a097a047..79814f2b2e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise.cc
@@ -54,7 +54,7 @@ int DeconvolutionDepthwiseCPUKernel::InitSlideParam() {
 int DeconvolutionDepthwiseCPUKernel::InitWeightBias() {
   // init weight: o, h, w, i; o == group, i == 1
   auto weight_tensor = in_tensors_[kWeightIndex];
-  auto origin_weight = reinterpret_cast<float *>(weight_tensor->Data());
+  auto origin_weight = reinterpret_cast<float *>(weight_tensor->MutableData());
   int OC4 = UP_DIV(weight_tensor->Batch(), C4NUM);
   int pack_weight_size = C4NUM * OC4 * weight_tensor->Height() * weight_tensor->Width();
 
@@ -73,7 +73,7 @@ int DeconvolutionDepthwiseCPUKernel::InitWeightBias() {
   }
   memset(bias_data_, 0, C4NUM * OC4 * sizeof(float));
   if (in_tensors_.size() == kInputSize2) {
-    auto ori_bias = reinterpret_cast<float *>(in_tensors_.at(kBiasIndex)->Data());
+    auto ori_bias = reinterpret_cast<float *>(in_tensors_.at(kBiasIndex)->MutableData());
     memcpy(bias_data_, ori_bias, in_tensors_.at(kBiasIndex)->ElementsNum() * sizeof(float));
   }
 
@@ -163,7 +163,7 @@ int DeconvolutionDepthwiseCPUKernel::Run() {
   }
   auto input_tensor = in_tensors_.at(kInputIndex);
-  auto input_addr = reinterpret_cast<float *>(input_tensor->Data());
+  auto input_addr = reinterpret_cast<float *>(input_tensor->MutableData());
   if (need_align_) {
     PackNHWCToNHWC4Fp32(input_addr, packed_input_, conv_param_->input_batch_,
@@ -172,7 +172,7 @@ int DeconvolutionDepthwiseCPUKernel::Run() {
     packed_input_ = input_addr;
   }
 
-  auto output_addr = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->Data());
+  auto output_addr = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->MutableData());
   if (!need_align_) {
     memset(output_addr, 0, out_tensors_.at(kOutputIndex)->ElementsNum() * sizeof(float));
     packed_output_ = output_addr;
@@ -193,10 +193,9 @@ int DeconvolutionDepthwiseCPUKernel::Run() {
   return RET_OK;
 }
 
-kernel::LiteKernel *CpuDeconvDwFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                 const std::vector
&outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_DeDepthwiseConv2D); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise.h b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise.h index b1e1ab9fca..dff725dee7 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise.h @@ -25,8 +25,8 @@ namespace mindspore::kernel { class DeconvolutionDepthwiseCPUKernel : public ConvolutionBaseCPUKernel { public: - DeconvolutionDepthwiseCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + DeconvolutionDepthwiseCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~DeconvolutionDepthwiseCPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space.cc index ef2f064ede..3e49a8d9d6 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space.cc @@ -54,11 +54,11 @@ int DepthToSpaceCPUKernel::Run() { } auto input = in_tensors_[0]; auto output = out_tensors_[0]; - const float *input_data = reinterpret_cast(input->Data()); - float *output_data = reinterpret_cast(output->Data()); + const float *input_data = reinterpret_cast(input->MutableData()); + float *output_data = reinterpret_cast(output->MutableData()); auto in_shape = input->shape(); DepthToSpaceParameter *param = reinterpret_cast(op_parameter_); - if (input->GetFormat() == schema::Format_NHWC) { + if (input->GetFormat() == schema::Format::Format_NHWC) { DepthToSpaceForNHWC(input_data, output_data, in_shape.data(), param); return RET_OK; } else { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space.h b/mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space.h index 0cd952ae5c..e5d3c869ff 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space.h @@ -22,8 +22,8 @@ namespace mindspore::kernel { class DepthToSpaceCPUKernel : public DepthToSpaceBaseCPUKernel { public: - DepthToSpaceCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + DepthToSpaceCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : DepthToSpaceBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~DepthToSpaceCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/detection_post_process.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/detection_post_process.cc index a3616eed19..884f656912 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/detection_post_process.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/detection_post_process.cc @@ -34,7 +34,7 @@ int DetectionPostProcessCPUKernel::Init() { const auto quant_params = anchor_tensor->GetQuantParams(); const double scale = quant_params.at(0).scale; const int32_t zp = quant_params.at(0).zeroPoint; - auto anchor_uint8 = 
reinterpret_cast(anchor_tensor->Data()); + auto anchor_uint8 = reinterpret_cast(anchor_tensor->MutableData()); auto anchor_fp32 = new (std::nothrow) float[anchor_tensor->ElementsNum()]; if (anchor_fp32 == nullptr) { MS_LOG(ERROR) << "Malloc anchor failed"; @@ -50,7 +50,7 @@ int DetectionPostProcessCPUKernel::Init() { MS_LOG(ERROR) << "Malloc anchor failed"; return RET_ERROR; } - memcpy(parameter->anchors_, anchor_tensor->Data(), anchor_tensor->Size()); + memcpy(parameter->anchors_, anchor_tensor->MutableData(), anchor_tensor->Size()); } else { MS_LOG(ERROR) << "unsupported anchor data type " << anchor_tensor->data_type(); return RET_ERROR; @@ -60,7 +60,7 @@ int DetectionPostProcessCPUKernel::Init() { DetectionPostProcessCPUKernel::~DetectionPostProcessCPUKernel() { DetectionPostProcessParameter *parameter = reinterpret_cast(op_parameter_); - delete [](parameter->anchors_); + delete[](parameter->anchors_); } int DetectionPostProcessCPUKernel::ReSize() { return RET_OK; } @@ -71,14 +71,14 @@ int DetectionPostProcessCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - auto input_boxes = reinterpret_cast(in_tensors_.at(0)->Data()); - auto input_scores = reinterpret_cast(in_tensors_.at(1)->Data()); + auto input_boxes = reinterpret_cast(in_tensors_.at(0)->MutableData()); + auto input_scores = reinterpret_cast(in_tensors_.at(1)->MutableData()); // output_classes and output_num use float type now - auto output_boxes = reinterpret_cast(out_tensors_.at(0)->Data()); - auto output_classes = reinterpret_cast(out_tensors_.at(1)->Data()); - auto output_scores = reinterpret_cast(out_tensors_.at(2)->Data()); - auto output_num = reinterpret_cast(out_tensors_.at(3)->Data()); + auto output_boxes = reinterpret_cast(out_tensors_.at(0)->MutableData()); + auto output_classes = reinterpret_cast(out_tensors_.at(1)->MutableData()); + auto output_scores = reinterpret_cast(out_tensors_.at(2)->MutableData()); + auto output_num = reinterpret_cast(out_tensors_.at(3)->MutableData()); MS_ASSERT(context_->allocator != nullptr); const int num_boxes = in_tensors_.at(0)->shape()[1]; @@ -109,8 +109,8 @@ int DetectionPostProcessCPUKernel::Run() { return RET_OK; } -kernel::LiteKernel *CpuDetectionPostProcessFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, +kernel::LiteKernel *CpuDetectionPostProcessFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/detection_post_process.h b/mindspore/lite/src/runtime/kernel/arm/fp32/detection_post_process.h index 36b990c556..8f2b4fda39 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/detection_post_process.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/detection_post_process.h @@ -27,8 +27,8 @@ using mindspore::lite::Context; namespace mindspore::kernel { class DetectionPostProcessCPUKernel : public LiteKernel { public: - DetectionPostProcessCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + DetectionPostProcessCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) { param_ = reinterpret_cast(parameter); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/elu.cc 
b/mindspore/lite/src/runtime/kernel/arm/fp32/elu.cc index bd54b2e2be..61bd1d02bc 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/elu.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/elu.cc @@ -62,8 +62,8 @@ int EluCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - input_addr = reinterpret_cast(in_tensors_.front()->Data()); - output_addr = reinterpret_cast(out_tensors_.front()->Data()); + input_addr = reinterpret_cast(in_tensors_.front()->MutableData()); + output_addr = reinterpret_cast(out_tensors_.front()->MutableData()); auto ret = ParallelLaunch(THREAD_POOL_DEFAULT, EluRun, this, elu_parameter_->thread_num_); if (ret != RET_OK) { @@ -73,8 +73,8 @@ int EluCPUKernel::Run() { return RET_OK; } -kernel::LiteKernel *CpuEluFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, OpParameter *parameter, +kernel::LiteKernel *CpuEluFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *parameter, const lite::Context *ctx, const KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (parameter == nullptr || ctx == nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/elu.h b/mindspore/lite/src/runtime/kernel/arm/fp32/elu.h index a2e1df3231..69a179729c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/elu.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/elu.h @@ -24,8 +24,8 @@ namespace mindspore::kernel { class EluCPUKernel : public LiteKernel { public: - explicit EluCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + explicit EluCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {} ~EluCPUKernel() override{}; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup.cc index ef832f6257..7214049392 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup.cc @@ -95,12 +95,12 @@ int EmbeddingLookupCPUKernel::Run() { int dest_loc = 0; for (size_t i = 0; i < in_tensors_.size() - 1; i++) { - auto input_t = reinterpret_cast(in_tensors_.at(i)->Data()); + auto input_t = reinterpret_cast(in_tensors_.at(i)->MutableData()); memcpy(input_addr_ + dest_loc, input_t, sizeof(float) * in_tensors_.at(i)->ElementsNum()); dest_loc += in_tensors_.at(i)->ElementsNum(); } - output_addr_ = reinterpret_cast(out_tensors_.front()->Data()); - ids_addr_ = reinterpret_cast(in_tensors_.back()->Data()); + output_addr_ = reinterpret_cast(out_tensors_.front()->MutableData()); + ids_addr_ = reinterpret_cast(in_tensors_.back()->MutableData()); auto ret = ParallelLaunch(THREAD_POOL_DEFAULT, EmbeddingLookupRun, this, embedding_lookup_parameter_->thread_num); context_->allocator->Free(input_addr_); @@ -111,8 +111,8 @@ int EmbeddingLookupCPUKernel::Run() { return ret; } -kernel::LiteKernel *CpuEmbeddingLookupFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, +kernel::LiteKernel *CpuEmbeddingLookupFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *parameter, const lite::Context *ctx, const KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { diff --git 
a/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup.h b/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup.h index 5a8b39b78c..6296ea6d70 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup.h @@ -24,8 +24,8 @@ namespace mindspore::kernel { class EmbeddingLookupCPUKernel : public LiteKernel { public: - explicit EmbeddingLookupCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + explicit EmbeddingLookupCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {} ~EmbeddingLookupCPUKernel() override { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/exp.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/exp.cc index 1c6bd816dc..d87c83f800 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/exp.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/exp.cc @@ -74,8 +74,8 @@ int ExpCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - input_addr_ = reinterpret_cast(in_tensors_.front()->Data()); - output_addr_ = reinterpret_cast(out_tensors_.front()->Data()); + input_addr_ = reinterpret_cast(in_tensors_.front()->MutableData()); + output_addr_ = reinterpret_cast(out_tensors_.front()->MutableData()); exp_parameter_->element_num_ = in_tensors_.front()->ElementsNum(); auto ret = ParallelLaunch(THREAD_POOL_DEFAULT, ExpRun, this, exp_parameter_->thread_num_); @@ -86,8 +86,8 @@ int ExpCPUKernel::Run() { return RET_OK; } -kernel::LiteKernel *CpuExpFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, OpParameter *parameter, +kernel::LiteKernel *CpuExpFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *parameter, const lite::Context *ctx, const KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (parameter == nullptr || ctx == nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/exp.h b/mindspore/lite/src/runtime/kernel/arm/fp32/exp.h index b9d3206dc1..584659a435 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/exp.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/exp.h @@ -24,8 +24,8 @@ namespace mindspore::kernel { class ExpCPUKernel : public LiteKernel { public: - explicit ExpCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + explicit ExpCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {} ~ExpCPUKernel() override{}; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.cc index 3a49462bb2..b44d91593a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.cc @@ -72,8 +72,8 @@ int ExpandDimsCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - in_ptr_ = reinterpret_cast(in_tensors_.at(0)->Data()); - out_ptr_ = reinterpret_cast(out_tensors_.at(0)->Data()); + in_ptr_ = reinterpret_cast(in_tensors_.at(0)->MutableData()); + 
out_ptr_ = reinterpret_cast(out_tensors_.at(0)->MutableData()); auto ret = ParallelLaunch(THREAD_POOL_DEFAULT, ExpandDimsRun, this, thread_sz_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "ExpandDimsRun error error_code[" << ret << "]"; @@ -82,8 +82,8 @@ int ExpandDimsCPUKernel::Run() { return RET_OK; } -kernel::LiteKernel *CpuExpandsDimsFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, +kernel::LiteKernel *CpuExpandsDimsFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.h b/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.h index eb545601d4..9c3ddf56c7 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.h @@ -29,8 +29,8 @@ using mindspore::lite::Context; namespace mindspore::kernel { class ExpandDimsCPUKernel : public LiteKernel { public: - ExpandDimsCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + ExpandDimsCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {} ~ExpandDimsCPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/fill.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/fill.cc index 3ae36bf99d..833f65b754 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/fill.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/fill.cc @@ -74,9 +74,9 @@ int FillCPUKernel::Run() { } auto fillData = in_tensors_.at(in_tensors_.size() - 1); auto output = out_tensors_.front(); - auto fill_data = reinterpret_cast(fillData->Data()); + auto fill_data = reinterpret_cast(fillData->MutableData()); src_data_ = fill_data[0]; - out_ptr_ = reinterpret_cast(output->Data()); + out_ptr_ = reinterpret_cast(output->MutableData()); auto ret = ParallelLaunch(THREAD_POOL_DEFAULT, FillRun, this, thread_sz_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "FillRun error error_code[" << ret << "]"; @@ -85,10 +85,9 @@ int FillCPUKernel::Run() { return RET_OK; } -kernel::LiteKernel *CpuFillFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuFillFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); if (opParameter == nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/fill.h b/mindspore/lite/src/runtime/kernel/arm/fp32/fill.h index b92948a453..62a77eb167 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/fill.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/fill.h @@ -27,8 +27,8 @@ using mindspore::lite::Context; namespace mindspore::kernel { class FillCPUKernel : public LiteKernel { public: - FillCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + FillCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const 
mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {}
   ~FillCPUKernel() override = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/flatten.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/flatten.cc
index 3f1a677558..231c88333c 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/flatten.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/flatten.cc
@@ -49,16 +49,15 @@ int FlattenCPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret;
     return prepare_ret;
   }
-  auto input = reinterpret_cast<float *>(in_tensors_[0]->Data());
-  auto output = reinterpret_cast<float *>(out_tensors_[0]->Data());
+  auto input = reinterpret_cast<float *>(in_tensors_[0]->MutableData());
+  auto output = reinterpret_cast<float *>(out_tensors_[0]->MutableData());
   Flatten(input, output, flatten_param_);
   return RET_OK;
 }
 
-kernel::LiteKernel *CpuFlattenFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                const std::vector<lite::tensor::Tensor *> &outputs,
-                                                OpParameter *opParameter, const lite::Context *ctx,
-                                                const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuFlattenFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                                const lite::Context *ctx, const kernel::KernelKey &desc,
                                                 const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(opParameter != nullptr);
   if (opParameter == nullptr) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/flatten.h b/mindspore/lite/src/runtime/kernel/arm/fp32/flatten.h
index 62938a6b91..31be510ab1 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/flatten.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/flatten.h
@@ -27,8 +27,8 @@ using mindspore::lite::Context;
 namespace mindspore::kernel {
 class FlattenCPUKernel : public LiteKernel {
  public:
-  FlattenCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                   const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  FlattenCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                   const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                    const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {
     flatten_param_ = reinterpret_cast<FlattenParameter *>(parameter);
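Note on the fullconnection and matmul hunks below: they probe in_tensors_[i]->data_c() != nullptr to decide whether an operand is constant, while actual reads and writes go through MutableData(). Judging from that usage, data_c() appears to report the current buffer without allocating, and MutableData() to allocate lazily on first access; a minimal model of that split (the real lite::Tensor is more involved, and everything here is illustrative):

    #include <cstddef>
    #include <new>

    // Minimal model of the two accessors as the patch appears to use them.
    class Tensor {
     public:
      explicit Tensor(std::size_t byte_size) : byte_size_(byte_size) {}
      ~Tensor() { ::operator delete(data_); }
      void *data_c() const { return data_; }  // never allocates: safe for const-ness probes
      void *MutableData() {                    // allocates on first real access
        if (data_ == nullptr) data_ = ::operator new(byte_size_);
        return data_;
      }

     private:
      void *data_ = nullptr;
      std::size_t byte_size_;
    };

    int main() {
      Tensor weight(64);
      weight.MutableData();                    // model weights arrive pre-allocated
      Tensor runtime_input(64);                // runtime inputs start empty
      bool a_const = (runtime_input.data_c() != nullptr);  // false: packed at every Run()
      bool b_const = (weight.data_c() != nullptr);         // true: packed once at ReSize()
      return (!a_const && b_const) ? 0 : 1;
    }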
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection.cc
index 226f609a98..126a31580f 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection.cc
@@ -56,7 +56,7 @@ int FullconnectionCPUKernel::ReSize() {
   bias_ptr_ = reinterpret_cast<float *>(malloc(fc_param_->col_8_ * sizeof(float)));
   memset(bias_ptr_, 0, fc_param_->col_8_ * sizeof(float));
   if (in_tensors_.size() == 3) {
-    memcpy(bias_ptr_, in_tensors_[2]->Data(), fc_param_->col_ * sizeof(float));
+    memcpy(bias_ptr_, in_tensors_[2]->MutableData(), fc_param_->col_ * sizeof(float));
   }
 
   a_c12_ptr_ = reinterpret_cast<float *>(malloc(fc_param_->row_12_ * fc_param_->deep_ * sizeof(float)));
@@ -72,10 +72,10 @@ int FullconnectionCPUKernel::ReSize() {
   }
   memset(b_r8_ptr_, 0, fc_param_->col_8_ * fc_param_->deep_ * sizeof(float));
 
-  fc_param_->a_const_ = (in_tensors_[0]->Data() != nullptr);
-  fc_param_->b_const_ = (in_tensors_[1]->Data() != nullptr);
-  if (fc_param_->a_const_) InitMatrixA(reinterpret_cast<float *>(in_tensors_[0]->Data()), a_c12_ptr_);
-  if (fc_param_->b_const_) InitMatrixB(reinterpret_cast<float *>(in_tensors_[1]->Data()), b_r8_ptr_);
+  fc_param_->a_const_ = (in_tensors_[0]->data_c() != nullptr);
+  fc_param_->b_const_ = (in_tensors_[1]->data_c() != nullptr);
+  if (fc_param_->a_const_) InitMatrixA(reinterpret_cast<float *>(in_tensors_[0]->MutableData()), a_c12_ptr_);
+  if (fc_param_->b_const_) InitMatrixB(reinterpret_cast<float *>(in_tensors_[1]->MutableData()), b_r8_ptr_);
 
   return RET_OK;
 }
@@ -122,9 +122,9 @@ int FullconnectionCPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret;
     return prepare_ret;
   }
-  auto a_ptr = reinterpret_cast<float *>(in_tensors_.at(0)->Data());
-  auto b_ptr = reinterpret_cast<float *>(in_tensors_.at(1)->Data());
-  c_r_ptr = reinterpret_cast<float *>(out_tensors_.at(0)->Data());
+  auto a_ptr = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
+  auto b_ptr = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());
+  c_r_ptr = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
 
   if (!fc_param_->a_const_) InitMatrixA(a_ptr, a_c12_ptr_);
   if (!fc_param_->b_const_) InitMatrixB(b_ptr, b_r8_ptr_);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection.h b/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection.h
index 50bb60277f..776d370904 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection.h
@@ -28,8 +28,8 @@ using mindspore::lite::Context;
 namespace mindspore::kernel {
 class FullconnectionCPUKernel : public FullconnectionBaseCPUKernel {
  public:
-  FullconnectionCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                          const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
+  FullconnectionCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                          const std::vector<lite::Tensor *> &outputs, const Context *ctx,
                           const mindspore::lite::PrimitiveC *primitive)
       : FullconnectionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~FullconnectionCPUKernel() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm.cc
index 7f2a66f9ae..0f62d583eb 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm.cc
@@ -56,22 +56,22 @@ int FusedBatchnormCPUKernel::InitConstTensor() {
     MS_LOG(ERROR) << "Memory allocation failed";
     return RET_ERROR;
   }
-  memcpy(scale_, scale->Data(), scale->Size());
-  memcpy(offset_, offset->Data(), offset->Size());
-  memcpy(mean_, mean->Data(), mean->Size());
-  memcpy(variance_, variance->Data(), variance->Size());
+  memcpy(scale_, scale->MutableData(), scale->Size());
+  memcpy(offset_, offset->MutableData(), offset->Size());
+  memcpy(mean_, mean->MutableData(), mean->Size());
+  memcpy(variance_, variance->MutableData(), variance->Size());
   return RET_OK;
 }
 
 int FusedBatchnormCPUKernel::DoExecute(int task_id) {
   auto param = reinterpret_cast<BatchNormParameter *>(op_parameter_);
-  FusedBatchNormFp32(in_tensors_.at(0)->Data(), scale_, offset_, mean_, variance_, param, task_id,
-                     out_tensors_.at(0)->Data());
+  FusedBatchNormFp32(in_tensors_.at(0)->MutableData(), scale_, offset_, mean_, variance_, param, task_id,
+                     out_tensors_.at(0)->MutableData());
   return mindspore::lite::RET_OK;
 }
 
-kernel::LiteKernel *CpuFusedBatchnormKernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                   const std::vector<lite::tensor::Tensor *> &outputs,
+kernel::LiteKernel *CpuFusedBatchnormKernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                   const std::vector<lite::Tensor *> &outputs,
                                                    OpParameter *op_parameter, const lite::Context *ctx,
                                                    const kernel::KernelKey &desc,
                                                    const mindspore::lite::PrimitiveC *primitive) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm.h b/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm.h
index 0114a272d0..a60de87699 100644
---
a/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm.h @@ -23,8 +23,8 @@ namespace mindspore::kernel { class FusedBatchnormCPUKernel : public BatchnormCPUKernel { public: - FusedBatchnormCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + FusedBatchnormCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : BatchnormCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~FusedBatchnormCPUKernel() { FreeScaleAndOffset(); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/gather.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/gather.cc index 84b3df489d..3f124619d0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/gather.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/gather.cc @@ -44,11 +44,11 @@ int GatherCPUKernel::DoGather(int task_id) { auto indices_tensor = in_tensors_.at(1); auto out_tensor = out_tensors_.at(0); - auto input_ptr = reinterpret_cast(input_tensor->Data()); - auto output_ptr = reinterpret_cast(out_tensor->Data()); + auto input_ptr = reinterpret_cast(input_tensor->MutableData()); + auto output_ptr = reinterpret_cast(out_tensor->MutableData()); - auto input_int32 = reinterpret_cast(input_tensor->Data()); - auto output_int32 = reinterpret_cast(out_tensor->Data()); + auto input_int32 = reinterpret_cast(input_tensor->MutableData()); + auto output_int32 = reinterpret_cast(out_tensor->MutableData()); auto in_shape = input_tensor->shape(); int in_rank = in_shape.size(); @@ -117,7 +117,7 @@ int GatherCPUKernel::Run() { return ret; } -int GatherCPUKernel::AssignIndicesData(bool isIndicesInt32, int indices_num, lite::tensor::Tensor *indices_tensor) { +int GatherCPUKernel::AssignIndicesData(bool isIndicesInt32, int indices_num, lite::Tensor *indices_tensor) { if (!isIndicesInt32) { indices_data_ = reinterpret_cast(context_->allocator->Malloc(sizeof(int32_t) * indices_num)); if (indices_data_ == nullptr) { @@ -126,23 +126,22 @@ int GatherCPUKernel::AssignIndicesData(bool isIndicesInt32, int indices_num, lit } if (indices_tensor->data_type() == kNumberTypeInt64) { for (int i = 0; i < indices_num; i++) { - indices_data_[i] = reinterpret_cast(indices_tensor->Data())[i]; + indices_data_[i] = reinterpret_cast(indices_tensor->MutableData())[i]; } } else { for (int i = 0; i < indices_num; i++) { - indices_data_[i] = reinterpret_cast(indices_tensor->Data())[i]; + indices_data_[i] = reinterpret_cast(indices_tensor->MutableData())[i]; } } } else { - indices_data_ = reinterpret_cast(indices_tensor->Data()); + indices_data_ = reinterpret_cast(indices_tensor->MutableData()); } return RET_OK; } -kernel::LiteKernel *CpuGatherFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuGatherFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(desc.type == schema::PrimitiveType_Gather); if (opParameter == nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/gather.h b/mindspore/lite/src/runtime/kernel/arm/fp32/gather.h index e7c27e9395..48c7ef2021 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/gather.h +++ 
b/mindspore/lite/src/runtime/kernel/arm/fp32/gather.h @@ -24,8 +24,8 @@ namespace mindspore::kernel { class GatherCPUKernel : public LiteKernel { public: - GatherCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + GatherCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~GatherCPUKernel() = default; @@ -37,7 +37,7 @@ class GatherCPUKernel : public LiteKernel { private: int *indices_data_ = nullptr; - int AssignIndicesData(bool isIndicesInt32, int indices_num, lite::tensor::Tensor *indices_tensor); + int AssignIndicesData(bool isIndicesInt32, int indices_num, lite::Tensor *indices_tensor); }; } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd.cc index 961178e734..7f5698ba86 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd.cc @@ -70,7 +70,7 @@ int GatherNdCPUKernel::ReSize() { auto in_shape = in_tensors_.front()->shape(); int in_rank = in_shape.size(); int idx_lastshape = indices_shape[indices_rank - 1]; - auto indices_ptr = reinterpret_cast(indices_tensor->Data()); + auto indices_ptr = reinterpret_cast(indices_tensor->MutableData()); area_ = 1; for (int i = idx_lastshape; i < in_rank; ++i) { area_ *= in_shape[i]; @@ -121,8 +121,8 @@ int GatherNdCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - in_ptr_ = reinterpret_cast(in_tensors_.front()->Data()); - out_ptr_ = reinterpret_cast(out_tensors_.front()->Data()); + in_ptr_ = reinterpret_cast(in_tensors_.front()->MutableData()); + out_ptr_ = reinterpret_cast(out_tensors_.front()->MutableData()); auto ret = ParallelLaunch(THREAD_POOL_DEFAULT, GatherNdRun, this, thread_sz_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "gatherNd error error_code[" << ret << "]"; @@ -131,10 +131,9 @@ int GatherNdCPUKernel::Run() { return RET_OK; } -kernel::LiteKernel *CpuGatherNdFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuGatherNdFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_GatherNd); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd.h b/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd.h index 22261d1493..b3761e52ba 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd.h @@ -29,8 +29,8 @@ using mindspore::lite::Context; namespace mindspore::kernel { class GatherNdCPUKernel : public LiteKernel { public: - GatherNdCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const Context *ctx, + GatherNdCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {} ~GatherNdCPUKernel() override; diff --git 
a/mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm.cc index 2fb00a6783..381f4cff47 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm.cc @@ -19,7 +19,6 @@ #include "include/errorcode.h" #include "nnacl/l2_norm.h" - using mindspore::kernel::KERNEL_ARCH::kCPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; @@ -28,7 +27,7 @@ using mindspore::schema::PrimitiveType_L2Norm; namespace mindspore::kernel { int L2NormCPUKernel::Init() { - l2_norm_param_->data_num_ = in_tensors_.at(kInputIndex)->DataSize(); + l2_norm_param_->data_num_ = in_tensors_.at(kInputIndex)->ElementsNum(); auto shape = in_tensors_.at(kInputIndex)->shape(); l2_norm_param_->shape_ = reinterpret_cast(malloc(shape.size() * sizeof(int))); l2_norm_param_->shape_num_ = shape.size(); @@ -38,28 +37,24 @@ int L2NormCPUKernel::Init() { return RET_OK; } -kernel::LiteKernel * -CpuL2NormFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *param, const lite::Context *ctx, - const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { +kernel::LiteKernel *CpuL2NormFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *param, + const lite::Context *ctx, const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (param == nullptr) { MS_LOG(ERROR) << "input param is nullptr!"; return nullptr; } MS_ASSERT(desc.type == schema::PrimitiveType_L2Norm); - auto *kernel = new (std::nothrow) - L2NormCPUKernel(param, inputs, outputs, ctx, primitive); + auto *kernel = new (std::nothrow) L2NormCPUKernel(param, inputs, outputs, ctx, primitive); if (kernel == nullptr) { MS_LOG(ERROR) << "new L2NormCPUKernel fail!"; return nullptr; } auto ret = kernel->Init(); if (ret != RET_OK) { - MS_LOG(ERROR) << "Init kernel failed, name: " << param->name_ << ", type: " - << schema::EnumNamePrimitiveType( - static_cast(param->type_)); + MS_LOG(ERROR) << "Init kernel failed, name: " << param->name_ + << ", type: " << schema::EnumNamePrimitiveType(static_cast(param->type_)); delete kernel; return nullptr; } @@ -72,10 +67,8 @@ int L2NormCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << ret; return ret; } - auto input_ptr = - reinterpret_cast(in_tensors_.at(kInputIndex)->Data()); - auto output_ptr = - reinterpret_cast(out_tensors_.at(kOutputIndex)->Data()); + auto input_ptr = reinterpret_cast(in_tensors_.at(kInputIndex)->MutableData()); + auto output_ptr = reinterpret_cast(out_tensors_.at(kOutputIndex)->MutableData()); ret = L2NormFp32(input_ptr, output_ptr, l2_norm_param_); if (ret != 0) { MS_LOG_ERROR << "unsupported axis setting, more work will be done"; @@ -93,6 +86,5 @@ L2NormCPUKernel::~L2NormCPUKernel() { } } -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_L2Norm, - CpuL2NormFp32KernelCreator) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_L2Norm, CpuL2NormFp32KernelCreator) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm.h b/mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm.h index 0db39d736e..7c08beccb7 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm.h @@ -29,10 +29,8 @@ using mindspore::lite::Context; namespace mindspore::kernel { class L2NormCPUKernel : public LiteKernel { public: - L2NormCPUKernel(OpParameter *parameter, - const std::vector &inputs, - 
const std::vector &outputs, - const Context *ctx, + L2NormCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) { l2_norm_param_ = reinterpret_cast(op_parameter_); @@ -44,7 +42,7 @@ class L2NormCPUKernel : public LiteKernel { int Run() override; private: - L2NormParameter * l2_norm_param_; + L2NormParameter *l2_norm_param_; }; } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/leaky_relu.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/leaky_relu.cc index ab8b01b598..7411ff8dcb 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/leaky_relu.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/leaky_relu.cc @@ -62,8 +62,8 @@ int LeakyReluCPUKernel::Run() { } auto input = in_tensors_.at(0); prelu_param_->input_num_ = input->ElementsNum(); - input_data = reinterpret_cast(input->Data()); - output_data = reinterpret_cast(out_tensors_.at(0)->Data()); + input_data = reinterpret_cast(input->MutableData()); + output_data = reinterpret_cast(out_tensors_.at(0)->MutableData()); auto ret = ParallelLaunch(THREAD_POOL_DEFAULT, LeakyReluRun, this, context_->thread_num_); if (ret != RET_OK) { @@ -73,10 +73,9 @@ int LeakyReluCPUKernel::Run() { return RET_OK; } -kernel::LiteKernel *CpuLeakyReluFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *param, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuLeakyReluFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *param, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (param == nullptr) { MS_LOG(ERROR) << "input param is nullptr!"; @@ -90,8 +89,8 @@ kernel::LiteKernel *CpuLeakyReluFp32KernelCreator(const std::vectorInit(); if (ret != RET_OK) { - MS_LOG(ERROR) << "Init kernel failed, name: " << param->name_ << ", type: " - << schema::EnumNamePrimitiveType(static_cast(param->type_)); + MS_LOG(ERROR) << "Init kernel failed, name: " << param->name_ + << ", type: " << schema::EnumNamePrimitiveType(static_cast(param->type_)); delete kernel; return nullptr; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/leaky_relu.h b/mindspore/lite/src/runtime/kernel/arm/fp32/leaky_relu.h index aaca8d13d7..981a5b474e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/leaky_relu.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/leaky_relu.h @@ -27,9 +27,9 @@ using mindspore::lite::Context; namespace mindspore::kernel { class LeakyReluCPUKernel : public LiteKernel { public: - LeakyReluCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, - const mindspore::lite::PrimitiveC *primitive) + LeakyReluCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) { prelu_param_ = (reinterpret_cast(op_parameter_)); primitive_ = primitive; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm.cc index 15de35e18b..157fe839ac 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm.cc @@ -36,8 +36,8 @@ int 
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm.cc
index 15de35e18b..157fe839ac 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm.cc
@@ -36,8 +36,8 @@ int LocalResponseNormCPUKernel::ReSize() { return RET_OK; }
 int LocalResponseNormCPUKernel::DoLocalResponseNorm(int task_id) {
   auto input_tensor = in_tensors_.front();
   auto out_tensor = out_tensors_.front();
-  auto input_ptr = reinterpret_cast<float *>(input_tensor->Data());
-  auto output_ptr = reinterpret_cast<float *>(out_tensor->Data());
+  auto input_ptr = reinterpret_cast<float *>(input_tensor->MutableData());
+  auto output_ptr = reinterpret_cast<float *>(out_tensor->MutableData());

   auto in_shape = input_tensor->shape();
   MS_ASSERT(in_shape.size() == 4);
@@ -87,8 +87,8 @@ int LocalResponseNormCPUKernel::Run() {
   return RET_OK;
 }

-kernel::LiteKernel *CpuLocalResponseNormFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                          const std::vector<lite::tensor::Tensor *> &outputs,
+kernel::LiteKernel *CpuLocalResponseNormFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                          const std::vector<lite::Tensor *> &outputs,
                                                           OpParameter *opParameter, const lite::Context *ctx,
                                                           const kernel::KernelKey &desc,
                                                           const mindspore::lite::PrimitiveC *primitive) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm.h b/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm.h
index 13915eca60..de16a5f3bb 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm.h
@@ -24,8 +24,8 @@
 namespace mindspore::kernel {
 class LocalResponseNormCPUKernel : public LiteKernel {
  public:
-  LocalResponseNormCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                             const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  LocalResponseNormCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                             const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                              const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {}
   ~LocalResponseNormCPUKernel() override = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/lstm.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/lstm.cc
index dc39cb5936..c29b18a3d0 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/lstm.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/lstm.cc
@@ -64,7 +64,7 @@ int LstmCPUKernel::InitWeightBias() {
     MS_LOG(ERROR) << "LstmCPUKernel malloc weight_i_ptr_ error.";
     return RET_ERROR;
   }
-  memcpy(weight_i_ptr_, weight_i->Data(), weight_i->ElementsNum() * sizeof(float));
+  memcpy(weight_i_ptr_, weight_i->MutableData(), weight_i->ElementsNum() * sizeof(float));

   auto weight_h = in_tensors_.at(2);
   MS_ASSERT(weight_h != nullptr);
@@ -73,7 +73,7 @@ int LstmCPUKernel::InitWeightBias() {
     MS_LOG(ERROR) << "LstmCPUKernel malloc weight_h_ error.";
     return RET_ERROR;
   }
-  memcpy(weight_h_ptr_, weight_h->Data(), weight_h->ElementsNum() * sizeof(float));
+  memcpy(weight_h_ptr_, weight_h->MutableData(), weight_h->ElementsNum() * sizeof(float));

   // init bias
   int bias_num = lstm_parm_->bidirectional_ ? 2 * 4 * lstm_parm_->hidden_size_ : 4 * lstm_parm_->hidden_size_;
@@ -83,7 +83,7 @@ int LstmCPUKernel::InitWeightBias() {
     return RET_ERROR;
   }
-  auto bias_data = reinterpret_cast<float *>(in_tensors_.at(3)->Data());
+  auto bias_data = reinterpret_cast<float *>(in_tensors_.at(3)->MutableData());
   const int state_bias_offset = 4 * lstm_parm_->hidden_size_;
   for (int i = 0; i < state_bias_offset; i++) {
     bias_ptr_[i] = bias_data[i] + bias_data[i + state_bias_offset];
@@ -142,22 +142,22 @@ int LstmCPUKernel::Run() {
   auto output = out_tensors_.at(0);
   MS_ASSERT(output != nullptr);
-  auto input_ptr = reinterpret_cast<float *>(input->Data());
-  auto output_ptr = reinterpret_cast<float *>(output->Data());
+  auto input_ptr = reinterpret_cast<float *>(input->MutableData());
+  auto output_ptr = reinterpret_cast<float *>(output->MutableData());

   auto output_hidden_state = out_tensors_[1];
-  memcpy(output_hidden_state->Data(), hidden_state->Data(), hidden_state->ElementsNum() * sizeof(float));
+  memcpy(output_hidden_state->MutableData(), hidden_state->MutableData(), hidden_state->ElementsNum() * sizeof(float));
   auto output_cell_state = out_tensors_[2];
-  memcpy(output_cell_state->Data(), cell_state->Data(), cell_state->ElementsNum() * sizeof(float));
+  memcpy(output_cell_state->MutableData(), cell_state->MutableData(), cell_state->ElementsNum() * sizeof(float));

   Lstm(output_ptr, input_ptr, weight_i_ptr_, weight_h_ptr_, bias_ptr_,
-       reinterpret_cast<float *>(output_hidden_state->Data()), reinterpret_cast<float *>(output_cell_state->Data()),
-       gate_buffer_, lstm_parm_);
+       reinterpret_cast<float *>(output_hidden_state->MutableData()),
+       reinterpret_cast<float *>(output_cell_state->MutableData()), gate_buffer_, lstm_parm_);
   return RET_OK;
 }

-kernel::LiteKernel *CpuLstmKernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                         const std::vector<lite::tensor::Tensor *> &outputs, OpParameter *parameter,
+kernel::LiteKernel *CpuLstmKernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                         const std::vector<lite::Tensor *> &outputs, OpParameter *parameter,
                                          const lite::Context *ctx, const kernel::KernelKey &desc,
                                          const mindspore::lite::PrimitiveC *primitive) {
   if (parameter == nullptr) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/lstm.h b/mindspore/lite/src/runtime/kernel/arm/fp32/lstm.h
index 80c9a6bab0..1b119b0bde 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/lstm.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/lstm.h
@@ -24,8 +24,8 @@
 namespace mindspore::kernel {
 class LstmCPUKernel : public LiteKernel {
  public:
-  LstmCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  LstmCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                 const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {
     lstm_parm_ = reinterpret_cast<LstmParameter *>(op_parameter_);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/matmul.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/matmul.cc
index 61392a80a4..adda18a315 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/matmul.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/matmul.cc
@@ -81,13 +81,13 @@ int MatmulCPUKernel::ReSize() {
   }
   memset(b_r8_ptr_, 0, params_->col_8_ * params_->deep_ * sizeof(float));

-  params_->a_const_ = (in_tensors_[0]->Data() != nullptr);
-  params_->b_const_ = (in_tensors_[1]->Data() != nullptr);
+  params_->a_const_ = (in_tensors_[0]->data_c() != nullptr);
+  params_->b_const_ = (in_tensors_[1]->data_c() != nullptr);
   if (params_->a_const_ == true) {
-    InitMatrixA(reinterpret_cast<float *>(in_tensors_[0]->Data()), a_c12_ptr_);
+    InitMatrixA(reinterpret_cast<float *>(in_tensors_[0]->data_c()), a_c12_ptr_);
   }
   if (params_->b_const_ == true) {
-    InitMatrixB(reinterpret_cast<float *>(in_tensors_[1]->Data()), b_r8_ptr_);
+    InitMatrixB(reinterpret_cast<float *>(in_tensors_[1]->data_c()), b_r8_ptr_);
   }

   bias_ptr_ = reinterpret_cast<float *>(malloc(params_->col_8_ * sizeof(float)));
@@ -97,7 +97,7 @@ int MatmulCPUKernel::ReSize() {
   }
   memset(bias_ptr_, 0, params_->col_8_ * sizeof(float));
   if (in_tensors_.size() == 3) {
-    memcpy(bias_ptr_, in_tensors_[2]->Data(), params_->col_ * sizeof(float));
+    memcpy(bias_ptr_, in_tensors_[2]->data_c(), params_->col_ * sizeof(float));
   }

   return RET_OK;
@@ -163,9 +163,9 @@ int MatmulCPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret;
     return prepare_ret;
   }
-  auto a_src = reinterpret_cast<float *>(in_tensors_[0]->Data());
-  auto b_src = reinterpret_cast<float *>(in_tensors_[1]->Data());
-  auto c_src = reinterpret_cast<float *>(out_tensors_[0]->Data());
+  auto a_src = reinterpret_cast<float *>(in_tensors_[0]->data_c());
+  auto b_src = reinterpret_cast<float *>(in_tensors_[1]->data_c());
+  auto c_src = reinterpret_cast<float *>(out_tensors_[0]->data_c());
   if (params_->a_const_ == false) {
     InitMatrixA(a_src, a_c12_ptr_);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/matmul.h b/mindspore/lite/src/runtime/kernel/arm/fp32/matmul.h
index e93e170e11..f310371a62 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/matmul.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/matmul.h
@@ -25,8 +25,8 @@
 namespace mindspore::kernel {
 class MatmulCPUKernel : public MatmulBaseCPUKernel {
  public:
-  explicit MatmulCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                           const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  explicit MatmulCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                           const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                            const mindspore::lite::PrimitiveC *primitive)
       : MatmulBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~MatmulCPUKernel() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/nchw2nhwc.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/nchw2nhwc.cc
index f19c547893..fd64392f9e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/nchw2nhwc.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/nchw2nhwc.cc
@@ -38,22 +38,21 @@ int Nchw2NhwcCPUKernel::Run() {
   if (input->shape().size() == 4) {
     if (input->data_type() == kNumberTypeFloat32) {
-      PackNCHWToNHWCFp32(input->Data(), output->Data(), output->Batch(), output->Height() * output->Width(),
-                         output->Channel());
+      PackNCHWToNHWCFp32(input->MutableData(), output->MutableData(), output->Batch(),
+                         output->Height() * output->Width(), output->Channel());
     } else if (input->data_type() == kNumberTypeInt8) {
-      PackNCHWToNHWCInt8(input->Data(), output->Data(), output->Batch(), output->Height() * output->Width(),
-                         output->Channel());
+      PackNCHWToNHWCInt8(input->MutableData(), output->MutableData(), output->Batch(),
+                         output->Height() * output->Width(), output->Channel());
     }
   } else {
-    memcpy(output->Data(), input->Data(), input->ElementsNum() * sizeof(float));
+    memcpy(output->MutableData(), input->MutableData(), input->ElementsNum() * sizeof(float));
   }
   return RET_OK;
 }

-kernel::LiteKernel *CpuNchw2NhwcFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                  const std::vector<lite::tensor::Tensor *> &outputs,
-                                                  OpParameter *opParameter, const lite::Context *ctx,
-                                                  const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuNchw2NhwcFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                  const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                                  const lite::Context *ctx, const kernel::KernelKey &desc,
                                                   const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(opParameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_Nchw2Nhwc);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/nchw2nhwc.h b/mindspore/lite/src/runtime/kernel/arm/fp32/nchw2nhwc.h
index b789afdb68..dfd5398f51 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/nchw2nhwc.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/nchw2nhwc.h
@@ -28,8 +28,8 @@
 namespace mindspore::kernel {
 class Nchw2NhwcCPUKernel : public LiteKernel {
  public:
-  Nchw2NhwcCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                     const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  Nchw2NhwcCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                     const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                      const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~Nchw2NhwcCPUKernel() override = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/nhwc2nchw.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/nhwc2nchw.cc
index 27b50108cb..61c3a14140 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/nhwc2nchw.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/nhwc2nchw.cc
@@ -38,22 +38,21 @@ int Nhwc2NchwCPUKernel::Run() {
   if (input->shape().size() == 4) {
     if (input->data_type() == kNumberTypeFloat32) {
-      PackNHWCToNCHWFp32(input->Data(), output->Data(), output->Batch(), output->Height() * output->Width(),
-                         output->Channel());
+      PackNHWCToNCHWFp32(input->MutableData(), output->MutableData(), output->Batch(),
+                         output->Height() * output->Width(), output->Channel());
     } else if (input->data_type() == kNumberTypeInt8) {
-      PackNHWCToNCHWInt8(input->Data(), output->Data(), output->Batch(), output->Height() * output->Width(),
-                         output->Channel());
+      PackNHWCToNCHWInt8(input->MutableData(), output->MutableData(), output->Batch(),
+                         output->Height() * output->Width(), output->Channel());
     }
   } else {
-    memcpy(output->Data(), input->Data(), input->ElementsNum() * sizeof(float));
+    memcpy(output->MutableData(), input->MutableData(), input->ElementsNum() * sizeof(float));
   }
   return RET_OK;
 }

-kernel::LiteKernel *CpuNhwc2NchwFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                  const std::vector<lite::tensor::Tensor *> &outputs,
-                                                  OpParameter *opParameter, const lite::Context *ctx,
-                                                  const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuNhwc2NchwFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                  const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                                  const lite::Context *ctx, const kernel::KernelKey &desc,
                                                   const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(opParameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_Nhwc2Nchw);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/nhwc2nchw.h b/mindspore/lite/src/runtime/kernel/arm/fp32/nhwc2nchw.h
index d784fa7944..375d617ad0 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/nhwc2nchw.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/nhwc2nchw.h
@@ -28,8 +28,8 @@
 namespace mindspore::kernel {
 class Nhwc2NchwCPUKernel : public LiteKernel {
  public:
-  Nhwc2NchwCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                     const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  Nhwc2NchwCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                     const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                      const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~Nhwc2NchwCPUKernel() override = default;
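(Every creator in this patch converges on the same shape. A minimal sketch of that shape, using a hypothetical FooCPUKernel; the parameter order inputs/outputs/param/ctx/desc/primitive matches the post-patch signatures above:)

kernel::LiteKernel *CpuFooFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                            const std::vector<lite::Tensor *> &outputs, OpParameter *param,
                                            const lite::Context *ctx, const kernel::KernelKey &desc,
                                            const mindspore::lite::PrimitiveC *primitive) {
  if (param == nullptr) {
    MS_LOG(ERROR) << "input param is nullptr!";
    return nullptr;
  }
  auto *kernel = new (std::nothrow) FooCPUKernel(param, inputs, outputs, ctx, primitive);
  if (kernel == nullptr) {
    return nullptr;  // allocation failed
  }
  if (kernel->Init() != RET_OK) {
    delete kernel;   // never leak a half-initialized kernel
    return nullptr;
  }
  return kernel;
}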
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot.cc
index 7c9891733e..bde05cc01c 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot.cc
@@ -96,13 +96,13 @@ int RunOneHot(void *cdata, int task_id) {
 }

 int OneHotCPUKernel::OneHotImpl(int task_id) {
-  auto indices_data = static_cast<int *>(in_tensors_.at(0)->Data());
+  auto indices_data = static_cast<int *>(in_tensors_.at(0)->MutableData());
   auto output = out_tensors_.at(0);
   if (output == nullptr) {
     MS_LOG(ERROR) << "OneHot output nullptr";
     return RET_NULL_PTR;
   }
-  auto output_data = static_cast<float *>(output->Data());
+  auto output_data = static_cast<float *>(output->MutableData());

   auto ret = GetParams();
   if (ret != RET_OK) {
@@ -126,7 +126,7 @@ int OneHotCPUKernel::GetParams() {
     MS_LOG(ERROR) << "OneHot inputs[1] depth nullptr";
     return RET_NULL_PTR;
   }
-  const int *depth = static_cast<int *>(depth_tensor->Data());
+  const int *depth = static_cast<int *>(depth_tensor->MutableData());
   if (depth == nullptr) {
     return RET_NULL_PTR;
   }
@@ -137,7 +137,7 @@ int OneHotCPUKernel::GetParams() {
     MS_LOG(ERROR) << "OneHot inputs[2] on_value nullptr";
     return RET_NULL_PTR;
   }
-  const float *on_value = static_cast<float *>(on_value_tensor->Data());
+  const float *on_value = static_cast<float *>(on_value_tensor->MutableData());
   if (on_value == nullptr) {
     return RET_NULL_PTR;
   }
@@ -148,7 +148,7 @@ int OneHotCPUKernel::GetParams() {
     MS_LOG(ERROR) << "OneHot inputs[3] off_value nullptr";
     return RET_NULL_PTR;
   }
-  const float *off_value = static_cast<float *>(off_value_tensor->Data());
+  const float *off_value = static_cast<float *>(off_value_tensor->MutableData());
   if (off_value == nullptr) {
     return RET_NULL_PTR;
   }
@@ -174,10 +174,9 @@ int OneHotCPUKernel::Run() {
   return RET_OK;
 }

-kernel::LiteKernel *CpuOneHotFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                               const std::vector<lite::tensor::Tensor *> &outputs,
-                                               OpParameter *opParameter, const lite::Context *ctx,
-                                               const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuOneHotFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                               const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                               const lite::Context *ctx, const kernel::KernelKey &desc,
                                                const mindspore::lite::PrimitiveC *primitive) {
   if (opParameter == nullptr) {
     MS_LOG(ERROR) << "OneHot opParameter nullptr.";
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot.h b/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot.h
index 930c300928..3713726922 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot.h
@@ -23,8 +23,8 @@
 namespace mindspore::kernel {
 class OneHotCPUKernel : public LiteKernel {
  public:
-  OneHotCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                  const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  OneHotCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                  const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                   const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/pad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/pad.cc
index 51d9e9d1a1..500823dd24 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/pad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/pad.cc
@@ -82,8 +82,8 @@ int PadCPUKernel::RunImpl(int task_id) {
   auto input = in_tensors_.at(0);
   auto output = out_tensors_.at(0);

-  auto input_data = reinterpret_cast<float *>(input->Data());
-  auto output_data = reinterpret_cast<float *>(output->Data());
+  auto input_data = reinterpret_cast<float *>(input->MutableData());
+  auto output_data = reinterpret_cast<float *>(output->MutableData());

   Pad(input_data, output_data, in_, out_, pad_param_->paddings_, task_id, context_->thread_num_);

@@ -97,9 +97,9 @@ int PadCPUKernel::Run() {
     return prepare_ret;
   }
   auto output = out_tensors_.at(0);
-  int output_size = output->DataSize();
+  int output_size = output->ElementsNum();

-  auto output_data = reinterpret_cast<float *>(output->Data());
+  auto output_data = reinterpret_cast<float *>(output->MutableData());
   memset(output_data, 0, output_size * sizeof(float));

   int error_code = ParallelLaunch(THREAD_POOL_DEFAULT, PadImpl, this, context_->thread_num_);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/pad.h b/mindspore/lite/src/runtime/kernel/arm/fp32/pad.h
index 9b3ae6ded0..8818cf5be1 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/pad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/pad.h
@@ -25,8 +25,8 @@
 namespace mindspore::kernel {
 class PadCPUKernel : public LiteKernel {
  public:
-  PadCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-               const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  PadCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+               const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive), context_(ctx) {
     pad_param_ = reinterpret_cast<PadParameter *>(parameter);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/pooling.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/pooling.cc
index a7c3b4377f..0734ae64e8 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/pooling.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/pooling.cc
@@ -51,8 +51,8 @@ int PoolingCPUKernel::ReSize() {
 }

 int PoolingCPUKernel::RunImpl(int task_id) {
-  auto input_ptr = reinterpret_cast<float *>(in_tensors_.at(kInputIndex)->Data());
-  auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->Data());
+  auto input_ptr = reinterpret_cast<float *>(in_tensors_.at(kInputIndex)->MutableData());
+  auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->MutableData());
   float minf = -FLT_MAX;
   float maxf = FLT_MAX;
   if (pooling_param_->act_type_ == ActType_Relu) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/pooling.h b/mindspore/lite/src/runtime/kernel/arm/fp32/pooling.h
index 13939086b6..2d74ad7f71 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/pooling.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/pooling.h
@@ -25,8 +25,8 @@
 namespace mindspore::kernel {
 class PoolingCPUKernel : public PoolingBaseCPUKernel {
  public:
-  PoolingCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                   const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
+  PoolingCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                   const std::vector<lite::Tensor *> &outputs, const Context *ctx,
                    const mindspore::lite::PrimitiveC *primitive)
       : PoolingBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~PoolingCPUKernel() override = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/power.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/power.cc
index 089cb8ba59..d0e4defef2 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/power.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/power.cc
@@ -55,15 +55,15 @@ int PowerCPUKernel::Run() {
 }

 int PowerCPUKernel::RunImpl(int task_id) {
-  auto x_addr = reinterpret_cast<float *>(in_tensors_[0]->Data());
-  auto output_addr = reinterpret_cast<float *>(out_tensors_[0]->Data());
+  auto x_addr = reinterpret_cast<float *>(in_tensors_[0]->MutableData());
+  auto output_addr = reinterpret_cast<float *>(out_tensors_[0]->MutableData());
   auto size = in_tensors_[0]->ElementsNum();
   int stride = UP_DIV(size, thread_count_);
   int len = MSMIN(stride, size - stride * task_id);
   float *exp_addr = nullptr;
   bool broadcast = true;
   if (in_tensors_.size() == 2) {
-    exp_addr = reinterpret_cast<float *>(in_tensors_[1]->Data());
+    exp_addr = reinterpret_cast<float *>(in_tensors_[1]->MutableData());
     broadcast = in_tensors_[0]->shape() == in_tensors_[1]->shape() ? false : true;
   }
   float *cur_exp = nullptr;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/power.h b/mindspore/lite/src/runtime/kernel/arm/fp32/power.h
index c08a06d1bd..ff9043ab4c 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/power.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/power.h
@@ -26,8 +26,8 @@
 namespace mindspore::kernel {
 class PowerCPUKernel : public PowerBaseCPUKernel {
  public:
-  PowerCPUKernel(OpParameter *param, const std::vector<lite::tensor::Tensor *> &inputs,
-                 const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  PowerCPUKernel(OpParameter *param, const std::vector<lite::Tensor *> &inputs,
+                 const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                  const mindspore::lite::PrimitiveC *primitive)
       : PowerBaseCPUKernel(param, inputs, outputs, ctx, primitive),
         thread_count_(ctx->thread_num_),
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/prelu.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/prelu.cc
index 87835aa047..22298fdc0b 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/prelu.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/prelu.cc
@@ -114,8 +114,8 @@ int PReluCPUKernel::Run() {
   }
   MS_ASSERT(in_shape.size() >= 2);
   auto input_tensor = in_tensors_[0];
-  ori_input_ = reinterpret_cast<float *>(input_tensor->Data());
-  output_data_ = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->Data());
+  ori_input_ = reinterpret_cast<float *>(input_tensor->MutableData());
+  output_data_ = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->MutableData());

   if (prelu_param_->channelShared) {
     auto ret = ProcessShareChannelInput();
@@ -133,7 +133,7 @@ int PReluCPUKernel::Run() {

   // negative slope tensor
   auto negative_slope_tensor = in_tensors_.at(1);
-  prelu_param_->slope_ = reinterpret_cast<float *>(negative_slope_tensor->Data());
+  prelu_param_->slope_ = reinterpret_cast<float *>(negative_slope_tensor->MutableData());

   auto ret = ParallelLaunch(THREAD_POOL_DEFAULT, PReluRun, this, prelu_param_->op_parameter_.thread_num_);
   if (ret != RET_OK) {
@@ -147,8 +147,8 @@ int PReluCPUKernel::Run() {
   return RET_OK;
 }

-kernel::LiteKernel *CpuPReluFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                              const std::vector<lite::tensor::Tensor *> &outputs, OpParameter *param,
+kernel::LiteKernel *CpuPReluFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                              const std::vector<lite::Tensor *> &outputs, OpParameter *param,
                                               const lite::Context *ctx, const kernel::KernelKey &desc,
                                               const mindspore::lite::PrimitiveC *primitive) {
   if (param == nullptr) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/prelu.h b/mindspore/lite/src/runtime/kernel/arm/fp32/prelu.h
index 9c875f76bb..49a66bdbfb 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/prelu.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/prelu.h
@@ -24,8 +24,8 @@
 namespace mindspore::kernel {
 class PReluCPUKernel : public LiteKernel {
  public:
-  PReluCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                 const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  PReluCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                 const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                  const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {
     prelu_param_ = reinterpret_cast<PReluParameter *>(op_parameter_);
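(The Run/RunImpl split in the kernels above follows one pattern: a C-style trampoline handed to ParallelLaunch, with each task taking one stride of the flattened tensor. A sketch under those assumptions; PowerLikeRun is an illustrative name, UP_DIV/MSMIN are the macros used in the power.cc hunk:)

int PowerLikeRun(void *cdata, int task_id) {
  auto *kernel = reinterpret_cast<PowerCPUKernel *>(cdata);
  return kernel->RunImpl(task_id);  // dispatch back into the member function
}

int PowerCPUKernel::RunImpl(int task_id) {
  int size = in_tensors_[0]->ElementsNum();
  int stride = UP_DIV(size, thread_count_);          // ceil(size / threads)
  int len = MSMIN(stride, size - stride * task_id);  // last task may be short
  if (len <= 0) {
    return RET_OK;
  }
  // ... operate on elements [stride * task_id, stride * task_id + len) ...
  return RET_OK;
}

// launched as: ParallelLaunch(THREAD_POOL_DEFAULT, PowerLikeRun, this, thread_count_);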
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/range.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/range.cc
index 5d19341a0a..23024761bf 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/range.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/range.cc
@@ -40,15 +40,14 @@ int RangeCPUKernel::Run() {
   size_t start = (reinterpret_cast<RangeParameter *>(op_parameter_))->start_;
   size_t limit = (reinterpret_cast<RangeParameter *>(op_parameter_))->limit_;
   size_t delta = (reinterpret_cast<RangeParameter *>(op_parameter_))->delta_;
-  auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(0)->Data());
+  auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
   Range(output_ptr, start, limit, delta);
   return RET_OK;
 }

-kernel::LiteKernel *CpuRangeFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                              const std::vector<lite::tensor::Tensor *> &outputs,
-                                              OpParameter *opParameter, const lite::Context *ctx,
-                                              const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuRangeFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                              const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                              const lite::Context *ctx, const kernel::KernelKey &desc,
                                               const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(opParameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_Range);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/range.h b/mindspore/lite/src/runtime/kernel/arm/fp32/range.h
index 8938e1791c..3d4a961701 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/range.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/range.h
@@ -24,8 +24,8 @@
 namespace mindspore::kernel {
 class RangeCPUKernel : public LiteKernel {
  public:
-  explicit RangeCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                          const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  explicit RangeCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                          const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                           const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~RangeCPUKernel() override = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/rank.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/rank.cc
index 5c23b27845..e08e689f2e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/rank.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/rank.cc
@@ -37,17 +37,16 @@ int RankCPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret;
     return prepare_ret;
   }
-  auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(0)->Data());
+  auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
   auto in_shape = in_tensors_[0]->shape();
   auto rank = in_shape.size();
   Rank(output_ptr, rank);
   return RET_OK;
 }

-kernel::LiteKernel *CpuRankFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                             const std::vector<lite::tensor::Tensor *> &outputs,
-                                             OpParameter *opParameter, const lite::Context *ctx,
-                                             const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuRankFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                             const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                             const lite::Context *ctx, const kernel::KernelKey &desc,
                                              const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(opParameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_Rank);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/rank.h b/mindspore/lite/src/runtime/kernel/arm/fp32/rank.h
index 36bbca742a..fc101570d3 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/rank.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/rank.h
@@ -24,8 +24,8 @@
 namespace mindspore::kernel {
 class RankCPUKernel : public LiteKernel {
  public:
-  explicit RankCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                         const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  explicit RankCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                         const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                          const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~RankCPUKernel() override = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reduce.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/reduce.cc
index 27125a0d4e..41adc5be8e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/reduce.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reduce.cc
@@ -112,7 +112,7 @@ int ReduceCPUKernel::Run() {
     return prepare_ret;
   }
   tmp_shape_ = in_tensors_.at(0)->shape();
-  src_data_ = static_cast<float *>(in_tensors_.at(0)->Data());
+  src_data_ = static_cast<float *>(in_tensors_.at(0)->MutableData());
   for (size_t i = 0; i < data_buffers_.size(); ++i) {
     dst_data_ = data_buffers_[i];
     int axis = axes_[i];
@@ -144,7 +144,7 @@ int ReduceCPUKernel::Run() {
     inner_size_ *= tmp_shape_[i];
   }
   axis_size_ = tmp_shape_[last_reduce_axis];
-  dst_data_ = reinterpret_cast<float *>(out_tensors_.at(0)->Data());
+  dst_data_ = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
   auto error_code = ParallelLaunch(THREAD_POOL_DEFAULT, ReduceImpl, this, context_->thread_num_);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "Reduce run error, error_code[" << error_code << "]";
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reduce.h b/mindspore/lite/src/runtime/kernel/arm/fp32/reduce.h
index b1fa2920ca..309d8e8cd5 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/reduce.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reduce.h
@@ -31,8 +31,8 @@ class ReduceCPUKernel : public ReduceBaseCPUKernel {
                 const int *src_shape, float *dst_data, const int tid, const int thread_num);

  public:
-  ReduceCPUKernel(OpParameter *param, const std::vector<lite::tensor::Tensor *> &inputs,
-                  const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  ReduceCPUKernel(OpParameter *param, const std::vector<lite::Tensor *> &inputs,
+                  const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                   const mindspore::lite::PrimitiveC *primitive)
       : ReduceBaseCPUKernel(param, inputs, outputs, ctx, primitive) {}
   ~ReduceCPUKernel() {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reshape.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/reshape.cc
index 15adf3e113..d3705caa89 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/reshape.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reshape.cc
@@ -41,8 +41,8 @@ int ReshapeCPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare fail!ret: " << ret;
     return ret;
   }
-  auto input_ptr = in_tensors_.at(kInputIndex)->Data();
-  auto output_ptr = out_tensors_.at(kOutputIndex)->Data();
+  auto input_ptr = in_tensors_.at(kInputIndex)->MutableData();
+  auto output_ptr = out_tensors_.at(kOutputIndex)->MutableData();
   size_t data_size = in_tensors_.at(kInputIndex)->Size();
   Reshape(input_ptr, output_ptr, data_size);
   return RET_OK;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reshape.h b/mindspore/lite/src/runtime/kernel/arm/fp32/reshape.h
index cc00a80b28..01002beadd 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/reshape.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reshape.h
@@ -28,8 +28,8 @@ using mindspore::lite::Context;
 namespace mindspore::kernel {
 class ReshapeCPUKernel : public ReshapeBaseCPUKernel {
  public:
-  ReshapeCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                   const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
+  ReshapeCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                   const std::vector<lite::Tensor *> &outputs, const Context *ctx,
                    const mindspore::lite::PrimitiveC *primitive)
       : ReshapeBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~ReshapeCPUKernel() = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/resize.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/resize.cc
index 7d5dd83699..2846ef2e9a 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/resize.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/resize.cc
@@ -147,11 +147,11 @@ int ResizeImpl(void *cdata, int task_id) {

 int ResizeCPUKernel::RunImpl(int task_id) {
   auto input = in_tensors_.at(0);
-  auto input_data = reinterpret_cast<float *>(input->Data());
+  auto input_data = reinterpret_cast<float *>(input->MutableData());
   if (input_data == nullptr) {
     return RET_NULL_PTR;
   }
-  auto output_data = reinterpret_cast<float *>(out_tensors_.at(0)->Data());
+  auto output_data = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
   if (output_data == nullptr) {
     return RET_NULL_PTR;
   }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/resize.h b/mindspore/lite/src/runtime/kernel/arm/fp32/resize.h
index ebc0142496..fccb71d202 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/resize.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/resize.h
@@ -26,8 +26,8 @@ using mindspore::schema::ResizeMethod;
 namespace mindspore::kernel {
 class ResizeCPUKernel : public ResizeBaseCPUKernel {
  public:
-  ResizeCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                  const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  ResizeCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                  const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                   const mindspore::lite::PrimitiveC *primitive)
       : ResizeBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.cc
index 4eb82488cc..66d403d7fd 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.cc
@@ -130,8 +130,8 @@ int ReverseCPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare failed.";
     return RET_ERROR;
   }
-  in_ptr_ = reinterpret_cast<float *>(in_tensors_[0]->Data());
-  out_ptr_ = reinterpret_cast<float *>(out_tensors_[0]->Data());
+  in_ptr_ = reinterpret_cast<float *>(in_tensors_[0]->MutableData());
+  out_ptr_ = reinterpret_cast<float *>(out_tensors_[0]->MutableData());
   ret = ParallelLaunch(THREAD_POOL_DEFAULT, ReverseRun, this, thread_sz_count_);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "Reverse run error error_code[" << ret << "]";
@@ -140,10 +140,9 @@ int ReverseCPUKernel::Run() {
   return RET_OK;
 }

-kernel::LiteKernel *CpuReverseFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                const std::vector<lite::tensor::Tensor *> &outputs,
-                                                OpParameter *opParameter, const lite::Context *ctx,
-                                                const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuReverseFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                                const lite::Context *ctx, const kernel::KernelKey &desc,
                                                 const mindspore::lite::PrimitiveC *primitive) {
   if (opParameter == nullptr) {
     MS_LOG(ERROR) << "opParameter is NULL!";
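(The hunks above mix two sizing accessors: Size() returns bytes, which is what Reshape's raw copy wants, while ElementsNum() returns the element count, which pad.cc now multiplies by sizeof(float) itself. An illustration of the distinction, assuming a 2x3 float32 lite::Tensor:)

  // t->ElementsNum() == 6                         // elements
  // t->Size()        == 6 * sizeof(float) == 24   // bytes
  memcpy(dst->MutableData(), src->MutableData(), src->Size());          // byte count
  memset(out_data, 0, out_tensor->ElementsNum() * sizeof(float));       // element count * width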
"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.h b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.h index c401619938..ae546474f9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.h @@ -28,8 +28,8 @@ using mindspore::lite::Context; namespace mindspore::kernel { class ReverseCPUKernel : public LiteKernel { public: - ReverseCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + ReverseCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {} ~ReverseCPUKernel() { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_sequence.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_sequence.cc index 7ef4c84497..37cf864c27 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_sequence.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_sequence.cc @@ -92,15 +92,15 @@ int ReverseSequenceCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << ret; return ret; } - float *input0 = reinterpret_cast(in_tensors_.at(0)->Data()); - int *input1 = reinterpret_cast(in_tensors_.at(1)->Data()); - float *output = reinterpret_cast(out_tensors_.at(0)->Data()); + float *input0 = reinterpret_cast(in_tensors_.at(0)->MutableData()); + int *input1 = reinterpret_cast(in_tensors_.at(1)->MutableData()); + float *output = reinterpret_cast(out_tensors_.at(0)->MutableData()); ReverseSequence(input0, input1, output, reinterpret_cast(op_parameter_)); return RET_OK; } -kernel::LiteKernel *CpuReverseSequenceFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, +kernel::LiteKernel *CpuReverseSequenceFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *parameter, const lite::Context *ctx, const KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_sequence.h b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_sequence.h index ae7e71229f..de885eba7f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_sequence.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_sequence.h @@ -23,8 +23,8 @@ namespace mindspore::kernel { class ReverseSequenceCPUKernel : public LiteKernel { public: - ReverseSequenceCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + ReverseSequenceCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~ReverseSequenceCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling.cc index 9256ada127..039596ae1d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling.cc @@ -88,9 +88,9 @@ int ROIPoolingCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail! 
ret: " << ret; return ret; } - in_ptr_ = reinterpret_cast(in_tensors_.front()->Data()); - out_ptr_ = reinterpret_cast(out_tensors_.front()->Data()); - roi_ptr_ = reinterpret_cast(in_tensors_.at(1)->Data()); + in_ptr_ = reinterpret_cast(in_tensors_.front()->MutableData()); + out_ptr_ = reinterpret_cast(out_tensors_.front()->MutableData()); + roi_ptr_ = reinterpret_cast(in_tensors_.at(1)->MutableData()); ret = ParallelLaunch(THREAD_POOL_DEFAULT, ROIPoolingRun, this, param_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "ROIPooling error: error_code[" << ret << "]"; @@ -99,10 +99,9 @@ int ROIPoolingCPUKernel::Run() { return ret; } -kernel::LiteKernel *CpuROIPoolingFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuROIPoolingFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling.h b/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling.h index b870d1669d..69c5cf5a70 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling.h @@ -23,8 +23,8 @@ namespace mindspore::kernel { class ROIPoolingCPUKernel : public LiteKernel { public: - ROIPoolingCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + ROIPoolingCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) { param_ = reinterpret_cast(parameter); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/scale.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/scale.cc index ccdbb6080a..b9a41da299 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/scale.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/scale.cc @@ -43,7 +43,7 @@ ScaleCPUKernel::~ScaleCPUKernel() { int ScaleCPUKernel::InitScaleOffset() { auto scale_tensor = in_tensors_.at(1); - float *scale_ptr = reinterpret_cast(in_tensors_.at(1)->Data()); + float *scale_ptr = reinterpret_cast(in_tensors_.at(1)->MutableData()); if (scale_ptr != nullptr) { scale_param_->const_scale_ = true; scale_ = reinterpret_cast(malloc(scale_tensor->ElementsNum() * sizeof(float))); @@ -65,7 +65,7 @@ int ScaleCPUKernel::InitScaleOffset() { memset(offset_, 0, scale_param_->axis_size_ * sizeof(float)); if (in_tensors_.size() == 3) { auto offset_tensor = in_tensors_.at(2); - memcpy(offset_, offset_tensor->Data(), offset_tensor->ElementsNum() * sizeof(float)); + memcpy(offset_, offset_tensor->MutableData(), offset_tensor->ElementsNum() * sizeof(float)); } return RET_OK; } @@ -154,13 +154,13 @@ int ScaleCPUKernel::Run() { return ret; } auto in_tensor = in_tensors_.front(); - input_ptr_ = reinterpret_cast(in_tensor->Data()); + input_ptr_ = reinterpret_cast(in_tensor->MutableData()); if (scale_ == nullptr) { auto scale_tensor = in_tensors_[1]; - scale_ = reinterpret_cast(scale_tensor->Data()); + scale_ = reinterpret_cast(scale_tensor->MutableData()); } auto out_tensor = out_tensors_.front(); - output_ptr_ = reinterpret_cast(out_tensor->Data()); + output_ptr_ = 
reinterpret_cast(out_tensor->MutableData()); ret = ParallelLaunch(THREAD_POOL_DEFAULT, ScaleRun, this, op_parameter_->thread_num_); if (ret != RET_OK) { @@ -169,7 +169,7 @@ int ScaleCPUKernel::Run() { } return RET_OK; } -int RestoreScaleWeight(lite::tensor::Tensor *input_tensor) { +int RestoreScaleWeight(lite::Tensor *input_tensor) { MS_ASSERT(input_tensor != nullptr); if (input_tensor->data_type() != kNumberTypeUInt8) { MS_LOG(ERROR) << "mat mul input type error" << input_tensor->data_type(); @@ -179,8 +179,8 @@ int RestoreScaleWeight(lite::tensor::Tensor *input_tensor) { MS_LOG(ERROR) << "no quant param"; return RET_ERROR; } - const auto* quant_data = static_cast(input_tensor->Data()); - auto* dequant_data = static_cast(malloc(input_tensor->DataSize() * sizeof(float))); + const auto *quant_data = static_cast(input_tensor->MutableData()); + auto *dequant_data = static_cast(malloc(input_tensor->ElementsNum() * sizeof(float))); if (dequant_data == nullptr) { MS_LOG(ERROR) << "malloc faile"; return RET_ERROR; @@ -192,15 +192,15 @@ int RestoreScaleWeight(lite::tensor::Tensor *input_tensor) { MS_LOG(ERROR) << "Quant param not equal channel num " << input_tensor->GetQuantParams().size() << channels; return RET_ERROR; } - size_t per_channel_size = input_tensor->DataSize() / channels; + size_t per_channel_size = input_tensor->ElementsNum() / channels; auto quant_param = input_tensor->GetQuantParams(); for (size_t i = 0; i < channels; i++) { auto param = quant_param.at(i); auto scale = param.scale; auto zero_point = param.zeroPoint; for (size_t j = 0; j < per_channel_size; j++) { - dequant_data[per_channel_size * i + j] = static_cast( - (quant_data[per_channel_size * i + j] - zero_point) * scale); + dequant_data[per_channel_size * i + j] = + static_cast((quant_data[per_channel_size * i + j] - zero_point) * scale); } } } else { @@ -208,21 +208,20 @@ int RestoreScaleWeight(lite::tensor::Tensor *input_tensor) { auto param = quant_param.front(); auto scale = param.scale; auto zero_point = param.zeroPoint; - for (int64_t j = 0; j < input_tensor->DataSize(); j++) { + for (int64_t j = 0; j < input_tensor->ElementsNum(); j++) { dequant_data[j] = static_cast((quant_data[j] - zero_point) * scale); } } input_tensor->SetData(dequant_data); return RET_OK; } -kernel::LiteKernel *CpuScaleFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuScaleFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(desc.type == schema::PrimitiveType_Scale); auto *weight_tensor = inputs.at(kWeightIndex); - auto *restore_data = weight_tensor->Data(); + auto *restore_data = weight_tensor->MutableData(); if (primitive->GetQuantType() == schema::QuantType_WeightQuant) { RestoreScaleWeight(inputs.at(kWeightIndex)); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/scale.h b/mindspore/lite/src/runtime/kernel/arm/fp32/scale.h index 42b1daba5d..01b334f10b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/scale.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/scale.h @@ -26,8 +26,8 @@ namespace mindspore::kernel { class ScaleCPUKernel : public LiteKernel { public: - ScaleCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + ScaleCPUKernel(OpParameter *parameter, const 
std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) { scale_param_ = reinterpret_cast(op_parameter_); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.cc index 04917fdcf2..1145680b15 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.cc @@ -46,12 +46,12 @@ int ScatterNDCPUKernel::ReSize() { auto indices = in_tensors_.at(kScatterIndicesIndex); auto update = in_tensors_.at(kScatterUpdateIndex); - update_ptr_ = reinterpret_cast(update->Data()); - output_ptr_ = reinterpret_cast(out_tensors_.at(0)->Data()); + update_ptr_ = reinterpret_cast(update->MutableData()); + output_ptr_ = reinterpret_cast(out_tensors_.at(0)->MutableData()); // check indices shape auto shape_rank = shape->ElementsNum(); - auto shape_data = reinterpret_cast(shape->Data()); + auto shape_data = reinterpret_cast(shape->MutableData()); auto indice_unit_rank = indices->shape().back(); if (indice_unit_rank > shape_rank) { MS_LOG(ERROR) << "Value of last dimension of indices is greater than shape rank."; @@ -107,7 +107,7 @@ int ScatterNDCPUKernel::ReSize() { num_unit_ *= update_shape[i]; } - int *indices_ptr = reinterpret_cast(indices->Data()); + int *indices_ptr = reinterpret_cast(indices->MutableData()); for (int i = 0; i < num_unit_; i++) { int tmp_stride = 0; for (int j = 0; j < indice_unit_rank; j++) { @@ -162,10 +162,9 @@ int ScatterNDCPUKernel::Run() { return RET_OK; } -kernel::LiteKernel *CpuScatterNDFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuScatterNDFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(desc.type == schema::PrimitiveType_ScatterND); if (opParameter == nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.h b/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.h index 48dd88f82f..b84110c9c2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.h @@ -25,8 +25,8 @@ namespace mindspore::kernel { class ScatterNDCPUKernel : public LiteKernel { public: - explicit ScatterNDCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + explicit ScatterNDCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~ScatterNDCPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/shape.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/shape.cc index 5c67b293b0..77f328c183 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/shape.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/shape.cc @@ -42,22 +42,21 @@ int ShapeCPUKernel::Run() { MS_LOG(ERROR) << "null pointer dereferencing."; return RET_ERROR; } - if (in_tensor->Data() == nullptr || out_tensor->Data() == nullptr) { + if (in_tensor->MutableData() == nullptr || out_tensor->MutableData() == nullptr) { MS_LOG(ERROR) << "null pointer 
dereferencing."; return RET_ERROR; } for (size_t i = 0; i < in_tensor->shape().size(); i++) { - reinterpret_cast(out_tensor->Data())[i] = in_tensor->shape()[i]; + reinterpret_cast(out_tensor->MutableData())[i] = in_tensor->shape()[i]; } return RET_OK; } -kernel::LiteKernel *CpuShapeFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuShapeFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(desc.type == schema::PrimitiveType_Shape); if (opParameter == nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/shape.h b/mindspore/lite/src/runtime/kernel/arm/fp32/shape.h index 087c6ed1e9..5e79144893 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/shape.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/shape.h @@ -25,8 +25,8 @@ namespace mindspore::kernel { class ShapeCPUKernel : public LiteKernel { public: - ShapeCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + ShapeCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~ShapeCPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/slice.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/slice.cc index bfe8c5acc4..d0f3322447 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/slice.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/slice.cc @@ -75,8 +75,8 @@ int SliceCPUKernel::Init() { } int SliceCPUKernel::SliceParallelRun(int thread_id) { - const float *input_data = reinterpret_cast(in_tensors_[0]->Data()); - float *output_data = reinterpret_cast(out_tensors_[0]->Data()); + const float *input_data = reinterpret_cast(in_tensors_[0]->MutableData()); + float *output_data = reinterpret_cast(out_tensors_[0]->MutableData()); SliceParameter *param = reinterpret_cast(op_parameter_); DoSlice(input_data, output_data, param, thread_id); return RET_OK; @@ -100,8 +100,8 @@ int SliceCPUKernel::Run() { PadSliceParameterTo4D(param); } - const float *input_data = reinterpret_cast(in_tensors_[0]->Data()); - float *output_data = reinterpret_cast(out_tensors_[0]->Data()); + const float *input_data = reinterpret_cast(in_tensors_[0]->MutableData()); + float *output_data = reinterpret_cast(out_tensors_[0]->MutableData()); if (param->size_[1] < param->op_parameter_.thread_num_) { DoSliceNoParallel(input_data, output_data, param); return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/slice.h b/mindspore/lite/src/runtime/kernel/arm/fp32/slice.h index 22f99dde09..15a876a91a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/slice.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/slice.h @@ -23,8 +23,8 @@ namespace mindspore::kernel { class SliceCPUKernel : public SliceBaseCPUKernel { public: - SliceCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + SliceCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : SliceBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~SliceCPUKernel() = 
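(The RestoreScaleWeight hunk in scale.cc above applies the standard affine dequantization, real = (q - zeroPoint) * scale, per channel. The same loop, isolated as a sketch; quant/dequant/channels/per_channel_size are illustrative local names:)

  for (size_t c = 0; c < channels; ++c) {
    float scale = quant_params[c].scale;
    int zp = quant_params[c].zeroPoint;
    for (size_t j = 0; j < per_channel_size; ++j) {
      size_t idx = c * per_channel_size + j;
      dequant[idx] = static_cast<float>((quant[idx] - zp) * scale);  // real = (q - zp) * s
    }
  }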
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/softmax.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/softmax.cc
index 280bdada8e..3abff61d56 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/softmax.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/softmax.cc
@@ -77,8 +77,8 @@ int SoftmaxCPUKernel::Run() {
     return RET_ERROR;
   }
   memset(sum_data_, 0, in_plane_size_ * out_plane_size_ * sizeof(float));
-  auto input_ptr = reinterpret_cast<float *>(in_tensors_.at(kInputIndex)->Data());
-  auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->Data());
+  auto input_ptr = reinterpret_cast<float *>(in_tensors_.at(kInputIndex)->MutableData());
+  auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->MutableData());
   Softmax(input_ptr, output_ptr, sum_data_, softmax_param_);
   return RET_OK;
 }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/softmax.h b/mindspore/lite/src/runtime/kernel/arm/fp32/softmax.h
index ed7ea27fa0..640c268e2f 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/softmax.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/softmax.h
@@ -24,8 +24,8 @@
 namespace mindspore::kernel {
 class SoftmaxCPUKernel : public SoftmaxBaseCPUKernel {
  public:
-  SoftmaxCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                   const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  SoftmaxCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                   const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                    const mindspore::lite::PrimitiveC *primitive)
       : SoftmaxBaseCPUKernel(parameter, inputs, outputs, ctx, primitive), sum_data_(nullptr) {}
   ~SoftmaxCPUKernel() override {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch.cc
index 5bf1e4fa0f..dcccada248 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch.cc
@@ -40,7 +40,7 @@ size_t EnumElement(int *shape, int n_dims) {
   }
   return total;
 }
-}
+}  // namespace

 int SpaceToBatchCPUKernel::Init() {
   SpaceToBatchParameter *param = reinterpret_cast<SpaceToBatchParameter *>(this->op_parameter_);
@@ -73,7 +73,7 @@ void SpaceToBatchCPUKernel::FreeTmpBuffer() {
 }

 int SpaceToBatchCPUKernel::ReSize() {
-  if (in_tensors_[0]->GetFormat() != schema::Format_NHWC) {
+  if (in_tensors_[0]->GetFormat() != schema::Format::Format_NHWC) {
     MS_LOG(ERROR) << "space_to_batch only support NHWC now!";
     return RET_FORMAT_ERR;
   }
@@ -102,8 +102,7 @@ int SpaceToBatchCPUKernel::ReSize() {
     FreeTmpBuffer();
     return RET_ERROR;
   }
-  pedding_input_ =
-    reinterpret_cast<float *>(context_->allocator->Malloc(num_elements_padded * sizeof(float)));
+  pedding_input_ = reinterpret_cast<float *>(context_->allocator->Malloc(num_elements_padded * sizeof(float)));
   if (pedding_input_ == nullptr) {
     MS_LOG(ERROR) << "malloc pedding buffer fail!";
     return RET_ERROR;
@@ -121,14 +120,14 @@ int SpaceToBatchCPUKernel::Run() {
   }
   auto input = in_tensors_[0];
   auto output = out_tensors_[0];
-  const float *input_ptr_ = reinterpret_cast<const float *>(input->Data());
-  float *output_ptr_ = reinterpret_cast<float *>(output->Data());
+  const float *input_ptr_ = reinterpret_cast<const float *>(input->MutableData());
+  float *output_ptr_ = reinterpret_cast<float *>(output->MutableData());
   SpaceToBatchParameter *param = reinterpret_cast<SpaceToBatchParameter *>(this->op_parameter_);
   auto in_shape = input->shape();
   auto out_shape = output->shape();
   if (param->need_paddings_) {
-    DoSpaceToBatchPaddingNHWC(input_ptr_, pedding_input_, in_shape.data(), param->paddings_,
-                              padded_in_shape_.data(), pedding_h_data_, pedding_w_data_);
+    DoSpaceToBatchPaddingNHWC(input_ptr_, pedding_input_, in_shape.data(), param->paddings_, padded_in_shape_.data(),
+                              pedding_h_data_, pedding_w_data_);
     DoSpaceToBatchNHWC(pedding_input_, output_ptr_, param, padded_in_shape_.data(), out_shape.data());
     return RET_OK;
   } else {
@@ -137,10 +136,9 @@ int SpaceToBatchCPUKernel::Run() {
   }
 }  // namespace mindspore::kernel

-kernel::LiteKernel *CpuSpaceToBatchFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                     const std::vector<lite::tensor::Tensor *> &outputs,
-                                                     OpParameter *param, const lite::Context *ctx,
-                                                     const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuSpaceToBatchFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                     const std::vector<lite::Tensor *> &outputs, OpParameter *param,
+                                                     const lite::Context *ctx, const kernel::KernelKey &desc,
                                                      const mindspore::lite::PrimitiveC *primitive) {
   if (param == nullptr) {
     MS_LOG(ERROR) << "Input param is nullptr!";
@@ -155,8 +153,8 @@ kernel::LiteKernel *CpuSpaceToBatchFp32KernelCreator(const std::vector<lite::Tensor
   auto ret = kernel->Init();
   if (ret != RET_OK) {
     delete kernel;
-    MS_LOG(ERROR) << "Init kernel failed, name: " << param->name_ << ", type: "
-                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(param->type_));
+    MS_LOG(ERROR) << "Init kernel failed, name: " << param->name_
+                  << ", type: " << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(param->type_));
     return nullptr;
   }
   return kernel;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch.h b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch.h
index 2135d27c78..f84149c3ee 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch.h
@@ -22,8 +22,8 @@
 namespace mindspore::kernel {
 class SpaceToBatchCPUKernel : public LiteKernel {
  public:
-  SpaceToBatchCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                        const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  SpaceToBatchCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                        const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                         const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.cc
index e08f383894..d1dc8bafe3 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.cc
@@ -45,7 +45,7 @@ int SpaceToDepthCPUKernel::Init() {
 }

 int SpaceToDepthCPUKernel::ReSize() {
-  if (in_tensors_[0]->GetFormat() != schema::Format_NHWC) {
+  if (in_tensors_[0]->GetFormat() != schema::Format::Format_NHWC) {
     MS_LOG(ERROR) << "space_to_depth only support NHWC now!";
     return RET_FORMAT_ERR;
   }
@@ -90,9 +90,9 @@ int SpaceToDepthCPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare fail!ret: " << ret;
     return ret;
   }
-  input_ptr_ = reinterpret_cast<float *>(in_tensors_[0]->Data());
-  output_ptr_ = reinterpret_cast<float *>(out_tensors_[0]->Data());
-  if (in_tensors_[0]->GetFormat() == schema::Format_NHWC) {
+  input_ptr_ = reinterpret_cast<float *>(in_tensors_[0]->MutableData());
+  output_ptr_ = reinterpret_cast<float *>(out_tensors_[0]->MutableData());
+  if (in_tensors_[0]->GetFormat() == schema::Format::Format_NHWC) {
     ret = ParallelLaunch(THREAD_POOL_DEFAULT, SpaceToDepthRun, this, thread_h_num_);
     if (ret != RET_OK) {
       MS_LOG(ERROR) << "SpaceToDepth error error_code[" << ret << "]";
@@ -106,8 +106,8 @@ int SpaceToDepthCPUKernel::Run() {
   return RET_OK;
 }

-kernel::LiteKernel *CpuSpaceToDepthFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                     const std::vector<lite::tensor::Tensor *> &outputs,
+kernel::LiteKernel *CpuSpaceToDepthFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                     const std::vector<lite::Tensor *> &outputs,
                                                      OpParameter *opParameter, const lite::Context *ctx,
                                                      const kernel::KernelKey &desc,
                                                      const mindspore::lite::PrimitiveC *primitive) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.h b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.h
index 3db95679a8..4c5da3db3d 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.h
@@ -23,8 +23,8 @@
 namespace mindspore::kernel {
 class SpaceToDepthCPUKernel : public LiteKernel {
  public:
-  SpaceToDepthCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                        const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  SpaceToDepthCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                        const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                         const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~SpaceToDepthCPUKernel() = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense.cc
index 3e4d0b92ca..1384d18ef2 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense.cc
@@ -60,13 +60,13 @@ int SparseToDenseCPUKernel::Run() {
   auto input3 = in_tensors_.at(3);
   auto output0 = out_tensors_.at(0);

-  input_data_ = reinterpret_cast<float *>(input->Data());
-  total_number_ = reinterpret_cast<int *>(input1->Data());
-  snum_ = reinterpret_cast<int *>(input2->Data());
-  dnum_ = reinterpret_cast<int *>(input3->Data());
+  input_data_ = reinterpret_cast<float *>(input->MutableData());
+  total_number_ = reinterpret_cast<int *>(input1->MutableData());
+  snum_ = reinterpret_cast<int *>(input2->MutableData());
+  dnum_ = reinterpret_cast<int *>(input3->MutableData());
   sp_num_ = static_cast<int>(input->ElementsNum() / 2);

-  output_data = reinterpret_cast<float *>(out_tensors_.at(0)->Data());
+  output_data = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
   std::vector<int> temp_shape = output0->shape();
   output_shape_ = reinterpret_cast<int *>(temp_shape.data());
@@ -78,8 +78,8 @@ int SparseToDenseCPUKernel::Run() {
   return RET_OK;
 }

-kernel::LiteKernel *CpuSparseToDenseFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                      const std::vector<lite::tensor::Tensor *> &outputs,
+kernel::LiteKernel *CpuSparseToDenseFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                      const std::vector<lite::Tensor *> &outputs,
                                                       OpParameter *opParameter, const lite::Context *ctx,
                                                       const kernel::KernelKey &desc,
                                                       const mindspore::lite::PrimitiveC *primitive) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense.h b/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense.h
index 9dcfdc2a94..6f429760ac 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense.h
@@ -28,8 +28,8 @@ using mindspore::lite::Context;
 namespace mindspore::kernel {
 class SparseToDenseCPUKernel : public LiteKernel {
  public:
-  SparseToDenseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                         const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  SparseToDenseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                         const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                          const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {
     s2d_param_ = (reinterpret_cast<SparseToDenseParameter *>(op_parameter_));
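(Alongside the tensor renames, the space_to_batch/space_to_depth hunks above move format checks from the flat schema::Format_NHWC constant to the scoped schema::Format::Format_NHWC; the check itself is unchanged. For reference, the post-patch form:)

  if (in_tensors_[0]->GetFormat() != schema::Format::Format_NHWC) {
    MS_LOG(ERROR) << "only NHWC is supported by this kernel";  // wording illustrative
    return RET_FORMAT_ERR;
  }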
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/split.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/split.cc
index cb56abae50..5f990ab98a 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/split.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/split.cc
@@ -79,9 +79,9 @@ int SplitCPUKernel::Run() {
     return RET_ERROR;
   }
   auto in_tensor = in_tensors_.front();
-  input_ptr_ = reinterpret_cast<float *>(in_tensor->Data());
+  input_ptr_ = reinterpret_cast<float *>(in_tensor->MutableData());
   for (int i = 0; i < param->num_split_; i++) {
-    output_ptr_[i] = reinterpret_cast<float *>(out_tensors_.at(i)->Data());
+    output_ptr_[i] = reinterpret_cast<float *>(out_tensors_.at(i)->MutableData());
   }
   ret = ParallelLaunch(THREAD_POOL_DEFAULT, SplitRun, this, thread_n_num_);
   if (ret != RET_OK) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/split.h b/mindspore/lite/src/runtime/kernel/arm/fp32/split.h
index 90be466618..7191b93635 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/split.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/split.h
@@ -24,8 +24,8 @@ namespace mindspore::kernel {
 class SplitCPUKernel : public SplitBaseCPUKernel {
  public:
-  SplitCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                 const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  SplitCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                 const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                  const mindspore::lite::PrimitiveC *primitive)
       : SplitBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~SplitCPUKernel() override = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/squeeze.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/squeeze.cc
index f666a4db29..9836960533 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/squeeze.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/squeeze.cc
@@ -40,12 +40,12 @@ int SqueezeCPUKernel::Run() {
   size_t data_size = in_tensors_.front()->Size();
   if (in_tensors_.front()->data_type() == kNumberTypeInt32) {
-    auto input_ptr = reinterpret_cast<int32_t *>(in_tensors_.front()->Data());
-    auto output_ptr = reinterpret_cast<int32_t *>(out_tensors_.front()->Data());
+    auto input_ptr = reinterpret_cast<int32_t *>(in_tensors_.front()->MutableData());
+    auto output_ptr = reinterpret_cast<int32_t *>(out_tensors_.front()->MutableData());
     ret = DoSqueezeInt32(input_ptr, output_ptr, data_size);
   } else {
-    auto input_ptr = reinterpret_cast<float *>(in_tensors_.front()->Data());
-    auto output_ptr = reinterpret_cast<float *>(out_tensors_.front()->Data());
+    auto input_ptr = reinterpret_cast<float *>(in_tensors_.front()->MutableData());
+    auto output_ptr = reinterpret_cast<float *>(out_tensors_.front()->MutableData());
     ret = DoSqueeze(input_ptr, output_ptr, data_size);
   }
@@ -56,10 +56,9 @@ int SqueezeCPUKernel::Run() {
   return RET_OK;
 }
-kernel::LiteKernel *CpuSqueezeFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                const std::vector<lite::tensor::Tensor *> &outputs,
-                                                OpParameter *parameter, const lite::Context *ctx,
-                                                const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuSqueezeFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                const std::vector<lite::Tensor *> &outputs, OpParameter *parameter,
+                                                const lite::Context *ctx, const kernel::KernelKey &desc,
                                                 const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(desc.type == schema::PrimitiveType_Squeeze);
   if (parameter == nullptr) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/squeeze.h b/mindspore/lite/src/runtime/kernel/arm/fp32/squeeze.h
index ce6ffaa74f..1c381b3ec3 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/squeeze.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/squeeze.h
@@ -24,8 +24,8 @@ namespace mindspore::kernel {
 class SqueezeCPUKernel : public LiteKernel {
  public:
-  explicit SqueezeCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                            const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  explicit SqueezeCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                            const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                             const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~SqueezeCPUKernel() override = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/stack.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/stack.cc
index 72ba9a7127..04eedd8e19 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/stack.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/stack.cc
@@ -50,33 +50,32 @@ int StackCPUKernel::Run() {
   size_t inputs_num = in_tensors_.size();
   auto input0 = in_tensors_[0];
   if (inputs_num == 1) {
-    auto *output_data = reinterpret_cast<int8_t *>(out_tensors_[0]->Data());
-    DoStackOneInput(reinterpret_cast<const int8_t *>(input0->Data()), output_data, input0->Size());
+    auto *output_data = reinterpret_cast<int8_t *>(out_tensors_[0]->MutableData());
+    DoStackOneInput(reinterpret_cast<const int8_t *>(input0->MutableData()), output_data, input0->Size());
     return RET_OK;
   }
   auto input0_shape = in_tensors_[0]->shape();
   if (in_tensors_[0]->data_type() == kNumberTypeFloat32 || in_tensors_[0]->data_type() == kNumberTypeFloat) {
-    auto *output_data = reinterpret_cast<float *>(out_tensors_[0]->Data());
+    auto *output_data = reinterpret_cast<float *>(out_tensors_[0]->MutableData());
     float *inputs[inputs_num];
     for (size_t i = 0; i < inputs_num; ++i) {
-      inputs[i] = reinterpret_cast<float *>(in_tensors_[i]->Data());
+      inputs[i] = reinterpret_cast<float *>(in_tensors_[i]->MutableData());
     }
     DoStack(inputs, inputs_num, input0_shape.data(), input0_shape.size(), axis_, output_data);
   } else {
-    auto *output_data = reinterpret_cast<int32_t *>(out_tensors_[0]->Data());
+    auto *output_data = reinterpret_cast<int32_t *>(out_tensors_[0]->MutableData());
     int32_t *inputs[inputs_num];
     for (size_t i = 0; i < inputs_num; ++i) {
-      inputs[i] = reinterpret_cast<int32_t *>(in_tensors_[i]->Data());
+      inputs[i] = reinterpret_cast<int32_t *>(in_tensors_[i]->MutableData());
     }
     DoStackInt32(inputs, inputs_num, input0_shape.data(), input0_shape.size(), axis_, output_data);
   }
   return RET_OK;
 }
-kernel::LiteKernel *CpuStackFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                              const std::vector<lite::tensor::Tensor *> &outputs,
-                                              OpParameter *op_parameter, const lite::Context *ctx,
-                                              const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuStackFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                              const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter,
+                                              const lite::Context *ctx, const kernel::KernelKey &desc,
                                               const mindspore::lite::PrimitiveC *primitive) {
   if (op_parameter == nullptr) {
     MS_LOG(ERROR) << "Input op_parameter is nullptr!";
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/stack.h b/mindspore/lite/src/runtime/kernel/arm/fp32/stack.h
index 77a4f64f8a..70fbb75989 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/stack.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/stack.h
@@ -22,8 +22,8 @@ namespace mindspore::kernel {
 class StackCPUKernel : public LiteKernel {
  public:
-  StackCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                 const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  StackCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                 const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                  const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
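Aside on stack.cc above: `float *inputs[inputs_num]` with a runtime `inputs_num` is a variable-length array, a GCC/Clang extension rather than standard C++, so this pattern is worth flagging even though the patch only reformats around it. A self-contained sketch of the portable alternative using std::vector (DoStackAxis0 is a simplified stand-in for the nnacl DoStack call, which for a new leading axis reduces to concatenation):

    #include <cstdio>
    #include <vector>

    // Stacking N equal-length buffers along a new leading axis is just
    // concatenation of the buffers.
    static void DoStackAxis0(float *const *inputs, size_t n, size_t per_input,
                             float *out) {
      for (size_t i = 0; i < n; ++i) {
        for (size_t j = 0; j < per_input; ++j) {
          out[i * per_input + j] = inputs[i][j];
        }
      }
    }

    int main() {
      std::vector<float> a = {1, 2}, b = {3, 4};
      // Portable replacement for the VLA `float *inputs[inputs_num]`.
      std::vector<float *> inputs = {a.data(), b.data()};
      std::vector<float> out(4);
      DoStackAxis0(inputs.data(), inputs.size(), 2, out.data());
      for (float v : out) std::printf("%g ", v);  // 1 2 3 4
      return 0;
    }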
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/tile.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/tile.cc
index 73a1ac797c..1c62aaee7c 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/tile.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/tile.cc
@@ -56,15 +56,15 @@ int TileCPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare fail!ret: " << ret;
     return ret;
   }
-  auto input_addr = reinterpret_cast<float *>(in_tensors_.at(0)->Data());
-  auto output_addr = reinterpret_cast<float *>(out_tensors_.at(0)->Data());
+  auto input_addr = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
+  auto output_addr = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
   Tile(input_addr, output_addr, reinterpret_cast<TileParameter *>(op_parameter_));
   return RET_OK;
 }
-kernel::LiteKernel *CpuTileFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                             const std::vector<lite::tensor::Tensor *> &outputs, OpParameter *parameter,
+kernel::LiteKernel *CpuTileFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                             const std::vector<lite::Tensor *> &outputs, OpParameter *parameter,
                                              const lite::Context *ctx, const KernelKey &desc,
                                              const mindspore::lite::PrimitiveC *primitive) {
   if (parameter == nullptr || ctx == nullptr) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/tile.h b/mindspore/lite/src/runtime/kernel/arm/fp32/tile.h
index 49e9d93e7a..74a1377780 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/tile.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/tile.h
@@ -23,8 +23,8 @@ namespace mindspore::kernel {
 class TileCPUKernel : public LiteKernel {
  public:
-  explicit TileCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                         const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  explicit TileCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                         const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                          const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~TileCPUKernel() override {}
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/topk.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/topk.cc
index 1ce3ef0df1..dd64072941 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/topk.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/topk.cc
@@ -34,7 +34,7 @@ int TopKCPUKernel::Init() {
 }
 int TopKCPUKernel::ReSize() {
-  lite::tensor::Tensor *input = in_tensors_.at(0);
+  lite::Tensor *input = in_tensors_.at(0);
   TopkParameter *parameter = reinterpret_cast<TopkParameter *>(op_parameter_);
   parameter->last_dim_size_ = input->shape()[input->shape().size() - 1];
   parameter->loop_num_ = 1;
@@ -50,9 +50,9 @@ int TopKCPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare failed.";
     return RET_ERROR;
   }
-  auto input_data = reinterpret_cast<float *>(in_tensors_.at(0)->Data());
-  auto output_data = reinterpret_cast<float *>(out_tensors_.at(0)->Data());
-  auto output_index = reinterpret_cast<int32_t *>(out_tensors_.at(1)->Data());
+  auto input_data = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
+  auto output_data = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
+  auto output_index = reinterpret_cast<int32_t *>(out_tensors_.at(1)->MutableData());
   MS_ASSERT(context_->allocator != nullptr);
   TopkParameter *parameter = reinterpret_cast<TopkParameter *>(op_parameter_);
@@ -66,8 +66,8 @@ int TopKCPUKernel::Run() {
   return RET_OK;
 }
-kernel::LiteKernel *CpuTopKFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                             const std::vector<lite::tensor::Tensor *> &outputs, OpParameter *parameter,
+kernel::LiteKernel *CpuTopKFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                             const std::vector<lite::Tensor *> &outputs, OpParameter *parameter,
                                              const lite::Context *ctx, const KernelKey &desc,
                                              const mindspore::lite::PrimitiveC *primitive) {
   if (parameter == nullptr) {
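Note on TopKCPUKernel::ReSize() above: the kernel selects along the innermost dimension, so last_dim_size_ is the trailing dim and loop_num_ is presumably the product of the leading dims (the loop that accumulates it falls outside this hunk). A hedged sketch of that bookkeeping with plain ints, not the real TopkParameter:

    #include <cstdio>
    #include <vector>

    // A row-wise top-k kernel visits loop_num rows of last_dim elements each.
    int main() {
      std::vector<int> shape = {2, 3, 4};  // example input shape
      int last_dim = shape.back();
      int loop_num = 1;
      for (size_t i = 0; i + 1 < shape.size(); ++i) loop_num *= shape[i];
      std::printf("last_dim=%d loop_num=%d\n", last_dim, loop_num);  // 4, 6
      return 0;
    }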
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/topk.h b/mindspore/lite/src/runtime/kernel/arm/fp32/topk.h
index 9a8d0785cc..8bd4288b19 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/topk.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/topk.h
@@ -23,12 +23,11 @@ namespace mindspore::kernel {
 class TopKCPUKernel : public LiteKernel {
  public:
-  explicit TopKCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                         const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  explicit TopKCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                         const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                          const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
-  ~TopKCPUKernel() override {
-  }
+  ~TopKCPUKernel() override {}
   int Init() override;
   int ReSize() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.cc
index 283906e3d3..702046c65f 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.cc
@@ -53,9 +53,28 @@ int TransposeCPUKernel::ReSize() {
     param->strides_[i] = in_shape[i + 1] * param->strides_[i + 1];
     param->out_strides_[i] = out_shape[i + 1] * param->out_strides_[i + 1];
   }
+  if (this->in_shape_ != nullptr) {
+    free(this->in_shape_);
+  }
+  if (this->out_shape_ != nullptr) {
+    free(this->out_shape_);
+  }
+  in_shape_ = reinterpret_cast<int *>(malloc(in_shape.size() * sizeof(int)));
+  out_shape_ = reinterpret_cast<int *>(malloc(out_shape.size() * sizeof(int)));
+  memcpy(in_shape_, in_shape.data(), in_shape.size() * sizeof(int));
+  memcpy(out_shape_, out_shape.data(), in_shape.size() * sizeof(int));
   return RET_OK;
 }
+TransposeCPUKernel::~TransposeCPUKernel() {
+  if (this->in_shape_ != nullptr) {
+    free(this->in_shape_);
+  }
+  if (this->out_shape_ != nullptr) {
+    free(this->out_shape_);
+  }
+}
+
 int TransposeCPUKernel::TransposeParallel(int task_id) {
   int num_unit_thread = MSMIN(thread_h_stride_, num_unit_ - task_id * thread_h_stride_);
   if (num_unit_thread <= 0) {
@@ -96,10 +115,8 @@ int TransposeCPUKernel::Run() {
     MS_LOG(ERROR) << "null pointer dreferencing.";
     return RET_ERROR;
   }
-  in_data_ = reinterpret_cast<float *>(in_tensor->Data());
-  out_data_ = reinterpret_cast<float *>(out_tensor->Data());
-  in_shape_ = const_cast<int *>(in_tensor->shape().data());
-  out_shape_ = const_cast<int *>(out_tensor->shape().data());
+  in_data_ = reinterpret_cast<float *>(in_tensor->MutableData());
+  out_data_ = reinterpret_cast<float *>(out_tensor->MutableData());
   ret = ParallelLaunch(THREAD_POOL_DEFAULT, TransposeRun, this, thread_h_num_);
   if (ret != RET_OK) {
@@ -109,10 +126,9 @@ int TransposeCPUKernel::Run() {
   return ret;
 }  // namespace mindspore::kernel
-kernel::LiteKernel *CpuTransposeFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                  const std::vector<lite::tensor::Tensor *> &outputs,
-                                                  OpParameter *opParameter, const lite::Context *ctx,
-                                                  const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuTransposeFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                  const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                                  const lite::Context *ctx, const kernel::KernelKey &desc,
                                                   const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(desc.type == schema::PrimitiveType_Transpose);
   if (opParameter == nullptr) {
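Reviewer note on the transpose change above: ReSize() now deep-copies the shapes instead of caching pointers into the temporaries returned by shape(), and the new destructor releases them; that is the dangling-pointer fix this hunk is really about. Two hedged observations: the mallocs are unchecked, and the second memcpy sizes the out_shape_ copy with in_shape.size() (out_shape.size() looks intended; it is only harmless while the ranks match). With std::vector members, the whole malloc/memcpy/free/destructor dance disappears. A sketch with a hypothetical stand-in, not the actual TransposeCPUKernel:

    #include <cstdio>
    #include <vector>

    // Owning the cached shapes with std::vector instead of malloc/free.
    class TransposeShapes {
     public:
      void ReSize(const std::vector<int> &in_shape, const std::vector<int> &out_shape) {
        in_shape_ = in_shape;    // deep copy; no manual sizing to get wrong
        out_shape_ = out_shape;  // freed automatically, no destructor needed
      }
      const int *in_data() const { return in_shape_.data(); }
      const int *out_data() const { return out_shape_.data(); }

     private:
      std::vector<int> in_shape_;
      std::vector<int> out_shape_;
    };

    int main() {
      TransposeShapes s;
      s.ReSize({1, 2, 3}, {3, 2, 1});
      std::printf("%d %d\n", s.in_data()[2], s.out_data()[0]);  // 3 3
      return 0;
    }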
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.h b/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.h
index 0fcd67f789..77746ef9de 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.h
@@ -26,11 +26,11 @@ namespace mindspore::kernel {
 class TransposeCPUKernel : public LiteKernel {
  public:
-  explicit TransposeCPUKernel(OpParameter *param, const std::vector<lite::tensor::Tensor *> &inputs,
-                              const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  explicit TransposeCPUKernel(OpParameter *param, const std::vector<lite::Tensor *> &inputs,
+                              const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                               const mindspore::lite::PrimitiveC *primitive)
-    : LiteKernel(param, inputs, outputs, ctx, primitive), thread_num_(ctx->thread_num_) {}
-  ~TransposeCPUKernel() override = default;
+      : LiteKernel(param, inputs, outputs, ctx, primitive), thread_num_(ctx->thread_num_) {}
+  ~TransposeCPUKernel() override;
   int Init() override;
   int ReSize() override;
@@ -44,8 +44,8 @@ class TransposeCPUKernel : public LiteKernel {
   int num_unit_;
   float *in_data_;
   float *out_data_;
-  int *in_shape_;
-  int *out_shape_;
+  int *in_shape_ = nullptr;
+  int *out_shape_ = nullptr;
 };
 }  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/unique.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/unique.cc
index b947b522cf..5e1306b87f 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/unique.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/unique.cc
@@ -33,9 +33,9 @@ int UniqueCPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare failed.";
     return RET_ERROR;
   }
-  auto input = reinterpret_cast<float *>(in_tensors_.at(0)->Data());
-  auto output0 = reinterpret_cast<float *>(out_tensors_.at(0)->Data());
-  auto output1 = reinterpret_cast<int *>(out_tensors_.at(1)->Data());
+  auto input = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
+  auto output0 = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
+  auto output1 = reinterpret_cast<int *>(out_tensors_.at(1)->MutableData());
   int output0_len = 0;
   Unique(input, in_tensors_.at(0)->ElementsNum(), output0, &output0_len, output1);
@@ -46,9 +46,9 @@ int UniqueCPUKernel::Run() {
   return RET_OK;
 }
-kernel::LiteKernel *CpuUniqueFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                               const std::vector<lite::tensor::Tensor *> &outputs,
-                                               OpParameter *parameter, const lite::Context *ctx, const KernelKey &desc,
+kernel::LiteKernel *CpuUniqueFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                               const std::vector<lite::Tensor *> &outputs, OpParameter *parameter,
+                                               const lite::Context *ctx, const KernelKey &desc,
                                                const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(parameter);
   MS_ASSERT(desc.type == PrimitiveType_Unique);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/unique.h b/mindspore/lite/src/runtime/kernel/arm/fp32/unique.h
index d9072cd7bc..7f3530cd7e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/unique.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/unique.h
@@ -23,8 +23,8 @@ namespace mindspore::kernel {
 class UniqueCPUKernel : public LiteKernel {
  public:
-  UniqueCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                  const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  UniqueCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                  const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                   const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~UniqueCPUKernel() = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/unsqueeze.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/unsqueeze.cc
index 496c8e3f8a..8ec52e594b 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/unsqueeze.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/unsqueeze.cc
@@ -71,8 +71,8 @@ int UnsqueezeCPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare failed.";
     return RET_ERROR;
   }
-  in_ptr_ = reinterpret_cast<float *>(in_tensors_.at(0)->Data());
-  out_ptr_ = reinterpret_cast<float *>(out_tensors_.at(0)->Data());
+  in_ptr_ = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
+  out_ptr_ = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
   ret = ParallelLaunch(THREAD_POOL_DEFAULT, UnsqueezeRun, this, thread_sz_count_);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "UnsqueezeRun error error_code[" << ret << "]";
@@ -81,10 +81,9 @@ int UnsqueezeCPUKernel::Run() {
   return RET_OK;
 }
-kernel::LiteKernel *CpuUnsqueezeFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                  const std::vector<lite::tensor::Tensor *> &outputs,
-                                                  OpParameter *parameter, const lite::Context *ctx,
-                                                  const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuUnsqueezeFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                  const std::vector<lite::Tensor *> &outputs, OpParameter *parameter,
+                                                  const lite::Context *ctx, const kernel::KernelKey &desc,
                                                   const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(parameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_Unsqueeze);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/unsqueeze.h b/mindspore/lite/src/runtime/kernel/arm/fp32/unsqueeze.h
index 119662a1e5..9c487590a1 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/unsqueeze.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/unsqueeze.h
@@ -26,8 +26,8 @@ using mindspore::lite::Context;
 namespace mindspore::kernel {
 class UnsqueezeCPUKernel : public LiteKernel {
  public:
-  UnsqueezeCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                     const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  UnsqueezeCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                     const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                      const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~UnsqueezeCPUKernel() = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/unstack.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/unstack.cc
index e12754a640..7a0608dc70 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/unstack.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/unstack.cc
@@ -69,18 +69,18 @@ int UnstackCPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare failed.";
     return RET_ERROR;
   }
-  float *input = reinterpret_cast<float *>(in_tensors_.at(0)->Data());
+  float *input = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
   size_t out_num = out_tensors_.size();
   for (size_t i = 0; i < out_num; i++) {
-    output_addr_array_[i] = reinterpret_cast<float *>(out_tensors_.at(i)->Data());
+    output_addr_array_[i] = reinterpret_cast<float *>(out_tensors_.at(i)->MutableData());
   }
   Unistack(input, output_addr_array_, reinterpret_cast<UnstackParameter *>(op_parameter_));
   return RET_OK;
 }
-kernel::LiteKernel *CpuUnstackFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                const std::vector<lite::tensor::Tensor *> &outputs,
-                                                OpParameter *parameter, const lite::Context *ctx, const KernelKey &desc,
+kernel::LiteKernel *CpuUnstackFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                const std::vector<lite::Tensor *> &outputs, OpParameter *parameter,
+                                                const lite::Context *ctx, const KernelKey &desc,
                                                 const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(parameter != nullptr);
   MS_ASSERT(desc.type == PrimitiveType_Unstack);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/unstack.h b/mindspore/lite/src/runtime/kernel/arm/fp32/unstack.h
index 4280afaf4c..ad07753547 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/unstack.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/unstack.h
@@ -23,8 +23,8 @@ namespace mindspore::kernel {
 class UnstackCPUKernel : public LiteKernel {
  public:
-  UnstackCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                   const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  UnstackCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                   const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                    const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~UnstackCPUKernel() { free(output_addr_array_); }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/where.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/where.cc
index 3a35179a20..232664158c 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/where.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/where.cc
@@ -60,10 +60,10 @@ int WhereCPUKernel::Run() {
   int num1_ = input1->ElementsNum();
   int num2_ = input2->ElementsNum();
-  input_data = reinterpret_cast<bool *>(input->Data());
-  input_data1 = reinterpret_cast<float *>(input1->Data());
-  input_data2 = reinterpret_cast<float *>(input2->Data());
-  output_data = reinterpret_cast<float *>(out_tensors_.at(0)->Data());
+  input_data = reinterpret_cast<bool *>(input->MutableData());
+  input_data1 = reinterpret_cast<float *>(input1->MutableData());
+  input_data2 = reinterpret_cast<float *>(input2->MutableData());
+  output_data = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
   int num_max = num > num1_ ? num : (num1_ > num2_ ? num1_ : num2_);
   where_param_->num_ = num;
   where_param_->num1_ = num1_;
@@ -87,10 +87,9 @@ int WhereCPUKernel::Run() {
   return RET_OK;
 }
-kernel::LiteKernel *CpuWhereFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                              const std::vector<lite::tensor::Tensor *> &outputs,
-                                              OpParameter *opParameter, const lite::Context *ctx,
-                                              const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuWhereFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                              const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                              const lite::Context *ctx, const kernel::KernelKey &desc,
                                              const mindspore::lite::PrimitiveC *primitive) {
   if (opParameter == nullptr) {
     MS_LOG(ERROR) << "input opParameter is nullptr!";
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/where.h b/mindspore/lite/src/runtime/kernel/arm/fp32/where.h
index 0b704a87b9..8129fd41cb 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/where.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/where.h
@@ -28,8 +28,8 @@ using mindspore::lite::Context;
 namespace mindspore::kernel {
 class WhereCPUKernel : public LiteKernel {
  public:
-  WhereCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                 const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  WhereCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                 const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                  const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {
     where_param_ = reinterpret_cast<WhereParameter *>(op_parameter_);
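Note on the Where kernel above: num_max takes the max of the three element counts because the condition tensor and the two value tensors may broadcast against each other; each output element then picks from input_data1 or input_data2 depending on the condition. A minimal scalar sketch of the non-broadcast case (standalone, not the nnacl implementation):

    #include <cstdio>

    // Element-wise where: out[i] = cond[i] ? x[i] : y[i].
    static void WhereRef(const bool *cond, const float *x, const float *y,
                         float *out, int n) {
      for (int i = 0; i < n; ++i) out[i] = cond[i] ? x[i] : y[i];
    }

    int main() {
      bool cond[3] = {true, false, true};
      float x[3] = {1, 2, 3}, y[3] = {10, 20, 30}, out[3];
      WhereRef(cond, x, y, out, 3);
      std::printf("%g %g %g\n", out[0], out[1], out[2]);  // 1 20 3
      return 0;
    }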
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike.cc
index a11f9ab864..9aa22abc0c 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike.cc
@@ -36,16 +36,15 @@ int ZerosLikeCPUKernel::Run() {
     return RET_ERROR;
   }
   auto input = in_tensors_.at(0);
-  auto input_data = reinterpret_cast<float *>(input->Data());
-  auto output_data = reinterpret_cast<float *>(out_tensors_.at(0)->Data());
+  auto input_data = reinterpret_cast<float *>(input->MutableData());
+  auto output_data = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
   ApproximateZerosLike(input_data, output_data, input->ElementsNum());
   return RET_OK;
 }
-kernel::LiteKernel *CpuZerosLikeFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                  const std::vector<lite::tensor::Tensor *> &outputs,
-                                                  OpParameter *opParameter, const lite::Context *ctx,
-                                                  const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuZerosLikeFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                  const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                                  const lite::Context *ctx, const kernel::KernelKey &desc,
                                                   const mindspore::lite::PrimitiveC *primitive) {
   if (opParameter == nullptr) {
     MS_LOG(ERROR) << "input opParameter is nullptr!";
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike.h b/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike.h
index 92ac4b8202..0c9cabb39a 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike.h
@@ -22,8 +22,8 @@ namespace mindspore::kernel {
 class ZerosLikeCPUKernel : public LiteKernel {
  public:
-  ZerosLikeCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                     const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  ZerosLikeCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                     const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                      const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc
index 0e15719b89..898a4e6afa 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc
@@ -37,9 +37,9 @@ int ActivationGradCPUKernel::Init() { return RET_OK; }
 int ActivationGradCPUKernel::ReSize() { return RET_OK; }
 int ActivationGradCPUKernel::DoActivation(int task_id) {
-  auto yt_addr = reinterpret_cast<float *>(in_tensors_.at(0)->Data());
-  auto input_addr = reinterpret_cast<float *>(in_tensors_.at(1)->Data());
-  auto output_addr = reinterpret_cast<float *>(out_tensors_.at(0)->Data());
+  auto yt_addr = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
+  auto input_addr = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());
+  auto output_addr = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
   int length = in_tensors_.at(0)->ElementsNum();
   auto error_code = RET_OK;
@@ -93,8 +93,8 @@ int ActivationGradCPUKernel::Run() {
   return RET_OK;
 }
-kernel::LiteKernel *CpuActivationGradFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                       const std::vector<lite::tensor::Tensor *> &outputs,
+kernel::LiteKernel *CpuActivationGradFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                       const std::vector<lite::Tensor *> &outputs,
                                                        OpParameter *opParameter, const lite::Context *ctx,
                                                        const kernel::KernelKey &desc,
                                                        const mindspore::lite::PrimitiveC *primitive) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.h
index 7f001e7109..920d034315 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.h
@@ -25,8 +25,8 @@ namespace mindspore::kernel {
 class ActivationGradCPUKernel : public LiteKernel {
  public:
-  explicit ActivationGradCPUKernel(OpParameter *param, const std::vector<lite::tensor::Tensor *> &inputs,
-                                   const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  explicit ActivationGradCPUKernel(OpParameter *param, const std::vector<lite::Tensor *> &inputs,
+                                   const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                                    const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(param, inputs, outputs, ctx, primitive) {
     param_act_grad_ = reinterpret_cast<ActivationGradParameter *>(param);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.cc
index eb6c316d68..4c88b3e7ca 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.cc
@@ -38,16 +38,15 @@ int ApplyMomentumCPUKernel::Run() {
     return prepare_ret;
   }
-  auto weight = reinterpret_cast<float *>(in_tensors_[0]->Data());
-  auto accumulate = reinterpret_cast<float *>(in_tensors_[1]->Data());
-  float learning_rate = reinterpret_cast<float *>(in_tensors_[2]->Data())[0];
-  auto gradient = reinterpret_cast<float *>(in_tensors_[3]->Data());
-  float moment = reinterpret_cast<float *>(in_tensors_[4]->Data())[0];
+  auto weight = reinterpret_cast<float *>(in_tensors_[0]->MutableData());
+  auto accumulate = reinterpret_cast<float *>(in_tensors_[1]->MutableData());
+  float learning_rate = reinterpret_cast<float *>(in_tensors_[2]->MutableData())[0];
+  auto gradient = reinterpret_cast<float *>(in_tensors_[3]->MutableData());
+  float moment = reinterpret_cast<float *>(in_tensors_[4]->MutableData())[0];
   size_t elem_num = in_tensors_[0]->ElementsNum();
   // align format
-  if (in_tensors_[3]->shape().size() == 4 &&
-      in_tensors_[3]->GetFormat() == schema::Format_NCHW &&
+  if (in_tensors_[3]->shape().size() == 4 && in_tensors_[3]->GetFormat() == schema::Format_NCHW &&
       in_tensors_[0]->GetFormat() == schema::Format_KHWC) {
     PackNCHWToNHWCFp32(gradient, workspace, in_tensors_[0]->Batch(),
                        in_tensors_[0]->Height() * in_tensors_[0]->Width(), in_tensors_[0]->Channel());
@@ -65,8 +64,8 @@ int ApplyMomentumCPUKernel::Run() {
 int ApplyMomentumCPUKernel::Init() {
   // Only for test with uninitialized Data
   size_t elem_num = in_tensors_[0]->ElementsNum();
-  auto accumulate = reinterpret_cast<float *>(in_tensors_[1]->Data());
-  for (size_t i =0; i < elem_num; i++) accumulate[i] = 0.0;
+  auto accumulate = reinterpret_cast<float *>(in_tensors_[1]->MutableData());
+  for (int i = 0; i < elem_num; i++) accumulate[i] = 0.0;
   workspace = new float[elem_num];
   return 0;
@@ -83,10 +82,11 @@ OpParameter *PopulateApplyMomentumParameter(const lite::Primitive *primitive) {
 }
 #endif
-kernel::LiteKernel *CpuApplyMomentumFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                      const std::vector<lite::tensor::Tensor *> &outputs,
-                                                      OpParameter *opParameter, const lite::Context *ctx,
-                                                      const kernel::KernelKey &desc, const lite::PrimitiveC *primitive) {
+kernel::LiteKernel *CpuApplyMomentumFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                      const std::vector<lite::Tensor *> &outputs,
+                                                      OpParameter *opParameter, const lite::Context *ctx,
+                                                      const kernel::KernelKey &desc,
+                                                      const lite::PrimitiveC *primitive) {
   MS_ASSERT(desc.type == schema::PrimitiveType_ApplyMomentum);
   auto *kernel = new (std::nothrow) ApplyMomentumCPUKernel(opParameter, inputs, outputs, ctx, primitive);
   MS_ASSERT(kernel != nullptr);
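Note on ApplyMomentum above: the five inputs (weight, accumulate, learning_rate, gradient, moment) match the classic momentum-SGD update, which the kernel presumably fuses; the formula below is the textbook version, not a claim about the exact fused code. Also worth flagging: the loop-index change in Init() now compares an int against a size_t bound, which compilers typically warn about. A scalar sketch of the update:

    #include <cstdio>

    // Classic (non-Nesterov) momentum SGD step, one element at a time:
    //   accum = accum * moment + grad
    //   weight -= lr * accum
    static void ApplyMomentumStep(float *weight, float *accum, const float *grad,
                                  float lr, float moment, size_t n) {
      for (size_t i = 0; i < n; ++i) {
        accum[i] = accum[i] * moment + grad[i];
        weight[i] -= lr * accum[i];
      }
    }

    int main() {
      float w[2] = {1.0f, 2.0f}, a[2] = {0.0f, 0.0f}, g[2] = {0.5f, -0.5f};
      ApplyMomentumStep(w, a, g, /*lr=*/0.1f, /*moment=*/0.9f, 2);
      std::printf("%g %g\n", w[0], w[1]);  // 0.95 2.05
      return 0;
    }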
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.h
index c2d9f6ed31..f688fb3fcb 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.h
@@ -24,18 +24,18 @@ namespace mindspore::kernel {
 class ApplyMomentumCPUKernel : public LiteKernel {
  public:
-  explicit ApplyMomentumCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                                  const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
-                                  const mindspore::lite::PrimitiveC *primitive)
+  explicit ApplyMomentumCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                                  const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
+                                  const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
-  ~ApplyMomentumCPUKernel() override {delete [] workspace;}
+  ~ApplyMomentumCPUKernel() override { delete[] workspace; }
   int Init() override;
   int ReSize() override;
   int Run() override;
  private:
-    float *workspace;
+  float *workspace;
 };
 // OpParameter *PopulateApplyMomentumParameter(const lite::Primitive *primitive);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc
index 8901c459c2..d23304f09e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc
@@ -112,16 +112,16 @@ void ArithmeticGradCPUKernel::ArithmeticGradSub(float *dy, int dy_size, float *d
 void ArithmeticGradCPUKernel::ArithmeticGradMul(float *dy, int dy_size, float *dx1, int dx1_size, float *dx2,
                                                 int dx2_size) {
-  auto x1_data = reinterpret_cast<float *>(in_tensors_[1]->Data());
-  auto x2_data = reinterpret_cast<float *>(in_tensors_[2]->Data());
+  auto x1_data = reinterpret_cast<float *>(in_tensors_[1]->MutableData());
+  auto x2_data = reinterpret_cast<float *>(in_tensors_[2]->MutableData());
   ElementMul(dy, x1_data, dx2, dy_size);
   ElementMul(dy, x2_data, dx1, dy_size);
 }
 void ArithmeticGradCPUKernel::ArithmeticGradMul1L(float *dy, int dy_size, float *dx1, int dx1_size, float *dx2,
                                                   int dx2_size) {
-  auto x1_data = reinterpret_cast<float *>(in_tensors_[1]->Data());
-  auto x2_data = reinterpret_cast<float *>(in_tensors_[2]->Data());
+  auto x1_data = reinterpret_cast<float *>(in_tensors_[1]->MutableData());
+  auto x2_data = reinterpret_cast<float *>(in_tensors_[2]->MutableData());
   ElementMul(dy, x1_data, tile_data0, dy_size);
   ReduceSumByAxes(tile_data0, arithmeticParameter_->in_shape0_, dx2, arithmeticParameter_->in_shape1_,
                   arithmeticParameter_->ndim_);
@@ -131,8 +131,8 @@ void ArithmeticGradCPUKernel::ArithmeticGradMul1L(float *dy, int dy_size, float
 void ArithmeticGradCPUKernel::ArithmeticGradMul2L(float *dy, int dy_size, float *dx1, int dx1_size, float *dx2,
                                                   int dx2_size) {
-  auto x1_data = reinterpret_cast<float *>(in_tensors_[1]->Data());
-  auto x2_data = reinterpret_cast<float *>(in_tensors_[2]->Data());
+  auto x1_data = reinterpret_cast<float *>(in_tensors_[1]->MutableData());
+  auto x2_data = reinterpret_cast<float *>(in_tensors_[2]->MutableData());
   ElementMul(dy, x2_data, tile_data0, dy_size);
   ReduceSumByAxes(tile_data0, arithmeticParameter_->in_shape0_, dx1, arithmeticParameter_->in_shape1_,
                   arithmeticParameter_->ndim_);
@@ -142,16 +142,16 @@ void ArithmeticGradCPUKernel::ArithmeticGradMul2L(float *dy, int dy_size, float
 void ArithmeticGradCPUKernel::ArithmeticGradDiv(float *dy, int dy_size, float *dx1, int dx1_size, float *dx2,
                                                 int dx2_size) {
-  auto x1 = reinterpret_cast<float *>(in_tensors_[1]->Data());
-  auto x2 = reinterpret_cast<float *>(in_tensors_[2]->Data());
+  auto x1 = reinterpret_cast<float *>(in_tensors_[1]->MutableData());
+  auto x2 = reinterpret_cast<float *>(in_tensors_[2]->MutableData());
   ElementDiv(dy, x2, dx1, dy_size);
   ElementMulAndDivNegSquare(dy, x1, x2, dx2, dy_size);
 }
 void ArithmeticGradCPUKernel::ArithmeticGradDiv1L(float *dy, int dy_size, float *dx1, int dx1_size, float *dx2,
                                                   int dx2_size) {
-  auto x1_data = reinterpret_cast<float *>(in_tensors_[1]->Data());
-  auto x2_data = reinterpret_cast<float *>(in_tensors_[2]->Data());
+  auto x1_data = reinterpret_cast<float *>(in_tensors_[1]->MutableData());
+  auto x2_data = reinterpret_cast<float *>(in_tensors_[2]->MutableData());
   ElementMul(x2_data, x2_data, dx2, dx2_size);
   ElementMul(x1_data, dy, dx1, dy_size);  // use dx1 buffer
@@ -168,8 +168,8 @@ void ArithmeticGradCPUKernel::ArithmeticGradDiv2L(float *dy, int dy_size, float
                                                   int dx2_size) {
-  auto x1_data = reinterpret_cast<float *>(in_tensors_[1]->Data());
-  auto x2_data = reinterpret_cast<float *>(in_tensors_[2]->Data());
+  auto x1_data = reinterpret_cast<float *>(in_tensors_[1]->MutableData());
+  auto x2_data = reinterpret_cast<float *>(in_tensors_[2]->MutableData());
   // dx1 = dy/x2
   ElementDiv(dy, x2_data, tile_data0, dy_size);  // first multiply into temp
@@ -184,9 +184,9 @@ void ArithmeticGradCPUKernel::ArithmeticGradDiv2L(float *dy, int dy_size, float
 int ArithmeticGradCPUKernel::ReSize() { return RET_OK; }
 int ArithmeticGradCPUKernel::Run() {
-  auto dy = reinterpret_cast<float *>(in_tensors_[0]->Data());
-  auto dx1 = reinterpret_cast<float *>(out_tensors_[0]->Data());
-  auto dx2 = reinterpret_cast<float *>(out_tensors_[1]->Data());
+  auto dy = reinterpret_cast<float *>(in_tensors_[0]->MutableData());
+  auto dx1 = reinterpret_cast<float *>(out_tensors_[0]->MutableData());
+  auto dx2 = reinterpret_cast<float *>(out_tensors_[1]->MutableData());
   size_t dy_size = in_tensors_.at(0)->ElementsNum();
   size_t dx1_size = out_tensors_.at(0)->ElementsNum();
@@ -195,8 +195,8 @@ int ArithmeticGradCPUKernel::Run() {
   return RET_OK;
 }
-kernel::LiteKernel *CpuArithmeticGradFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                       const std::vector<lite::tensor::Tensor *> &outputs,
+kernel::LiteKernel *CpuArithmeticGradFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                       const std::vector<lite::Tensor *> &outputs,
                                                        OpParameter *opParameter, const lite::Context *ctx,
                                                        const kernel::KernelKey &desc,
                                                        const mindspore::lite::PrimitiveC *primitive) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.h
index f5548c05e5..570fae8582 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.h
@@ -36,8 +36,8 @@ class ArithmeticGradCPUKernel : public LiteKernel {
   typedef void (ArithmeticGradCPUKernel::*ArithmeticGradOperation)(float *, int, float *, int, float *, int);
  public:
-  explicit ArithmeticGradCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                                   const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  explicit ArithmeticGradCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                                   const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                                    const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive), tile_data0(NULL), tile_data1(NULL), tile_data2(NULL) {
     switch (Type()) {
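Note on the ArithmeticGrad functions above: for y = x1 * x2 the chain rule gives dx1 = dy * x2 and dx2 = dy * x1 (the two ElementMul calls), and for y = x1 / x2 it gives dx1 = dy / x2 and dx2 = -dy * x1 / x2^2 (the ElementMulAndDivNegSquare helper); the 1L/2L variants additionally ReduceSumByAxes over broadcast axes. A scalar check of those identities:

    #include <cstdio>

    int main() {
      // Chain rule for elementwise mul and div, single element.
      float x1 = 3.0f, x2 = 4.0f, dy = 1.5f;
      float mul_dx1 = dy * x2;               // 6
      float mul_dx2 = dy * x1;               // 4.5
      float div_dx1 = dy / x2;               // 0.375
      float div_dx2 = -dy * x1 / (x2 * x2);  // -0.28125
      std::printf("%g %g %g %g\n", mul_dx1, mul_dx2, div_dx1, div_dx2);
      return 0;
    }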
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc
index f6f914c34e..0265a26b42 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc
@@ -51,8 +51,8 @@ int BiasGradCPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare failed.";
     return RET_ERROR;
   }
-  auto in = reinterpret_cast<float *>(in_tensors_.at(0)->Data());
-  auto out = reinterpret_cast<float *>(out_tensors_.at(0)->Data());
+  auto in = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
+  auto out = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
   size_t nhw_size = 1;
   size_t channels = bias_param->in_shape0_[bias_param->ndim_ - 1];  // C in NHWC
@@ -69,10 +69,9 @@ int BiasGradCPUKernel::Run() {
   return RET_OK;
 }
-kernel::LiteKernel *CpuBiasGradFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                 const std::vector<lite::tensor::Tensor *> &outputs,
-                                                 OpParameter *opParameter, const lite::Context *ctx,
-                                                 const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuBiasGradFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                 const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                                 const lite::Context *ctx, const kernel::KernelKey &desc,
                                                  const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(opParameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_BiasGrad);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.h
index 99427556f9..e7a8b40a0b 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.h
@@ -26,8 +26,8 @@ namespace mindspore::kernel {
 class BiasGradCPUKernel : public LiteKernel {
  public:
-  explicit BiasGradCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                             const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  explicit BiasGradCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                             const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                              const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {
     bias_param = reinterpret_cast<ArithmeticParameter *>(parameter);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc
index 6aba3fe8c6..00d57b65f9 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc
@@ -93,12 +93,12 @@ int BNGradCPUKernel::Run() {
   float *variance_delta = mean_delta + channels;
   float *mean_add_delta = variance_delta + channels;
-  float *x = reinterpret_cast<float *>(input_x->Data());
-  float *yt = reinterpret_cast<float *>(input_yt->Data());
-  float *scale = reinterpret_cast<float *>(input_scale->Data());
-  float *dx = reinterpret_cast<float *>(output_dx->Data());
-  float *dscale = reinterpret_cast<float *>(output_scale->Data());
-  float *dbias = reinterpret_cast<float *>(output_bias->Data());
+  float *x = reinterpret_cast<float *>(input_x->MutableData());
+  float *yt = reinterpret_cast<float *>(input_yt->MutableData());
+  float *scale = reinterpret_cast<float *>(input_scale->MutableData());
+  float *dx = reinterpret_cast<float *>(output_dx->MutableData());
+  float *dscale = reinterpret_cast<float *>(output_scale->MutableData());
+  float *dbias = reinterpret_cast<float *>(output_bias->MutableData());
   std::copy(yt, yt + batch * channels * spatial, dx);
   meanVar(x, batch, spatial, channels, eps, mean, invar);
@@ -114,10 +114,9 @@ int BNGradCPUKernel::Run() {
   return RET_OK;
 }
-kernel::LiteKernel *CpuBNGradFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                               const std::vector<lite::tensor::Tensor *> &outputs,
-                                               OpParameter *opParameter, const lite::Context *ctx,
-                                               const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuBNGradFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                               const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                               const lite::Context *ctx, const kernel::KernelKey &desc,
                                                const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(opParameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_BNGrad);
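Note on BNGrad above: the kernel carves a single workspace into channel-sized scratch slices (mean, invar, mean_delta, variance_delta, mean_add_delta) by pointer bumps; the first slices and the allocation itself fall outside this hunk, so five slices is an assumption here. A sketch of that layout with the arithmetic made explicit:

    #include <cstdio>
    #include <vector>

    int main() {
      const int channels = 8;
      // One contiguous workspace, sliced into per-channel scratch buffers.
      std::vector<float> workspace(5 * channels, 0.0f);
      float *mean = workspace.data();
      float *invar = mean + channels;
      float *mean_delta = invar + channels;
      float *variance_delta = mean_delta + channels;
      float *mean_add_delta = variance_delta + channels;
      std::printf("offsets: %td %td %td %td %td\n", mean - workspace.data(),
                  invar - workspace.data(), mean_delta - workspace.data(),
                  variance_delta - workspace.data(),
                  mean_add_delta - workspace.data());  // 0 8 16 24 32
      return 0;
    }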
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.h
index b0aecc7b31..c01ade2bf9 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.h
@@ -21,16 +21,13 @@
 #include "src/lite_kernel.h"
 #include "ir/anf.h"
-
 namespace mindspore::kernel {
-
-
 class BNGradCPUKernel : public LiteKernel {
  public:
-  explicit BNGradCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                           const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
-                           const mindspore::lite::PrimitiveC *primitive)
+  explicit BNGradCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                           const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
+                           const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~BNGradCPUKernel() override { delete workspace; }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.cc
index 30c4294ad7..b27a264ef7 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.cc
@@ -61,9 +61,9 @@ int ConvolutionTrainCPUKernel::Run() {
   auto *input_w = in_tensors_.at(kWeightIndex);
   auto *out_y = out_tensors_.at(kOutputIndex);
-  auto x_addr = reinterpret_cast<float *>(input_x->Data());
-  auto y_addr = reinterpret_cast<float *>(out_y->Data());
-  auto w_addr = reinterpret_cast<float *>(input_w->Data());
+  auto x_addr = reinterpret_cast<float *>(input_x->MutableData());
+  auto y_addr = reinterpret_cast<float *>(out_y->MutableData());
+  auto w_addr = reinterpret_cast<float *>(input_w->MutableData());
   int i, j;
   int nweights = input_w->ElementsNum();
@@ -98,10 +98,10 @@ int ConvolutionTrainCPUKernel::Run() {
   return RET_OK;
 }
-kernel::LiteKernel *CpuConvTrainFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                  const std::vector<lite::tensor::Tensor *> &outputs,
-                                                  OpParameter *opParameter, const lite::Context *ctx,
-                                                  const kernel::KernelKey &desc, const lite::PrimitiveC *primitive) {
+kernel::LiteKernel *CpuConvTrainFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                  const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                                  const lite::Context *ctx, const kernel::KernelKey &desc,
+                                                  const lite::PrimitiveC *primitive) {
   MS_ASSERT(opParameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_Conv2D);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.h
index 5a44c11e3a..f5947b324f 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.h
@@ -24,11 +24,11 @@ namespace mindspore::kernel {
 class ConvolutionTrainCPUKernel : public LiteKernel {
  public:
-  explicit ConvolutionTrainCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                                     const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
-                                     const lite::PrimitiveC *primitive)
+  explicit ConvolutionTrainCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                                     const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
+                                     const lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
-  ~ConvolutionTrainCPUKernel() override { delete [] workspace; }
+  ~ConvolutionTrainCPUKernel() override { delete[] workspace; }
   int Init() override;
   int ReSize() override;
@@ -38,10 +38,10 @@ class ConvolutionTrainCPUKernel : public LiteKernel {
   float *workspace;
 };
-kernel::LiteKernel *CpuConvTrainFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                  const std::vector<lite::tensor::Tensor *> &outputs,
-                                                  OpParameter *opParameter, const lite::Context *ctx,
-                                                  const kernel::KernelKey &desc, const lite::PrimitiveC *primitive);
+kernel::LiteKernel *CpuConvTrainFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                  const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                                  const lite::Context *ctx, const kernel::KernelKey &desc,
+                                                  const lite::PrimitiveC *primitive);
 }  // namespace mindspore::kernel
 #endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GRAD_CONVOLUTION_H_
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc
index 95ee076756..3134deec3c 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc
@@ -78,9 +78,9 @@ int ConvolutionGradFilterCPUKernel::Run() {
   auto *input_x = in_tensors_.at(1);
   auto *out_dw = out_tensors_.at(0);
-  auto x_addr = reinterpret_cast<float *>(input_x->Data());
-  auto dy_addr = reinterpret_cast<float *>(input_dy->Data());
-  auto dw_addr = reinterpret_cast<float *>(out_dw->Data());
+  auto x_addr = reinterpret_cast<float *>(input_x->MutableData());
+  auto dy_addr = reinterpret_cast<float *>(input_dy->MutableData());
+  auto dw_addr = reinterpret_cast<float *>(out_dw->MutableData());
   int i, j;
   int nweights = out_dw->ElementsNum();
@@ -159,8 +159,8 @@ OpParameter *PopulateConvolutionGradFilterParameter(const lite::Primitive *primi
   return reinterpret_cast<OpParameter *>(param);
 }
 #endif
-kernel::LiteKernel *CpuConvGradFilterFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                       const std::vector<lite::tensor::Tensor *> &outputs,
+kernel::LiteKernel *CpuConvGradFilterFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                       const std::vector<lite::Tensor *> &outputs,
                                                        OpParameter *opParameter, const lite::Context *ctx,
                                                        const kernel::KernelKey &desc,
                                                        const mindspore::lite::PrimitiveC *primitive) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.h
index efc1cb6604..5d1da23efc 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.h
@@ -1,4 +1,4 @@
- /**
+/**
  * Copyright 2019 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -24,11 +24,11 @@ namespace mindspore::kernel {
 class ConvolutionGradFilterCPUKernel : public LiteKernel {
  public:
-  explicit ConvolutionGradFilterCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                                          const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  explicit ConvolutionGradFilterCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                                          const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                                           const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
-  ~ConvolutionGradFilterCPUKernel() override { delete [] workspace; }
+  ~ConvolutionGradFilterCPUKernel() override { delete[] workspace; }
   int Init() override;
   int ReSize() override;
@@ -38,7 +38,6 @@ class ConvolutionGradFilterCPUKernel : public LiteKernel {
   float *workspace = nullptr;
 };
-
 }  // namespace mindspore::kernel
 #endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GRAD_CONVOLUTION_GRAD_FILTER_H_
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc
index 0d2a4faf25..d31a14bcbb 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc
@@ -76,9 +76,9 @@ int ConvolutionGradInputCPUKernel::Run() {
   auto *input_w = in_tensors_.at(1);
   auto *out_dx = out_tensors_.at(0);
-  auto dy_addr = reinterpret_cast<float *>(input_dy->Data());
-  auto w_addr = reinterpret_cast<float *>(input_w->Data());
-  auto dx_addr = reinterpret_cast<float *>(out_dx->Data());
+  auto dy_addr = reinterpret_cast<float *>(input_dy->MutableData());
+  auto w_addr = reinterpret_cast<float *>(input_w->MutableData());
+  auto dx_addr = reinterpret_cast<float *>(out_dx->MutableData());
   int i, j;
   int nweights = input_w->ElementsNum();
@@ -156,8 +156,8 @@ OpParameter *PopulateConvolutionGradInputParameter(const lite::Primitive *primit
 }
 #endif
-kernel::LiteKernel *CpuConvGradInputFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                      const std::vector<lite::tensor::Tensor *> &outputs,
+kernel::LiteKernel *CpuConvGradInputFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                      const std::vector<lite::Tensor *> &outputs,
                                                       OpParameter *opParameter, const lite::Context *ctx,
                                                       const kernel::KernelKey &desc,
                                                       const mindspore::lite::PrimitiveC *primitive) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.h
index 93930fded8..3c091f3e82 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.h
@@ -24,11 +24,11 @@ namespace mindspore::kernel {
 class ConvolutionGradInputCPUKernel : public LiteKernel {
  public:
-  explicit ConvolutionGradInputCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                                         const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  explicit ConvolutionGradInputCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                                         const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                                          const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
-  ~ConvolutionGradInputCPUKernel() override { delete [] workspace; }
+  ~ConvolutionGradInputCPUKernel() override { delete[] workspace; }
   int Init() override;
   int ReSize() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/depend.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/depend.cc
index a9bc417462..811c43c2d7 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/depend.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/depend.cc
@@ -28,9 +28,7 @@ using mindspore::schema::PrimitiveType_Depend;
 namespace mindspore::kernel {
-int DependCPUKernel::Init() {
-  return RET_OK;
-}
+int DependCPUKernel::Init() { return RET_OK; }
 int DependCPUKernel::ReSize() { return 0; }
@@ -41,22 +39,21 @@ int DependCPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare failed.";
     return RET_ERROR;
   }
-  auto in = reinterpret_cast<float *>(in_tensors_.at(0)->Data());
-  auto out = reinterpret_cast<float *>(out_tensors_.at(0)->Data());
+  auto in = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
+  auto out = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
   memcpy(out, in, in_tensors_.at(0)->Size());
 #endif
   return RET_OK;
 }
-kernel::LiteKernel *CpuDependFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                               const std::vector<lite::tensor::Tensor *> &outputs,
-                                               OpParameter *opParameter, const lite::Context *ctx,
-                                               const kernel::KernelKey &desc, const lite::PrimitiveC *primitive) {
+kernel::LiteKernel *CpuDependFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                               const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                               const lite::Context *ctx, const kernel::KernelKey &desc,
+                                               const lite::PrimitiveC *primitive) {
   MS_ASSERT(opParameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_Depend);
-  auto *kernel =
-    new (std::nothrow) DependCPUKernel(opParameter, inputs, outputs, ctx, primitive);
+  auto *kernel = new (std::nothrow) DependCPUKernel(opParameter, inputs, outputs, ctx, primitive);
   MS_ASSERT(kernel != nullptr);
   auto ret = kernel->Init();
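The Depend creator above follows the registration pattern repeated by every kernel in this patch: validate the OpParameter, allocate with new (std::nothrow), run Init(), and delete on failure so no half-initialized kernel escapes. A condensed, framework-free sketch of that contract (all names here are illustrative, not the LiteKernel API):

    #include <cstdio>
    #include <new>

    struct Kernel {
      virtual ~Kernel() = default;
      virtual int Init() { return 0; }  // 0 == RET_OK
    };

    // Minimal creator shape: null-check, nothrow-allocate, Init-or-delete.
    template <typename K>
    Kernel *CreateKernel(const void *param) {
      if (param == nullptr) return nullptr;
      Kernel *kernel = new (std::nothrow) K();
      if (kernel == nullptr) return nullptr;
      if (kernel->Init() != 0) {
        delete kernel;
        return nullptr;
      }
      return kernel;
    }

    int main() {
      int dummy_param = 0;
      Kernel *k = CreateKernel<Kernel>(&dummy_param);
      std::printf("created: %s\n", k ? "yes" : "no");
      delete k;
      return 0;
    }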
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/depend.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/depend.h
index 2b222ecbaf..3b32a3804b 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/depend.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/depend.h
@@ -26,9 +26,9 @@ namespace mindspore::kernel {
 class DependCPUKernel : public LiteKernel {
  public:
-  explicit DependCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                           const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
-                           const lite::PrimitiveC *primitive)
+  explicit DependCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                           const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
+                           const lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {
     param = parameter;
   }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/make_tuple.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/make_tuple.h
index bbbc22f902..176ea09858 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/make_tuple.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/make_tuple.h
@@ -26,9 +26,9 @@ namespace mindspore::kernel {
 class MakeTupleCPUKernel : public LiteKernel {
  public:
-  explicit MakeTupleCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                              const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
-                              const lite::Primitive *primitive)
+  explicit MakeTupleCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                              const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
+                              const lite::Primitive *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {
     param = parameter;
   }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc
index 9c6c4b90fd..ccee7786e6 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc
@@ -73,12 +73,12 @@ int PoolingGradCPUKernel::Run() {
     return prepare_ret;
   }
   PoolingParameter *pool_param = reinterpret_cast<PoolingParameter *>(op_parameter_);
-  auto input_ptr = reinterpret_cast<float *>(in_tensors_.at(0)->Data());
-  auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(0)->Data());
+  auto input_ptr = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
+  auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
   if (pool_param->pool_mode_ == PoolMode_MaxPool) {
-    auto dx_ptr = reinterpret_cast<float *>(in_tensors_.at(1)->Data());
-    auto dy_ptr = reinterpret_cast<float *>(in_tensors_.at(2)->Data());
+    auto dx_ptr = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());
+    auto dy_ptr = reinterpret_cast<float *>(in_tensors_.at(2)->MutableData());
     MaxPoolingGrad(input_ptr, dx_ptr, dy_ptr, output_ptr, pool_param);
   } else {
     AvgPoolingGrad(input_ptr, output_ptr, pool_param);
@@ -86,8 +86,8 @@ int PoolingGradCPUKernel::Run() {
   return RET_OK;
 }
-kernel::LiteKernel *CpuPoolingGradFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                    const std::vector<lite::tensor::Tensor *> &outputs,
+kernel::LiteKernel *CpuPoolingGradFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                    const std::vector<lite::Tensor *> &outputs,
                                                     OpParameter *opParameter, const lite::Context *ctx,
                                                     const kernel::KernelKey &desc,
                                                     const mindspore::lite::PrimitiveC *primitive) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.h
index 7b26658dce..ae97ba28e7 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.h
@@ -29,13 +29,12 @@ using mindspore::schema::RoundMode;
 class PoolingGradCPUKernel : public LiteKernel {
  public:
-  explicit PoolingGradCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                                const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  explicit PoolingGradCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                                const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                                 const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~PoolingGradCPUKernel() override = default;
-
   int Init() override;
   int ReSize() override;
   int Run() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc
index 0b5ba98281..2bef37ba1e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc
@@ -31,9 +31,9 @@ int PowerGradCPUKernel::Init() { return RET_OK; }
 int PowerGradCPUKernel::ReSize() { return RET_OK; }
 int PowerGradCPUKernel::Run() {
-  auto dy_addr = reinterpret_cast<float *>(in_tensors_.at(0)->Data());
-  auto x_addr = reinterpret_cast<float *>(in_tensors_.at(1)->Data());
-  auto dx_addr = reinterpret_cast<float *>(out_tensors_.at(0)->Data());
+  auto dy_addr = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
+  auto x_addr = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());
+  auto dx_addr = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
   auto size = in_tensors_.at(0)->ElementsNum();
   float exp = power_ - 1;
@@ -47,11 +47,9 @@ int PowerGradCPUKernel::Run() {
   return RET_OK;
 }
-
-kernel::LiteKernel *CpuPowerGradFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                  const std::vector<lite::tensor::Tensor *> &outputs,
-                                                  OpParameter *opParameter, const lite::Context *ctx,
-                                                  const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuPowerGradFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                  const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                                  const lite::Context *ctx, const kernel::KernelKey &desc,
                                                   const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(opParameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_PowerGrad);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.h
index 5d4104d5e3..16a46e99f3 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.h
@@ -25,8 +25,8 @@ namespace mindspore::kernel {
 class PowerGradCPUKernel : public LiteKernel {
  public:
-  PowerGradCPUKernel(OpParameter *param, const std::vector<lite::tensor::Tensor *> &inputs,
-                     const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  PowerGradCPUKernel(OpParameter *param, const std::vector<lite::Tensor *> &inputs,
+                     const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                      const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(param, inputs, outputs, ctx, primitive) {
     PowerParameter *power_param = reinterpret_cast<PowerParameter *>(param);
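Note on PowerGrad above: `float exp = power_ - 1;` is the exponent of the derivative, since for y = x^p the chain rule gives dx = dy * p * x^(p-1) (the kernel's PowerParameter also carries scale/shift fields, whose handling is outside this hunk, so the scale=1, shift=0 case is assumed below). A scalar check against a finite difference:

    #include <cmath>
    #include <cstdio>

    int main() {
      // d/dx x^p = p * x^(p-1); checked against a central difference.
      const float p = 3.0f, x = 2.0f, dy = 1.0f;
      float analytic = dy * p * std::pow(x, p - 1);  // 12
      const float h = 1e-3f;
      float numeric = dy * (std::pow(x + h, p) - std::pow(x - h, p)) / (2 * h);
      std::printf("analytic=%g numeric=%g\n", analytic, numeric);
      return 0;
    }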
float *out = reinterpret_cast(out_tensors_.at(0)->MutableData()); float *grads = NULL; if (is_train() && out_tensors_.size() > 1) { - grads = reinterpret_cast(out_tensors_.at(1)->Data()); + grads = reinterpret_cast(out_tensors_.at(1)->MutableData()); } size_t data_size = in_tensors_.at(0)->ElementsNum(); float *losses = new (std::nothrow) float[data_size]; @@ -143,8 +143,8 @@ int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::Init() { return RET_OK; } -kernel::LiteKernel *CpuSoftmaxCrossEntropyFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, +kernel::LiteKernel *CpuSoftmaxCrossEntropyFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.h index d961f06960..991329736b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.h @@ -29,17 +29,20 @@ namespace mindspore::kernel { class SparseSoftmaxCrossEntropyWithLogitsCPUKernel : public LossKernel { public: explicit SparseSoftmaxCrossEntropyWithLogitsCPUKernel(OpParameter *parameter, - const std::vector &inputs, - const std::vector &outputs, + const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LossKernel(parameter, inputs, outputs, ctx, primitive) { param = reinterpret_cast(parameter); } - ~SparseSoftmaxCrossEntropyWithLogitsCPUKernel() override { delete[] losses_; delete[] sum_data_; } + ~SparseSoftmaxCrossEntropyWithLogitsCPUKernel() override { + delete[] losses_; + delete[] sum_data_; + } void ForwardPostExecute(const int *labels, const float *losses, float *output) const; - void GradPostExecute(const int *labels, const float *losses, float* grads, float *output) const; + void GradPostExecute(const int *labels, const float *losses, float *grads, float *output) const; int Init() override; int ReSize() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/tuple_getitem.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/tuple_getitem.cc index 34b78f31b8..47f4427c58 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/tuple_getitem.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/tuple_getitem.cc @@ -28,9 +28,7 @@ using mindspore::schema::PrimitiveType_TupleGetItem; namespace mindspore::kernel { -int TupleGetItemCPUKernel::Init() { - return RET_OK; -} +int TupleGetItemCPUKernel::Init() { return RET_OK; } int TupleGetItemCPUKernel::ReSize() { return 0; } @@ -40,22 +38,21 @@ int TupleGetItemCPUKernel::Run() { MS_LOG(ERROR) << "Prepare failed."; return RET_ERROR; } - auto in = reinterpret_cast(in_tensors_.at(0)->Data()); - auto out = reinterpret_cast(out_tensors_.at(0)->Data()); + auto in = reinterpret_cast(in_tensors_.at(0)->MutableData()); + auto out = reinterpret_cast(out_tensors_.at(0)->MutableData()); memcpy(out, in, in_tensors_.at(0)->Size()); return RET_OK; } -kernel::LiteKernel *CpuTupleGetItemFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::PrimitiveC *primitive) { +kernel::LiteKernel 
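
For context on ForwardPostExecute above: with logits x and integer labels y, the loss the kernel accumulates is the usual loss_i = -log(softmax(x_i)[y_i]). A hedged reference implementation (stable max-subtraction form; not the kernel's actual loop):

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// Reference only: loss[i] = -log( exp(x[i][y_i]) / sum_c exp(x[i][c]) ),
// computed stably by subtracting the row maximum before exponentiating.
float SparseSoftmaxCrossEntropy(const std::vector<std::vector<float>> &logits,
                                const std::vector<int> &labels) {
  float total = 0.0f;
  for (size_t i = 0; i < logits.size(); ++i) {
    float max_v = logits[i][0];
    for (float v : logits[i]) max_v = std::max(max_v, v);
    float sum = 0.0f;
    for (float v : logits[i]) sum += std::exp(v - max_v);
    total += -(logits[i][labels[i]] - max_v - std::log(sum));
  }
  return total / static_cast<float>(logits.size());  // mean loss over the batch
}
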
*CpuTupleGetItemFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, + OpParameter *opParameter, const lite::Context *ctx, + const kernel::KernelKey &desc, const lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_TupleGetItem); - auto *kernel = - new (std::nothrow) TupleGetItemCPUKernel(opParameter, inputs, outputs, ctx, primitive); + auto *kernel = new (std::nothrow) TupleGetItemCPUKernel(opParameter, inputs, outputs, ctx, primitive); MS_ASSERT(kernel != nullptr); auto ret = kernel->Init(); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/tuple_getitem.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/tuple_getitem.h index 27100ecaaf..34a85849be 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/tuple_getitem.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/tuple_getitem.h @@ -26,9 +26,9 @@ namespace mindspore::kernel { class TupleGetItemCPUKernel : public LiteKernel { public: - explicit TupleGetItemCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, - const lite::PrimitiveC *primitive) + explicit TupleGetItemCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, + const lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) { param = parameter; } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/activation.cc b/mindspore/lite/src/runtime/kernel/arm/int8/activation.cc index 3041732aa3..d4a2f5e26a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/activation.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/activation.cc @@ -30,10 +30,9 @@ using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Activation; namespace mindspore::kernel { -kernel::LiteKernel *CpuActivationInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *parameter, const lite::Context *ctx, - const KernelKey &desc, +kernel::LiteKernel *CpuActivationInt8KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *parameter, + const lite::Context *ctx, const KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (parameter == nullptr) { MS_LOG(ERROR) << "parameter is nullptr"; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.cc index f7eea6ccd1..c61308d5a2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.cc @@ -29,9 +29,9 @@ using mindspore::schema::PrimitiveType_Add; namespace mindspore::kernel { int QuantizedAddCPUKernel::Init() { - lite::tensor::Tensor *input0 = in_tensors_.at(0); - lite::tensor::Tensor *input1 = in_tensors_.at(1); - lite::tensor::Tensor *output = out_tensors_.at(0); + lite::Tensor *input0 = in_tensors_.at(0); + lite::Tensor *input1 = in_tensors_.at(1); + lite::Tensor *output = out_tensors_.at(0); MS_ASSERT(input0); MS_ASSERT(input1); MS_ASSERT(output); @@ -81,9 +81,9 @@ int QuantizedAddCPUKernel::Run() { MS_LOG(ERROR) << "Prepare failed."; return RET_ERROR; } - input0_data_ = static_cast(in_tensors_.at(0)->Data()); - input1_data_ = static_cast(in_tensors_.at(1)->Data()); - output_data_ = static_cast(out_tensors_.at(0)->Data()); + input0_data_ = static_cast(in_tensors_.at(0)->MutableData()); + input1_data_ = static_cast(in_tensors_.at(1)->MutableData()); + output_data_ = 
static_cast(out_tensors_.at(0)->MutableData()); elements_num_ = in_tensors_.at(0)->ElementsNum(); count_unit_ = thread_count_ > 1 ? UP_DIV(elements_num_, thread_count_) : elements_num_; @@ -99,9 +99,10 @@ int QuantizedAddCPUKernel::Run() { tile_para.in_shape1_[i] = in_tensors_.at(1)->DimensionSize(i); tile_para.out_shape_[i] = out_tensors_.at(0)->DimensionSize(i); } - TileDimensionsUint8(static_cast(in_tensors_.at(0)->Data()), - static_cast(in_tensors_.at(1)->Data()), reinterpret_cast(input0_data_), - reinterpret_cast(input1_data_), &tile_para); + TileDimensionsUint8(static_cast(in_tensors_.at(0)->MutableData()), + static_cast(in_tensors_.at(1)->MutableData()), + reinterpret_cast(input0_data_), reinterpret_cast(input1_data_), + &tile_para); ret = ParallelLaunch(THREAD_POOL_DEFAULT, AddInt8Run, this, thread_count_); ctx_->allocator->Free(input0_data_); ctx_->allocator->Free(input1_data_); @@ -128,8 +129,8 @@ int QuantizedAddCPUKernel::DoExecute(int tId) { return lite::RET_OK; } -kernel::LiteKernel *CpuAddInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, OpParameter *parameter, +kernel::LiteKernel *CpuAddInt8KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *parameter, const lite::Context *ctx, const KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (parameter == nullptr || ctx == nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.h index 77d76fbc18..64b22045c6 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.h @@ -24,8 +24,8 @@ namespace mindspore::kernel { class QuantizedAddCPUKernel : public LiteKernel { public: - explicit QuantizedAddCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + explicit QuantizedAddCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx_->thread_num_) {} ~QuantizedAddCPUKernel() override {} diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.cc index 02ebbee4b9..5e9b629659 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.cc @@ -56,8 +56,8 @@ int ArgMinMaxInt8CPUKernel::Run() { } auto input = in_tensors_.at(0); - const int8_t *input_data = reinterpret_cast(in_tensors_.at(0)->Data()); - int8_t *output_data = reinterpret_cast(out_tensors_.at(0)->Data()); + const int8_t *input_data = reinterpret_cast(in_tensors_.at(0)->MutableData()); + int8_t *output_data = reinterpret_cast(out_tensors_.at(0)->MutableData()); auto in_shape = input->shape().data(); auto param = reinterpret_cast(op_parameter_); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.h index 0fa4166d4f..47b12ec30c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.h @@ -23,8 +23,8 @@ namespace mindspore::kernel { class ArgMinMaxInt8CPUKernel : public ArgMinMaxBaseCPUKernel { public: - ArgMinMaxInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, 
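
When broadcasting_ is set, TileDimensionsUint8 above materializes both operands at the full output shape so AddInt8Run can stay a flat elementwise loop. An illustrative (assumed) reduction of that idea to a single broadcast axis:

#include <cstdint>

// Illustrative only: repeat a [d0][1] column so it matches a [d0][d1]
// output, mimicking numpy-style broadcasting for the innermost axis.
// The real helper handles all four dimensions of ArithmeticParameter.
void TileLastDimUint8(const uint8_t *in, uint8_t *out, int d0, int d1) {
  for (int i = 0; i < d0; ++i) {
    for (int j = 0; j < d1; ++j) {
      out[i * d1 + j] = in[i];  // same source value for every j
    }
  }
}
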
const lite::Context *ctx, + ArgMinMaxInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : ArgMinMaxBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.cc index 02fa869545..dc5e1c57bd 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.cc @@ -94,14 +94,12 @@ int ArithmeticInt8CPUKernel::Init() { return ReSize(); } -int ArithmeticInt8CPUKernel::ReSize() { - return RET_OK; -} +int ArithmeticInt8CPUKernel::ReSize() { return RET_OK; } int ArithmeticInt8CPUKernel::DoArithmetic(int thread_id) { - auto input0_data = reinterpret_cast(in_tensors_[0]->Data()); - auto input1_data1 = reinterpret_cast(in_tensors_[1]->Data()); - auto output_data = reinterpret_cast(out_tensors_[0]->Data()); + auto input0_data = reinterpret_cast(in_tensors_[0]->MutableData()); + auto input1_data1 = reinterpret_cast(in_tensors_[1]->MutableData()); + auto output_data = reinterpret_cast(out_tensors_[0]->MutableData()); auto element_num = out_tensors_[0]->ElementsNum(); auto param = reinterpret_cast(op_parameter_); if (param->broadcasting_ && arithmetic_run_ != nullptr) { @@ -139,8 +137,8 @@ int ArithmeticInt8CPUKernel::Run() { } auto param = reinterpret_cast(op_parameter_); if (param->broadcasting_) { - auto input_data0 = reinterpret_cast(in_tensors_[0]->Data()); - auto input_data1 = reinterpret_cast(in_tensors_[1]->Data()); + auto input_data0 = reinterpret_cast(in_tensors_[0]->MutableData()); + auto input_data1 = reinterpret_cast(in_tensors_[1]->MutableData()); tile_data0_ = reinterpret_cast(context_->allocator->Malloc(out_tensors_[0]->Size())); tile_data1_ = reinterpret_cast(context_->allocator->Malloc(out_tensors_[0]->Size())); if (tile_data0_ == nullptr || tile_data1_ == nullptr) { @@ -162,10 +160,9 @@ int ArithmeticInt8CPUKernel::Run() { return ret; } -kernel::LiteKernel *CpuArithmeticInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *parameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuArithmeticInt8KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *parameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (parameter == nullptr) { MS_LOG(ERROR) << "Input parameter is null!"; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.h index 104b742060..df9aafb6c7 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.h @@ -28,8 +28,8 @@ class ArithmeticInt8CPUKernel : public LiteKernel { ArithmeticQuantArg *quant_arg); public: - ArithmeticInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + ArithmeticInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~ArithmeticInt8CPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc 
b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc index 43c3a36123..da306907f8 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc @@ -102,8 +102,8 @@ int ArithmeticSelfInt8CPUKernel::Run() { } auto input_tensor = in_tensors_.at(0); auto out_tensor = out_tensors_.at(0); - in_ptr_ = reinterpret_cast(input_tensor->Data()); - out_ptr_ = reinterpret_cast(out_tensor->Data()); + in_ptr_ = reinterpret_cast(input_tensor->MutableData()); + out_ptr_ = reinterpret_cast(out_tensor->MutableData()); ret = ParallelLaunch(THREAD_POOL_DEFAULT, ArithmeticSelfInt8Runs, this, thread_sz_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "ArithmeticSelfRun error error_code[" << ret << "]"; @@ -112,8 +112,8 @@ int ArithmeticSelfInt8CPUKernel::Run() { return RET_OK; } -kernel::LiteKernel *CpuArithmeticSelfInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, +kernel::LiteKernel *CpuArithmeticSelfInt8KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.h index 79e15be205..db577fe84d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.h @@ -42,8 +42,8 @@ class ArithmeticSelfInt8CPUKernel : public LiteKernel { typedef int (*ArithmeticSelfInt8Run)(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); public: - explicit ArithmeticSelfInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const Context *ctx, + explicit ArithmeticSelfInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) { switch (parameter->type_) { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.cc index 574c385782..31c0d14445 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.cc @@ -54,8 +54,8 @@ int BatchToSpaceInt8CPUKernel::Run() { } auto input = in_tensors_[0]; auto output = out_tensors_[0]; - const int8_t *input_data = reinterpret_cast(input->Data()); - int8_t *output_data = reinterpret_cast(output->Data()); + const int8_t *input_data = reinterpret_cast(input->MutableData()); + int8_t *output_data = reinterpret_cast(output->MutableData()); auto in_shape = input->shape(); auto out_shape = output->shape(); BatchToSpaceParameter *param = reinterpret_cast(this->op_parameter_); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.h index 0f37d68903..0c59f8efb5 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.h @@ -22,8 +22,8 @@ namespace mindspore::kernel { class BatchToSpaceInt8CPUKernel : public BatchToSpaceBaseCPUKernel { public: - BatchToSpaceInt8CPUKernel(OpParameter *parameter, const std::vector 
&inputs, - const std::vector &outputs, const lite::Context *ctx, + BatchToSpaceInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : BatchToSpaceBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/batchnorm_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/batchnorm_int8.cc index 86ab079f56..a332c193f1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/batchnorm_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/batchnorm_int8.cc @@ -47,8 +47,8 @@ int BatchnormInt8CPUKernel::InitConstTensor() { auto variance = in_tensors_[2]; auto output = out_tensors_[0]; - auto mean_ptr = reinterpret_cast(mean->Data()); - auto var_ptr = reinterpret_cast(variance->Data()); + auto mean_ptr = reinterpret_cast(mean->MutableData()); + auto var_ptr = reinterpret_cast(variance->MutableData()); alpha_addr_ = reinterpret_cast(malloc(mean->ElementsNum() * sizeof(float))); if (alpha_addr_ == nullptr) { MS_LOG(ERROR) << "Malloc buffer failed."; @@ -88,10 +88,10 @@ int BatchnormInt8CPUKernel::InitFusedConstTensor() { auto variance = in_tensors_[4]; auto output = out_tensors_[0]; - auto scale_ptr = reinterpret_cast(scale->Data()); - auto offset_ptr = reinterpret_cast(offset->Data()); - auto mean_ptr = reinterpret_cast(mean->Data()); - auto var_ptr = reinterpret_cast(variance->Data()); + auto scale_ptr = reinterpret_cast(scale->MutableData()); + auto offset_ptr = reinterpret_cast(offset->MutableData()); + auto mean_ptr = reinterpret_cast(mean->MutableData()); + auto var_ptr = reinterpret_cast(variance->MutableData()); alpha_addr_ = reinterpret_cast(malloc(mean->ElementsNum() * sizeof(float))); if (alpha_addr_ == nullptr) { @@ -190,8 +190,8 @@ int BatchnormInt8CPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail! 
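
The alpha_addr_/beta-style buffers built in InitConstTensor and InitFusedConstTensor implement the standard batchnorm fold, so Run() applies one multiply-add per element. As a sketch (eps handling and the int8 quantization scales omitted): alpha = gamma / sqrt(var + eps), beta = offset - mean * alpha, giving y = alpha * x + beta.

#include <cmath>

// Per-channel fold of y = gamma * (x - mean) / sqrt(var + eps) + offset
// into y = alpha * x + beta. The int8 kernel additionally folds the
// input/output quantization scales into alpha and beta.
void FoldBatchNorm(const float *gamma, const float *offset, const float *mean,
                   const float *var, float eps, int channels,
                   float *alpha, float *beta) {
  for (int c = 0; c < channels; ++c) {
    alpha[c] = gamma[c] / std::sqrt(var[c] + eps);
    beta[c] = offset[c] - mean[c] * alpha[c];
  }
}
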
Ret error code: " << prepare_ret; return prepare_ret; } - in_addr_ = reinterpret_cast(in_tensors_.at(0)->Data()); - out_addr_ = reinterpret_cast(out_tensors_.at(0)->Data()); + in_addr_ = reinterpret_cast(in_tensors_.at(0)->MutableData()); + out_addr_ = reinterpret_cast(out_tensors_.at(0)->MutableData()); int ret = ParallelLaunch(THREAD_POOL_DEFAULT, BatchNormInt8Run, this, batchnorm_param_->op_parameter_.thread_num_); if (ret != RET_OK) { @@ -201,10 +201,9 @@ int BatchnormInt8CPUKernel::Run() { return RET_OK; } -kernel::LiteKernel *CpuBatchnormInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuBatchnormInt8KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_BatchNorm); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/batchnorm_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/batchnorm_int8.h index d1b92d62f8..22f933703d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/batchnorm_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/batchnorm_int8.h @@ -28,8 +28,8 @@ using mindspore::lite::Context; namespace mindspore::kernel { class BatchnormInt8CPUKernel : public LiteKernel { public: - BatchnormInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const Context *ctx, + BatchnormInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) { batchnorm_param_ = reinterpret_cast(parameter); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/bias_add_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/bias_add_int8.cc index f7abe0df3a..94d0b38e42 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/bias_add_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/bias_add_int8.cc @@ -51,9 +51,9 @@ int BiasAddInt8CPUKernel::Run() { MS_LOG(ERROR) << "Prepare failed."; return RET_ERROR; } - auto in = reinterpret_cast(in_tensors_.at(0)->Data()); - auto bias = reinterpret_cast(in_tensors_.at(1)->Data()); - auto out = reinterpret_cast(out_tensors_.at(0)->Data()); + auto in = reinterpret_cast(in_tensors_.at(0)->MutableData()); + auto bias = reinterpret_cast(in_tensors_.at(1)->MutableData()); + auto out = reinterpret_cast(out_tensors_.at(0)->MutableData()); size_t data_size = in_tensors_.at(0)->ElementsNum(); auto tile_in = static_cast(ctx_->allocator->Malloc(data_size)); auto tile_bias = static_cast(ctx_->allocator->Malloc(data_size)); @@ -68,9 +68,9 @@ int BiasAddInt8CPUKernel::Run() { return NNACL_OK; } -kernel::LiteKernel *CpuBiasAddInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *parameter, const lite::Context *ctx, const KernelKey &desc, +kernel::LiteKernel *CpuBiasAddInt8KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *parameter, + const lite::Context *ctx, const KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (parameter == nullptr || ctx == nullptr) { MS_LOG(ERROR) << "parameter or context is nullptr"; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/bias_add_int8.h 
b/mindspore/lite/src/runtime/kernel/arm/int8/bias_add_int8.h index 78a6d44d32..93b3455adf 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/bias_add_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/bias_add_int8.h @@ -24,8 +24,8 @@ namespace mindspore::kernel { class BiasAddInt8CPUKernel : public LiteKernel { public: - BiasAddInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + BiasAddInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx) {} ~BiasAddInt8CPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.cc index 8aad2d5716..e3fcfd2eb5 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.cc @@ -69,7 +69,10 @@ int ConcatInt8CPUKernel::ReSize() { concat_param_->input_num_ = input_num; concat_param_->input_shapes_ = reinterpret_cast(malloc(sizeof(int *) * input_num)); for (size_t i = 0; i < input_num; i++) { - concat_param_->input_shapes_[i] = reinterpret_cast(in_tensors_.at(i)->shape().data()); + auto in_shape = in_tensors_.at(i)->shape(); + concat_param_->input_shapes_[i] = reinterpret_cast(malloc(in_shape.size() * sizeof(int))); + memcpy(reinterpret_cast(const_cast(concat_param_->input_shapes_[i])), in_shape.data(), + sizeof(int) * in_shape.size()); } before_axis_size = 1; @@ -79,8 +82,12 @@ int ConcatInt8CPUKernel::ReSize() { int64_t after_axis_size = 1; auto output_tensor = out_tensors_.at(kOutputIndex); - size_t output_dim = output_tensor->shape().size(); - concat_param_->output_shapes_ = output_tensor->shape().data(); + auto out_shape = output_tensor->shape(); + size_t output_dim = out_shape.size(); + concat_param_->output_shapes_ = reinterpret_cast(malloc(output_dim * sizeof(int))); + memcpy(reinterpret_cast(const_cast(concat_param_->output_shapes_)), output_tensor->shape().data(), + sizeof(int) * output_dim); + for (size_t i = axis_ + 1; i < output_dim; i++) { after_axis_size *= concat_param_->output_shapes_[i]; } @@ -100,9 +107,9 @@ int ConcatInt8CPUKernel::Run() { concat_param_->count_unit_ = count_unit_; for (int i = 0; i < input_num; i++) { - input_data_[i] = static_cast(in_tensors_.at(i)->Data()); + input_data_[i] = static_cast(in_tensors_.at(i)->MutableData()); } - output_data_ = reinterpret_cast(out_tensors_.at(0)->Data()); + output_data_ = reinterpret_cast(out_tensors_.at(0)->MutableData()); ret = ParallelLaunch(THREAD_POOL_DEFAULT, ConcatInt8Run, this, thread_count_); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.h index 0f8780fd2f..8d1fa317e9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.h @@ -28,8 +28,8 @@ using mindspore::lite::Context; namespace mindspore::kernel { class ConcatInt8CPUKernel : public ConcatBaseCPUKernel { public: - ConcatInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const Context *ctx, + ConcatInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const Context *ctx, const mindspore::lite::PrimitiveC *primitive) : ConcatBaseCPUKernel(parameter, inputs, outputs, ctx, 
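
The ConcatInt8CPUKernel::ReSize change above fixes a lifetime bug: in_tensors_.at(i)->shape() returns a std::vector<int> by value, so stashing its .data() pointer in concat_param_ left dangling pointers once the temporary was destroyed. The new code snapshots each shape into malloc'd storage instead. A minimal illustration of the hazard and the fix (assuming shape() returns by value, as the patch implies):

#include <cstdlib>
#include <cstring>
#include <vector>

std::vector<int> GetShape() { return {1, 8, 8, 3}; }  // stand-in for Tensor::shape()

const int *DanglingShape() {
  return GetShape().data();  // BUG: points into a temporary that dies here
}

const int *OwnedShapeCopy() {
  std::vector<int> s = GetShape();
  int *copy = static_cast<int *>(std::malloc(s.size() * sizeof(int)));
  std::memcpy(copy, s.data(), s.size() * sizeof(int));  // snapshot survives
  return copy;  // the caller frees it, as the kernel's destructor does
}
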
primitive) {} ~ConcatInt8CPUKernel() override { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_1x1_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_1x1_int8.cc index c817d04168..ed8cabb85f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_1x1_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_1x1_int8.cc @@ -92,10 +92,10 @@ int Convolution1x1Int8CPUKernel::InitWeightBias() { } memset(packed_weight_, 0, size); if (support_optimize_) { - RowMajor2Row8x4MajorInt8(reinterpret_cast(filter_tensor->Data()), packed_weight_, output_channel, + RowMajor2Row8x4MajorInt8(reinterpret_cast(filter_tensor->MutableData()), packed_weight_, output_channel, input_channel); } else { - RowMajor2Row4x16MajorInt8(reinterpret_cast(filter_tensor->Data()), packed_weight_, output_channel, + RowMajor2Row4x16MajorInt8(reinterpret_cast(filter_tensor->MutableData()), packed_weight_, output_channel, input_channel); } @@ -110,11 +110,11 @@ int Convolution1x1Int8CPUKernel::InitWeightBias() { } memset(bias_data_, 0, size); if (in_tensors_.size() == 3) { - memcpy(bias_data_, in_tensors_[kBiasIndex]->Data(), output_channel * sizeof(int32_t)); + memcpy(bias_data_, in_tensors_[kBiasIndex]->MutableData(), output_channel * sizeof(int32_t)); } int32_t *bias_data = reinterpret_cast(bias_data_); - int8_t *weight = reinterpret_cast(filter_tensor->Data()); + int8_t *weight = reinterpret_cast(filter_tensor->MutableData()); int32_t input_zp = conv_param_->conv_quant_arg_.input_quant_args_[0].zp_; for (int oc = 0; oc < output_channel; oc++) { int32_t weight_sum_value = 0; @@ -330,8 +330,8 @@ int Convolution1x1Int8CPUKernel::Run() { return RET_ERROR; } - int8_t *src_in = reinterpret_cast(in_tensors_[0]->Data()); - int8_t *src_out = reinterpret_cast(out_tensors_[0]->Data()); + int8_t *src_in = reinterpret_cast(in_tensors_[0]->MutableData()); + int8_t *src_out = reinterpret_cast(out_tensors_[0]->MutableData()); for (int batch_index = 0; batch_index < conv_param_->input_batch_; batch_index++) { Pre1x1Trans(src_in + batch_index * conv_param_->input_h_ * conv_param_->input_w_ * conv_param_->input_channel_, diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_1x1_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_1x1_int8.h index 6ffd5aa4a7..634aa29ff3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_1x1_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_1x1_int8.h @@ -30,8 +30,8 @@ namespace mindspore::kernel { class Convolution1x1Int8CPUKernel : public ConvolutionBaseCPUKernel { public: - Convolution1x1Int8CPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const Context *ctx, + Convolution1x1Int8CPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const Context *ctx, const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~Convolution1x1Int8CPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.cc index 188ec44d62..16460af883 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.cc @@ -90,7 +90,7 @@ int Convolution3x3Int8CPUKernel::InitWeightBias() { return RET_ERROR; } memset(transformed_filter_addr_, 0, transformed_size); - auto weight_data = 
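
The per-output-channel weight_sum_value loop in Convolution1x1Int8CPUKernel::InitWeightBias above exists because, with zero points, sum((x - zx) * w) = sum(x * w) - zx * sum(w); precomputing zx * sum(w) folds the correction into the bias so the int8 GEMM inner loop stays a plain dot product. A sketch of that fold (names hypothetical):

#include <cstdint>

// Fold the input-zero-point correction into the per-channel bias:
// acc = sum(x * w) - input_zp * sum(w)  ==>  bias'[oc] = bias[oc] - input_zp * sum_w[oc]
void FoldInputZeroPoint(const int8_t *weight, const int32_t *bias, int32_t input_zp,
                        int out_channel, int in_channel, int32_t *folded_bias) {
  for (int oc = 0; oc < out_channel; ++oc) {
    int32_t weight_sum = 0;
    for (int ic = 0; ic < in_channel; ++ic) {
      weight_sum += weight[oc * in_channel + ic];
    }
    folded_bias[oc] = bias[oc] - input_zp * weight_sum;
  }
}
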
reinterpret_cast(in_tensors_.at(kWeightIndex)->Data()); + auto weight_data = reinterpret_cast(in_tensors_.at(kWeightIndex)->MutableData()); ProcessFilterUint8(weight_data, transformed_filter_addr_, conv_param_); // init bias @@ -102,7 +102,7 @@ int Convolution3x3Int8CPUKernel::InitWeightBias() { } memset(bias_data_, 0, new_bias_size); if (in_tensors_.size() == kInputSize2) { - auto ori_bias_addr = reinterpret_cast(in_tensors_.at(kBiasIndex)->Data()); + auto ori_bias_addr = reinterpret_cast(in_tensors_.at(kBiasIndex)->MutableData()); memcpy(bias_data_, ori_bias_addr, output_channel * sizeof(int32_t)); } else { MS_ASSERT(in_tensors_.size() == kInputSize1); @@ -150,7 +150,7 @@ int Convolution3x3Int8CPUKernel::InitTmpBuffer() { void Convolution3x3Int8CPUKernel::ConfigInputOutput() { auto output_tensor = out_tensors_.at(kOutputIndex); - output_tensor->SetFormat(schema::Format_NHWC); + output_tensor->SetFormat(schema::Format::Format_NHWC); } int Convolution3x3Int8CPUKernel::Init() { @@ -204,7 +204,7 @@ int Convolution3x3Int8CPUKernel::ReSize() { } int Convolution3x3Int8CPUKernel::RunImpl(int task_id) { - auto output_addr = reinterpret_cast(out_tensors_.at(kOutputIndex)->Data()); + auto output_addr = reinterpret_cast(out_tensors_.at(kOutputIndex)->MutableData()); Conv3x3Int8(input_data_, transformed_filter_addr_, reinterpret_cast(bias_data_), output_addr, tile_buffer_, block_unit_buffer_, tmp_dst_buffer_, tmp_out_, task_id, conv_param_); return RET_OK; @@ -232,7 +232,7 @@ int Convolution3x3Int8CPUKernel::Run() { MS_LOG(ERROR) << "Init tmp buffer failed."; return RET_ERROR; } - auto input_addr = reinterpret_cast(in_tensors_.at(kInputIndex)->Data()); + auto input_addr = reinterpret_cast(in_tensors_.at(kInputIndex)->MutableData()); PackInputToC8Int8(input_addr, input_data_, conv_param_); int error_code = ParallelLaunch(THREAD_POOL_DEFAULT, Convolution3x3Int8Impl, this, thread_count_); @@ -243,7 +243,7 @@ int Convolution3x3Int8CPUKernel::Run() { } // get real output auto out_tensor = out_tensors_.front(); - auto out_data = reinterpret_cast(out_tensor->Data()); + auto out_data = reinterpret_cast(out_tensor->MutableData()); PackNC4HW4ToNHWCInt8(tmp_out_, out_data, conv_param_->output_batch_, conv_param_->output_h_ * conv_param_->output_w_, conv_param_->output_channel_); FreeTmpBuffer(); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.h index aba4428b9c..da90bbd4a0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.h @@ -26,8 +26,8 @@ namespace mindspore::kernel { class Convolution3x3Int8CPUKernel : public ConvolutionBaseCPUKernel { public: - Convolution3x3Int8CPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const Context *ctx, + Convolution3x3Int8CPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const Context *ctx, const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~Convolution3x3Int8CPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.cc index fdb3cd0ddb..a0871e196f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.cc @@ -40,7 
+40,7 @@ ConvolutionDepthwiseInt8CPUKernel::~ConvolutionDepthwiseInt8CPUKernel() { int ConvolutionDepthwiseInt8CPUKernel::InitWeightBias() { // init weight, int8 -> int16 auto weight_tensor = in_tensors_[kWeightIndex]; - auto origin_weight = reinterpret_cast(weight_tensor->Data()); + auto origin_weight = reinterpret_cast(weight_tensor->MutableData()); int channel = weight_tensor->Batch(); int pack_weight_size = channel * weight_tensor->Height() * weight_tensor->Width(); auto tmp_weight = reinterpret_cast(malloc(pack_weight_size * sizeof(int8_t))); @@ -70,7 +70,7 @@ int ConvolutionDepthwiseInt8CPUKernel::InitWeightBias() { memset(bias_data_, 0, channel * sizeof(int32_t)); if (in_tensors_.size() == kInputSize2) { auto bias_tensor = in_tensors_.at(kBiasIndex); - auto ori_bias = reinterpret_cast(bias_tensor->Data()); + auto ori_bias = reinterpret_cast(bias_tensor->MutableData()); memcpy(bias_data_, ori_bias, bias_tensor->ElementsNum() * sizeof(int32_t)); } @@ -145,10 +145,10 @@ int ConvolutionDepthwiseInt8CPUKernel::Run() { } auto input_tensor = in_tensors_.at(kInputIndex); - input_ptr_ = reinterpret_cast(input_tensor->Data()); + input_ptr_ = reinterpret_cast(input_tensor->MutableData()); auto output_tensor = out_tensors_.at(kOutputIndex); - output_ptr_ = reinterpret_cast(output_tensor->Data()); + output_ptr_ = reinterpret_cast(output_tensor->MutableData()); ret = ParallelLaunch(THREAD_POOL_DEFAULT, ConvDwInt8Run, this, conv_param_->thread_num_); if (ret != RET_OK) { @@ -160,10 +160,9 @@ int ConvolutionDepthwiseInt8CPUKernel::Run() { return RET_OK; } -kernel::LiteKernel *CpuConvDwInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuConvDwInt8KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_DepthwiseConv2D); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.h index b8661236bc..e3ec53b5b7 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.h @@ -25,8 +25,8 @@ namespace mindspore::kernel { class ConvolutionDepthwiseInt8CPUKernel : public ConvolutionBaseCPUKernel { public: - ConvolutionDepthwiseInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const Context *ctx, + ConvolutionDepthwiseInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const Context *ctx, const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~ConvolutionDepthwiseInt8CPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.cc index 3e4d3274b9..b251eed3c9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.cc @@ -44,7 +44,7 @@ int ConvolutionDepthwiseSWInt8CPUKernel::InitWeightBias() { // init weight, int8 -> int16 // o, h, w, i -> o/8, h, w, i, 
8; o == group, i == 1 auto weight_tensor = in_tensors_[kWeightIndex]; - auto origin_weight = reinterpret_cast(weight_tensor->Data()); + auto origin_weight = reinterpret_cast(weight_tensor->MutableData()); int OC4 = UP_DIV(weight_tensor->Batch(), C4NUM); int pack_weight_size = C4NUM * OC4 * weight_tensor->Height() * weight_tensor->Width(); packed_weight_ = reinterpret_cast(malloc(pack_weight_size * sizeof(int16_t))); @@ -63,7 +63,7 @@ int ConvolutionDepthwiseSWInt8CPUKernel::InitWeightBias() { memset(bias_data_, 0, C4NUM * OC4 * sizeof(int32_t)); if (in_tensors_.size() == kInputSize2) { auto bias_tensor = in_tensors_.at(kBiasIndex); - auto ori_bias = reinterpret_cast(bias_tensor->Data()); + auto ori_bias = reinterpret_cast(bias_tensor->MutableData()); memcpy(bias_data_, ori_bias, bias_tensor->ElementsNum() * sizeof(int32_t)); } @@ -156,10 +156,10 @@ int ConvolutionDepthwiseSWInt8CPUKernel::Run() { } auto input_tensor = in_tensors_.at(kInputIndex); - auto input_addr = reinterpret_cast(input_tensor->Data()); + auto input_addr = reinterpret_cast(input_tensor->MutableData()); PackDepthwiseInt8Input(input_addr, packed_input_, conv_param_); - auto output_addr = reinterpret_cast(out_tensors_.at(kOutputIndex)->Data()); + auto output_addr = reinterpret_cast(out_tensors_.at(kOutputIndex)->MutableData()); if (!need_align_) { packed_output_ = output_addr; } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.h index 4c373c2466..634812dc70 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.h @@ -25,8 +25,8 @@ namespace mindspore::kernel { class ConvolutionDepthwiseSWInt8CPUKernel : public ConvolutionBaseCPUKernel { public: - ConvolutionDepthwiseSWInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const Context *ctx, + ConvolutionDepthwiseSWInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const Context *ctx, const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~ConvolutionDepthwiseSWInt8CPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.cc index 211bad75dc..99e7588845 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.cc @@ -77,7 +77,7 @@ int ConvolutionInt8CPUKernel::InitWeightBias() { int32_t input_zp = conv_param_->conv_quant_arg_.input_quant_args_[0].zp_; // init weight - auto origin_weight = reinterpret_cast(in_tensors_.at(kWeightIndex)->Data()); + auto origin_weight = reinterpret_cast(in_tensors_.at(kWeightIndex)->MutableData()); packed_weight_ = reinterpret_cast(malloc(pack_weight_size)); if (packed_weight_ == nullptr) { MS_LOG(ERROR) << "malloc packed_weight_ failed."; @@ -96,7 +96,7 @@ int ConvolutionInt8CPUKernel::InitWeightBias() { } memset(bias_data_, 0, oc4 * C4NUM * sizeof(int32_t)); if (in_tensors_.size() == kInputSize2) { - auto ori_bias = reinterpret_cast(in_tensors_.at(kBiasIndex)->Data()); + auto ori_bias = reinterpret_cast(in_tensors_.at(kBiasIndex)->MutableData()); memcpy(bias_data_, ori_bias, output_channel * sizeof(int32_t)); } else { MS_ASSERT(in_tensors_.size() == 
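
PackDepthwiseInt8Input and the o/8 (or o/4), h, w, i weight layout mentioned above both serve the same goal: keeping per-pixel channel data aligned to the vector width. A hedged sketch of the input side, assuming C4NUM-style padding of channels to a multiple of four with zeroed tail lanes (name hypothetical, not the actual packer):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Pad channels up to a multiple of 4 so each pixel's data is 4-aligned
// for the sliding-window kernels; the padded lanes are left zero.
void PackChannelsToC4Int8(const int8_t *src, int8_t *dst, int plane, int channel) {
  int c4 = ((channel + 3) / 4) * 4;  // UP_ROUND(channel, C4NUM)
  std::memset(dst, 0, static_cast<size_t>(plane) * c4);
  for (int p = 0; p < plane; ++p) {
    std::memcpy(dst + p * c4, src + p * channel, channel);
  }
}
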
kInputSize1); @@ -164,7 +164,7 @@ int ConvolutionInt8CPUKernel::InitWeightBiasOpt() { int32_t input_zp = conv_param_->conv_quant_arg_.input_quant_args_[0].zp_; // init weight - auto origin_weight = reinterpret_cast(in_tensors_.at(kWeightIndex)->Data()); + auto origin_weight = reinterpret_cast(in_tensors_.at(kWeightIndex)->MutableData()); packed_weight_ = reinterpret_cast(malloc(pack_weight_size)); if (packed_weight_ == nullptr) { MS_LOG(ERROR) << "malloc packed_weight_ failed."; @@ -183,7 +183,7 @@ int ConvolutionInt8CPUKernel::InitWeightBiasOpt() { } memset(bias_data_, 0, oc4 * C4NUM * sizeof(int32_t)); if (in_tensors_.size() == kInputSize2) { - auto ori_bias = reinterpret_cast(in_tensors_.at(kBiasIndex)->Data()); + auto ori_bias = reinterpret_cast(in_tensors_.at(kBiasIndex)->MutableData()); memcpy(bias_data_, ori_bias, output_channel * sizeof(int32_t)); } else { MS_ASSERT(in_tensors_.size() == kInputSize1); @@ -237,7 +237,7 @@ int ConvolutionInt8CPUKernel::InitTmpBufferOpt() { void ConvolutionInt8CPUKernel::ConfigInputOutput() { auto output_tensor = out_tensors_.at(kOutputIndex); - output_tensor->SetFormat(schema::Format_NHWC); + output_tensor->SetFormat(schema::Format::Format_NHWC); auto input_tensor = in_tensors_.at(kInputIndex); auto ret = CheckLayout(input_tensor); if (ret != RET_OK) { @@ -324,7 +324,7 @@ int ConvolutionInt8CPUKernel::ReSize() { } int ConvolutionInt8CPUKernel::RunImpl(int task_id) { - auto output_addr = reinterpret_cast(out_tensors_.at(kOutputIndex)->Data()); + auto output_addr = reinterpret_cast(out_tensors_.at(kOutputIndex)->MutableData()); if (support_optimize_) { ConvInt8Opt(reinterpret_cast(nhwc4_input_), packed_input_, packed_weight_, reinterpret_cast(bias_data_), tmp_dst_, tmp_out_, output_addr, input_sum_, task_id, @@ -369,7 +369,7 @@ int ConvolutionInt8CPUKernel::Run() { } auto input_tensor = in_tensors_.at(kInputIndex); - auto ori_input_data = input_tensor->Data(); + auto ori_input_data = input_tensor->MutableData(); convert_func_(ori_input_data, nhwc4_input_, conv_param_->input_batch_, conv_param_->input_h_ * conv_param_->input_w_, conv_param_->input_channel_); @@ -383,10 +383,9 @@ int ConvolutionInt8CPUKernel::Run() { return RET_OK; } -kernel::LiteKernel *CpuConvInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuConvInt8KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Conv2D); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.h index 13841fd765..8970943929 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.h @@ -26,8 +26,8 @@ namespace mindspore::kernel { class ConvolutionInt8CPUKernel : public ConvolutionBaseCPUKernel { public: - ConvolutionInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const Context *ctx, + ConvolutionInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const Context *ctx, const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~ConvolutionInt8CPUKernel() 
override { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.cc index afc1c6545d..adc684734e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.cc @@ -100,8 +100,8 @@ int CropInt8Run(void *cdata, int task_id) { int CropInt8CPUKernel::DoExecute(int task_id) { auto input_tensor = in_tensors_.at(kInputIndex); auto out_tensor = out_tensors_.at(kOutputIndex); - int8_t *input_data = reinterpret_cast(input_tensor->Data()); - int8_t *output_data = reinterpret_cast(out_tensor->Data()); + int8_t *input_data = reinterpret_cast(input_tensor->MutableData()); + int8_t *output_data = reinterpret_cast(out_tensor->MutableData()); Crop(input_data, output_data, task_id, crop_para_); return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.h index 3cbcaba8eb..233d1811d8 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.h @@ -28,8 +28,8 @@ using mindspore::lite::Context; namespace mindspore::kernel { class CropInt8CPUKernel : public CropBaseCPUKernel { public: - CropInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const Context *ctx, + CropInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const Context *ctx, const mindspore::lite::PrimitiveC *primitive) : CropBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) { crop_para_ = reinterpret_cast(op_parameter_); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.cc index ba4dca80fc..535f41b025 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.cc @@ -44,7 +44,7 @@ int DeconvolutionDepthwiseInt8CPUKernel::InitWeightBias() { // init weight: int8 -> int16 // o, h, w, i -> o/8, h, w, i, 8; o == group, i == 1 auto weight_tensor = in_tensors_[kWeightIndex]; - auto origin_weight = reinterpret_cast(weight_tensor->Data()); + auto origin_weight = reinterpret_cast(weight_tensor->MutableData()); int OC4 = UP_DIV(weight_tensor->Batch(), C4NUM); int pack_weight_size = C4NUM * OC4 * weight_tensor->Height() * weight_tensor->Width(); packed_weight_ = reinterpret_cast(malloc(pack_weight_size * sizeof(int16_t))); @@ -63,7 +63,7 @@ int DeconvolutionDepthwiseInt8CPUKernel::InitWeightBias() { memset(bias_data_, 0, C4NUM * OC4 * sizeof(int32_t)); if (in_tensors_.size() == kInputSize2) { auto bias_tensor = in_tensors_.at(kBiasIndex); - auto ori_bias = reinterpret_cast(bias_tensor->Data()); + auto ori_bias = reinterpret_cast(bias_tensor->MutableData()); memcpy(bias_data_, ori_bias, bias_tensor->ElementsNum() * sizeof(int32_t)); } conv_param_->thread_num_ = MSMIN(thread_count_, OC4); @@ -187,10 +187,10 @@ int DeconvolutionDepthwiseInt8CPUKernel::Run() { // pack input, assume input format: NHWC -> NHWC4 auto input_tensor = in_tensors_.at(kInputIndex); - auto input_addr = reinterpret_cast(input_tensor->Data()); + auto input_addr = reinterpret_cast(input_tensor->MutableData()); PackDepthwiseInt8Input(input_addr, packed_input_, conv_param_); - auto output_addr = reinterpret_cast(out_tensors_.at(kOutputIndex)->Data()); + auto output_addr = 
reinterpret_cast(out_tensors_.at(kOutputIndex)->MutableData()); if (!need_align_) { memset(output_addr, 0, out_tensors_.at(kOutputIndex)->ElementsNum() * sizeof(int8_t)); packed_output_ = output_addr; @@ -212,10 +212,9 @@ int DeconvolutionDepthwiseInt8CPUKernel::Run() { return RET_OK; } -kernel::LiteKernel *CpuDeconvDwInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuDeconvDwInt8KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_DeDepthwiseConv2D); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.h index b6ad5245ae..1dd84ce824 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.h @@ -25,8 +25,8 @@ namespace mindspore::kernel { class DeconvolutionDepthwiseInt8CPUKernel : public ConvolutionBaseCPUKernel { public: - DeconvolutionDepthwiseInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const Context *ctx, + DeconvolutionDepthwiseInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const Context *ctx, const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~DeconvolutionDepthwiseInt8CPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.cc index 2cdf490445..3752d9c3e7 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.cc @@ -142,7 +142,7 @@ int DeConvInt8CPUKernel::InitBiasWeight() { } memset(bias_data_, 0, size); if (in_tensors_.size() == 3) { - memcpy(bias_data_, in_tensors_[0]->Data(), conv_param_->output_channel_ * sizeof(int32_t)); + memcpy(bias_data_, in_tensors_[0]->MutableData(), conv_param_->output_channel_ * sizeof(int32_t)); } size = UP_ROUND(conv_param_->output_channel_, C4NUM) * UP_ROUND(conv_param_->input_channel_, C16NUM) * @@ -153,9 +153,9 @@ int DeConvInt8CPUKernel::InitBiasWeight() { return RET_ERROR; } memset(weight_ptr_, static_cast(conv_param_->conv_quant_arg_.filter_quant_args_[0].zp_), size); - DeConvWeightTransInt8(reinterpret_cast(in_tensors_[1]->Data()), weight_ptr_, conv_param_->input_channel_, - conv_param_->output_channel_, conv_param_->kernel_h_ * conv_param_->kernel_w_, - support_optimize_); + DeConvWeightTransInt8(reinterpret_cast(in_tensors_[1]->MutableData()), weight_ptr_, + conv_param_->input_channel_, conv_param_->output_channel_, + conv_param_->kernel_h_ * conv_param_->kernel_w_, support_optimize_); size = UP_ROUND(conv_param_->output_channel_, C4NUM) * conv_param_->kernel_h_ * conv_param_->kernel_w_; weight_sum_ = reinterpret_cast(malloc(size * sizeof(int32_t))); @@ -265,8 +265,8 @@ int DeConvInt8CPUKernel::Run() { MS_LOG(ERROR) << "Prepare failed."; return RET_ERROR; } - int8_t *src_in = reinterpret_cast(in_tensors_[0]->Data()); - int8_t *src_out = reinterpret_cast(out_tensors_[0]->Data()); + int8_t *src_in = 
reinterpret_cast(in_tensors_[0]->MutableData()); + int8_t *src_out = reinterpret_cast(out_tensors_[0]->MutableData()); int error_code = InitRunBuf(); if (error_code != RET_OK) { @@ -293,10 +293,9 @@ int DeConvInt8CPUKernel::Run() { return RET_OK; } -kernel::LiteKernel *CpuDeConvInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *CpuDeConvInt8KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_DeConv2D); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.h index 51a4b97210..4aec0d79cb 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.h @@ -32,8 +32,8 @@ namespace mindspore::kernel { class DeConvInt8CPUKernel : public ConvolutionBaseCPUKernel { public: - DeConvInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const Context *ctx, + DeConvInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const Context *ctx, const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~DeConvInt8CPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.cc index 5e1753b4f9..dd37e370be 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.cc @@ -58,8 +58,8 @@ int DepthToSpaceInt8CPUKernel::Run() { } auto input = in_tensors_[0]; auto output = out_tensors_[0]; - const int8_t *input_data = reinterpret_cast(input->Data()); - int8_t *output_data = reinterpret_cast(output->Data()); + const int8_t *input_data = reinterpret_cast(input->MutableData()); + int8_t *output_data = reinterpret_cast(output->MutableData()); auto in_shape = input->shape(); DepthToSpaceParameter *param = reinterpret_cast(op_parameter_); if (in_quant_arg_.scale_ == out_quant_arg_.scale_ && in_quant_arg_.zp_ == out_quant_arg_.zp_) { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.h index 2b569ae8eb..f6bd116152 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.h @@ -23,8 +23,8 @@ namespace mindspore::kernel { class DepthToSpaceInt8CPUKernel : public DepthToSpaceBaseCPUKernel { public: - DepthToSpaceInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + DepthToSpaceInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const lite::Context *ctx, const mindspore::lite::PrimitiveC *primitive) : DepthToSpaceBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.cc index 0249cccdf4..b36d46a610 100644 --- 
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.cc
index 0249cccdf4..b36d46a610 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.cc
@@ -29,9 +29,9 @@ using mindspore::schema::PrimitiveType_Div;
 namespace mindspore::kernel {
 int DivInt8CPUKernel::Init() {
-  lite::tensor::Tensor *input0 = in_tensors_.at(0);
-  lite::tensor::Tensor *input1 = in_tensors_.at(1);
-  lite::tensor::Tensor *output = out_tensors_.at(0);
+  lite::Tensor *input0 = in_tensors_.at(0);
+  lite::Tensor *input1 = in_tensors_.at(1);
+  lite::Tensor *output = out_tensors_.at(0);
   MS_ASSERT(input0);
   MS_ASSERT(input1);
   MS_ASSERT(output);
@@ -58,14 +58,12 @@ int DivInt8CPUKernel::Init() {
   return ReSize();
 }
 
-int DivInt8CPUKernel::ReSize() {
-  return RET_OK;
-}
+int DivInt8CPUKernel::ReSize() { return RET_OK; }
 
 int DivInt8CPUKernel::DoExecute(int task_id) {
-  auto input0_data_ = static_cast<int8_t *>(in_tensors_.at(0)->Data());
-  auto input1_data_ = static_cast<int8_t *>(in_tensors_.at(1)->Data());
-  auto output_data_ = static_cast<int8_t *>(out_tensors_.at(0)->Data());
+  auto input0_data_ = static_cast<int8_t *>(in_tensors_.at(0)->MutableData());
+  auto input1_data_ = static_cast<int8_t *>(in_tensors_.at(1)->MutableData());
+  auto output_data_ = static_cast<int8_t *>(out_tensors_.at(0)->MutableData());
   auto element_num = out_tensors_[0]->ElementsNum();
 
   MS_ASSERT(op_parameter_->thread_num_ != 0);
@@ -119,9 +117,9 @@ int DivInt8CPUKernel::Run() {
       context_->allocator->Free(tile1_data_);
       return RET_ERROR;
     }
-    TileDimensionsUint8(static_cast<uint8_t *>(in_tensors_.at(0)->Data()),
-                        static_cast<uint8_t *>(in_tensors_.at(1)->Data()), reinterpret_cast<uint8_t *>(tile0_data_),
-                        reinterpret_cast<uint8_t *>(tile1_data_), &tile_para);
+    TileDimensionsUint8(static_cast<uint8_t *>(in_tensors_.at(0)->MutableData()),
+                        static_cast<uint8_t *>(in_tensors_.at(1)->MutableData()),
+                        reinterpret_cast<uint8_t *>(tile0_data_), reinterpret_cast<uint8_t *>(tile1_data_), &tile_para);
   }
   ret = ParallelLaunch(THREAD_POOL_DEFAULT, DivInt8Run, this, op_parameter_->thread_num_);
   if (broadcast_) {
@@ -134,8 +132,8 @@ int DivInt8CPUKernel::Run() {
   return ret;
 }
 
-kernel::LiteKernel *CpuDivInt8KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                            const std::vector<lite::tensor::Tensor *> &outputs, OpParameter *parameter,
+kernel::LiteKernel *CpuDivInt8KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                            const std::vector<lite::Tensor *> &outputs, OpParameter *parameter,
                                             const lite::Context *ctx, const KernelKey &desc,
                                             const mindspore::lite::PrimitiveC *primitive) {
   if (parameter == nullptr || ctx == nullptr) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.h
index fb2d7a12b3..e505ee6e8e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.h
@@ -24,8 +24,8 @@ namespace mindspore::kernel {
 class DivInt8CPUKernel : public LiteKernel {
  public:
-  explicit DivInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                            const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  explicit DivInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                            const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                             const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~DivInt8CPUKernel() override {}
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/fullconnection_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/fullconnection_int8.cc
index e0ebe39441..80a2d5f710 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/fullconnection_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/fullconnection_int8.cc
@@ -56,13 +56,13 @@ int FullconnectionInt8CPUKernel::ReSize() {
   weight_bias_sums_ = reinterpret_cast<int *>(ctx_->allocator->Malloc(c4_ * sizeof(int)));
   if (!weight_bias_sums_) return RET_MEMORY_FAILED;
   memset(weight_bias_sums_, 0, c4_ * sizeof(int));
-  auto weight_data = reinterpret_cast<int8_t *>(in_tensors_[1]->Data());
+  auto weight_data = reinterpret_cast<int8_t *>(in_tensors_[1]->MutableData());
   RowMajor2Row4x16Major(weight_data, fc_param_->col_, fc_param_->deep_, b_c16x4_ptr_, d16_);
   if (in_tensors_.size() == 3) {
     auto bias_len = fc_param_->col_8_ * sizeof(int);
     bias_ptr_ = reinterpret_cast<int *>(ctx_->allocator->Malloc(bias_len));
     if (!bias_ptr_) return RET_MEMORY_FAILED;
-    memcpy(bias_ptr_, in_tensors_[2]->Data(), bias_len);
+    memcpy(bias_ptr_, in_tensors_[2]->MutableData(), bias_len);
   } else {
     bias_ptr_ = NULL;
   }
@@ -104,7 +104,7 @@ int FullconnectionInt8CPUKernel::RunImpl(int task_id) {
   auto &p = fc_param_;
   auto cur_b = b_c16x4_ptr_ + task_id * thread_stride_ * C4NUM * d16_;
   auto cur_bias = weight_bias_sums_ + task_id * thread_stride_ * C4NUM;
-  auto output_ptr = reinterpret_cast<int8_t *>(out_tensors_[0]->Data());
+  auto output_ptr = reinterpret_cast<int8_t *>(out_tensors_[0]->MutableData());
   auto cur_c = output_ptr + task_id * thread_stride_ * C4NUM;
 #ifdef ENABLE_ARM64
   MatmulInt8Neon64(a_r4x16_ptr_, cur_b, cur_c, r4_, cur_oc * C4NUM, d16_, input_sums_, cur_bias, q.out_act_min,
@@ -134,7 +134,7 @@ int FullconnectionInt8CPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare failed.";
     return RET_ERROR;
   }
-  auto input_ptr = reinterpret_cast<int8_t *>(in_tensors_[0]->Data());
+  auto input_ptr = reinterpret_cast<int8_t *>(in_tensors_[0]->MutableData());
   RowMajor2Row4x16Major(input_ptr, fc_param_->row_, fc_param_->deep_, a_r4x16_ptr_, d16_);
   CalcInputSums(input_ptr, fc_param_->row_, fc_param_->deep_, quant_params_.weight.zp_, input_sums_, RowMajor);
   ParallelLaunch(THREAD_POOL_DEFAULT, FcInt8Run, this, thread_count_);
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/fullconnection_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/fullconnection_int8.h
index 9e2aca294c..b3773f4de0 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/fullconnection_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/fullconnection_int8.h
@@ -28,8 +28,8 @@ using mindspore::lite::Context;
 namespace mindspore::kernel {
 class FullconnectionInt8CPUKernel : public FullconnectionBaseCPUKernel {
  public:
-  FullconnectionInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                              const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
+  FullconnectionInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                              const std::vector<lite::Tensor *> &outputs, const Context *ctx,
                               const mindspore::lite::PrimitiveC *primitive)
       : FullconnectionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~FullconnectionInt8CPUKernel() override { FreeTmpBuffer(); }
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/gatherNd_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/gatherNd_int8.cc
index f5539b9195..d2fa55c808 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/gatherNd_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/gatherNd_int8.cc
@@ -78,7 +78,7 @@ int GatherNdInt8CPUKernel::ReSize() {
   auto in_shape = in_tensors_.front()->shape();
   int in_rank = in_shape.size();
   int idx_lastshape = indices_shape[indices_rank - 1];
-  auto indices_ptr = reinterpret_cast<int32_t *>(indices_tensor->Data());
+  auto indices_ptr = reinterpret_cast<int32_t *>(indices_tensor->MutableData());
   area_ = 1;
   for (int i = idx_lastshape; i < in_rank; ++i) {
     area_ *= in_shape[i];
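
The fully-connected ReSize above sizes its scratch buffers from padded dimensions (c4_, d16_, col_8_) rather than the raw col/deep values, so the 4x16 packed tiles never read out of range. A minimal sketch of that rounding, with UP_DIV mirroring the nnacl macro and the dimension names illustrative:

// pad_dims_sketch.cc -- rounding matrix dimensions up to tile multiples.
#include <cstdio>

constexpr int UP_DIV(int x, int y) { return (x + y - 1) / y; }

int main() {
  int col = 30, deep = 100;
  int c4 = UP_DIV(col, 4) * 4;      // columns padded to a multiple of 4
  int d16 = UP_DIV(deep, 16) * 16;  // depth padded to a multiple of 16
  std::printf("col %d -> %d, deep %d -> %d\n", col, c4, deep, d16);
  return 0;
}
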
@@ -130,8 +130,8 @@ int GatherNdInt8CPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret;
     return prepare_ret;
   }
-  in_ptr_ = reinterpret_cast<int8_t *>(in_tensors_.front()->Data());
-  out_ptr_ = reinterpret_cast<int8_t *>(out_tensors_.front()->Data());
+  in_ptr_ = reinterpret_cast<int8_t *>(in_tensors_.front()->MutableData());
+  out_ptr_ = reinterpret_cast<int8_t *>(out_tensors_.front()->MutableData());
   auto ret = ParallelLaunch(THREAD_POOL_DEFAULT, GatherNdInt8Run, this, thread_sz_count_);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "gatherNd error error_code[" << ret << "]";
@@ -140,10 +140,9 @@ int GatherNdInt8CPUKernel::Run() {
   return RET_OK;
 }
 
-kernel::LiteKernel *CpuGatherNdInt8KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                 const std::vector<lite::tensor::Tensor *> &outputs,
-                                                 OpParameter *opParameter, const lite::Context *ctx,
-                                                 const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuGatherNdInt8KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                 const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                                 const lite::Context *ctx, const kernel::KernelKey &desc,
                                                  const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(opParameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_GatherNd);
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/gatherNd_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/gatherNd_int8.h
index b4b03886cf..1871295ac1 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/gatherNd_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/gatherNd_int8.h
@@ -24,8 +24,8 @@ namespace mindspore::kernel {
 class GatherNdInt8CPUKernel : public LiteKernel {
  public:
-  GatherNdInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                        const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  GatherNdInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                        const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                         const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {}
   ~GatherNdInt8CPUKernel() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.cc
index 2b897e38c4..34ab40cd02 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.cc
@@ -53,9 +53,9 @@ int GatherInt8CPUKernel::DoGather(int task_id) {
   auto indices_tensor = in_tensors_.at(1);
   auto out_tensor = out_tensors_.at(0);
 
-  auto input_ptr = reinterpret_cast<int8_t *>(input_tensor->Data());
-  auto output_ptr = reinterpret_cast<int8_t *>(out_tensor->Data());
-  auto indices_ptr = reinterpret_cast<int32_t *>(out_tensor->Data());
+  auto input_ptr = reinterpret_cast<int8_t *>(input_tensor->MutableData());
+  auto output_ptr = reinterpret_cast<int8_t *>(out_tensor->MutableData());
+  auto indices_ptr = reinterpret_cast<int32_t *>(indices_tensor->MutableData());
 
   auto in_shape = input_tensor->shape();
   int in_rank = in_shape.size();
@@ -119,10 +119,9 @@ int GatherInt8CPUKernel::Run() {
   return RET_OK;
 }
 
-kernel::LiteKernel *CpuGatherInt8KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                               const std::vector<lite::tensor::Tensor *> &outputs,
-                                               OpParameter *opParameter, const lite::Context *ctx,
-                                               const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuGatherInt8KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                               const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                               const lite::Context *ctx, const kernel::KernelKey &desc,
                                                const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(desc.type == schema::PrimitiveType_Gather);
   if (opParameter == nullptr) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.h
index 9633d6d5a0..3062b31c3a 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.h
@@ -25,8 +25,8 @@ namespace mindspore::kernel {
 class GatherInt8CPUKernel : public LiteKernel {
  public:
-  GatherInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                      const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  GatherInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                      const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                       const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {}
   ~GatherInt8CPUKernel() {
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.cc
index 8ece51bc81..095e508a36 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.cc
@@ -30,8 +30,8 @@ using mindspore::schema::ActivationType_HSWISH;
 namespace mindspore::kernel {
 int HswishInt8CPUKernel::Init() {
-  lite::tensor::Tensor *input = in_tensors_.at(0);
-  lite::tensor::Tensor *output = out_tensors_.at(0);
+  lite::Tensor *input = in_tensors_.at(0);
+  lite::Tensor *output = out_tensors_.at(0);
   MS_ASSERT(input);
   MS_ASSERT(output);
 
@@ -67,8 +67,8 @@ void HswishInt8CPUKernel::MultiplierInt32ToInt16(int32_t input, int16_t *output)
 
 int HswishInt8CPUKernel::ReSize() { return RET_OK; }
 
 int HswishInt8CPUKernel::DoActivation(int task_id) {
-  auto input_addr = reinterpret_cast<int8_t *>(in_tensors_.at(0)->Data());
-  auto output_addr = reinterpret_cast<int8_t *>(out_tensors_.at(0)->Data());
+  auto input_addr = reinterpret_cast<int8_t *>(in_tensors_.at(0)->MutableData());
+  auto output_addr = reinterpret_cast<int8_t *>(out_tensors_.at(0)->MutableData());
   auto length = in_tensors_.at(0)->ElementsNum();
 
   int stride = UP_DIV(length, thread_count_);
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.h
index ce11075b21..5319d9ffc5 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.h
@@ -25,8 +25,8 @@ namespace mindspore::kernel {
 class HswishInt8CPUKernel : public LiteKernel {
  public:
-  HswishInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                      const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  HswishInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                      const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                       const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {}
   ~HswishInt8CPUKernel() override = default;
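
The DoActivation bodies above all slice a flat buffer the same way: stride = UP_DIV(length, thread count), and each task_id takes one stride-sized chunk. A standalone sketch of that partitioning, with a plain loop standing in for the runtime's thread pool:

// chunking_sketch.cc -- per-task work splitting used by the int8 activations.
#include <algorithm>
#include <cstdio>

constexpr int UP_DIV(int x, int y) { return (x + y - 1) / y; }

void DoChunk(const float *in, float *out, int length, int task_id, int thread_count) {
  int stride = UP_DIV(length, thread_count);
  int offset = task_id * stride;
  int count = std::min(stride, length - offset);
  if (count <= 0) return;  // trailing tasks may get no work
  for (int i = 0; i < count; ++i) out[offset + i] = in[offset + i] * 2.0f;  // placeholder op
}

int main() {
  float in[10] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, out[10] = {};
  for (int t = 0; t < 4; ++t) DoChunk(in, out, 10, t, 4);
  std::printf("%g %g\n", out[0], out[9]);  // 0 18
  return 0;
}
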
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.cc
index d38e16c517..c2c80c5d12 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.cc
@@ -104,8 +104,8 @@ int LeakyReluInt8CPUKernel::Run() {
 int LeakyReluInt8CPUKernel::DoExecute(int task_id) {
   auto input_tensor = in_tensors_.at(kInputIndex);
   auto out_tensor = out_tensors_.at(kOutputIndex);
-  int8_t *input_data = reinterpret_cast<int8_t *>(input_tensor->Data());
-  int8_t *output_data = reinterpret_cast<int8_t *>(out_tensor->Data());
+  int8_t *input_data = reinterpret_cast<int8_t *>(input_tensor->MutableData());
+  int8_t *output_data = reinterpret_cast<int8_t *>(out_tensor->MutableData());
   auto ret = DoLeakReluInt8(input_data, output_data, &quant_prelu_parm_, task_id);
   if (ret != NNACL_OK) {
     MS_LOG(ERROR) << "DoLeakReluInt8 failed";
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.h
index ba0282a096..85caa0a8a3 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.h
@@ -26,12 +26,11 @@ namespace mindspore::kernel {
 class LeakyReluInt8CPUKernel : public LeakyReluBaseCPUKernel {
  public:
-  LeakyReluInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                         const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  LeakyReluInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                         const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                          const mindspore::lite::PrimitiveC *primitive)
-      : LeakyReluBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {
-  }
-  ~LeakyReluInt8CPUKernel() override;;
+      : LeakyReluBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
+  ~LeakyReluInt8CPUKernel() override;
 
   int Init() override;
   int ReSize() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.cc
index 9c3bafe389..09059d7624 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.cc
@@ -127,9 +127,9 @@ int MatmulInt8CPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare failed.";
     return RET_ERROR;
   }
-  auto a_ptr = reinterpret_cast<int8_t *>(in_tensors_[0]->Data());
-  auto b_ptr = reinterpret_cast<int8_t *>(in_tensors_[1]->Data());
-  auto c_ptr = reinterpret_cast<int8_t *>(out_tensors_[0]->Data());
+  auto a_ptr = reinterpret_cast<int8_t *>(in_tensors_[0]->MutableData());
+  auto b_ptr = reinterpret_cast<int8_t *>(in_tensors_[1]->MutableData());
+  auto c_ptr = reinterpret_cast<int8_t *>(out_tensors_[0]->MutableData());
   auto a_stride = params_->row_ * params_->deep_;
   auto b_stride = params_->deep_ * params_->col_;
   auto c_stride = params_->row_ * params_->col_;
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.h
index d728d2aecd..89e16d59f6 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.h
@@ -27,8 +27,8 @@ using mindspore::lite::Context;
 namespace mindspore::kernel {
 class MatmulInt8CPUKernel : public MatmulBaseCPUKernel {
  public:
-  MatmulInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                      const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
+  MatmulInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                      const std::vector<lite::Tensor *> &outputs, const Context *ctx,
                       const mindspore::lite::PrimitiveC *primitive)
       : MatmulBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~MatmulInt8CPUKernel() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.cc
index d4cad12b42..80303ecd48 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.cc
@@ -30,9 +30,9 @@ using mindspore::schema::PrimitiveType_Mul;
 namespace mindspore::kernel {
 int MulInt8CPUKernel::Init() {
-  lite::tensor::Tensor *input0 = in_tensors_.at(0);
-  lite::tensor::Tensor *input1 = in_tensors_.at(1);
-  lite::tensor::Tensor *output = out_tensors_.at(0);
+  lite::Tensor *input0 = in_tensors_.at(0);
+  lite::Tensor *input1 = in_tensors_.at(1);
+  lite::Tensor *output = out_tensors_.at(0);
   MS_ASSERT(input0);
   MS_ASSERT(input1);
   MS_ASSERT(output);
@@ -67,9 +67,9 @@ int MulInt8CPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare failed.";
     return RET_ERROR;
   }
-  input0_data_ = static_cast<int8_t *>(in_tensors_.at(0)->Data());
-  input1_data_ = static_cast<int8_t *>(in_tensors_.at(1)->Data());
-  output_data_ = static_cast<int8_t *>(out_tensors_.at(0)->Data());
+  input0_data_ = static_cast<int8_t *>(in_tensors_.at(0)->MutableData());
+  input1_data_ = static_cast<int8_t *>(in_tensors_.at(1)->MutableData());
+  output_data_ = static_cast<int8_t *>(out_tensors_.at(0)->MutableData());
   elements_num_ = in_tensors_.at(0)->ElementsNum();
   count_unit_ = thread_count_ > 1 ? UP_DIV(elements_num_, thread_count_) : elements_num_;
@@ -84,8 +84,8 @@ int MulInt8CPUKernel::Run() {
     tile_para.in_shape1_[i] = in_tensors_.at(1)->DimensionSize(i);
     tile_para.out_shape_[i] = out_tensors_.at(0)->DimensionSize(i);
   }
-  TileDimensionsInt8(static_cast<int8_t *>(in_tensors_.at(0)->Data()),
-                     static_cast<int8_t *>(in_tensors_.at(1)->Data()), input0_data_, input1_data_, &tile_para);
+  TileDimensionsInt8(static_cast<int8_t *>(in_tensors_.at(0)->MutableData()),
+                     static_cast<int8_t *>(in_tensors_.at(1)->MutableData()), input0_data_, input1_data_, &tile_para);
   ret = ParallelLaunch(THREAD_POOL_DEFAULT, MulInt8Run, this, thread_count_);
   ctx_->allocator->Free(input0_data_);
   ctx_->allocator->Free(input1_data_);
@@ -115,9 +115,9 @@ int MulInt8CPUKernel::DoExecute(int task_id) {
   return lite::RET_OK;
 }
 
-kernel::LiteKernel *CpuMulInt8KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                            const std::vector<lite::tensor::Tensor *> &outputs,
-                                            OpParameter *opParameter, const lite::Context *ctx, const KernelKey &desc,
+kernel::LiteKernel *CpuMulInt8KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                            const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                            const lite::Context *ctx, const KernelKey &desc,
                                             const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(opParameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_Mul);
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.h
index 9f00e2e8e1..71be6231af 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.h
@@ -24,8 +24,8 @@ namespace mindspore::kernel {
 class MulInt8CPUKernel : public LiteKernel {
  public:
-  explicit MulInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                            const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  explicit MulInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                            const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                             const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx_->thread_num_) {}
   ~MulInt8CPUKernel() override{};
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.cc
index f836cfa22a..d5cddcd46a 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.cc
@@ -128,8 +128,8 @@ int PadInt8CPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare fail!ret: " << ret;
     return ret;
   }
-  in_data_ = reinterpret_cast<int8_t *>(in_tensors_[0]->Data());
-  out_data_ = reinterpret_cast<int8_t *>(out_tensors_[0]->Data());
+  in_data_ = reinterpret_cast<int8_t *>(in_tensors_[0]->MutableData());
+  out_data_ = reinterpret_cast<int8_t *>(out_tensors_[0]->MutableData());
   memset(out_data_, pad_param_->pad_quant_arg_.constant_value_[0], out_tensors_[0]->ElementsNum() * sizeof(int8_t));
 
   int error_code = ParallelLaunch(THREAD_POOL_DEFAULT, PadInt8Impl, this, context_->thread_num_);
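
Before the elementwise int8 loop, the broadcast path in mul/div/sub tiles both operands out to the output shape with the TileDimensions* helpers, then runs on equal-length buffers. A 1-D stand-in for that tiling step:

// broadcast_tile_sketch.cc -- not the nnacl helper, just the idea behind it.
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int8_t> a = {1, 2, 3};  // shape [3]
  std::vector<int8_t> b = {10};       // shape [1], broadcast to [3]
  std::vector<int8_t> tiled_b(a.size());
  for (size_t i = 0; i < a.size(); ++i) tiled_b[i] = b[0];  // tile step
  std::vector<int8_t> out(a.size());
  for (size_t i = 0; i < a.size(); ++i) out[i] = static_cast<int8_t>(a[i] * tiled_b[i]);
  std::printf("%d %d %d\n", out[0], out[1], out[2]);  // 10 20 30
  return 0;
}
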
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.h
index 91cd01c8a1..622f0bb2dc 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.h
@@ -26,8 +26,8 @@ namespace mindspore::kernel {
 class PadInt8CPUKernel : public LiteKernel {
  public:
-  explicit PadInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                            const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  explicit PadInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                            const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                             const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {
     op_parameter_->thread_num_ = ctx->thread_num_;
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.cc
index db8ef66042..eb96e36dac 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.cc
@@ -59,8 +59,8 @@ int PoolingInt8CPUKernel::ReSize() {
 }
 
 int PoolingInt8CPUKernel::RunImpl(int task_id) {
-  auto input_data = reinterpret_cast<int8_t *>(in_tensors_.at(kInputIndex)->Data());
-  auto output_data = reinterpret_cast<int8_t *>(out_tensors_.at(kOutputIndex)->Data());
+  auto input_data = reinterpret_cast<int8_t *>(in_tensors_.at(kInputIndex)->MutableData());
+  auto output_data = reinterpret_cast<int8_t *>(out_tensors_.at(kOutputIndex)->MutableData());
   if (pooling_param_->pool_mode_ == PoolMode_MaxPool) {
     if (pooling_param_->quantize_) {
       MaxPoolingWithQuantInt8(input_data, output_data, pooling_param_, task_id);
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.h
index 201ebe688b..a42c65fdb8 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.h
@@ -28,8 +28,8 @@ using mindspore::lite::Context;
 namespace mindspore::kernel {
 class PoolingInt8CPUKernel : public PoolingBaseCPUKernel {
  public:
-  PoolingInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                       const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
+  PoolingInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                       const std::vector<lite::Tensor *> &outputs, const Context *ctx,
                        const mindspore::lite::PrimitiveC *primitive)
       : PoolingBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~PoolingInt8CPUKernel() { FreeQuantParam(); }
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/power_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/power_int8.cc
index 5abe82c9b9..0640dc78d3 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/power_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/power_int8.cc
@@ -57,8 +57,8 @@ int PowerInt8CPUKernel::Init() {
 
 int PowerInt8CPUKernel::ReSize() { return PowerBaseCPUKernel::ReSize(); }
 
 int PowerInt8CPUKernel::DoPower(int task_id) {
-  const int8_t *input_data = reinterpret_cast<int8_t *>(in_tensors_[0]->Data());
-  int8_t *output_data = reinterpret_cast<int8_t *>(out_tensors_[0]->Data());
+  const int8_t *input_data = reinterpret_cast<int8_t *>(in_tensors_[0]->MutableData());
+  int8_t *output_data = reinterpret_cast<int8_t *>(out_tensors_[0]->MutableData());
   auto size = in_tensors_[0]->ElementsNum();
   int stride = UP_DIV(size, op_parameter_->thread_num_);
@@ -70,7 +70,7 @@ int PowerInt8CPUKernel::DoPower(int task_id) {
   auto exp_quant_args = exp_tensor->GetQuantParams();
   param_->quant_arg_.exp_args_.scale_ = exp_quant_args.front().scale;
   param_->quant_arg_.exp_args_.zp_ = exp_quant_args.front().zeroPoint;
-  exp_ptr = reinterpret_cast<int8_t *>(exp_tensor->Data());
+  exp_ptr = reinterpret_cast<int8_t *>(exp_tensor->MutableData());
   param_->broadcast_ = false;
   if (in_tensors_[0]->Size() != in_tensors_[1]->Size()) {
     MS_LOG(ERROR) << "Power input size " << in_tensors_[0]->Size() << " is not equal to exponent size "
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/power_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/power_int8.h
index c02cbff9d8..4fa5827b48 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/power_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/power_int8.h
@@ -24,12 +24,11 @@ namespace mindspore::kernel {
 class PowerInt8CPUKernel : public PowerBaseCPUKernel {
  public:
-  PowerInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                     const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
-                     const mindspore::lite::PrimitiveC *primitive)
+  PowerInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                     const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
+                     const mindspore::lite::PrimitiveC *primitive)
       : PowerBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
-  ~PowerInt8CPUKernel() {
-  }
+  ~PowerInt8CPUKernel() {}
 
   int Init() override;
   int ReSize() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.cc
index 2e498bbcc8..b3d02c874b 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.cc
@@ -82,7 +82,8 @@ int ReduceInt8CPUKernel::Init() {
       last_reducer_ = ReduceSumSquareLastAxis;
       break;
     }
-    default:MS_LOG(ERROR) << "Reduce unsupported reduce mode: " << mode_;
+    default:
+      MS_LOG(ERROR) << "Reduce unsupported reduce mode: " << mode_;
       return RET_ERROR;
   }
   if (!InferShapeDone()) {
@@ -92,8 +93,8 @@ int ReduceInt8CPUKernel::Init() {
 }
 
 int ReduceInt8CPUKernel::CalculateQuantArgs() {
-  lite::tensor::Tensor *input = in_tensors_.at(0);
-  lite::tensor::Tensor *output = out_tensors_.at(0);
+  lite::Tensor *input = in_tensors_.at(0);
+  lite::Tensor *output = out_tensors_.at(0);
   MS_ASSERT(input);
   MS_ASSERT(output);
 
@@ -115,7 +116,7 @@ int ReduceInt8CPUKernel::CalculateQuantArgs() {
     for (auto i = 0; i < num_axes_; i++) {
       auto axis = axes_[i];
      double reciprocal = 1.0 / in_tensors_.at(0)->shape()[axis];
-      QuantMulArg *qm = new(std::nothrow) QuantMulArg;
+      QuantMulArg *qm = new (std::nothrow) QuantMulArg;
       if (qm == nullptr) {
         MS_LOG(ERROR) << "Reduce new QuantMulArg failed.";
         return RET_NULL_PTR;
@@ -133,7 +134,7 @@ int ReduceInt8CPUKernel::CalculateQuantArgs() {
   if (mode_ == static_cast<int>(schema::ReduceMode_ReduceProd)) {
     for (auto i = 0; i < num_axes_; i++) {
       int axis_size = in_tensors_.at(0)->shape()[axes_[i]];
-      QuantMulArg *qm = new(std::nothrow) QuantMulArg;
+      QuantMulArg *qm = new (std::nothrow) QuantMulArg;
       if (qm == nullptr) {
         MS_LOG(ERROR) << "ReduceProd new QuantMulArg failed.";
         return RET_NULL_PTR;
@@ -151,7 +152,7 @@ int ReduceInt8CPUKernel::CalculateQuantArgs() {
   // scale_in * scale_in/scale_out
   if (mode_ == static_cast<int>(schema::ReduceMode_ReduceSumSquare)) {
     for (auto i = 0; i < num_axes_ - 1; i++) {
-      QuantMulArg *qm = new(std::nothrow) QuantMulArg;
+      QuantMulArg *qm = new (std::nothrow) QuantMulArg;
       if (qm == nullptr) {
         MS_LOG(ERROR) << "ReduceProd new QuantMultiplier failed.";
         return RET_NULL_PTR;
@@ -163,7 +164,7 @@ int ReduceInt8CPUKernel::CalculateQuantArgs() {
       sum_square_multipliers_.push_back(qm);
     }
-    QuantMulArg *qm = new(std::nothrow) QuantMulArg;
+    QuantMulArg *qm = new (std::nothrow) QuantMulArg;
     if (qm == nullptr) {
       MS_LOG(ERROR) << "ReduceProd new QuantMultiplier failed.";
       return RET_NULL_PTR;
@@ -202,7 +203,7 @@ int ReduceInt8CPUKernel::MallocTmpBuffer() {
   if (begin_src_data_ == nullptr) {
     return RET_NULL_PTR;
   }
-  auto input_data = reinterpret_cast<int8_t *>(input->Data());
+  auto input_data = reinterpret_cast<int8_t *>(input->MutableData());
   for (auto i = 0; i < input->ElementsNum(); i++) {
     begin_src_data_[i] = static_cast<int32_t>(input_data[i]);
   }
@@ -319,7 +320,7 @@ int ReduceInt8CPUKernel::Run() {
     inner_size_ *= tmp_shape_[i];
   }
   axis_size_ = tmp_shape_[last_reduce_axis];
-  last_dst_data_ = reinterpret_cast<int8_t *>(out_tensors_.at(0)->Data());
+  last_dst_data_ = reinterpret_cast<int8_t *>(out_tensors_.at(0)->MutableData());
   is_last_axis_ = true;
   auto error_code = ParallelLaunch(THREAD_POOL_DEFAULT, ReduceInt8Impl, this, context_->thread_num_);
   if (error_code != RET_OK) {
@@ -336,14 +337,7 @@ int ReduceInt8CPUKernel::CallReduceUnit(int task_id) {
   int ret;
   if (!is_last_axis_) {
     ret =
-      reducer_(outer_size_,
-               inner_size_,
-               axis_size_,
-               src_data_,
-               dst_data_,
-               &quant_arg_,
-               task_id,
-               context_->thread_num_);
+      reducer_(outer_size_, inner_size_, axis_size_, src_data_, dst_data_, &quant_arg_, task_id, context_->thread_num_);
   } else {
     ret = last_reducer_(outer_size_, inner_size_, axis_size_, src_data_, last_dst_data_, &quant_arg_, task_id,
                         context_->thread_num_);
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.h
index e4bbdb3e54..f9aa5b231d 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.h
@@ -35,8 +35,8 @@ class ReduceInt8CPUKernel : public ReduceBaseCPUKernel {
                        int8_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num);
 
  public:
-  ReduceInt8CPUKernel(OpParameter *param, const std::vector<lite::tensor::Tensor *> &inputs,
-                      const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  ReduceInt8CPUKernel(OpParameter *param, const std::vector<lite::Tensor *> &inputs,
+                      const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                       const mindspore::lite::PrimitiveC *primitive)
       : ReduceBaseCPUKernel(param, inputs, outputs, ctx, primitive) {}
   ~ReduceInt8CPUKernel() {
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/relux_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/relux_int8.cc
index 8ec6f39d58..c1361ab478 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/relux_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/relux_int8.cc
@@ -28,8 +28,8 @@ using mindspore::schema::ActivationType_RELU;
 namespace mindspore::kernel {
 int ReluXInt8CPUKernel::Init() {
-  lite::tensor::Tensor *input = in_tensors_.at(0);
-  lite::tensor::Tensor *output = out_tensors_.at(0);
+  lite::Tensor *input = in_tensors_.at(0);
+  lite::Tensor *output = out_tensors_.at(0);
   MS_ASSERT(input);
   MS_ASSERT(output);
 
@@ -47,8 +47,8 @@ int ReluXInt8CPUKernel::Init() {
 
 int ReluXInt8CPUKernel::ReSize() { return RET_OK; }
 
 int ReluXInt8CPUKernel::DoActivation(int task_id) {
-  auto input_addr = reinterpret_cast<int8_t *>(in_tensors_.at(0)->Data());
-  auto output_addr = reinterpret_cast<int8_t *>(out_tensors_.at(0)->Data());
+  auto input_addr = reinterpret_cast<int8_t *>(in_tensors_.at(0)->MutableData());
+  auto output_addr = reinterpret_cast<int8_t *>(out_tensors_.at(0)->MutableData());
   auto length = in_tensors_.at(0)->ElementsNum();
 
   int stride = UP_DIV(length, op_parameter_->thread_num_);
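
CalculateQuantArgs above folds constants such as 1/axis_size into QuantMulArg multipliers. A generic sketch of the usual mantissa/shift decomposition behind such fixed-point multipliers (not the exact nnacl routine):

// quant_multiplier_sketch.cc -- decompose a real factor into mult * 2^shift.
#include <cmath>
#include <cstdint>
#include <cstdio>

void QuantizeMultiplier(double real, int32_t *mult, int *shift) {
  if (real == 0.0) {
    *mult = 0;
    *shift = 0;
    return;
  }
  int exp = 0;
  double frac = std::frexp(real, &exp);  // real = frac * 2^exp, frac in [0.5, 1)
  auto q = static_cast<int64_t>(std::round(frac * (1ll << 31)));
  if (q == (1ll << 31)) {  // frac rounded up to 1.0
    q /= 2;
    ++exp;
  }
  *mult = static_cast<int32_t>(q);
  *shift = exp;
}

int main() {
  int axis_size = 8;
  int32_t mult;
  int shift;
  QuantizeMultiplier(1.0 / axis_size, &mult, &shift);
  std::printf("1/%d -> multiplier %ld, shift %d\n", axis_size, (long)mult, shift);
  return 0;
}
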
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/relux_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/relux_int8.h
index 336d2b1c89..8473579a37 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/relux_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/relux_int8.h
@@ -25,8 +25,8 @@ namespace mindspore::kernel {
 class ReluXInt8CPUKernel : public LiteKernel {
  public:
-  ReluXInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                     const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  ReluXInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                     const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                      const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {
     type_ = (reinterpret_cast<ActivationParameter *>(parameter))->type_;
@@ -46,8 +46,8 @@ class ReluXInt8CPUKernel : public LiteKernel {
 
 class ReluInt8CPUKernel : public ReluXInt8CPUKernel {
  public:
-  ReluInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                    const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  ReluInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                    const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                     const mindspore::lite::PrimitiveC *primitive)
       : ReluXInt8CPUKernel(parameter, inputs, outputs, ctx, primitive) {}
 
@@ -63,8 +63,8 @@ class ReluInt8CPUKernel : public ReluXInt8CPUKernel {
 
 class Relu6Int8CPUKernel : public ReluXInt8CPUKernel {
  public:
-  Relu6Int8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                     const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  Relu6Int8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                     const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                      const mindspore::lite::PrimitiveC *primitive)
       : ReluXInt8CPUKernel(parameter, inputs, outputs, ctx, primitive) {}
 
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/reshape_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/reshape_int8.cc
index a730a61c48..1508c8af39 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/reshape_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/reshape_int8.cc
@@ -54,8 +54,8 @@ int ReshapeInt8CPUKernel::Run() {
   }
   MS_ASSERT(in_tensors_.size() == 1);
   MS_ASSERT(out_tensors_.size() == 1);
-  input_data_ = static_cast<int8_t *>(in_tensors_.at(kInputIndex)->Data());
-  output_data_ = static_cast<int8_t *>(out_tensors_.at(kOutputIndex)->Data());
+  input_data_ = static_cast<int8_t *>(in_tensors_.at(kInputIndex)->MutableData());
+  output_data_ = static_cast<int8_t *>(out_tensors_.at(kOutputIndex)->MutableData());
   elements_num_ = in_tensors_.at(kInputIndex)->ElementsNum();
   count_unit_ = op_parameter_->thread_num_ > 1 ? UP_DIV(elements_num_, op_parameter_->thread_num_) : elements_num_;
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/reshape_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/reshape_int8.h
index 61115acdd8..39618802f1 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/reshape_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/reshape_int8.h
@@ -28,8 +28,8 @@ using mindspore::lite::Context;
 namespace mindspore::kernel {
 class ReshapeInt8CPUKernel : public ReshapeBaseCPUKernel {
  public:
-  ReshapeInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                       const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
+  ReshapeInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                       const std::vector<lite::Tensor *> &outputs, const Context *ctx,
                        const mindspore::lite::PrimitiveC *primitive)
       : ReshapeBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~ReshapeInt8CPUKernel() = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.cc
index aab798265a..4c768bf702 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.cc
@@ -68,11 +68,11 @@ int ResizeInt8Impl(void *cdata, int task_id) {
 int ResizeInt8CPUKernel::RunImpl(int task_id) {
   auto input = in_tensors_.at(0);
-  auto input_data = reinterpret_cast<int8_t *>(input->Data());
+  auto input_data = reinterpret_cast<int8_t *>(input->data_c());
   if (input_data == nullptr) {
     return RET_NULL_PTR;
   }
-  auto output_data = reinterpret_cast<int8_t *>(out_tensors_.at(0)->Data());
+  auto output_data = reinterpret_cast<int8_t *>(out_tensors_.at(0)->data_c());
   if (output_data == nullptr) {
     return RET_NULL_PTR;
   }
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.h
index 7386e58a43..ef1717c09c 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.h
@@ -27,8 +27,8 @@ using mindspore::schema::ResizeMethod;
 namespace mindspore::kernel {
 class ResizeInt8CPUKernel : public ResizeBaseCPUKernel {
  public:
-  ResizeInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                      const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  ResizeInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                      const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                       const mindspore::lite::PrimitiveC *primitive)
       : ResizeBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
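
The resize hunk above switches to data_c() and null-checks the result; unlike MutableData(), a raw accessor can legitimately return nullptr before a buffer is bound. A sketch of that defensive pattern with stand-in types (the RET_NULL_PTR value here is assumed for the sketch, not taken from errorcode.h):

// null_check_sketch.cc -- guard raw data pointers before launching work.
#include <cstdio>

constexpr int RET_OK = 0;
constexpr int RET_NULL_PTR = -5;  // assumed value, illustration only

int RunImpl(const float *input_data, float *output_data) {
  if (input_data == nullptr || output_data == nullptr) return RET_NULL_PTR;
  output_data[0] = input_data[0];  // trivial stand-in for the resize loop
  return RET_OK;
}

int main() {
  float in = 1.0f, out = 0.0f;
  std::printf("%d %d\n", RunImpl(nullptr, &out), RunImpl(&in, &out));  // -5 0
  return 0;
}
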
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.cc
index 2add85dd2e..35f8a59913 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.cc
@@ -31,8 +31,8 @@ using mindspore::schema::ActivationType_SIGMOID;
 namespace mindspore::kernel {
 int SigmoidInt8CPUKernel::Init() {
-  lite::tensor::Tensor *input = in_tensors_.at(0);
-  lite::tensor::Tensor *output = out_tensors_.at(0);
+  lite::Tensor *input = in_tensors_.at(0);
+  lite::Tensor *output = out_tensors_.at(0);
   MS_ASSERT(input);
   MS_ASSERT(output);
 
@@ -68,8 +68,8 @@ void SigmoidInt8CPUKernel::MultiplierInt32ToInt16(int32_t input, int16_t *output
 
 int SigmoidInt8CPUKernel::ReSize() { return RET_OK; }
 
 int SigmoidInt8CPUKernel::DoActivation(int task_id) {
-  auto input_addr = reinterpret_cast<int8_t *>(in_tensors_.at(0)->Data());
-  auto output_addr = reinterpret_cast<int8_t *>(out_tensors_.at(0)->Data());
+  auto input_addr = reinterpret_cast<int8_t *>(in_tensors_.at(0)->MutableData());
+  auto output_addr = reinterpret_cast<int8_t *>(out_tensors_.at(0)->MutableData());
   auto length = in_tensors_.at(0)->ElementsNum();
 
   int stride = UP_DIV(length, op_parameter_->thread_num_);
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.h
index 9a86bac3f3..71f8379b6e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.h
@@ -24,8 +24,8 @@ namespace mindspore::kernel {
 class SigmoidInt8CPUKernel : public LiteKernel {
  public:
-  SigmoidInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                       const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  SigmoidInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                       const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                        const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~SigmoidInt8CPUKernel() override = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.cc
index f2c3107a98..7e98e0659e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.cc
@@ -57,8 +57,8 @@ int SliceInt8CPUKernel::Init() {
 
 int SliceInt8CPUKernel::ReSize() { return SliceBaseCPUKernel::ReSize(); }
 
 int SliceInt8CPUKernel::DoSlice(int task_id) {
-  const int8_t *input_data = reinterpret_cast<int8_t *>(in_tensors_[0]->Data());
-  int8_t *output_data = reinterpret_cast<int8_t *>(out_tensors_[0]->Data());
+  const int8_t *input_data = reinterpret_cast<int8_t *>(in_tensors_[0]->MutableData());
+  int8_t *output_data = reinterpret_cast<int8_t *>(out_tensors_[0]->MutableData());
 
   auto ret = SliceInt8(input_data, output_data, param_, task_id);
   if (ret != RET_OK) {
@@ -83,8 +83,8 @@ int SliceInt8CPUKernel::Run() {
     return ret;
   }
 
-  const int8_t *input_data = reinterpret_cast<int8_t *>(in_tensors_[0]->Data());
-  int8_t *output_data = reinterpret_cast<int8_t *>(out_tensors_[0]->Data());
+  const int8_t *input_data = reinterpret_cast<int8_t *>(in_tensors_[0]->MutableData());
+  int8_t *output_data = reinterpret_cast<int8_t *>(out_tensors_[0]->MutableData());
 
   if (param_->size_[1] < param_->op_parameter_.thread_num_) {
     ret = SliceInt8NoParallel(input_data, output_data, param_);
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.h
index c9247421f0..c2557a0578 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.h
@@ -24,12 +24,11 @@ namespace mindspore::kernel {
 class SliceInt8CPUKernel : public SliceBaseCPUKernel {
  public:
-  SliceInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                     const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
-                     const mindspore::lite::PrimitiveC *primitive)
+  SliceInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                     const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
+                     const mindspore::lite::PrimitiveC *primitive)
       : SliceBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
-  ~SliceInt8CPUKernel() {
-  }
+  ~SliceInt8CPUKernel() {}
 
   int Init() override;
   int ReSize() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.cc
index 1b16a00336..afabbf7f13 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.cc
@@ -62,16 +62,14 @@ int SoftmaxInt8CPUKernel::Init() {
   return ReSize();
 }
 
-int SoftmaxInt8CPUKernel::ReSize() {
-  return SoftmaxBaseCPUKernel::ReSize();
-}
+int SoftmaxInt8CPUKernel::ReSize() { return SoftmaxBaseCPUKernel::ReSize(); }
 
 int SoftmaxInt8CPUKernel::DoSoftmax(int task_id) {
   MS_ASSERT(in_tensors_.size() == 1);
   MS_ASSERT(out_tensors_.size() == 1);
 
-  auto input_ptr = reinterpret_cast<int8_t *>(in_tensors_.at(0)->Data());
-  auto output_ptr = reinterpret_cast<int8_t *>(out_tensors_.at(0)->Data());
+  auto input_ptr = reinterpret_cast<int8_t *>(in_tensors_.at(0)->MutableData());
+  auto output_ptr = reinterpret_cast<int8_t *>(out_tensors_.at(0)->MutableData());
 
   int outter_size = 1, inner_size = 1;
   for (int i = 0; i < softmax_param_->axis_; i++) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.h
index 97bcc62e39..5397b437b3 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.h
@@ -24,8 +24,8 @@ namespace mindspore::kernel {
 class SoftmaxInt8CPUKernel : public SoftmaxBaseCPUKernel {
  public:
-  SoftmaxInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                       const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  SoftmaxInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                       const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                        const mindspore::lite::PrimitiveC *primitive)
       : SoftmaxBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~SoftmaxInt8CPUKernel() {}
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.cc
index 676da57151..afde0b18e0 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.cc
@@ -88,10 +88,10 @@ int SplitInt8CPUKernel::Run() {
     return ret;
   }
   auto in_tensor = in_tensors_.at(kInputIndex);
-  input_ptr_ = reinterpret_cast<int8_t *>(in_tensor->Data());
+  input_ptr_ = reinterpret_cast<int8_t *>(in_tensor->MutableData());
   MS_ASSERT(param->num_split_ == outputs_.size());
   for (int i = 0; i < param->num_split_; i++) {
-    output_ptr_.push_back(reinterpret_cast<int8_t *>(out_tensors_.at(i)->Data()));
+    output_ptr_.push_back(reinterpret_cast<int8_t *>(out_tensors_.at(i)->MutableData()));
   }
 
   ret = ParallelLaunch(THREAD_POOL_DEFAULT, SplitInt8Run, this, thread_n_num_);
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.h
index 48bfcbb527..8369bdd8bd 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.h
@@ -28,8 +28,8 @@ using mindspore::lite::Context;
 namespace mindspore::kernel {
 class SplitInt8CPUKernel : public SplitBaseCPUKernel {
  public:
-  SplitInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                     const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
+  SplitInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                     const std::vector<lite::Tensor *> &outputs, const Context *ctx,
                      const mindspore::lite::PrimitiveC *primitive)
       : SplitBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~SplitInt8CPUKernel() = default;
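
DoSoftmax above flattens any rank to an outer/axis/inner triple before looping: dimensions before the axis multiply into the outer size, dimensions after it into the inner size. A standalone illustration of that computation:

// axis_split_sketch.cc -- the outer/inner factorization around a softmax axis.
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> shape = {2, 3, 4, 5};
  int axis = 2;
  int outer_size = 1, inner_size = 1;
  for (int i = 0; i < axis; ++i) outer_size *= shape[i];
  for (size_t i = axis + 1; i < shape.size(); ++i) inner_size *= shape[i];
  // The kernel then runs outer_size * inner_size softmaxes of length shape[axis].
  std::printf("outer %d, axis %d, inner %d\n", outer_size, shape[axis], inner_size);
  return 0;
}
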
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.cc
index c1e4196b6e..47d44bd7c9 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.cc
@@ -132,7 +132,7 @@ int SqueezeInt8CPUKernel::Run() {
     inputs_array[i] = reinterpret_cast<int8_t *>(malloc(sizeof(int8_t) * input_size));
     auto input_type = in_tensors_[i]->data_type();
     if (input_type == kNumberTypeUInt8) {
-      uint8_t *input_tmp = reinterpret_cast<uint8_t *>(in_tensors_[i]->Data());
+      uint8_t *input_tmp = reinterpret_cast<uint8_t *>(in_tensors_[i]->MutableData());
       for (int j = 0; j < input_size; j++) {
         inputs_array[i][j] = (int8_t)(input_tmp[j] - 128);
       }
@@ -141,10 +141,10 @@ int SqueezeInt8CPUKernel::Run() {
       }
       quant_Squeeze_parm_->out_quant_args_.zp_ -= 128;
     } else {
-      ::memcpy(inputs_array[i], in_tensors_.at(i)->Data(), sizeof(int8_t) * input_size);
+      ::memcpy(inputs_array[i], in_tensors_.at(i)->MutableData(), sizeof(int8_t) * input_size);
     }
   }
-  int8_t *output_addr = reinterpret_cast<int8_t *>(out_tensors_.at(0)->Data());
+  int8_t *output_addr = reinterpret_cast<int8_t *>(out_tensors_.at(0)->MutableData());
   auto output_type = out_tensors_[0]->data_type();
   if (output_type == kNumberTypeUInt8) {
     auto output_size = quant_Squeeze_parm_->output_size_;
@@ -174,8 +174,8 @@ int SqueezeInt8Run(void *cdata, int task_id) {
 
 int SqueezeInt8CPUKernel::DoExecute(int task_id) {
   auto input_tensor = in_tensors_.at(kInputIndex);
   auto out_tensor = out_tensors_.at(kOutputIndex);
-  int8_t *input_data = reinterpret_cast<int8_t *>(input_tensor->Data());
-  int8_t *output_data = reinterpret_cast<int8_t *>(out_tensor->Data());
+  int8_t *input_data = reinterpret_cast<int8_t *>(input_tensor->MutableData());
+  int8_t *output_data = reinterpret_cast<int8_t *>(out_tensor->MutableData());
   size_t data_size = in_tensors_.front()->Size();
 
   Squeeze(&input_data, output_data, task_id, quant_Squeeze_parm_, para_, data_size);
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.h
index 6d205ce62b..32a69a467b 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.h
@@ -28,8 +28,8 @@ using mindspore::lite::Context;
 namespace mindspore::kernel {
 class SqueezeInt8CPUKernel : public SqueezeBaseCPUKernel {
  public:
-  SqueezeInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                       const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
+  SqueezeInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                       const std::vector<lite::Tensor *> &outputs, const Context *ctx,
                        const mindspore::lite::PrimitiveC *primitive)
       : SqueezeBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~SqueezeInt8CPUKernel() override { delete quant_Squeeze_parm_; }
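
The squeeze kernel's uint8 path subtracts 128 from both the data and the zero point. That rebasing preserves every dequantized value, since real = scale * (q - zp) is invariant under a common offset. A small self-contained check:

// zp_shift_sketch.cc -- uint8 -> int8 rebasing used above (values illustrative).
#include <cstdint>
#include <cstdio>

int main() {
  float scale = 0.5f;
  int32_t zp_u8 = 130;
  uint8_t q_u8 = 140;
  int8_t q_i8 = static_cast<int8_t>(q_u8 - 128);  // data shift
  int32_t zp_i8 = zp_u8 - 128;                    // matching zero-point shift
  // Both lines print 5: the represented real value is unchanged.
  std::printf("u8: %g  i8: %g\n", scale * (q_u8 - zp_u8), scale * (q_i8 - zp_i8));
  return 0;
}
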
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.cc
index 9a4f705072..3c76e2fb1b 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.cc
@@ -30,9 +30,9 @@ using mindspore::schema::PrimitiveType_Sub;
 namespace mindspore::kernel {
 int SubInt8CPUKernel::Init() {
-  lite::tensor::Tensor *input0 = in_tensors_.at(0);
-  lite::tensor::Tensor *input1 = in_tensors_.at(1);
-  lite::tensor::Tensor *output = out_tensors_.at(0);
+  lite::Tensor *input0 = in_tensors_.at(0);
+  lite::Tensor *input1 = in_tensors_.at(1);
+  lite::Tensor *output = out_tensors_.at(0);
   MS_ASSERT(input0);
   MS_ASSERT(input1);
   MS_ASSERT(output);
@@ -80,14 +80,12 @@ int SubInt8CPUKernel::Init() {
   return ReSize();
 }
 
-int SubInt8CPUKernel::ReSize() {
-  return RET_OK;
-}
+int SubInt8CPUKernel::ReSize() { return RET_OK; }
 
 int SubInt8CPUKernel::DoExecute(int task_id) {
-  auto input0_data_ = static_cast<int8_t *>(in_tensors_.at(0)->Data());
-  auto input1_data_ = static_cast<int8_t *>(in_tensors_.at(1)->Data());
-  auto output_data_ = static_cast<int8_t *>(out_tensors_.at(0)->Data());
+  auto input0_data_ = static_cast<int8_t *>(in_tensors_.at(0)->MutableData());
+  auto input1_data_ = static_cast<int8_t *>(in_tensors_.at(1)->MutableData());
+  auto output_data_ = static_cast<int8_t *>(out_tensors_.at(0)->MutableData());
   auto element_num = out_tensors_[0]->ElementsNum();
 
   MS_ASSERT(op_parameter_->thread_num_ != 0);
@@ -143,9 +141,9 @@ int SubInt8CPUKernel::Run() {
       context_->allocator->Free(tile1_data_);
       return RET_ERROR;
     }
-    TileDimensionsUint8(static_cast<uint8_t *>(in_tensors_.at(0)->Data()),
-                        static_cast<uint8_t *>(in_tensors_.at(1)->Data()), reinterpret_cast<uint8_t *>(tile0_data_),
-                        reinterpret_cast<uint8_t *>(tile1_data_), &tile_para);
+    TileDimensionsUint8(static_cast<uint8_t *>(in_tensors_.at(0)->MutableData()),
+                        static_cast<uint8_t *>(in_tensors_.at(1)->MutableData()),
+                        reinterpret_cast<uint8_t *>(tile0_data_), reinterpret_cast<uint8_t *>(tile1_data_), &tile_para);
   }
   ret = ParallelLaunch(THREAD_POOL_DEFAULT, SubInt8Run, this, op_parameter_->thread_num_);
   if (broadcast_) {
@@ -158,8 +156,8 @@ int SubInt8CPUKernel::Run() {
   return ret;
 }
 
-kernel::LiteKernel *CpuSubInt8KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                            const std::vector<lite::tensor::Tensor *> &outputs, OpParameter *parameter,
+kernel::LiteKernel *CpuSubInt8KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                            const std::vector<lite::Tensor *> &outputs, OpParameter *parameter,
                                             const lite::Context *ctx, const KernelKey &desc,
                                             const mindspore::lite::PrimitiveC *primitive) {
   if (parameter == nullptr || ctx == nullptr) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.h
index 96cd4732c6..7efa0df9b9 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.h
@@ -24,8 +24,8 @@ namespace mindspore::kernel {
 class SubInt8CPUKernel : public LiteKernel {
  public:
-  explicit SubInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                            const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  explicit SubInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                            const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                             const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~SubInt8CPUKernel() override {}
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.cc
index 2361bbd9d4..61dd3a5b65 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.cc
@@ -37,7 +37,7 @@ int TopKInt8CPUKernel::ReSize() {
     free(parameter->topk_node_list_);
     parameter->topk_node_list_ = nullptr;
   }
-  lite::tensor::Tensor *input = in_tensors_.at(0);
+  lite::Tensor *input = in_tensors_.at(0);
   parameter->last_dim_size_ = input->shape()[input->shape().size() - 1];
   parameter->loop_num_ = 1;
   for (size_t i = 0; i < input->shape().size() - 1; ++i) {
@@ -52,9 +52,9 @@ int TopKInt8CPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare failed.";
     return ret;
   }
-  int8_t *input_data = reinterpret_cast<int8_t *>(in_tensors_.at(0)->Data());
-  int8_t *output_data = reinterpret_cast<int8_t *>(out_tensors_.at(0)->Data());
-  int32_t *output_index = reinterpret_cast<int32_t *>(out_tensors_.at(1)->Data());
+  int8_t *input_data = reinterpret_cast<int8_t *>(in_tensors_.at(0)->MutableData());
+  int8_t *output_data = reinterpret_cast<int8_t *>(out_tensors_.at(0)->MutableData());
+  int32_t *output_index = reinterpret_cast<int32_t *>(out_tensors_.at(1)->MutableData());
 
   MS_ASSERT(context_->allocator != nullptr);
   TopkParameter *parameter = reinterpret_cast<TopkParameter *>(op_parameter_);
@@ -68,8 +68,8 @@ int TopKInt8CPUKernel::Run() {
   return RET_OK;
 }
 
-kernel::LiteKernel *CpuTopKInt8KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                             const std::vector<lite::tensor::Tensor *> &outputs, OpParameter *parameter,
+kernel::LiteKernel *CpuTopKInt8KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                             const std::vector<lite::Tensor *> &outputs, OpParameter *parameter,
                                              const lite::Context *ctx, const KernelKey &desc,
                                              const mindspore::lite::PrimitiveC *primitive) {
   if (parameter == nullptr) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.h
index 91c204c27e..8ba030e04c 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.h
@@ -23,13 +23,13 @@ namespace mindspore::kernel {
 class TopKInt8CPUKernel : public LiteKernel {
  public:
-  explicit TopKInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                             const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  explicit TopKInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                             const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                              const mindspore::lite::PrimitiveC *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {
-      TopkParameter *param = reinterpret_cast<TopkParameter *>(op_parameter_);
-      param->topk_node_list_ = nullptr;
-  }
+    TopkParameter *param = reinterpret_cast<TopkParameter *>(op_parameter_);
+    param->topk_node_list_ = nullptr;
+  }
   ~TopKInt8CPUKernel() override {
     TopkParameter *parameter = reinterpret_cast<TopkParameter *>(op_parameter_);
     if (parameter->topk_node_list_ != nullptr) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.cc
index 98f3d9067a..77e633f817 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.cc
@@ -58,8 +58,8 @@ int Unsqueezeint8CPUKernel::DoUnsqueeze(int task_id) {
     return RET_OK;
   }
 
-  auto input_ptr = reinterpret_cast<int8_t *>(in_tensors_.front()->Data());
-  auto output_ptr = reinterpret_cast<int8_t *>(out_tensors_.front()->Data());
+  auto input_ptr = reinterpret_cast<int8_t *>(in_tensors_.front()->MutableData());
+  auto output_ptr = reinterpret_cast<int8_t *>(out_tensors_.front()->MutableData());
   size_t data_size = out_tensors_.front()->Size();
 
   int ret = Int8Unsqueeze(input_ptr, output_ptr, Unsq_para_, data_size, task_id);
@@ -86,8 +86,8 @@ int Unsqueezeint8CPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare fail!ret: " << ret;
     return ret;
   }
-  in_ptr_ = reinterpret_cast<int8_t *>(in_tensors_.at(0)->Data());
-  out_ptr_ = reinterpret_cast<int8_t *>(out_tensors_.at(0)->Data());
+  in_ptr_ = reinterpret_cast<int8_t *>(in_tensors_.at(0)->MutableData());
+  out_ptr_ = reinterpret_cast<int8_t *>(out_tensors_.at(0)->MutableData());
   ret = ParallelLaunch(THREAD_POOL_DEFAULT, UnsqueezeIn8Run, this, thread_sz_count_);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "UnsqueezeRun error error_code[" << ret << "]";
@@ -96,10 +96,9 @@ int Unsqueezeint8CPUKernel::Run() {
   return RET_OK;
 }
 
-kernel::LiteKernel *CpuUnsqueezeInt8KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
-                                                  const std::vector<lite::tensor::Tensor *> &outputs,
-                                                  OpParameter *opParameter, const lite::Context *ctx,
-                                                  const kernel::KernelKey &desc,
+kernel::LiteKernel *CpuUnsqueezeInt8KernelCreator(const std::vector<lite::Tensor *> &inputs,
+                                                  const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
+                                                  const lite::Context *ctx, const kernel::KernelKey &desc,
                                                  const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(opParameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_Unsqueeze);
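
UnsqueezeIn8Run, SqueezeInt8Run, and ResizeInt8Impl above all follow the same ParallelLaunch callback shape: a free function gets the kernel back from void* and dispatches one task_id. A sketch with a sequential stand-in for the thread pool (type and function names here are illustrative):

// parallel_launch_sketch.cc -- the void*/task_id trampoline pattern.
#include <cstdio>

struct UnsqueezeKernelSketch {  // stand-in for a CPU kernel class
  int DoUnsqueeze(int task_id) {
    std::printf("task %d\n", task_id);
    return 0;
  }
};

int UnsqueezeRunSketch(void *cdata, int task_id) {
  auto *kernel = static_cast<UnsqueezeKernelSketch *>(cdata);
  return kernel->DoUnsqueeze(task_id);
}

// Sequential stand-in for ParallelLaunch(THREAD_POOL_DEFAULT, fn, content, num).
int FakeParallelLaunch(int (*fn)(void *, int), void *content, int task_num) {
  for (int t = 0; t < task_num; ++t) {
    int ret = fn(content, t);
    if (ret != 0) return ret;
  }
  return 0;
}

int main() {
  UnsqueezeKernelSketch kernel;
  return FakeParallelLaunch(UnsqueezeRunSketch, &kernel, 3);
}
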
a/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.h index 0e3a580cd7..9631f54a60 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.h @@ -27,8 +27,8 @@ using mindspore::lite::Context; namespace mindspore::kernel { class Unsqueezeint8CPUKernel : public LiteKernel { public: - Unsqueezeint8CPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const Context *ctx, + Unsqueezeint8CPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const Context *ctx, const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) { Unsq_para_ = reinterpret_cast(op_parameter_); diff --git a/mindspore/lite/src/runtime/kernel/opencl/image_format.h b/mindspore/lite/src/runtime/kernel/opencl/image_format.h index a2533dfb05..018292ad81 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/image_format.h +++ b/mindspore/lite/src/runtime/kernel/opencl/image_format.h @@ -60,4 +60,3 @@ cl_channel_type ToImageChannelType(TypeId data_type) { } // namespace mindspore #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_KERNEL_IMAGE_FORMAT_H_ - diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.cc index c4dfae3c46..70fbc34120 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.cc @@ -91,8 +91,8 @@ int ActivationOpenClKernel::Run() { cl_int4 img2d_shape = GetImg2dShape(); auto ocl_runtime = lite::opencl::OpenCLRuntime::GetInstance(); int arg_idx = 0; - ocl_runtime->SetKernelArg(kernel_, arg_idx++, in_tensors_[0]->Data()); - ocl_runtime->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, arg_idx++, in_tensors_[0]->MutableData()); + ocl_runtime->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->MutableData()); ocl_runtime->SetKernelArg(kernel_, arg_idx++, img2d_shape); if (type_ == ActivationType_LEAKY_RELU) { ocl_runtime->SetKernelArg(kernel_, arg_idx++, alpha_); @@ -135,8 +135,8 @@ int ActivationOpenClKernel::GetImageSize(size_t idx, std::vector *img_si return RET_OK; } -kernel::LiteKernel *OpenClActivationFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, +kernel::LiteKernel *OpenClActivationFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.h index 72ed7a0f8e..3c89a08b36 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.h @@ -27,8 +27,8 @@ namespace mindspore::kernel { class ActivationOpenClKernel : public OpenCLKernel { public: - explicit ActivationOpenClKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs) + explicit ActivationOpenClKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs) : OpenCLKernel(parameter, inputs, outputs) { type_ = (reinterpret_cast(parameter))->type_; alpha_ = (reinterpret_cast(parameter))->alpha_; diff --git 
a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc index 4952261d1e..b0298b3039 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc @@ -48,11 +48,11 @@ std::vector ArithmeticOpenCLKernel::InitGlobalSize() const { void ArithmeticOpenCLKernel::Image2dGetWorkGroupSize() { local_size_ = {16, 16}; - if (out_tensors_[0]->GetFormat() == schema::Format_NHWC4) { + if (out_tensors_[0]->GetFormat() == schema::Format::Format_NHWC4) { size_t H = out_tensors_[0]->Batch() * out_tensors_[0]->Height(); size_t W = out_tensors_[0]->Width() * UP_DIV(out_tensors_[0]->Channel(), C4NUM); global_size_ = {W, H}; - } else if (out_tensors_[0]->GetFormat() == schema::Format_NC4) { + } else if (out_tensors_[0]->GetFormat() == schema::Format::Format_NC4) { size_t H = out_tensors_[0]->Batch(); size_t W = UP_DIV(out_tensors_[0]->Channel(), C4NUM); global_size_ = {W, H}; @@ -68,10 +68,10 @@ void ArithmeticOpenCLKernel::BufferGetWorkGroupSize() { int ArithmeticOpenCLKernel::GetImageSize(size_t idx, std::vector *img_size) { size_t im_dst_x, im_dst_y; - if (out_tensors_[0]->GetFormat() == schema::Format_NHWC4) { + if (out_tensors_[0]->GetFormat() == schema::Format::Format_NHWC4) { im_dst_x = out_tensors_[0]->Width() * UP_DIV(out_tensors_[0]->Channel(), C4NUM); im_dst_y = out_tensors_[0]->Batch() * out_tensors_[0]->Height(); - } else if (out_tensors_[0]->GetFormat() == schema::Format_NC4) { + } else if (out_tensors_[0]->GetFormat() == schema::Format::Format_NC4) { im_dst_y = out_tensors_[0]->Batch(); im_dst_x = UP_DIV(out_tensors_[0]->Channel(), C4NUM); } else { @@ -92,11 +92,12 @@ int ArithmeticOpenCLKernel::GetImageSize(size_t idx, std::vector *img_si int ArithmeticOpenCLKernel::InitBuffer() { const ArithmeticParameter *arithmetic_parameter = reinterpret_cast(op_parameter_); if (!arithmetic_parameter->broadcasting_) { - if (in_tensors_[1]->TensorType() == schema::NodeType_ValueNode && in_tensors_[1]->Data() != nullptr) { + if (in_tensors_[1]->category() == lite::Tensor::Category::CONST && in_tensors_[1]->MutableData() != nullptr) { auto allocatdor = runtime_->GetAllocator(); std::vector img_size; GetImageSize(0, &img_size); - weight_ptr_ = allocatdor->CreateImageFromHost(in_tensors_[1]->Data(), in_tensors_[1]->ElementsNum(), img_size); + weight_ptr_ = + allocatdor->CreateImageFromHost(in_tensors_[1]->MutableData(), in_tensors_[1]->ElementsNum(), img_size); return RET_OK; } } @@ -107,8 +108,8 @@ int ArithmeticOpenCLKernel::Init() { std::string kernel_name; const ArithmeticParameter *arithmetic_parameter = reinterpret_cast(op_parameter_); - if (arithmetic_parameter->broadcasting_ && in_tensors_[1]->TensorType() == schema::NodeType_ValueNode && - in_tensors_[1]->Data() != nullptr) { + if (arithmetic_parameter->broadcasting_ && in_tensors_[1]->category() == lite::Tensor::Category::CONST && + in_tensors_[1]->MutableData() != nullptr) { element_flag_ = false; kernel_name = "BoardcastArith"; } else { @@ -151,14 +152,14 @@ int ArithmeticOpenCLKernel::Init() { return error_code; } - auto format = schema::Format_NHWC4; + auto format = schema::Format::Format_NHWC4; if (arithmetic_parameter->ndim_ == 2) { - format = schema::Format_NC4; + format = schema::Format::Format_NC4; } in_ori_format_ = in_tensors_[0]->GetFormat(); out_ori_format_ = out_tensors_[0]->GetFormat(); in_tensors_[0]->SetFormat(format); - if (element_flag_ && in_tensors_[1]->TensorType() != 
- if (element_flag_ && in_tensors_[1]->TensorType() != schema::NodeType_ValueNode) { + if (element_flag_ && in_tensors_[1]->category() != lite::Tensor::Category::CONST) { in_tensors_[1]->SetFormat(format); } out_tensors_[0]->SetFormat(format); @@ -171,12 +172,12 @@ int ArithmeticOpenCLKernel::Run() { MS_LOG(DEBUG) << this->name() << " Running!"; int arg_idx = 0; - runtime_->SetKernelArg(kernel_, arg_idx++, in_tensors_[0]->Data()); + runtime_->SetKernelArg(kernel_, arg_idx++, in_tensors_[0]->MutableData()); if (element_flag_) { - void *weight = weight_ptr_ == nullptr ? in_tensors_[1]->Data() : weight_ptr_; + void *weight = weight_ptr_ == nullptr ? in_tensors_[1]->MutableData() : weight_ptr_; runtime_->SetKernelArg(kernel_, arg_idx++, weight); } else { - float value = static_cast<float *>(in_tensors_[1]->Data())[0]; + float value = static_cast<float *>(in_tensors_[1]->MutableData())[0]; switch (op_parameter_->type_) { case PrimitiveType_Mul: weight_ = value; @@ -197,14 +198,14 @@ int ArithmeticOpenCLKernel::Run() { runtime_->SetKernelArg(kernel_, arg_idx++, weight_); runtime_->SetKernelArg(kernel_, arg_idx++, bias_); } - runtime_->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->Data()); + runtime_->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->MutableData()); int H = 0; int W = 0; - if (out_tensors_[0]->GetFormat() == schema::Format_NHWC4) { + if (out_tensors_[0]->GetFormat() == schema::Format::Format_NHWC4) { H = out_tensors_[0]->Batch() * out_tensors_[0]->Height(); W = out_tensors_[0]->Width() * UP_DIV(out_tensors_[0]->Channel(), C4NUM); - } else if (out_tensors_[0]->GetFormat() == schema::Format_NC4) { + } else if (out_tensors_[0]->GetFormat() == schema::Format::Format_NC4) { H = out_tensors_[0]->Batch(); W = UP_DIV(out_tensors_[0]->Channel(), C4NUM); } else { @@ -217,15 +218,14 @@ int ArithmeticOpenCLKernel::Run() { return RET_OK; } -kernel::LiteKernel *OpenCLBiasAddKernelCreator(const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::PrimitiveC *primitive); +kernel::LiteKernel *OpenCLBiasAddKernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, + const lite::PrimitiveC *primitive); -kernel::LiteKernel *OpenCLArithmeticKernelCreator(const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *OpenCLArithmeticKernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { const ArithmeticParameter *arithmetic_parameter = reinterpret_cast<const ArithmeticParameter *>(opParameter); if (arithmetic_parameter->broadcasting_) { diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.h index a09c7bf0fd..0555212e32 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.h @@ -26,14 +26,14 @@ namespace mindspore::kernel { class ArithmeticOpenCLKernel : public OpenCLKernel { public: - explicit ArithmeticOpenCLKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx) + explicit ArithmeticOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx) : OpenCLKernel(parameter, inputs, outputs) {} ~ArithmeticOpenCLKernel() override; int Init() override; int Run() override; - int GetImageSize(size_t idx, std::vector<size_t>* img_size) override; + int GetImageSize(size_t idx, std::vector<size_t> *img_size) override; private: std::vector<size_t> InitGlobalSize() const;
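The arithmetic kernel's weight-folding test changes in the same hunks: instead of asking the schema for a node type, the reworked Tensor interface exposes a category. A compact sketch of the predicate as it reads after the patch (the mock Tensor below is hypothetical; only the shape of the check is taken from the diff):

    // Hypothetical mock of the reworked tensor interface; illustration only.
    class Tensor {
     public:
      enum class Category { CONST, VAR, GRAPH_INPUT };
      Tensor(Category c, void *d) : category_(c), data_(d) {}
      Category category() const { return category_; }
      void *MutableData() { return data_; }

     private:
      Category category_;
      void *data_;
    };

    // Mirrors ArithmeticOpenCLKernel::InitBuffer(): the second input is folded
    // into an on-device image only when it is a constant that already has data.
    bool IsFoldableWeight(Tensor *t) {
      return t->category() == Tensor::Category::CONST && t->MutableData() != nullptr;
    }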
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic_self.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic_self.cc index abd70a3f5a..bb52f82925 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic_self.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic_self.cc @@ -195,8 +195,8 @@ int ArithmeticSelfOpenCLKernel::Run() { ArithmeticSelfGetWorkGroup(global, &local, max_global[0]); int arg_cn = 0; - ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[0]->Data()); - ocl_runtime->SetKernelArg(kernel_, arg_cn++, out_tensors_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[0]->MutableData()); + ocl_runtime->SetKernelArg(kernel_, arg_cn++, out_tensors_[0]->MutableData()); ocl_runtime->SetKernelArg(kernel_, arg_cn++, output_shape_); ocl_runtime->RunKernel(kernel_, global, local, nullptr); @@ -204,8 +204,8 @@ int ArithmeticSelfOpenCLKernel::Run() { return RET_OK; } -kernel::LiteKernel *OpenCLArithmeticSelfKernelCreator(const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs, +kernel::LiteKernel *OpenCLArithmeticSelfKernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic_self.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic_self.h index 7c49e76587..ac741018a1 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic_self.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic_self.h @@ -28,8 +28,8 @@ namespace mindspore::kernel { class ArithmeticSelfOpenCLKernel : public OpenCLKernel { public: - explicit ArithmeticSelfOpenCLKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs) + explicit ArithmeticSelfOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs) : OpenCLKernel(parameter, inputs, outputs) {} ~ArithmeticSelfOpenCLKernel() override{}; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/batchnorm.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/batchnorm.cc index bcbae3ca2d..f3e59391e5 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/batchnorm.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/batchnorm.cc @@ -31,7 +31,7 @@ namespace mindspore::kernel { int BatchNormOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size) { size_t CO4 = UP_DIV(out_tensors_[0]->Channel(), C4NUM); size_t im_dst_x, im_dst_y; - if (in_tensors_[0]->GetFormat() == schema::Format_NHWC4) { + if (in_tensors_[0]->GetFormat() == schema::Format::Format_NHWC4) { im_dst_x = out_tensors_[0]->Width() * CO4; im_dst_y = out_tensors_[0]->Height(); } else { @@ -54,7 +54,7 @@ int BatchNormOpenCLKernel::Init() { if (in_format != schema::Format_NHWC4 && in_format != schema::Format_NC4HW4) { MS_LOG(ERROR) << "input format(" << in_format << ") " << "format not support!"; - return RET_ERROR; + return RET_ERROR; } in_ori_format_ = in_tensors_[0]->GetFormat(); in_tensors_[0]->SetFormat(op_format_); @@ -128,12 +128,12 @@ int BatchNormOpenCLKernel::Run() { std::vector<size_t> global = {OH, OW, OC}; BatchNormGetWorkGroup(global, &local, max_global[0]);
int arg_cn = 0; - ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[0]->Data()); // input tensor - ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[1]->Data()); // scale - ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[2]->Data()); // offset - ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[3]->Data()); // mean - ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[4]->Data()); // variance - ocl_runtime->SetKernelArg(kernel_, arg_cn++, out_tensors_[0]->Data()); // out tensor + ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[0]->MutableData()); // input tensor + ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[1]->MutableData()); // scale + ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[2]->MutableData()); // offset + ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[3]->MutableData()); // mean + ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[4]->MutableData()); // variance + ocl_runtime->SetKernelArg(kernel_, arg_cn++, out_tensors_[0]->MutableData()); // out tensor ocl_runtime->SetKernelArg(kernel_, arg_cn++, input_shape_); ocl_runtime->SetKernelArg(kernel_, arg_cn++, param->epsilon_); ocl_runtime->RunKernel(kernel_, global, local, nullptr); @@ -141,10 +141,9 @@ int BatchNormOpenCLKernel::Run() { return RET_OK; } -kernel::LiteKernel *OpenCLBatchnormKernelCreator(const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *OpenCLBatchnormKernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { auto *kernel = new (std::nothrow) BatchNormOpenCLKernel(opParameter, inputs, outputs); if (kernel == nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/batchnorm.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/batchnorm.h index 16027ab595..f76c8a2055 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/batchnorm.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/batchnorm.h @@ -27,8 +27,8 @@ namespace mindspore::kernel { class BatchNormOpenCLKernel : public OpenCLKernel { public: - explicit BatchNormOpenCLKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs) + explicit BatchNormOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs) : OpenCLKernel(parameter, inputs, outputs) {} ~BatchNormOpenCLKernel() override{};
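BatchNormOpenCLKernel::Run() binds its six tensors and two scalars with one running counter rather than fixed positions, a pattern shared by every kernel in this patch. A toy version of the idiom (MockRuntime is a stand-in; the real SetKernelArg lives on lite::opencl::OpenCLRuntime):

    #include <iostream>

    // Stand-in runtime: prints instead of calling clSetKernelArg.
    struct MockRuntime {
      template <typename T>
      void SetKernelArg(const char *kernel, int idx, const T &) {
        std::cout << kernel << ": bound arg " << idx << "\n";
      }
    };

    int main() {
      MockRuntime rt;
      int arg_cn = 0;  // post-incremented at each bind, so inserting or
                       // removing an argument never renumbers the rest
      rt.SetKernelArg("BatchNorm", arg_cn++, /*input*/ 0);
      rt.SetKernelArg("BatchNorm", arg_cn++, /*scale*/ 1);
      rt.SetKernelArg("BatchNorm", arg_cn++, /*offset*/ 2);
      return 0;
    }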
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/biasadd.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/biasadd.cc index 8c7e8d766c..19433da577 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/biasadd.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/biasadd.cc @@ -47,7 +47,7 @@ void BiasAddOpenCLKernel::InitBuffer() { BiasAdd_ = allocator->Malloc(div_ci * C4NUM * fp_size, img_size); BiasAdd_ = allocator->MapBuffer(BiasAdd_, CL_MAP_WRITE, nullptr, true); memset(BiasAdd_, 0x00, div_ci * C4NUM * fp_size); - memcpy(BiasAdd_, in_tensors_[1]->Data(), C * fp_size); + memcpy(BiasAdd_, in_tensors_[1]->MutableData(), C * fp_size); allocator->UnmapBuffer(BiasAdd_); } @@ -92,9 +92,9 @@ int BiasAddOpenCLKernel::Run() { auto ocl_runtime = lite::opencl::OpenCLRuntime::GetInstance(); int arg_idx = 0; std::map<schema::Format, int> data_type{ - {schema::Format_NC4, 1}, {schema::Format_NHWC4, 2}, {schema::Format_NC4HW4, 3}}; - ocl_runtime->SetKernelArg(kernel_, arg_idx++, in_tensors_[0]->Data()); - ocl_runtime->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->Data()); + {schema::Format::Format_NC4, 1}, {schema::Format::Format_NHWC4, 2}, {schema::Format::Format_NC4HW4, 3}}; + ocl_runtime->SetKernelArg(kernel_, arg_idx++, in_tensors_[0]->MutableData()); + ocl_runtime->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->MutableData()); ocl_runtime->SetKernelArg(kernel_, arg_idx++, input_shape_); ocl_runtime->SetKernelArg(kernel_, arg_idx++, BiasAdd_); ocl_runtime->SetKernelArg(kernel_, arg_idx++, data_type[op_format_]); @@ -110,14 +110,14 @@ int BiasAddOpenCLKernel::Run() { cl_int4 BiasAddOpenCLKernel::GetGlobalshape() { cl_int4 global_shape = input_shape_; - if (op_format_ == schema::Format_NC4) { + if (op_format_ == schema::Format::Format_NC4) { global_shape.s[1] = global_shape.s[2]; global_shape.s[2] = UP_DIV(global_shape.s[3], C4NUM); } - if (op_format_ == schema::Format_NC4HW4) { + if (op_format_ == schema::Format::Format_NC4HW4) { global_shape.s[1] = UP_DIV(global_shape.s[3], C4NUM) * global_shape.s[1]; // c / 4 * H } - if (op_format_ == schema::Format_NHWC4) { + if (op_format_ == schema::Format::Format_NHWC4) { global_shape.s[2] = UP_DIV(global_shape.s[3], C4NUM) * global_shape.s[2]; } return global_shape; @@ -136,10 +136,10 @@ int BiasAddOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size) { return RET_OK; } -kernel::LiteKernel *OpenCLBiasAddKernelCreator(const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::PrimitiveC *primitive) { +kernel::LiteKernel *OpenCLBiasAddKernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, + const lite::PrimitiveC *primitive) { if (inputs.size() == 0) { MS_LOG(ERROR) << "Input data size must be greater than 0, but your size is " << inputs.size(); return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/biasadd.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/biasadd.h index 9d95089a11..e7619a8296 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/biasadd.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/biasadd.h @@ -20,7 +20,7 @@ #include <vector> #include <string> -#include "src/ir/tensor.h" +#include "src/tensor.h" #include "src/runtime/kernel/opencl/opencl_kernel.h" #include "schema/model_generated.h" #include "src/runtime/opencl/opencl_runtime.h" @@ -29,8 +29,8 @@ namespace mindspore::kernel { class BiasAddOpenCLKernel : public OpenCLKernel { public: - explicit BiasAddOpenCLKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs) + explicit BiasAddOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs) : OpenCLKernel(parameter, inputs, outputs) {} ~BiasAddOpenCLKernel() override{};
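BiasAddOpenCLKernel::InitBuffer() above shows the allocator protocol most of these kernels use: allocate a channel-padded buffer, map it for writing, zero the padding, copy the real channels, unmap. The same sequence with plain malloc standing in for the OpenCL allocator (names and sizes here are illustrative assumptions):

    #include <cstdlib>
    #include <cstring>

    int main() {
      const int C = 10;      // real channel count (made-up value)
      const int C4NUM = 4;   // channels padded to multiples of 4
      const int div_ci = (C + C4NUM - 1) / C4NUM;
      const size_t fp_size = sizeof(float);

      // Stand-ins for allocator->Malloc(...) + allocator->MapBuffer(...).
      float *bias = static_cast<float *>(std::malloc(div_ci * C4NUM * fp_size));
      float src[C] = {0};

      std::memset(bias, 0x00, div_ci * C4NUM * fp_size);  // zero incl. padding
      std::memcpy(bias, src, C * fp_size);                // copy real channels only
      // Stand-in for allocator->UnmapBuffer(bias) before kernel launch.
      std::free(bias);
      return 0;
    }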
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.cc index c9b5f62cf5..e123007f4e 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.cc @@ -31,7 +31,7 @@ namespace mindspore::kernel { int ConcatOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size) { size_t CO4 = UP_DIV(out_tensors_[0]->Channel(), C4NUM); size_t im_dst_x, im_dst_y; - if (in_tensors_[0]->GetFormat() == schema::Format_NHWC4) { + if (in_tensors_[0]->GetFormat() == schema::Format::Format_NHWC4) { im_dst_x = out_tensors_[0]->Width() * CO4; im_dst_y = out_tensors_[0]->Height() * out_tensors_[0]->Batch(); } else { @@ -65,7 +65,7 @@ int ConcatOpenCLKernel::Init() { if (in_format != schema::Format_NHWC4 && in_format != schema::Format_NC4HW4) { MS_LOG(ERROR) << "input format(" << in_format << ") " << "format not support!"; - return RET_ERROR; + return RET_ERROR; } in_ori_format_ = in_tensors_[0]->GetFormat(); in_tensors_[0]->SetFormat(op_format_); @@ -154,9 +154,9 @@ int ConcatOpenCLKernel::Run() { int arg_cn = 0; if (in_tensors_.size() == 2) { - ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[0]->Data()); - ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[1]->Data()); - ocl_runtime->SetKernelArg(kernel_, arg_cn++, out_tensors_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[0]->MutableData()); + ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[1]->MutableData()); + ocl_runtime->SetKernelArg(kernel_, arg_cn++, out_tensors_[0]->MutableData()); ocl_runtime->SetKernelArg(kernel_, arg_cn++, input_shape1_); ocl_runtime->SetKernelArg(kernel_, arg_cn++, input_shape2_); ocl_runtime->SetKernelArg(kernel_, arg_cn++, output_shape_); @@ -165,10 +165,10 @@ int ConcatOpenCLKernel::Run() { auto input3_shape = in_tensors_[2]->shape(); cl_int4 input_shape3_ = {input3_shape[0], input3_shape[1], input3_shape[2], UP_DIV(input3_shape[3], C4NUM)}; - ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[0]->Data()); - ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[1]->Data()); - ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[2]->Data()); - ocl_runtime->SetKernelArg(kernel_, arg_cn++, out_tensors_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[0]->MutableData()); + ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[1]->MutableData()); + ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[2]->MutableData()); + ocl_runtime->SetKernelArg(kernel_, arg_cn++, out_tensors_[0]->MutableData()); ocl_runtime->SetKernelArg(kernel_, arg_cn++, input_shape1_); ocl_runtime->SetKernelArg(kernel_, arg_cn++, input_shape2_); ocl_runtime->SetKernelArg(kernel_, arg_cn++, input_shape3_); @@ -186,10 +186,9 @@ int ConcatOpenCLKernel::Run() { return RET_OK; } -kernel::LiteKernel *OpenCLConcatKernelCreator(const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *OpenCLConcatKernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { auto *kernel = new (std::nothrow) ConcatOpenCLKernel(opParameter, inputs, outputs); if (kernel == nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.h index dafd374753..8832330d49 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.h @@ -27,8 +27,8 @@ namespace mindspore::kernel { class ConcatOpenCLKernel : public OpenCLKernel { public: - explicit ConcatOpenCLKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs) + explicit ConcatOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs) : OpenCLKernel(parameter, inputs, outputs) {} ~ConcatOpenCLKernel() override{};
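A large share of the churn in these hunks is purely the enum qualification: schema::Format_NHWC4 becomes schema::Format::Format_NHWC4 with no behavior change. That spelling is what C++ requires once a generated enum becomes scoped; a toy reproduction (this miniature of the generated header is hypothetical):

    namespace schema {
    // Before (plain enum): constants leak into the namespace, so the bare
    // spelling schema::Format_NHWC4 resolves.
    // After (enum class): constants must be qualified with the enum name.
    enum class Format { Format_NHWC, Format_NHWC4, Format_NC4HW4 };
    }  // namespace schema

    int main() {
      auto fmt = schema::Format::Format_NHWC4;  // the spelling this patch adopts
      return fmt == schema::Format::Format_NHWC4 ? 0 : 1;
    }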
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc index e9385cef48..26b02d7e24 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc @@ -79,7 +79,7 @@ void Conv2dTransposeOpenCLKernel::PadWeight() { padWeight_ = allocator->Malloc(div_ci * div_co * C4NUM * C4NUM * kh * kw * data_size); padWeight_ = allocator->MapBuffer(padWeight_, CL_MAP_WRITE, nullptr, true); memset(padWeight_, 0x00, div_ci * div_co * C4NUM * C4NUM * kh * kw * data_size); - auto origin_weight = in_tensors_.at(kWeightIndex)->Data(); + auto origin_weight = in_tensors_.at(kWeightIndex)->MutableData(); auto weight_dtype = in_tensors_.at(kWeightIndex)->data_type(); int index = 0; for (int co_i = 0; co_i < div_co; co_i++) { @@ -129,12 +129,12 @@ void Conv2dTransposeOpenCLKernel::PadWeight() { auto bias_dtype = in_tensors_[2]->data_type(); if (in_tensors_.size() >= 3) { if (bias_dtype == kNumberTypeFloat32 && enable_fp16_) { - auto fdata = reinterpret_cast<float *>(in_tensors_[2]->Data()); + auto fdata = reinterpret_cast<float *>(in_tensors_[2]->MutableData()); for (int i = 0; i < co; i++) { reinterpret_cast<float16_t *>(bias_)[i] = Float32ToShort(fdata[i]); } } else { - memcpy(bias_, in_tensors_[2]->Data(), co * data_size); + memcpy(bias_, in_tensors_[2]->MutableData(), co * data_size); } } allocator->UnmapBuffer(bias_); @@ -146,10 +146,10 @@ int Conv2dTransposeOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size) { int h = out_tensors_[0]->shape()[1]; int w = out_tensors_[0]->shape()[2]; int c = out_tensors_[0]->shape()[3]; - if (op_format_ == schema::Format_NHWC4) { + if (op_format_ == schema::Format::Format_NHWC4) { im_dst_x = w * UP_DIV(c, C4NUM); im_dst_y = n * h; - } else if (op_format_ == schema::Format_NC4HW4) { + } else if (op_format_ == schema::Format::Format_NC4HW4) { im_dst_x = w; im_dst_y = n * UP_DIV(c, C4NUM) * h; } else { @@ -191,10 +191,10 @@ int Conv2dTransposeOpenCLKernel::Run() { cl_int4 src_size = {h, w, UP_DIV(ci, C4NUM), 1}; cl_int4 dst_size = {oh, ow, UP_DIV(co, C4NUM), 1}; int arg_cnt = 0; - ocl_runtime->SetKernelArg(kernel_, arg_cnt++, in_tensors_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, arg_cnt++, in_tensors_[0]->MutableData()); ocl_runtime->SetKernelArg(kernel_, arg_cnt++, padWeight_, lite::opencl::MemType::BUF); ocl_runtime->SetKernelArg(kernel_, arg_cnt++, bias_); - ocl_runtime->SetKernelArg(kernel_, arg_cnt++, out_tensors_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, arg_cnt++, out_tensors_[0]->MutableData()); ocl_runtime->SetKernelArg(kernel_, arg_cnt++, kernel_size); ocl_runtime->SetKernelArg(kernel_, arg_cnt++, stride); ocl_runtime->SetKernelArg(kernel_, arg_cnt++, padding); @@ -204,8 +204,8 @@ int Conv2dTransposeOpenCLKernel::Run() { return RET_OK; } -kernel::LiteKernel *OpenCLConv2dTransposeKernelCreator(const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs, +kernel::LiteKernel *OpenCLConv2dTransposeKernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) {
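Conv2dTransposeOpenCLKernel::PadWeight() narrows a float32 bias to half precision element by element when the kernel runs in fp16 mode, and falls back to memcpy when the types already match. A self-contained sketch of that branch; the stub below stands in for the project's Float32ToShort helper and is not a correct binary16 conversion:

    #include <cstdint>
    #include <cstring>

    // Stand-in for Float32ToShort; a real version would round to IEEE binary16.
    static std::uint16_t Float32ToShortStub(float f) { return static_cast<std::uint16_t>(f); }

    void PackBias(void *bias, const float *fdata, int co, bool enable_fp16) {
      if (enable_fp16) {
        // fp32 source, fp16 kernel: convert one element at a time.
        for (int i = 0; i < co; i++) {
          reinterpret_cast<std::uint16_t *>(bias)[i] = Float32ToShortStub(fdata[i]);
        }
      } else {
        std::memcpy(bias, fdata, co * sizeof(float));  // dtypes match: bulk copy
      }
    }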
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.h index 8f17966900..0ad8cbe72d 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.h @@ -28,8 +28,8 @@ namespace mindspore::kernel { class Conv2dTransposeOpenCLKernel : public OpenCLKernel { public: - explicit Conv2dTransposeOpenCLKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs) + explicit Conv2dTransposeOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs) : OpenCLKernel(parameter, inputs, outputs) {} ~Conv2dTransposeOpenCLKernel() override{}; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/convolution.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/convolution.cc index 61db5700fe..a7f0ea66cd 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/convolution.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/convolution.cc @@ -38,7 +38,7 @@ int ConvolutionOpenCLKernel::Init() { init_count++; use_fp16_ = ocl_runtime->GetFp16Enable(); - if (op_format_ != schema::Format_NHWC4 && op_format_ != schema::Format_NC4HW4) { + if (op_format_ != schema::Format::Format_NHWC4 && op_format_ != schema::Format::Format_NC4HW4) { MS_LOG(ERROR) << "op_format_ " << op_format_ << " not support!"; } in_ori_format_ = in_tensors_[0]->GetFormat(); @@ -78,7 +78,8 @@ int ConvolutionOpenCLKernel::Init() { ocl_runtime->BuildKernel(kernel_36to4x4, program_name, "Winograd36To4x4", build_options); } else { std::string program_name = "convolution" + std::to_string(init_count); - std::string source = op_format_ == schema::Format_NHWC4 ? CodeGenConvolutionNHWC4() : CodeGenConvolutionNC4HW4(); + std::string source = + op_format_ == schema::Format::Format_NHWC4 ? CodeGenConvolutionNHWC4() : CodeGenConvolutionNC4HW4(); ocl_runtime->LoadSource(program_name, source); ocl_runtime->BuildKernel(kernel_conv, program_name, "Convolution", build_options); } @@ -127,8 +128,8 @@ int ConvolutionOpenCLKernel::InitBuffer() { allocator->MapBuffer(packed_weight_, CL_MAP_WRITE, nullptr, true); memset(packed_weight_, 0x00, packed_weight_size); auto weight_tensor = in_tensors_[1]; - auto origin_weight_fp32 = reinterpret_cast<float *>(weight_tensor->Data()); - auto origin_weight_fp16 = reinterpret_cast<float16_t *>(weight_tensor->Data()); + auto origin_weight_fp32 = reinterpret_cast<float *>(weight_tensor->MutableData()); + auto origin_weight_fp16 = reinterpret_cast<float16_t *>(weight_tensor->MutableData()); if (use_winograd_) { // weight: OHWI -> O66I -> O/8 6 6 I/4 O2 I4 O4 @@ -221,7 +222,7 @@ int ConvolutionOpenCLKernel::InitBuffer() { packed_bias_ = allocator->Malloc(packed_bias_size); allocator->MapBuffer(packed_bias_, CL_MAP_WRITE, nullptr, true); memset(packed_bias_, 0x00, packed_bias_size); - memcpy(packed_bias_, bias_tensor->Data(), CO * sizeof_FLT); + memcpy(packed_bias_, bias_tensor->MutableData(), CO * sizeof_FLT); allocator->UnmapBuffer(packed_bias_); return RET_OK; @@ -229,7 +230,7 @@ int ConvolutionOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size) { size_t im_dst_x, im_dst_y; - if (in_tensors_[0]->GetFormat() == schema::Format_NHWC4) { + if (in_tensors_[0]->GetFormat() == schema::Format::Format_NHWC4) { if (out_tensors_[0]->Width() * CO_SLICES < 65536) { { im_dst_x = out_tensors_[0]->Width() * CO_SLICES; @@ -260,7 +261,7 @@ int ConvolutionOpenCLKernel::Run() { arg_cn = 0; cl_int4 _4x4to36_in_shape = {1, IH, IW, CI_SLICES}; cl_int4 _4x4to36_out_shape = {1, 36, TILES_XY, CI_SLICES}; - ocl_runtime->SetKernelArg(kernel_4x4to36, arg_cn++, in_tensors_[0]->Data(), lite::opencl::MemType::IMG); + ocl_runtime->SetKernelArg(kernel_4x4to36, arg_cn++, in_tensors_[0]->MutableData(), lite::opencl::MemType::IMG);
ocl_runtime->SetKernelArg(kernel_4x4to36, arg_cn++, winograd_mem0_, lite::opencl::MemType::IMG); ocl_runtime->SetKernelArg(kernel_4x4to36, arg_cn++, _4x4to36_in_shape); ocl_runtime->SetKernelArg(kernel_4x4to36, arg_cn++, _4x4to36_out_shape); @@ -278,17 +279,17 @@ int ConvolutionOpenCLKernel::Run() { cl_int4 _36to4x4_in_shape = {1, 16, TILES_XY, CO_SLICES}; cl_int4 _36to4x4_out_shape = {1, OH, OW, CO_SLICES}; ocl_runtime->SetKernelArg(kernel_36to4x4, arg_cn++, winograd_mem1_, lite::opencl::MemType::IMG); - ocl_runtime->SetKernelArg(kernel_36to4x4, arg_cn++, out_tensors_[0]->Data(), lite::opencl::MemType::IMG); + ocl_runtime->SetKernelArg(kernel_36to4x4, arg_cn++, out_tensors_[0]->MutableData(), lite::opencl::MemType::IMG); ocl_runtime->SetKernelArg(kernel_36to4x4, arg_cn++, packed_bias_, lite::opencl::MemType::BUF); ocl_runtime->SetKernelArg(kernel_36to4x4, arg_cn++, _36to4x4_in_shape); ocl_runtime->SetKernelArg(kernel_36to4x4, arg_cn++, _36to4x4_out_shape); } else { arg_cn = 0; - ocl_runtime->SetKernelArg(kernel_conv, arg_cn++, in_tensors_[0]->Data(), lite::opencl::MemType::IMG); - ocl_runtime->SetKernelArg(kernel_conv, arg_cn++, out_tensors_[0]->Data(), lite::opencl::MemType::IMG); + ocl_runtime->SetKernelArg(kernel_conv, arg_cn++, in_tensors_[0]->MutableData(), lite::opencl::MemType::IMG); + ocl_runtime->SetKernelArg(kernel_conv, arg_cn++, out_tensors_[0]->MutableData(), lite::opencl::MemType::IMG); ocl_runtime->SetKernelArg(kernel_conv, arg_cn++, packed_weight_, lite::opencl::MemType::BUF); ocl_runtime->SetKernelArg(kernel_conv, arg_cn++, packed_bias_, lite::opencl::MemType::BUF); - if (op_format_ == schema::Format_NC4HW4) { + if (op_format_ == schema::Format::Format_NC4HW4) { cl_int4 input_shape = {1, IH, IW, CI_SLICES}; cl_int4 output_shape = {1, OH, OW, CO_SLICES}; ocl_runtime->SetKernelArg(kernel_conv, arg_cn++, input_shape); @@ -606,12 +607,12 @@ std::string ConvolutionOpenCLKernel::CodeGenWinograd4x4To36() { " {\n" " int y_idx = tile_y * 4 - PAD + y;\n"; - if (op_format_ == schema::Format_NHWC4) { + if (op_format_ == schema::Format::Format_NHWC4) { code += " for (int x = 0; x < 6; x++)\n" " {\n" " int x_idx = (tile_x * 4 - PAD + x) * SLICES + slice;\n"; - } else if (op_format_ == schema::Format_NC4HW4) { + } else if (op_format_ == schema::Format::Format_NC4HW4) { code += " if(y_idx < 0 || y_idx >= IH)\n" " {\n" @@ -787,9 +788,9 @@ std::string ConvolutionOpenCLKernel::CodeGenWinograd36To4x4() { " int tile_x = tile_xy % TILE_X * 4;\n" " int tile_y = tile_xy / TILE_X * 4;\n"; - if (op_format_ == schema::Format_NHWC4) { + if (op_format_ == schema::Format::Format_NHWC4) { code += " WRITE_IMAGE(output, (int2)((tile_x + x) * SLICES + slice, tile_y + row), acc);\n"; - } else if (op_format_ == schema::Format_NC4HW4) { + } else if (op_format_ == schema::Format::Format_NC4HW4) { code += " WRITE_IMAGE(output, (int2)(tile_x + x, slice * OH + tile_y + row), acc);\n"; } @@ -822,7 +823,7 @@ int ConvolutionOpenCLKernel::SetGlobalLocalConv(std::vector<size_t> *global, std::vector<size_t> *local) { local_h = global_h / 2; } - if (op_format_ == schema::Format_NHWC4) { + if (op_format_ == schema::Format::Format_NHWC4) { if (OW * CO_SLICES > 65536) { local_w = 4; } @@ -837,7 +838,7 @@ int ConvolutionOpenCLKernel::SetGlobalLocalConv(std::vector<size_t> *global, std::vector<size_t> *local) { local->push_back(local_h); local->push_back(local_c); - if (op_format_ == schema::Format_NC4HW4) { + if (op_format_ == schema::Format::Format_NC4HW4) { // calculate 2 FLT4 along width per work-item global->at(0) = UP_DIV(global->at(0), 2);
if (local->at(0) > global->at(0)) { @@ -848,10 +849,9 @@ int ConvolutionOpenCLKernel::SetGlobalLocalConv(std::vector<size_t> *global, std::vector<size_t> *local) { return RET_OK; } -kernel::LiteKernel *OpenCLConvolutionKernelCreator(const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *OpenCLConvolutionKernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { auto *kernel = new (std::nothrow) ConvolutionOpenCLKernel(reinterpret_cast<OpParameter *>(opParameter), inputs, outputs); diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/convolution.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/convolution.h index fc3cd21880..458fc8f16b 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/convolution.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/convolution.h @@ -19,7 +19,7 @@ #include <vector> #include <string> -#include "src/ir/tensor.h" +#include "src/tensor.h" #include "src/runtime/kernel/opencl/opencl_kernel.h" #include "schema/model_generated.h" #include "src/runtime/opencl/opencl_runtime.h" @@ -29,8 +29,8 @@ namespace mindspore::kernel { class ConvolutionOpenCLKernel : public OpenCLKernel { public: - explicit ConvolutionOpenCLKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs) + explicit ConvolutionOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs) : OpenCLKernel(parameter, inputs, outputs) {} ~ConvolutionOpenCLKernel() override{};
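ConvolutionOpenCLKernel::Init() above picks which OpenCL source generator to run based on the layout the kernel was configured with. A condensed sketch of that dispatch (the CodeGen* names are stand-ins for the member functions shown in the hunk):

    #include <string>
    #include <stdexcept>

    enum class Format { NHWC4, NC4HW4, NHWC };

    // Stand-ins for ConvolutionOpenCLKernel::CodeGenConvolutionNHWC4/NC4HW4.
    static std::string CodeGenNHWC4() { return "__kernel void Convolution_NHWC4() {}"; }
    static std::string CodeGenNC4HW4() { return "__kernel void Convolution_NC4HW4() {}"; }

    std::string SelectSource(Format op_format) {
      // Only the two 4-channel-packed layouts are supported; anything else
      // mirrors the MS_LOG(ERROR) path in the real Init().
      if (op_format != Format::NHWC4 && op_format != Format::NC4HW4) {
        throw std::runtime_error("op_format not supported");
      }
      return op_format == Format::NHWC4 ? CodeGenNHWC4() : CodeGenNC4HW4();
    }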
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc index eb9d799857..91f29f61b9 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc @@ -47,7 +47,7 @@ int DepthwiseConv2dOpenCLKernel::Init() { auto in_format = op_format_; in_ori_format_ = in_tensors_[0]->GetFormat(); out_ori_format_ = out_tensors_[0]->GetFormat(); - if (in_format != schema::Format_NHWC4 && in_format != schema::Format_NC4HW4) { + if (in_format != schema::Format::Format_NHWC4 && in_format != schema::Format::Format_NC4HW4) { MS_LOG(ERROR) << "input format(" << in_format << ") " << "format not support!"; return RET_ERROR; @@ -59,9 +59,9 @@ int DepthwiseConv2dOpenCLKernel::Init() { } else { kernel_name += "_IMG"; } - if (in_format == schema::Format_NC4HW4) { + if (in_format == schema::Format::Format_NC4HW4) { kernel_name += "_NC4HW4"; - } else if (in_format == schema::Format_NHWC4) { + } else if (in_format == schema::Format::Format_NHWC4) { kernel_name += "_NHWC4"; } auto parameter = reinterpret_cast<ConvParameter *>(op_parameter_); @@ -89,7 +89,7 @@ int DepthwiseConv2dOpenCLKernel::InitBuffer() { bool is_fp16 = ocl_runtime->GetFp16Enable(); // weight: o, h, w, i; o == group, i == 1 - void *origin_weight = in_tensors_.at(kWeightIndex)->Data(); + void *origin_weight = in_tensors_.at(kWeightIndex)->MutableData(); int CO4 = UP_DIV(out_tensors_[0]->Channel(), C4NUM); int pack_weight_size = C4NUM * CO4 * parameter->kernel_h_ * parameter->kernel_w_; @@ -115,7 +115,7 @@ int DepthwiseConv2dOpenCLKernel::InitBuffer() { PackNCHWToNC4HW4(origin_weight, packed_weight_, 1, plane, out_tensors_[0]->Channel(), to_dtype); } else { MS_LOG(ERROR) << "Only support float16/float32, actual data type " << in_tensors_.at(kWeightIndex)->data_type(); - return RET_ERROR; + return RET_ERROR; } } @@ -130,7 +130,7 @@ int DepthwiseConv2dOpenCLKernel::InitBuffer() { bias_data_ = allocator->MapBuffer(bias_data_, CL_MAP_WRITE, nullptr, true); size_t up_co_size = C4NUM * CO4 * dtype_size; memset(bias_data_, 0, up_co_size); - auto ori_bias = in_tensors_.at(kBiasIndex)->Data(); + auto ori_bias = in_tensors_.at(kBiasIndex)->MutableData(); memcpy(bias_data_, ori_bias, out_tensors_[0]->Channel() * dtype_size); allocator->UnmapBuffer(bias_data_); } else { @@ -144,7 +144,7 @@ int DepthwiseConv2dOpenCLKernel::ReSize() { return RET_OK; } int DepthwiseConv2dOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size) { size_t CO4 = UP_DIV(out_tensors_[0]->Channel(), C4NUM); size_t im_dst_x, im_dst_y; - if (in_tensors_[0]->GetFormat() == schema::Format_NHWC4) { + if (in_tensors_[0]->GetFormat() == schema::Format::Format_NHWC4) { im_dst_x = out_tensors_[0]->Width() * CO4; im_dst_y = out_tensors_[0]->Height(); } else { @@ -197,10 +197,10 @@ int DepthwiseConv2dOpenCLKernel::Run() { (cl_int)out_tensors_[0]->Batch()}; int arg_cnt = 0; - ocl_runtime->SetKernelArg(kernel_, arg_cnt++, in_tensors_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, arg_cnt++, in_tensors_[0]->MutableData()); ocl_runtime->SetKernelArg(kernel_, arg_cnt++, packed_weight_, lite::opencl::MemType::BUF); ocl_runtime->SetKernelArg(kernel_, arg_cnt++, bias_data_, lite::opencl::MemType::BUF); - ocl_runtime->SetKernelArg(kernel_, arg_cnt++, out_tensors_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, arg_cnt++, out_tensors_[0]->MutableData()); ocl_runtime->SetKernelArg(kernel_, arg_cnt++, kernel_size); ocl_runtime->SetKernelArg(kernel_, arg_cnt++, stride); ocl_runtime->SetKernelArg(kernel_, arg_cnt++, padding); @@ -213,8 +213,8 @@ int DepthwiseConv2dOpenCLKernel::Run() { return RET_OK; } -kernel::LiteKernel *OpenCLDepthwiseConv2dKernelCreator(const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs, +kernel::LiteKernel *OpenCLDepthwiseConv2dKernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.h index 9f5ae2e717..c48bf93285 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.h @@ -26,8 +26,8 @@ namespace mindspore::kernel { class DepthwiseConv2dOpenCLKernel : public OpenCLKernel { public: - explicit DepthwiseConv2dOpenCLKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs) + explicit DepthwiseConv2dOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs) : OpenCLKernel(parameter, inputs, outputs), packed_weight_(nullptr), bias_data_(nullptr), kernel_(nullptr) {} ~DepthwiseConv2dOpenCLKernel() override{};
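The depthwise InitBuffer() sizes its packed weight as C4NUM * CO4 * kernel_h * kernel_w, where CO4 = UP_DIV(channels, C4NUM). The rounding arithmetic in isolation, with made-up dimensions (UpDiv reimplements the UP_DIV macro from the nnacl headers):

    #include <cassert>

    // Same rounding-up division the nnacl headers define as UP_DIV.
    static int UpDiv(int x, int y) { return (x + y - 1) / y; }

    int main() {
      const int C4NUM = 4;  // channel packing width
      const int channels = 10, kh = 3, kw = 3;
      const int co4 = UpDiv(channels, C4NUM);    // 10 channels -> 3 slices
      const int packed = C4NUM * co4 * kh * kw;  // padded element count
      assert(co4 == 3 && packed == 108);         // 12 padded channels * 9 taps
      return 0;
    }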
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc index af81b3747b..ed51c416bc 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc @@ -79,7 +79,7 @@ void MatMulOpenCLKernel::PadWeight() { padWeight_ = allocator->Malloc(sizeCI.s[1] * sizeCO.s[1] * C4NUM * C4NUM * dtype_size); padWeight_ = allocator->MapBuffer(padWeight_, CL_MAP_WRITE, nullptr, true); memset(padWeight_, 0x00, sizeCI.s[1] * sizeCO.s[1] * C4NUM * C4NUM * dtype_size); - auto origin_weight = in_tensors_.at(kWeightIndex)->Data(); + auto origin_weight = in_tensors_.at(kWeightIndex)->MutableData(); int divCI = sizeCI.s[1]; int divCO = sizeCO.s[1]; int co = sizeCO.s[0]; @@ -129,12 +129,12 @@ void MatMulOpenCLKernel::PadWeight() { memset(bias_, 0x00, divCO * C4NUM * dtype_size); if (in_tensors_.size() >= 3) { if (in_tensors_[2]->data_type() == kNumberTypeFloat32 && enable_fp16_) { - auto fdata = reinterpret_cast<float *>(in_tensors_[2]->Data()); + auto fdata = reinterpret_cast<float *>(in_tensors_[2]->MutableData()); for (int i = 0; i < co; i++) { reinterpret_cast<float16_t *>(bias_)[i] = Float32ToShort(fdata[i]); } } else { - memcpy(bias_, in_tensors_[2]->Data(), co * dtype_size); + memcpy(bias_, in_tensors_[2]->MutableData(), co * dtype_size); } } allocator->UnmapBuffer(bias_); @@ -142,10 +142,10 @@ int MatMulOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size) { size_t im_dst_x, im_dst_y; - if (op_format_ == schema::Format_NHWC4) { + if (op_format_ == schema::Format::Format_NHWC4) { im_dst_x = sizeCO.s[1]; im_dst_y = 1; - } else if (op_format_ == schema::Format_NC4HW4) { + } else if (op_format_ == schema::Format::Format_NC4HW4) { im_dst_x = 1; im_dst_y = sizeCO.s[1]; } else { @@ -169,10 +169,10 @@ int MatMulOpenCLKernel::Run() { std::vector<size_t> local = {64, 4}; std::vector<size_t> global = {UP_ROUND(sizeCO.s[1], local[0]), 4}; int arg_count = 0; - ocl_runtime->SetKernelArg(kernel_, arg_count++, in_tensors_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, arg_count++, in_tensors_[0]->MutableData()); ocl_runtime->SetKernelArg(kernel_, arg_count++, padWeight_, lite::opencl::MemType::BUF); ocl_runtime->SetKernelArg(kernel_, arg_count++, bias_); - ocl_runtime->SetKernelArg(kernel_, arg_count++, out_tensors_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, arg_count++, out_tensors_[0]->MutableData()); ocl_runtime->SetKernelArg(kernel_, arg_count++, sizeCI); ocl_runtime->SetKernelArg(kernel_, arg_count++, sizeCO); ocl_runtime->SetKernelArg(kernel_, arg_count++, hasBias_ ? 1 : 0);
@@ -180,10 +180,9 @@ int MatMulOpenCLKernel::Run() { return RET_OK; } -kernel::LiteKernel *OpenCLMatMulKernelCreator(const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *OpenCLMatMulKernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { bool hasBias = false; if (opParameter->type_ == PrimitiveType_FullConnection) { diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.h index 7440382a09..dfd102107e 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.h @@ -27,8 +27,8 @@ namespace mindspore::kernel { class MatMulOpenCLKernel : public OpenCLKernel { public: - explicit MatMulOpenCLKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs, bool hasBias) + explicit MatMulOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, bool hasBias) : OpenCLKernel(parameter, inputs, outputs) { hasBias_ = hasBias; }
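Every *KernelCreator in this patch keeps the same parameter list but swaps the vector element type from the old nested tensor class to the flattened lite::Tensor. A skeletal creator in the post-patch shape; every type below is an empty stand-in so that only the signature carries information:

    #include <vector>
    #include <new>

    // Empty stand-ins for the real types; illustration only.
    namespace lite { class Tensor {}; class Context {}; class PrimitiveC {}; }
    struct OpParameter {};
    namespace kernel {
    struct KernelKey {};
    class LiteKernel { public: virtual ~LiteKernel() = default; };

    // Post-patch creator shape: lite::Tensor replaces lite::tensor::Tensor.
    LiteKernel *ExampleKernelCreator(const std::vector<lite::Tensor *> &inputs,
                                     const std::vector<lite::Tensor *> &outputs,
                                     OpParameter *opParameter, const lite::Context *ctx,
                                     const KernelKey &desc, const lite::PrimitiveC *primitive) {
      return new (std::nothrow) LiteKernel();  // real creators build the concrete kernel
    }
    }  // namespace kernel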
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.cc index 27e4f214ff..fa1095dd8c 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.cc @@ -98,10 +98,10 @@ int PoolingOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size) { int h = out_tensors_[0]->shape()[1]; int w = out_tensors_[0]->shape()[2]; int c = out_tensors_[0]->shape()[3]; - if (op_format_ == schema::Format_NHWC4) { + if (op_format_ == schema::Format::Format_NHWC4) { im_dst_x = w * UP_DIV(c, C4NUM); im_dst_y = n * h; - } else if (op_format_ == schema::Format_NC4HW4) { + } else if (op_format_ == schema::Format::Format_NC4HW4) { im_dst_x = w; im_dst_y = n * UP_DIV(c, C4NUM) * h; } else { @@ -135,8 +135,8 @@ int PoolingOpenCLKernel::Run() { cl_int2 padding = {parameter_->pad_u_, parameter_->pad_l_}; int arg_idx = 0; - ocl_runtime->SetKernelArg(kernel_, arg_idx++, in_tensors_[0]->Data()); - ocl_runtime->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, arg_idx++, in_tensors_[0]->MutableData()); + ocl_runtime->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->MutableData()); ocl_runtime->SetKernelArg(kernel_, arg_idx++, input_shape); ocl_runtime->SetKernelArg(kernel_, arg_idx++, output_shape); ocl_runtime->SetKernelArg(kernel_, arg_idx++, stride); @@ -153,10 +153,9 @@ int PoolingOpenCLKernel::Run() { return RET_OK; } -kernel::LiteKernel *OpenCLPooling2dKernelCreator(const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *OpenCLPooling2dKernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { auto *kernel = new (std::nothrow) PoolingOpenCLKernel(reinterpret_cast<OpParameter *>(opParameter), inputs, outputs); if (kernel == nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.h index c162edf266..bc12fb0a3f 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.h @@ -27,8 +27,8 @@ namespace mindspore::kernel { class PoolingOpenCLKernel : public OpenCLKernel { public: - explicit PoolingOpenCLKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs) + explicit PoolingOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs) : OpenCLKernel(parameter, inputs, outputs) { parameter_ = reinterpret_cast<PoolingParameter *>(parameter); }
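The GetImageSize() overrides repeated through these files all derive a 2D OpenCL image extent from the tensor layout: NHWC4 folds the channel slices into the image width, NC4HW4 stacks them into the height. That arithmetic in isolation (the Layout enum and pair return are simplifications):

    #include <cstddef>
    #include <utility>

    enum class Layout { NHWC4, NC4HW4 };
    static std::size_t UpDiv(std::size_t x, std::size_t y) { return (x + y - 1) / y; }

    // Returns {im_x, im_y} for an n*h*w*c tensor, as in the kernels' GetImageSize().
    std::pair<std::size_t, std::size_t> ImageExtent(Layout fmt, std::size_t n, std::size_t h,
                                                    std::size_t w, std::size_t c) {
      const std::size_t slices = UpDiv(c, 4);  // C4NUM == 4
      if (fmt == Layout::NHWC4) {
        return {w * slices, n * h};  // slices widen each row
      }
      return {w, n * slices * h};    // NC4HW4: slices stack extra rows
    }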
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/prelu.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/prelu.cc index fb008eddd0..a16c04158f 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/prelu.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/prelu.cc @@ -50,22 +50,22 @@ void PReluOpenCLKernel::InitBuffer() { if (enable_fp16_) { if (in_tensors_[1]->data_type() == kNumberTypeFloat32) { auto PReluWeight_fp16 = reinterpret_cast<float16_t *>(PReluWeight_); - auto in_tensor_data_fp32 = reinterpret_cast<float *>(in_tensors_[1]->Data()); + auto in_tensor_data_fp32 = reinterpret_cast<float *>(in_tensors_[1]->MutableData()); for (int i = 0; i < elem_num; i++) { PReluWeight_fp16[i] = Float32ToShort(in_tensor_data_fp32[i]); } } else { - memcpy(PReluWeight_, in_tensors_[1]->Data(), elem_num * fp_size); + memcpy(PReluWeight_, in_tensors_[1]->MutableData(), elem_num * fp_size); } } else { if (in_tensors_[1]->data_type() == kNumberTypeFloat16) { auto PReluWeight_fp32 = reinterpret_cast<float *>(PReluWeight_); - auto in_tensor_data_fp16 = reinterpret_cast<float16_t *>(in_tensors_[1]->Data()); + auto in_tensor_data_fp16 = reinterpret_cast<float16_t *>(in_tensors_[1]->MutableData()); for (int i = 0; i < elem_num; i++) { PReluWeight_fp32[i] = ShortToFloat32(in_tensor_data_fp16[i]); } } else { - memcpy(PReluWeight_, in_tensors_[1]->Data(), elem_num * fp_size); + memcpy(PReluWeight_, in_tensors_[1]->MutableData(), elem_num * fp_size); } } allocator->UnmapBuffer(PReluWeight_); @@ -108,10 +108,10 @@ int PReluOpenCLKernel::Run() { MS_LOG(DEBUG) << op_parameter_->name_ << " Running!"; auto ocl_runtime = lite::opencl::OpenCLRuntime::GetInstance(); - std::map<schema::Format, int> data_type{{schema::Format_NHWC4, 1}, {schema::Format_NC4HW4, 2}}; + std::map<schema::Format, int> data_type{{schema::Format::Format_NHWC4, 1}, {schema::Format::Format_NC4HW4, 2}}; int arg_idx = 0; - ocl_runtime->SetKernelArg(kernel_, arg_idx++, in_tensors_[0]->Data()); - ocl_runtime->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, arg_idx++, in_tensors_[0]->MutableData()); + ocl_runtime->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->MutableData()); ocl_runtime->SetKernelArg(kernel_, arg_idx++, input_shape_); ocl_runtime->SetKernelArg(kernel_, arg_idx++, PReluWeight_); ocl_runtime->SetKernelArg(kernel_, arg_idx++, data_type[op_format_]); @@ -132,9 +132,9 @@ int PReluOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size) { img_dtype = CL_HALF_FLOAT; } global_shape_ = input_shape_; - if (op_format_ == schema::Format_NC4HW4) { + if (op_format_ == schema::Format::Format_NC4HW4) { global_shape_.s[1] = UP_DIV(input_shape_.s[3], C4NUM) * input_shape_.s[1]; - } else if (op_format_ == schema::Format_NHWC4) { + } else if (op_format_ == schema::Format::Format_NHWC4) { global_shape_.s[2] = UP_DIV(input_shape_.s[3], C4NUM) * input_shape_.s[2]; } else { MS_LOG(ERROR) << "op_format_:" << op_format_ << " is do not support!"; @@ -147,10 +147,10 @@ int PReluOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size) { return RET_OK; } -kernel::LiteKernel *OpenCLPReluKernelCreator(const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::PrimitiveC *primitive) { +kernel::LiteKernel *OpenCLPReluKernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, + const lite::PrimitiveC *primitive) { if (inputs.empty()) { MS_LOG(ERROR) << "Input data size must be greater than 0, but your size is " << inputs.size(); return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/prelu.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/prelu.h index 3535ba4aa4..d1ab4a76bd 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/prelu.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/prelu.h @@ -19,7 +19,7 @@ #include <vector> #include <string> -#include "src/ir/tensor.h" +#include "src/tensor.h" #include "src/runtime/kernel/opencl/opencl_kernel.h" #include "schema/model_generated.h" #include "src/runtime/opencl/opencl_runtime.h" @@ -28,8 +28,8 @@ namespace mindspore::kernel { class PReluOpenCLKernel : public OpenCLKernel { public: - explicit PReluOpenCLKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs) + explicit PReluOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs) : OpenCLKernel(parameter, inputs, outputs) {} ~PReluOpenCLKernel() override{};
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.cc index c4124a743c..6beba68518 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.cc @@ -75,10 +75,10 @@ int ReshapeOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size) { w = shapex[2]; c = shapex[3]; } - if (op_format_ == schema::Format_NHWC4) { + if (op_format_ == schema::Format::Format_NHWC4) { im_dst_x = w * UP_DIV(c, C4NUM); im_dst_y = n * h; - } else if (op_format_ == schema::Format_NC4HW4) { + } else if (op_format_ == schema::Format::Format_NC4HW4) { im_dst_x = w; im_dst_y = n * UP_DIV(c, C4NUM) * h; } else { @@ -115,18 +115,17 @@ int ReshapeOpenCLKernel::Run() { cl_int4 size = {h, w, c4, 1}; cl_int4 size_out = {oh, ow, c4, 1}; int arg_idx = 0; - ocl_runtime->SetKernelArg(kernel_, arg_idx++, in_tensors_[0]->Data()); - ocl_runtime->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, arg_idx++, in_tensors_[0]->MutableData()); + ocl_runtime->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->MutableData()); ocl_runtime->SetKernelArg(kernel_, arg_idx++, size); ocl_runtime->SetKernelArg(kernel_, arg_idx++, size_out); ocl_runtime->RunKernel(kernel_, global, local, nullptr); return RET_OK; } -kernel::LiteKernel *OpenCLReshapeKernelCreator(const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *OpenCLReshapeKernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { auto *kernel = new (std::nothrow) ReshapeOpenCLKernel(reinterpret_cast<OpParameter *>(opParameter), inputs, outputs); if (kernel == nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.h index 95ed49548c..8323339e5f 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.h @@ -26,8 +26,8 @@ namespace mindspore::kernel { class ReshapeOpenCLKernel : public OpenCLKernel { public: - explicit ReshapeOpenCLKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs) + explicit ReshapeOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs) : OpenCLKernel(parameter, inputs, outputs) {} ~ReshapeOpenCLKernel() override{}; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/slice.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/slice.cc index 5d29e1aa93..a8d5ddfa65 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/slice.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/slice.cc @@ -31,7 +31,7 @@ namespace mindspore::kernel { int SliceOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size) { size_t CO4 = UP_DIV(out_tensors_[0]->Channel(), C4NUM); size_t im_dst_x, im_dst_y; - if (in_tensors_[0]->GetFormat() == schema::Format_NHWC4) { + if (in_tensors_[0]->GetFormat() == schema::Format::Format_NHWC4) { im_dst_x = out_tensors_[0]->Width() * CO4; im_dst_y = out_tensors_[0]->Height(); } else { @@ -123,8 +123,8 @@ int SliceOpenCLKernel::Run() { std::vector<size_t> global = {1, OH, OW}; SlcieGetWorkGroup(global, &local, max_global[0]); int arg_cn = 0; - ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[0]->Data()); // input tensor - ocl_runtime->SetKernelArg(kernel_, arg_cn++, out_tensors_[0]->Data()); // out tensor + ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[0]->MutableData()); // input tensor + ocl_runtime->SetKernelArg(kernel_, arg_cn++, out_tensors_[0]->MutableData()); // out tensor ocl_runtime->SetKernelArg(kernel_, arg_cn++, input_shape_); ocl_runtime->SetKernelArg(kernel_, arg_cn++, size_); ocl_runtime->SetKernelArg(kernel_, arg_cn++, begin_); @@ -134,10 +134,9 @@ int SliceOpenCLKernel::Run() { return RET_OK; } // namespace mindspore::kernel -kernel::LiteKernel *OpenCLSliceKernelCreator(const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *OpenCLSliceKernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { auto *kernel = new (std::nothrow) SliceOpenCLKernel(opParameter, inputs, outputs); if (kernel == nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/slice.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/slice.h index 7a679e4593..dadf03e062 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/slice.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/slice.h @@ -27,8 +27,8 @@ namespace mindspore::kernel { class SliceOpenCLKernel : public OpenCLKernel { public: - explicit SliceOpenCLKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs) + explicit SliceOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs) + const std::vector<lite::Tensor *> &outputs) : OpenCLKernel(parameter, inputs, outputs) {} ~SliceOpenCLKernel() override{};
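Before RunKernel, kernels such as slice compute a local work-group size from the global range and the device's maximum group size (SlcieGetWorkGroup above). A simplified stand-in for that clamping step; the real helpers also honor per-dimension device limits:

    #include <vector>
    #include <cstddef>

    // Shrink each local dimension to a power of two that fits both the global
    // range and the remaining group-size budget. Simplified sketch only.
    std::vector<std::size_t> ChooseLocal(const std::vector<std::size_t> &global,
                                         std::size_t max_group_size) {
      std::vector<std::size_t> local(global.size(), 1);
      std::size_t budget = max_group_size;
      for (std::size_t i = 0; i < global.size(); ++i) {
        std::size_t dim = 1;
        while (dim * 2 <= global[i] && dim * 2 <= budget) dim *= 2;
        local[i] = dim;
        budget /= dim;
      }
      return local;
    }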
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc index 9c6a1774c4..b4a9a6683e 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc @@ -146,11 +146,11 @@ int SoftmaxOpenCLKernel::Run() { auto mask_ = GetMaskForLastChannel(channel_size); cl_float4 mask = {mask_[0], mask_[1], mask_[2], mask_[3]}; - runtime_->SetKernelArg(kernel_, arg_idx++, in_tensors_[0]->Data()); + runtime_->SetKernelArg(kernel_, arg_idx++, in_tensors_[0]->MutableData()); if (is_image_out_) { - runtime_->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->Data()); + runtime_->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->MutableData()); } else { - runtime_->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->Data(), lite::opencl::MemType::BUF); + runtime_->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->MutableData(), lite::opencl::MemType::BUF); } runtime_->SetKernelArg(kernel_, arg_idx++, mask); runtime_->SetKernelArg(kernel_, arg_idx++, slices); @@ -160,11 +160,11 @@ int SoftmaxOpenCLKernel::Run() { int slices = UP_DIV(out_tensors_[0]->shape()[3], C4NUM); cl_int4 input_shape = {in_tensors_[0]->shape()[1], in_tensors_[0]->shape()[2], in_tensors_[0]->shape()[3], slices}; - runtime_->SetKernelArg(kernel_, arg_idx++, in_tensors_[0]->Data()); + runtime_->SetKernelArg(kernel_, arg_idx++, in_tensors_[0]->MutableData()); if (is_image_out_) { - runtime_->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->Data()); + runtime_->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->MutableData()); } else { - runtime_->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->Data(), lite::opencl::MemType::BUF); + runtime_->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->MutableData(), lite::opencl::MemType::BUF); } runtime_->SetKernelArg(kernel_, arg_idx, input_shape); SetWorkGroupSize(); @@ -175,10 +175,9 @@ int SoftmaxOpenCLKernel::Run() { return lite::RET_OK; } -kernel::LiteKernel *OpenCLSoftMaxKernelCreator(const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *OpenCLSoftMaxKernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { auto *kernel = new (std::nothrow) SoftmaxOpenCLKernel(reinterpret_cast<OpParameter *>(opParameter), inputs, outputs); if (kernel == nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.h index 154e06057e..329b783a75 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.h @@ -27,8 +27,8 @@ namespace mindspore::kernel { class SoftmaxOpenCLKernel : public OpenCLKernel { public: - explicit SoftmaxOpenCLKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs) + explicit SoftmaxOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs) : OpenCLKernel(parameter, inputs, outputs) { parameter_ = reinterpret_cast<SoftmaxParameter *>(parameter); } diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/to_format.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/to_format.cc index 5d937b270a..719ddbe777 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/to_format.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/to_format.cc @@ -37,9 +37,10 @@ int ToFormatOpenCLKernel::Init() { auto parameter = reinterpret_cast<OpenCLToFormatParameter *>(op_parameter_); out_mem_type_ = parameter->out_mem_type;
std::string program_name = "to_format"; - std::map<schema::Format, std::string> format_str{{schema::Format_NCHW, "NCHW"}, {schema::Format_NHWC, "NHWC"}, - {schema::Format_NC4HW4, "NC4HW4"}, {schema::Format_NC4, "NHWC4"}, - {schema::Format_NC, "NHWC"}, {schema::Format_NHWC4, "NHWC4"}}; + std::map<schema::Format, std::string> format_str{ + {schema::Format::Format_NCHW, "NCHW"}, {schema::Format::Format_NHWC, "NHWC"}, + {schema::Format::Format_NC4HW4, "NC4HW4"}, {schema::Format::Format_NC4, "NHWC4"}, + {schema::Format::Format_NC, "NHWC"}, {schema::Format::Format_NHWC4, "NHWC4"}}; std::string kernel_name = "to_format_" + format_str[in_tensors_[0]->GetFormat()] + "_to_" + format_str[out_tensors_[0]->GetFormat()]; std::map<TypeId, std::string> dtype_str{ @@ -75,18 +76,20 @@ int ToFormatOpenCLKernel::InitNHWCShape() { nhwc_shape_ = {n, h, w, c}; return RET_OK; } - if (out_tensors_[0]->GetFormat() == schema::Format_NC4HW4 || out_tensors_[0]->GetFormat() == schema::Format_NHWC4 || - out_tensors_[0]->GetFormat() == schema::Format_NHWC) { + if (out_tensors_[0]->GetFormat() == schema::Format::Format_NC4HW4 || + out_tensors_[0]->GetFormat() == schema::Format::Format_NHWC4 || + out_tensors_[0]->GetFormat() == schema::Format::Format_NHWC) { n = shapex[0]; h = shapex[1]; w = shapex[2]; c = shapex[3]; - } else if (out_tensors_[0]->GetFormat() == schema::Format_NCHW) { + } else if (out_tensors_[0]->GetFormat() == schema::Format::Format_NCHW) { n = shapex[0]; h = shapex[2]; w = shapex[3]; c = shapex[1]; - } else if (out_tensors_[0]->GetFormat() == schema::Format_NC4 || out_tensors_[0]->GetFormat() == schema::Format_NC) { + } else if (out_tensors_[0]->GetFormat() == schema::Format::Format_NC4 || + out_tensors_[0]->GetFormat() == schema::Format::Format_NC) { n = shapex[0]; h = 1; w = 1; @@ -115,19 +118,19 @@ int ToFormatOpenCLKernel::GetLocalSize(size_t idx, const std::vector<size_t> &global) { int ToFormatOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size) { size_t im_dst_x, im_dst_y; - if (out_tensors_[0]->GetFormat() == schema::Format_NC4HW4) { + if (out_tensors_[0]->GetFormat() == schema::Format::Format_NC4HW4) { int c = nhwc_shape_[3]; int h = nhwc_shape_[1]; int w = nhwc_shape_[2]; im_dst_y = nhwc_shape_[0] * h * UP_DIV(c, C4NUM); im_dst_x = w; - } else if (out_tensors_[0]->GetFormat() == schema::Format_NHWC4) { + } else if (out_tensors_[0]->GetFormat() == schema::Format::Format_NHWC4) { int h = nhwc_shape_[0] * nhwc_shape_[1]; int w = nhwc_shape_[2]; int c = nhwc_shape_[3]; im_dst_x = w * UP_DIV(c, C4NUM); im_dst_y = h; - } else if (out_tensors_[0]->GetFormat() == schema::Format_NC4) { + } else if (out_tensors_[0]->GetFormat() == schema::Format::Format_NC4) { int c = nhwc_shape_[3]; im_dst_x = UP_DIV(c, C4NUM); im_dst_y = 1; @@ -156,18 +159,17 @@ int ToFormatOpenCLKernel::Run() { cl_int4 gsize{(cl_int)global[0], (cl_int)global[1], (cl_int)global[2], 1}; auto src_mem_type = (out_mem_type_ == OpenCLMemType::IMG) ? lite::opencl::MemType::BUF : lite::opencl::MemType::IMG; auto dst_mem_type = (out_mem_type_ == OpenCLMemType::IMG) ? lite::opencl::MemType::IMG : lite::opencl::MemType::BUF;
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.cc index c15a0723d4..08da236439 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.cc @@ -61,8 +61,8 @@ int TransposeOpenCLKernel::Init() { in_tensors_[0]->SetFormat(op_format_); out_tensors_[0]->SetFormat(op_format_); if (out_mem_type_ == OpenCLMemType::BUF) { - out_ori_format_ = schema::Format_NCHW; - out_tensors_[0]->SetFormat(schema::Format_NCHW); + out_ori_format_ = schema::Format::Format_NCHW; + out_tensors_[0]->SetFormat(schema::Format::Format_NCHW); } MS_LOG(DEBUG) << kernel_name << " Init Done!"; @@ -77,10 +77,10 @@ int TransposeOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_siz int h = out_tensors_[0]->shape()[1]; int w = out_tensors_[0]->shape()[2]; int c = out_tensors_[0]->shape()[3]; - if (op_format_ == schema::Format_NHWC4) { + if (op_format_ == schema::Format::Format_NHWC4) { im_dst_x = w * UP_DIV(c, C4NUM); im_dst_y = n * h; - } else if (op_format_ == schema::Format_NC4HW4) { + } else if (op_format_ == schema::Format::Format_NC4HW4) { im_dst_x = w; im_dst_y = n * UP_DIV(c, C4NUM) * h; } else { @@ -113,11 +113,11 @@ int TransposeOpenCLKernel::Run() { cl_int2 HW = {h * w, hw4}; cl_int2 C = {c, c4}; int arg_idx = 0; - ocl_runtime->SetKernelArg(kernel_, arg_idx++, in_tensors_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, arg_idx++, in_tensors_[0]->MutableData()); if (out_mem_type_ == OpenCLMemType::BUF) { - ocl_runtime->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->Data(), lite::opencl::MemType::BUF); + ocl_runtime->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->MutableData(), lite::opencl::MemType::BUF); } else { - ocl_runtime->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->MutableData()); } ocl_runtime->SetKernelArg(kernel_, arg_idx++, HW); ocl_runtime->SetKernelArg(kernel_, arg_idx++, C); @@ -127,10 +127,9 @@ int TransposeOpenCLKernel::Run() { return RET_OK; } -kernel::LiteKernel *OpenCLTransposeKernelCreator(const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, +kernel::LiteKernel *OpenCLTransposeKernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, + const lite::Context *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { auto *kernel = new (std::nothrow) TransposeOpenCLKernel(reinterpret_cast<OpParameter *>(opParameter), inputs, outputs); diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.h index 4576bd066c..06e2bead0e 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.h @@ -26,8 +26,8 @@ namespace mindspore::kernel { class TransposeOpenCLKernel : public OpenCLKernel { public: - explicit TransposeOpenCLKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs) + explicit TransposeOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs) : OpenCLKernel(parameter, inputs, outputs) {} ~TransposeOpenCLKernel() override{};
diff --git a/mindspore/lite/src/runtime/kernel/opencl/opencl_kernel.h b/mindspore/lite/src/runtime/kernel/opencl/opencl_kernel.h index d33d760554..c265cee18a 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/opencl_kernel.h +++ b/mindspore/lite/src/runtime/kernel/opencl/opencl_kernel.h @@ -27,15 +27,15 @@ enum class OpenCLMemType { BUF, IMG }; struct OpenCLToFormatParameter { OpParameter op_parameter; - schema::Format src_format{schema::Format_NHWC}; - schema::Format dst_format{schema::Format_NHWC4}; + schema::Format src_format{schema::Format::Format_NHWC}; + schema::Format dst_format{schema::Format::Format_NHWC4}; OpenCLMemType out_mem_type{OpenCLMemType::IMG}; }; class OpenCLKernel : public LiteKernel { public: - explicit OpenCLKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs) + explicit OpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs) : LiteKernel(parameter, inputs, outputs, nullptr, nullptr) {} virtual int Init() { return RET_ERROR; } @@ -56,9 +56,9 @@ class OpenCLKernel : public LiteKernel { protected: OpenCLMemType out_mem_type_{OpenCLMemType::IMG}; - schema::Format in_ori_format_{schema::Format_NHWC}; - schema::Format out_ori_format_{schema::Format_NHWC4}; - schema::Format op_format_{schema::Format_NHWC4}; + schema::Format in_ori_format_{schema::Format::Format_NHWC}; + schema::Format out_ori_format_{schema::Format::Format_NHWC4}; + schema::Format op_format_{schema::Format::Format_NHWC4}; }; } // namespace mindspore::kernel
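The recurring Format_X -> Format::Format_X change throughout this patch re-qualifies the flatbuffers-generated enumerators through the enum's own name. A minimal sketch of why both spellings are valid for an unscoped enum (the enum below is a hand-written stand-in, not the generated schema header):

// Stand-in for the generated schema enum; since C++11 an unscoped enum's
// values may also be named through the enum type, so both spellings compile
// and this patch simply standardizes on the qualified one.
namespace schema {
enum Format : int {
  Format_NCHW = 0,
  Format_NHWC = 1,
  Format_NHWC4 = 2,
};
}  // namespace schema

int main() {
  schema::Format a = schema::Format_NHWC;          // pre-patch spelling
  schema::Format b = schema::Format::Format_NHWC;  // post-patch spelling
  return a == b ? 0 : 1;
}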
diff --git a/mindspore/lite/src/runtime/kernel/opencl/subgraph_opencl_kernel.cc b/mindspore/lite/src/runtime/kernel/opencl/subgraph_opencl_kernel.cc index 1aabf04208..9e5bec0bb8 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/subgraph_opencl_kernel.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/subgraph_opencl_kernel.cc @@ -26,9 +26,9 @@ namespace mindspore::kernel { SubGraphOpenCLKernel::~SubGraphOpenCLKernel() { UnInit(); } -int SubGraphOpenCLKernel::GenToFormatOp(const std::vector<lite::tensor::Tensor *> &in_tensors, +int SubGraphOpenCLKernel::GenToFormatOp(const std::vector<lite::Tensor *> &in_tensors, const std::vector<std::vector<kernel::LiteKernel *>> in_kernels, - std::vector<lite::tensor::Tensor *> *out_tensors, + std::vector<lite::Tensor *> *out_tensors, std::vector<OpenCLToFormatParameter *> *out_parameters, std::vector<LiteKernel *> *out_convert_ops, OpenCLMemType mem_type) { out_tensors->clear(); @@ -66,22 +66,22 @@ int SubGraphOpenCLKernel::GenToFormatOp(const std::vector<lite::tenso auto dst_format = (mem_type == OpenCLMemType::IMG) ? in_kernels[i][0]->in_tensors()[0]->GetFormat() : out_ori_format; auto src_format = (mem_type == OpenCLMemType::IMG) ? in_ori_format : in_kernels[i][0]->out_tensors()[0]->GetFormat(); - lite::tensor::Tensor *new_tensor = new (std::nothrow) lite::tensor::Tensor(); + lite::Tensor *new_tensor = new (std::nothrow) lite::Tensor(); MS_ASSERT(new_tensor); if (new_tensor == nullptr) { MS_LOG(ERROR) << "SubGraphOpenCLKernel new tensor failed!"; return RET_ERROR; } new_tensor->CopyTensor(*in_tensors[i]); - if ((dst_format == schema::Format_NCHW || dst_format == schema::Format_NC4HW4) && - (src_format == schema::Format_NHWC || src_format == schema::Format_NHWC4)) { - auto &shape = new_tensor->shape(); + if ((dst_format == schema::Format::Format_NCHW || dst_format == schema::Format::Format_NC4HW4) && + (src_format == schema::Format::Format_NHWC || src_format == schema::Format::Format_NHWC4)) { + auto shape = new_tensor->shape(); std::vector<int> dst_shape{shape[0], shape[3], shape[1], shape[2]}; new_tensor->set_shape(shape); } - if ((dst_format == schema::Format_NHWC || dst_format == schema::Format_NHWC4) && - (src_format == schema::Format_NCHW || src_format == schema::Format_NC4HW4)) { - auto &shape = new_tensor->shape(); + if ((dst_format == schema::Format::Format_NHWC || dst_format == schema::Format::Format_NHWC4) && + (src_format == schema::Format::Format_NCHW || src_format == schema::Format::Format_NC4HW4)) { + auto shape = new_tensor->shape(); std::vector<int> dst_shape{shape[0], shape[2], shape[3], shape[1]}; new_tensor->set_shape(shape); } @@ -193,7 +193,7 @@ int SubGraphOpenCLKernel::UpdateTensorDataType() { auto ocl_runtime = lite::opencl::OpenCLRuntime::GetInstance(); bool is_fp16 = ocl_runtime->GetFp16Enable(); if (is_fp16 && (in_tensors_[0]->data_type() == kNumberTypeFloat32)) { - std::set<lite::tensor::Tensor *> out_set; + std::set<lite::Tensor *> out_set; out_set.insert(in_tensors_.begin(), in_tensors_.end()); out_set.insert(out_tensors_.begin(), out_tensors_.end()); for (auto iv : nodes_) { @@ -258,14 +258,14 @@ int SubGraphOpenCLKernel::MallocTensorWithReuse() { return RET_OK; } -int SubGraphOpenCLKernel::GetKernelFromToTensor(const std::vector<lite::tensor::Tensor *> &in_tensors, +int SubGraphOpenCLKernel::GetKernelFromToTensor(const std::vector<lite::Tensor *> &in_tensors, const std::vector<kernel::LiteKernel *> &in_kernels, std::vector<std::vector<kernel::LiteKernel *>> *out_kernels, bool is_from) { - std::vector<std::set<lite::tensor::Tensor *>> ksets; + std::vector<std::set<lite::Tensor *>> ksets; for (auto jv : in_kernels) { auto tens = is_from ? 
jv->in_tensors() : jv->out_tensors(); - std::set<lite::tensor::Tensor *> kset; + std::set<lite::Tensor *> kset; kset.insert(tens.begin(), tens.end()); ksets.emplace_back(kset); } @@ -307,7 +307,7 @@ int SubGraphOpenCLKernel::ReSize() { return RET_OK; } int SubGraphOpenCLKernel::Run() { auto ocl_runtime = lite::opencl::OpenCLRuntime::GetInstance(); for (auto &tensor : in_tensors_) { - allocator_->UnmapBuffer(tensor->Data()); + allocator_->UnmapBuffer(tensor->MutableData()); } lite::opencl::OpenCLExecutor executor; diff --git a/mindspore/lite/src/runtime/kernel/opencl/subgraph_opencl_kernel.h b/mindspore/lite/src/runtime/kernel/opencl/subgraph_opencl_kernel.h index 70e695d294..a2f8a0a2b6 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/subgraph_opencl_kernel.h +++ b/mindspore/lite/src/runtime/kernel/opencl/subgraph_opencl_kernel.h @@ -31,8 +31,7 @@ struct SubGraphOpenCLParameter { class SubGraphOpenCLKernel : public SubGraphKernel { public: - explicit SubGraphOpenCLKernel(const std::vector<lite::tensor::Tensor *> inputs, - const std::vector<lite::tensor::Tensor *> outputs, + explicit SubGraphOpenCLKernel(const std::vector<lite::Tensor *> inputs, const std::vector<lite::Tensor *> outputs, const std::vector<kernel::LiteKernel *> inKernels, const std::vector<kernel::LiteKernel *> outKernels, const std::vector<kernel::LiteKernel *> nodes) @@ -48,19 +47,18 @@ class SubGraphOpenCLKernel : public SubGraphKernel { protected: int UpdateTensorDataType(); int MallocTensorWithReuse(); - int GenToFormatOp(const std::vector<lite::tensor::Tensor *> &in_tensors, + int GenToFormatOp(const std::vector<lite::Tensor *> &in_tensors, const std::vector<std::vector<kernel::LiteKernel *>> in_kernels, - std::vector<lite::tensor::Tensor *> *out_tensors, - std::vector<OpenCLToFormatParameter *> *out_parameters, std::vector<LiteKernel *> *out_convert_ops, - OpenCLMemType mem_type); + std::vector<lite::Tensor *> *out_tensors, std::vector<OpenCLToFormatParameter *> *out_parameters, + std::vector<LiteKernel *> *out_convert_ops, OpenCLMemType mem_type); - int GetKernelFromToTensor(const std::vector<lite::tensor::Tensor *> &in_tensors, + int GetKernelFromToTensor(const std::vector<lite::Tensor *> &in_tensors, const std::vector<kernel::LiteKernel *> &in_kernels, std::vector<std::vector<kernel::LiteKernel *>> *out_kernels, bool is_from); private: lite::opencl::OpenCLAllocator *allocator_; - std::vector<lite::tensor::Tensor *> in_convert_tensors_; - std::vector<lite::tensor::Tensor *> out_convert_tensors_; + std::vector<lite::Tensor *> in_convert_tensors_; + std::vector<lite::Tensor *> out_convert_tensors_; std::vector<OpenCLToFormatParameter *> in_parameters_; std::vector<OpenCLToFormatParameter *> out_parameters_; std::vector<LiteKernel *> in_convert_ops_; diff --git a/mindspore/lite/src/runtime/kernel/opencl/utils.cc b/mindspore/lite/src/runtime/kernel/opencl/utils.cc index a72ad23c0b..91375c045f 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/utils.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/utils.cc @@ -23,9 +23,8 @@ using mindspore::lite::KernelRegistrar; namespace mindspore::lite { -kernel::LiteKernel *GetOpenCLKernel(const std::vector<lite::tensor::Tensor *> &in_tensors, - const std::vector<lite::tensor::Tensor *> &out_tensors, OpParameter *parameter, - const Context *ctx, const kernel::KernelKey &key) { +kernel::LiteKernel *GetOpenCLKernel(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors, + OpParameter *parameter, const Context *ctx, const kernel::KernelKey &key) { auto creator = KernelRegistry::GetInstance()->GetCreator(key); if (creator != nullptr) { auto kernel = creator(in_tensors, out_tensors, parameter, nullptr, key, nullptr); diff --git a/mindspore/lite/src/runtime/kernel/opencl/utils.h b/mindspore/lite/src/runtime/kernel/opencl/utils.h index 699bf04430..ed62ccf5d2 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/utils.h +++ b/mindspore/lite/src/runtime/kernel/opencl/utils.h @@ -26,9 +26,8 @@ #include "src/common//utils.h" namespace mindspore::lite { -kernel::LiteKernel *GetOpenCLKernel(const std::vector<lite::tensor::Tensor *> &in_tensors, - const std::vector<lite::tensor::Tensor *> &out_tensors, OpParameter *parameter, - const Context *ctx, const kernel::KernelKey &key); 
+kernel::LiteKernel *GetOpenCLKernel(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors, + OpParameter *parameter, const Context *ctx, const kernel::KernelKey &key); } namespace mindspore::kernel { @@ -91,8 +90,7 @@ std::vector<size_t> GetCommonLocalSize(const std::vector<size_t> &global, int ma std::string CLErrorCode(cl_int error_code); template <class T1, class T2> -void PackNCHWToNC4HW4(void *src, void *dst, int batch, int plane, int channel, - const std::function<T2(T1)> &to_dtype) { +void PackNCHWToNC4HW4(void *src, void *dst, int batch, int plane, int channel, const std::function<T2(T1)> &to_dtype) { int c4 = UP_DIV(channel, C4NUM); for (int b = 0; b < batch; b++) { int src_offset = b * plane * channel; @@ -105,15 +103,13 @@ void PackNCHWToNC4HW4(void *src, void *dst, int batch, int plane, int channel, for (int k = 0; k < plane; k++) { int src_kernel_offset = src_c_offset + k; int dst_kernel_offset = dst_c_offset + C4NUM * k + c4_block_rem; - (static_cast<T2 *>(dst) + dst_kernel_offset)[0] = - to_dtype((static_cast<T1 *>(src) + src_kernel_offset)[0]); + (static_cast<T2 *>(dst) + dst_kernel_offset)[0] = to_dtype((static_cast<T1 *>(src) + src_kernel_offset)[0]); } } } } template <class T1, class T2> -void PackNHWCToNHWC4(void *src, void *dst, int batch, int plane, int channel, - const std::function<T2(T1)> &to_dtype) { +void PackNHWCToNHWC4(void *src, void *dst, int batch, int plane, int channel, const std::function<T2(T1)> &to_dtype) { int c4 = UP_DIV(channel, C4NUM); int nhwc4_batch_unit_offset = c4 * C4NUM * plane; int ic_remainder_ = channel % C4NUM; @@ -137,8 +133,7 @@ void PackNHWCToNHWC4(void *src, void *dst, int batch, int plane, int channel, } } template <class T1, class T2> -void PackNHWCToNC4HW4(void *src, void *dst, int batch, int plane, int channel, - const std::function<T2(T1)> &to_dtype) { +void PackNHWCToNC4HW4(void *src, void *dst, int batch, int plane, int channel, const std::function<T2(T1)> &to_dtype) { int c4 = UP_DIV(channel, C4NUM); for (int b = 0; b < batch; b++) { int src_oc_offset = b * plane * channel; 
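The Pack* templates above interleave channels into groups of four so packed buffers line up with float4 texels. A self-contained float-to-float rendering of the NHWC -> NHWC4 case (PackNHWCToNHWC4Float is a hypothetical helper that drops the template's T1 -> T2 conversion and assumes dst is zero-initialized):

#include <cstring>

// NHWC -> NHWC4: pad the channel dimension up to a multiple of 4 so every
// (h, w) position occupies whole 4-float groups. dst must hold
// batch * plane * UP_DIV(channel, 4) * 4 floats and start zeroed, so the
// padding lanes stay zero after the copy.
void PackNHWCToNHWC4Float(const float *src, float *dst, int batch, int plane, int channel) {
  const int c4 = (channel + 3) / 4;  // UP_DIV(channel, C4NUM)
  for (int b = 0; b < batch; ++b) {
    const float *src_b = src + b * plane * channel;
    float *dst_b = dst + b * plane * c4 * 4;
    for (int p = 0; p < plane; ++p) {
      std::memcpy(dst_b + p * c4 * 4, src_b + p * channel, channel * sizeof(float));
    }
  }
}

For a 1x2x2x3 input (plane = 4, channel = 3), dst needs 1 * 4 * 1 * 4 = 16 zero-initialized floats.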
diff --git a/mindspore/lite/src/runtime/opencl/opencl_executor.cc b/mindspore/lite/src/runtime/opencl/opencl_executor.cc index 170e937db0..6b324e46c5 100644 --- a/mindspore/lite/src/runtime/opencl/opencl_executor.cc +++ b/mindspore/lite/src/runtime/opencl/opencl_executor.cc @@ -17,14 +17,13 @@ #include "src/runtime/opencl/opencl_executor.h" #include "src/runtime/kernel/opencl/utils.h" #include "nnacl/pack.h" -#include "src/common/ms_tensor_utils.h" #include "include/errorcode.h" namespace mindspore::lite::opencl { int OpenCLExecutor::Prepare(const std::vector<kernel::LiteKernel *> &kernels) { return RET_OK; } -int OpenCLExecutor::Run(std::vector<tensor::Tensor *> &inputs, std::vector<tensor::Tensor *> &outputs, +int OpenCLExecutor::Run(std::vector<Tensor *> &inputs, std::vector<Tensor *> &outputs, std::vector<kernel::LiteKernel *> &kernels, Allocator *allocator, const session::KernelCallBack &before, const session::KernelCallBack &after) { kernel::LiteKernelUtil::InitTensorRefCount(kernels); @@ -34,7 +33,7 @@ int OpenCLExecutor::Run(std::vector<Tensor *> &inputs, std::vector<Te kernel->name(); if (before != nullptr) { - if (!before(PackToMSTensors(kernel->in_tensors()), PackToMSTensors(kernel->out_tensors()), callbackParam)) { + if (!before(TensorVectorCast(kernel->in_tensors()), TensorVectorCast(kernel->out_tensors()), callbackParam)) { MS_LOG(ERROR) << "run kernel before_callback failed, name: " << kernel->name(); } } @@ -60,7 +59,7 @@ int OpenCLExecutor::Run(std::vector<Tensor *> &inputs, std::vector<Te - if (!after(PackToMSTensors(kernel->in_tensors()), PackToMSTensors(kernel->out_tensors()), callbackParam)) { + if (!after(TensorVectorCast(kernel->in_tensors()), TensorVectorCast(kernel->out_tensors()), callbackParam)) { MS_LOG(ERROR) << "run kernel after_callback failed, name: " << kernel->name(); } } diff --git a/mindspore/lite/src/runtime/opencl/opencl_executor.h b/mindspore/lite/src/runtime/opencl/opencl_executor.h index 02058a8ab2..dd3072b625 100644 --- a/mindspore/lite/src/runtime/opencl/opencl_executor.h +++ b/mindspore/lite/src/runtime/opencl/opencl_executor.h @@ -31,9 +31,9 @@ class OpenCLExecutor : Executor { int Prepare(const std::vector<kernel::LiteKernel *> &kernels); - int Run(std::vector<tensor::Tensor *> &inputs, std::vector<tensor::Tensor *> &outputs, - std::vector<kernel::LiteKernel *> &kernels, Allocator *allocator = nullptr, - const session::KernelCallBack &before = nullptr, const session::KernelCallBack &after = nullptr); + int Run(std::vector<Tensor *> &inputs, std::vector<Tensor *> &outputs, std::vector<kernel::LiteKernel *> &kernels, + Allocator *allocator = nullptr, const session::KernelCallBack &before = nullptr, + const session::KernelCallBack &after = nullptr); protected: Context *context = nullptr; diff --git a/mindspore/lite/src/runtime/opencl/opencl_runtime.cc b/mindspore/lite/src/runtime/opencl/opencl_runtime.cc index c3b6204e46..3ebce581fa 100644 --- a/mindspore/lite/src/runtime/opencl/opencl_runtime.cc +++ b/mindspore/lite/src/runtime/opencl/opencl_runtime.cc @@ -309,12 +309,11 @@ int OpenCLRuntime::BuildKernel(cl::Kernel &kernel, const std::string &program_na "-DWRITE_IMAGE=write_imagef -DREAD_IMAGE=read_imagef -DTO_FLT=convert_float -DTO_FLT4=convert_float4 "; } - auto build_options_ext = std::accumulate( - build_options.begin(), build_options.end(), std::string(""), - [](const std::string &options, const std::string &option) -> std::string { - auto res = options + " " + option; - return res; - }); + auto build_options_ext = std::accumulate(build_options.begin(), build_options.end(), std::string(""), + [](const std::string &options, const std::string &option) -> std::string { + auto res = options + " " + option; + return res; + }); build_options_str += default_build_opts_; // program identifier = program_name + build_options std::string build_program_key = program_name + build_options_str + build_options_ext; diff --git a/mindspore/lite/src/runtime/parallel_executor.cc b/mindspore/lite/src/runtime/parallel_executor.cc index 488e7ad736..b1fbc05b20 100644 --- a/mindspore/lite/src/runtime/parallel_executor.cc +++ b/mindspore/lite/src/runtime/parallel_executor.cc @@ -53,7 +53,7 @@ static int RunKernel(void *data, int index) { return 0; } -int ParallelExecutor::Run(std::vector<tensor::Tensor *> &in_tensors, std::vector<tensor::Tensor *> &out_tensors, +int ParallelExecutor::Run(std::vector<Tensor *> &in_tensors, std::vector<Tensor *> &out_tensors, std::vector<kernel::LiteKernel *> &kernels, Allocator *allocator, const session::KernelCallBack &before, const session::KernelCallBack &after) { MS_ASSERT(nullptr != allocator); @@ -62,7 +62,7 @@ int ParallelExecutor::Run(std::vector<Tensor *> &in_tensors, std::vector<Tensor MS_LOG(ERROR) << "Graph input tensor is nullptr"; return RET_ERROR; } - if (inTensor->GetFormat() != schema::Format_NHWC) { + if (inTensor->GetFormat() != schema::Format::Format_NHWC) { MS_LOG(ERROR) << "Model input tensor should be NHWC"; return RET_ERROR; } diff --git a/mindspore/lite/src/runtime/parallel_executor.h b/mindspore/lite/src/runtime/parallel_executor.h index 95dfbbd58f..ed90c9471c 100644 --- a/mindspore/lite/src/runtime/parallel_executor.h +++ b/mindspore/lite/src/runtime/parallel_executor.h @@ -32,7 +32,7 @@ class ParallelExecutor : public Executor { int Prepare(std::vector<kernel::LiteKernel *> &kernels) override; - int Run(std::vector<tensor::Tensor *> &in_tensors, std::vector<tensor::Tensor *> &out_tensors, + int Run(std::vector<Tensor *> &in_tensors, std::vector<Tensor *> &out_tensors, std::vector<kernel::LiteKernel *> &kernels, Allocator *allocator = nullptr, const session::KernelCallBack &before 
= nullptr, const session::KernelCallBack &after = nullptr) override; inline kernel::LiteKernel *GetReadyKernel(const int index) { return readyKernels.at(index); } diff --git a/mindspore/lite/src/runtime/thread_pool.h b/mindspore/lite/src/runtime/thread_pool.h index d537aaaf02..1b9e60bf15 100644 --- a/mindspore/lite/src/runtime/thread_pool.h +++ b/mindspore/lite/src/runtime/thread_pool.h @@ -23,7 +23,7 @@ typedef enum { MID_MODE = -1, /**< bind middle cpu first */ HIGHER_MODE = 1, /**< bind higher cpu first */ - NO_BIND_MODE = 0 /**< no bind */ + NO_BIND_MODE = 0 /**< no bind */ } BindMode; /// \brief ThreadPoolId defined for specifying which thread pool to use. diff --git a/mindspore/lite/src/runtime/workspace_pool.cc b/mindspore/lite/src/runtime/workspace_pool.cc index 5fd4ab4eb2..2fbfe6605e 100644 --- a/mindspore/lite/src/runtime/workspace_pool.cc +++ b/mindspore/lite/src/runtime/workspace_pool.cc @@ -152,4 +152,3 @@ WorkspacePool::~WorkspacePool() { } } // namespace predict } // namespace mindspore - diff --git a/mindspore/lite/src/runtime/workspace_pool.h b/mindspore/lite/src/runtime/workspace_pool.h index 9342200b28..0acdd05e1e 100644 --- a/mindspore/lite/src/runtime/workspace_pool.h +++ b/mindspore/lite/src/runtime/workspace_pool.h @@ -42,4 +42,3 @@ class WorkspacePool { } // namespace predict } // namespace mindspore #endif // MINDSPORE_LITE_SRC_RUNTIME_WORKSPACE_POOL_H_ - diff --git a/mindspore/lite/src/scheduler.cc b/mindspore/lite/src/scheduler.cc index 93a797e1f3..e8ee1cec96 100644 --- a/mindspore/lite/src/scheduler.cc +++ b/mindspore/lite/src/scheduler.cc @@ -27,7 +27,7 @@ #endif namespace mindspore::lite { -int Scheduler::Schedule(const lite::Model *model, std::vector *tensors, +int Scheduler::Schedule(const lite::Model *model, std::vector *tensors, std::vector *kernels) { // 1. op ---> kernel // 2. 
sub graph @@ -62,8 +62,8 @@ int Scheduler::ReSizeKernels(const std::vector &kernels) { MS_LOG(ERROR) << "kernel(" << kernels[i]->name() << ")'s primitive is nullptr!"; return RET_ERROR; } - std::vector &inputs = kernels[i]->in_tensors(); - std::vector &outputs = kernels[i]->out_tensors(); + std::vector &inputs = kernels[i]->in_tensors(); + std::vector &outputs = kernels[i]->out_tensors(); auto ret = primitive->InferShape(inputs, outputs); if (ret != RET_OK) { MS_LOG(ERROR) << "InferShape failed, name: " << kernels[i]->name() << ", ret = " << ret; @@ -78,7 +78,7 @@ int Scheduler::ReSizeKernels(const std::vector &kernels) { return RET_OK; } -int Scheduler::InferShape(const lite::Model *model, std::vector *tensors) { +int Scheduler::InferShape(const lite::Model *model, std::vector *tensors) { MS_ASSERT(model != nullptr); MS_ASSERT(tensors != nullptr); bool infer_shape_interrupt = false; @@ -86,8 +86,8 @@ int Scheduler::InferShape(const lite::Model *model, std::vectornodes_[i]; MS_ASSERT(node != nullptr); - std::vector inputs; - std::vector outputs; + std::vector inputs; + std::vector outputs; auto in_size = node->input_indices_.size(); for (size_t j = 0; j < in_size; ++j) { inputs.emplace_back(tensors->at(node->input_indices_[j])); @@ -105,14 +105,12 @@ int Scheduler::InferShape(const lite::Model *model, std::vectorInferShape(inputs, outputs); if (ret == RET_INFER_INVALID) { MS_LOG(INFO) << "InferShape shouldn't be done before runtime, name: " << node->name_ - << ", type: " - << schema::EnumNamePrimitiveType(static_cast(primitive->Type())) + << ", type: " << schema::EnumNamePrimitiveType(static_cast(primitive->Type())) << "flag set to false."; primitive->SetInferFlag(false); infer_shape_interrupt = true; } else if (ret != RET_OK) { - MS_LOG(ERROR) << "InferShape failed, name: " << node->name_ - << ", type: " + MS_LOG(ERROR) << "InferShape failed, name: " << node->name_ << ", type: " << schema::EnumNamePrimitiveType(static_cast(primitive->Type())); return RET_INFER_ERR; } @@ -121,7 +119,7 @@ int Scheduler::InferShape(const lite::Model *model, std::vector *tensors, +int Scheduler::InitOp2Kernel(const lite::Model *model, std::vector *tensors, std::vector *kernels) { MS_ASSERT(model != nullptr); MS_ASSERT(tensors != nullptr); @@ -130,8 +128,8 @@ int Scheduler::InitOp2Kernel(const lite::Model *model, std::vectornodes_[i]; MS_ASSERT(node != nullptr); - std::vector inputs; - std::vector outputs; + std::vector inputs; + std::vector outputs; auto in_size = node->input_indices_.size(); for (size_t j = 0; j < in_size; ++j) { inputs.emplace_back(tensors->at(node->input_indices_[j])); @@ -144,8 +142,7 @@ int Scheduler::InitOp2Kernel(const lite::Model *model, std::vectorScheduleNode(inputs, outputs, primitive, node); if (kernel == nullptr) { - MS_LOG(ERROR) << "ScheduleNode return nullptr, name: " << node->name_ - << ", type: " + MS_LOG(ERROR) << "ScheduleNode return nullptr, name: " << node->name_ << ", type: " << schema::EnumNamePrimitiveType(static_cast(primitive->Type())); return RET_ERROR; } @@ -188,7 +185,7 @@ void Scheduler::ConstructSubgraphs(std::vector *kernels) { tensor->set_allocator(context_->allocator.get()); } } - std::vector output_tensor = kernel::LiteKernelUtil::SubgraphOutputTensors(temp_kernels); + std::vector output_tensor = kernel::LiteKernelUtil::SubgraphOutputTensors(temp_kernels); for (auto tensor : output_tensor) { if (context_->float16_priority && tensor->data_type() == kNumberTypeFloat16) { tensor->set_data_type(kNumberTypeFloat32); @@ -212,8 +209,8 @@ kernel::LiteKernel 
*Scheduler::CreateSubKernel(const std::vector input_tensors = kernel::LiteKernelUtil::SubgraphInputTensors(kernels); - std::vector output_tensors = kernel::LiteKernelUtil::SubgraphOutputTensors(kernels); + std::vector input_tensors = kernel::LiteKernelUtil::SubgraphInputTensors(kernels); + std::vector output_tensors = kernel::LiteKernelUtil::SubgraphOutputTensors(kernels); std::vector input_kernels = kernel::LiteKernelUtil::SubgraphInputKernels(kernels); std::vector output_kernels = kernel::LiteKernelUtil::SubgraphOutputKernels(kernels); sub_kernel = @@ -228,13 +225,13 @@ kernel::LiteKernel *Scheduler::CreateSubKernel(const std::vector &in_tensors, - const std::vector &out_tensors, +kernel::LiteKernel *Scheduler::ScheduleNode(const std::vector &in_tensors, + const std::vector &out_tensors, const mindspore::lite::PrimitiveC *primitive, const Model::Node *node) { MS_ASSERT(primitive != nullptr); TypeId data_type = GetFirstFp32Fp16OrInt8Type(in_tensors); kernel::KernelKey desc{kernel::KERNEL_ARCH::kCPU, data_type, static_cast(primitive->Type())}; - if (context_->device_ctx_.type == DT_GPU) { + if (context_->device_type_ == DT_GPU) { desc.arch = kernel::KERNEL_ARCH::kGPU; auto *kernel = KernelRegistry::GetInstance()->GetKernel(in_tensors, out_tensors, primitive, context_, desc); if (kernel != nullptr) { @@ -242,8 +239,8 @@ kernel::LiteKernel *Scheduler::ScheduleNode(const std::vector return kernel; } else { MS_LOG(ERROR) << "Not supported GPU Op " - << schema::EnumNamePrimitiveType(static_cast(primitive->Type())) << " " - << node->name_; + << schema::EnumNamePrimitiveType(static_cast(primitive->Type())) << " " + << node->name_; } } @@ -272,7 +269,7 @@ kernel::LiteKernel *Scheduler::ScheduleNode(const std::vector return nullptr; } -TypeId Scheduler::GetFirstFp32Fp16OrInt8Type(const std::vector &in_tensors) { +TypeId Scheduler::GetFirstFp32Fp16OrInt8Type(const std::vector &in_tensors) { for (const auto &tensor : in_tensors) { auto dtype = tensor->data_type(); if (dtype == kNumberTypeFloat32 || dtype == kNumberTypeFloat16 || dtype == kNumberTypeInt8) { @@ -294,7 +291,7 @@ void Scheduler::SetKernelTensorDataType(kernel::LiteKernel *kernel) { } } else if (kernel->desc().data_type == kNumberTypeFloat32) { for (auto tensor : kernel->in_tensors()) { - if (tensor->TensorType() != schema::NodeType_ValueNode && tensor->data_type() == kNumberTypeFloat16) { + if (tensor->category() != Tensor::Category::CONST && tensor->data_type() == kNumberTypeFloat16) { tensor->set_data_type(kNumberTypeFloat32); } } diff --git a/mindspore/lite/src/scheduler.h b/mindspore/lite/src/scheduler.h index 708273649e..20fe42cb6c 100644 --- a/mindspore/lite/src/scheduler.h +++ b/mindspore/lite/src/scheduler.h @@ -27,27 +27,24 @@ namespace mindspore::lite { class Scheduler { public: explicit Scheduler(const Context *ctx) { context_ = const_cast(ctx); } - int Schedule(const lite::Model *model, std::vector *tensors, - std::vector *kernels); + int Schedule(const lite::Model *model, std::vector *tensors, std::vector *kernels); int ReSizeKernels(const std::vector &kernels); protected: - kernel::LiteKernel *ScheduleNode(const std::vector &in_tensors, - const std::vector &out_tensors, - const mindspore::lite::PrimitiveC *primitive, - const Model::Node *cnode); + kernel::LiteKernel *ScheduleNode(const std::vector &in_tensors, const std::vector &out_tensors, + const mindspore::lite::PrimitiveC *primitive, const Model::Node *cnode); private: - int InitOp2Kernel(const lite::Model *model, std::vector *tensors, + int InitOp2Kernel(const 
lite::Model *model, std::vector *tensors, std::vector *kernels); - int InferShape(const lite::Model *model, std::vector *tensors); + int InferShape(const lite::Model *model, std::vector *tensors); // construct SubGraphKernel for each kernel-group in markedKernelGroup void ConstructSubgraphs(std::vector *kernels); kernel::LiteKernel *CreateSubKernel(const std::vector &kernels, kernel::KERNEL_ARCH arch); - TypeId GetFirstFp32Fp16OrInt8Type(const std::vector &in_tensors); + TypeId GetFirstFp32Fp16OrInt8Type(const std::vector &in_tensors); void SetKernelTensorDataType(kernel::LiteKernel *kernel); protected: diff --git a/mindspore/lite/src/ir/tensor.cc b/mindspore/lite/src/tensor.cc similarity index 54% rename from mindspore/lite/src/ir/tensor.cc rename to mindspore/lite/src/tensor.cc index 9698d9d632..48fad5d80c 100644 --- a/mindspore/lite/src/ir/tensor.cc +++ b/mindspore/lite/src/tensor.cc @@ -17,19 +17,18 @@ #include #include #include -#include "src/ir/tensor.h" +#include +#include "src/tensor.h" #include "securec/include/securec.h" #include "include/errorcode.h" namespace mindspore { namespace lite { -namespace tensor { #define kMaxMallocSize 1024 * 1024 * 100 -Tensor::Tensor(const TypeId data_type, const std::vector &shape, const schema::Format &format, - schema::NodeType tensorType) - : MetaTensor(data_type, shape), format_(format), tensorType(tensorType) {} +Tensor::Tensor(const TypeId data_type, const std::vector &shape, const schema::Format &format, Category category) + : data_type_(data_type), shape_(shape), format_(format), category_(category) {} -Tensor::Tensor(const Tensor &tensor) : MetaTensor(tensor) { +Tensor::Tensor(const Tensor &tensor) { auto ret = CopyTensor(tensor, true); if (0 != ret) { MS_LOG(EXCEPTION) << "CopyTensorData error"; @@ -57,7 +56,7 @@ int Tensor::CopyTensorData(const Tensor &srcTensor) { int Tensor::CopyTensor(const Tensor &srcTensor, bool copyData) { this->data_type_ = srcTensor.data_type_; this->shape_ = srcTensor.shape_; - this->tensorType = srcTensor.tensorType; + this->category_ = srcTensor.category_; if (copyData) { auto ret = CopyTensorData(srcTensor); if (0 != ret) { @@ -95,39 +94,30 @@ bool Tensor::operator==(const Tensor &tensor) { return data_ == tensor.data_ && shape_ == tensor.shape_ && data_type_ == tensor.data_type_; } -bool Tensor::operator==(const Value &other) const { - if (other.isa()) { - auto other_ = static_cast(other); - return *this == other_; - } else { - return false; - } -} - int32_t Tensor::Batch() const { if (this->shape_.size() != 4 && this->shape_.size() != 2) { MS_LOG(ERROR) << "Unsupported tensor shape: " << this->shape().size(); return -1; } switch (this->format_) { - case schema::Format_NHWC: - case schema::Format_NHWC4: - case schema::Format_NCHW: - case schema::Format_NC4HW4: - case schema::Format_KCHW: - case schema::Format_KHWC: - case schema::Format_NC: - case schema::Format_NC4: + case schema::Format::Format_NHWC: + case schema::Format::Format_NHWC4: + case schema::Format::Format_NCHW: + case schema::Format::Format_NC4HW4: + case schema::Format::Format_KCHW: + case schema::Format::Format_KHWC: + case schema::Format::Format_NC: + case schema::Format::Format_NC4: return this->shape_[0]; - case schema::Format_HWCK: - case schema::Format_CHWK: + case schema::Format::Format_HWCK: + case schema::Format::Format_CHWK: return this->shape_[3]; - case schema::Format_HWKC: + case schema::Format::Format_HWKC: return this->shape_[2]; - case schema::Format_CKHW: + case schema::Format::Format_CKHW: return this->shape_[1]; default: - 
MS_LOG(ERROR) << "Unsupported format: " << schema::EnumNameFormat(this->format_); + MS_LOG(ERROR) << "Unsupported format: " << EnumNameFormat(this->format_); return -1; } } @@ -138,21 +128,21 @@ int32_t Tensor::Channel() const { return -1; } switch (this->format_) { - case schema::Format_NCHW: - case schema::Format_KCHW: - case schema::Format_NC: - case schema::Format_NC4: + case schema::Format::Format_NCHW: + case schema::Format::Format_KCHW: + case schema::Format::Format_NC: + case schema::Format::Format_NC4: return this->shape_[1]; - case schema::Format_HWCK: + case schema::Format::Format_HWCK: return this->shape_[2]; - case schema::Format_HWKC: - case schema::Format_NHWC: - case schema::Format_NHWC4: - case schema::Format_NC4HW4: - case schema::Format_KHWC: + case schema::Format::Format_HWKC: + case schema::Format::Format_NHWC: + case schema::Format::Format_NHWC4: + case schema::Format::Format_NC4HW4: + case schema::Format::Format_KHWC: return this->shape_[3]; - case schema::Format_CKHW: - case schema::Format_CHWK: + case schema::Format::Format_CKHW: + case schema::Format::Format_CHWK: return this->shape_[0]; default: return -1; @@ -165,23 +155,23 @@ int32_t Tensor::Height() const { return -1; } switch (this->format_) { - case schema::Format_NCHW: - case schema::Format_KCHW: - case schema::Format_CKHW: + case schema::Format::Format_NCHW: + case schema::Format::Format_KCHW: + case schema::Format::Format_CKHW: return this->shape_[2]; - case schema::Format_NHWC: - case schema::Format_NHWC4: - case schema::Format_NC4HW4: - case schema::Format_KHWC: - case schema::Format_CHWK: + case schema::Format::Format_NHWC: + case schema::Format::Format_NHWC4: + case schema::Format::Format_NC4HW4: + case schema::Format::Format_KHWC: + case schema::Format::Format_CHWK: return this->shape_[1]; - case schema::Format_HWCK: - case schema::Format_HWKC: - case schema::Format_HW: - case schema::Format_HW4: + case schema::Format::Format_HWCK: + case schema::Format::Format_HWKC: + case schema::Format::Format_HW: + case schema::Format::Format_HW4: return this->shape_[0]; default: - MS_LOG(ERROR) << "Unsupported format: " << schema::EnumNameFormat(this->format_); + MS_LOG(ERROR) << "Unsupported format: " << EnumNameFormat(this->format_); return -1; } } @@ -192,20 +182,20 @@ int32_t Tensor::Width() const { return -1; } switch (this->format_) { - case schema::Format_NCHW: - case schema::Format_KCHW: - case schema::Format_CKHW: + case schema::Format::Format_NCHW: + case schema::Format::Format_KCHW: + case schema::Format::Format_CKHW: return this->shape_[3]; - case schema::Format_KHWC: - case schema::Format_NHWC: - case schema::Format_NHWC4: - case schema::Format_NC4HW4: - case schema::Format_CHWK: + case schema::Format::Format_KHWC: + case schema::Format::Format_NHWC: + case schema::Format::Format_NHWC4: + case schema::Format::Format_NC4HW4: + case schema::Format::Format_CHWK: return this->shape_[2]; - case schema::Format_HWCK: - case schema::Format_HWKC: - case schema::Format_HW: - case schema::Format_HW4: + case schema::Format::Format_HWCK: + case schema::Format::Format_HWKC: + case schema::Format::Format_HW: + case schema::Format::Format_HW4: return this->shape_[1]; default: return -1; 
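Batch()/Channel()/Height()/Width() above pick shape indices from the layout tag rather than assuming NHWC. A compact sketch of the same dispatch for the two most common layouts (illustrative types only, not the full switch):

#include <vector>

// Layout-aware accessors in miniature: the index of each logical dimension
// depends on the format tag, as in the switches above.
enum class Fmt { NHWC, NCHW };

struct Dims {
  Fmt fmt;
  std::vector<int> shape;  // 4-D
  int Batch() const { return shape[0]; }  // same index in both layouts
  int Height() const { return fmt == Fmt::NHWC ? shape[1] : shape[2]; }
  int Width() const { return fmt == Fmt::NHWC ? shape[2] : shape[3]; }
  int Channel() const { return fmt == Fmt::NHWC ? shape[3] : shape[1]; }
};

int main() {
  Dims d{Fmt::NCHW, {1, 3, 224, 224}};
  return (d.Channel() == 3 && d.Height() == 224) ? 0 : 1;
}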
@@ -224,9 +214,9 @@ int32_t Tensor::ElementsC4Num() const { std::string Tensor::ToString() const { std::ostringstream oss; - oss << "Format: " << schema::EnumNameFormat(this->format_); + oss << "schema::Format: " << EnumNameFormat(this->format_); oss << " DataType: " << this->data_type_; - oss << " NodeType: " << schema::EnumNameNodeType(this->tensorType); + oss << " Category: " << this->category_; oss << " Shape:"; for (auto &dim : this->shape()) { oss << " " << dim; @@ -259,7 +249,7 @@ std::string Tensor::ToString() const { return "Data of tensor is nullptr"; } else { for (int i = 0; i < 40 && i < this->ElementsNum(); i++) { - oss << " " << static_cast(data[i]); + oss << " " << static_cast(data[i]); } } } break; @@ -270,83 +260,14 @@ std::string Tensor::ToString() const { return oss.str(); } -void Tensor::AddQuantParam(const tensor::QuantArg &quant_arg) { this->quant_params_.push_back(quant_arg); } - -std::vector<tensor::QuantArg> Tensor::GetQuantParams() const { return this->quant_params_; } - -LiteTensor::LiteTensor() { this->tensor_impl_ = new tensor::Tensor(); } - -LiteTensor::LiteTensor(TypeId data_type, const std::vector<int> &shape) { - this->tensor_impl_ = new tensor::Tensor(data_type, shape); -} - -LiteTensor::LiteTensor(tensor::Tensor *tensor_ptr) { this->tensor_impl_ = tensor_ptr; } - -TypeId LiteTensor::data_type() const { - MS_ASSERT(this->tensor_impl_ != nullptr); - return this->tensor_impl_->data_type(); -} - -TypeId LiteTensor::set_data_type(TypeId data_type) { - MS_ASSERT(this->tensor_impl_ != nullptr); - return this->tensor_impl_->set_data_type(data_type); -} - -std::vector<int> LiteTensor::shape() const { - MS_ASSERT(this->tensor_impl_ != nullptr); - return this->tensor_impl_->shape(); -} - -size_t LiteTensor::set_shape(const std::vector<int> &shape) { - MS_ASSERT(this->tensor_impl_ != nullptr); - return this->tensor_impl_->set_shape(shape); -} - -int LiteTensor::DimensionSize(size_t index) const { - MS_ASSERT(this->tensor_impl_ != nullptr); - return this->tensor_impl_->DimensionSize(index); -} - -int LiteTensor::ElementsNum() const { - MS_ASSERT(this->tensor_impl_ != nullptr); - return this->tensor_impl_->ElementsNum(); -} - -std::size_t LiteTensor::hash() const { - MS_ASSERT(this->tensor_impl_ != nullptr); - return this->tensor_impl_->hash(); } +void Tensor::AddQuantParam(const QuantArg &quant_arg) { this->quant_params_.push_back(quant_arg); } -tensor::Tensor *LiteTensor::tensor() const { - MS_ASSERT(this->tensor_impl_ != nullptr); - return this->tensor_impl_; -} +std::vector<QuantArg> Tensor::GetQuantParams() const { return this->quant_params_; } -size_t LiteTensor::Size() const { - MS_ASSERT(this->tensor_impl_ != nullptr); - return this->tensor_impl_->Size(); +std::vector<mindspore::tensor::MSTensor *> TensorVectorCast(const std::vector<Tensor *> &src) { + std::vector<mindspore::tensor::MSTensor *> target(src.size()); + std::transform(src.begin(), src.end(), target.begin(), [](Tensor *t) { return dynamic_cast<mindspore::tensor::MSTensor *>(t); }); + return target; } - -void *LiteTensor::MutableData() const { - MS_ASSERT(this->tensor_impl_ != nullptr); - this->tensor_impl_->Prepare(); - auto data = this->tensor_impl_->Data(); - if (nullptr == data) { - auto ret = tensor_impl_->MallocData(); - if (0 != ret) { - return nullptr; - } - } - return this->tensor_impl_->Data(); -} -LiteTensor::~LiteTensor() { delete this->tensor_impl_; } - -void LiteTensor::SetTensorImpl(tensor::Tensor *tensor) { this->tensor_impl_ = tensor; } -} // namespace tensor } // namespace lite -namespace tensor { -MSTensor *MSTensor::CreateTensor(TypeId data_type, const std::vector<int> &shape) { - return new mindspore::lite::tensor::LiteTensor(data_type, shape); -} -} // namespace tensor } // namespace mindspore diff --git a/mindspore/lite/src/ir/tensor.h b/mindspore/lite/src/tensor.h similarity index 64% rename from mindspore/lite/src/ir/tensor.h rename to mindspore/lite/src/tensor.h index 31b1623f14..0bbcdb7fb1 100644 --- 
a/mindspore/lite/src/ir/tensor.h +++ b/mindspore/lite/src/tensor.h @@ -20,42 +20,66 @@ #include <memory> #include <vector> #include <string> -#include "ir/meta_tensor.h" +#include <numeric> +#include <functional> #include "include/ms_tensor.h" -#include "ir/dtype/type_id.h" #include "src/runtime/allocator.h" +#include "ir/dtype/type_id.h" +#include "utils/log_adapter.h" #include "schema/model_generated.h" namespace mindspore { namespace lite { -namespace tensor { + struct QuantArg { double scale; int32_t zeroPoint; }; -class Tensor : public mindspore::tensor::MetaTensor { +class Tensor : public mindspore::tensor::MSTensor { public: - Tensor() : MetaTensor() {} + enum Category { + CONST, // weight tensor + VAR // activation tensor + }; + Tensor() = default; - Tensor(const TypeId data_type, const std::vector<int> &shape, const schema::Format &format = schema::Format_NHWC, - schema::NodeType tensorType = schema::NodeType_Parameter); + Tensor(const TypeId data_type, const std::vector<int> &shape, + const schema::Format &format = schema::Format::Format_NHWC, Category category = VAR); Tensor(const Tensor &tensor); - ~Tensor() override; + virtual ~Tensor(); int CopyTensorData(const Tensor &srcTensor); int CopyTensor(const Tensor &srcTensor, bool copyData = false); - MS_DECLARE_PARENT(Tensor, MetaTensor) - virtual Tensor &operator=(const Tensor &tensor); virtual bool operator==(const Tensor &tensor); - bool operator==(const Value &other) const override; + TypeId data_type() const override { return data_type_; } + + void set_data_type(TypeId data_type) { data_type_ = data_type; } + + std::vector<int> shape() const override { return shape_; } + + void set_shape(const std::vector<int> &shape) { shape_ = shape; } + + int DimensionSize(size_t index) const override { + int dim_size = -1; + if (index < shape_.size()) { + dim_size = shape_[index]; + } else { + MS_LOG(ERROR) << "Dimension index is wrong: " << index; + } + return dim_size; + } + + int ElementsNum() const override { + return std::accumulate(shape_.begin(), shape_.end(), 1LL, std::multiplies<int64_t>()); + } int32_t Batch() const; @@ -67,9 +91,7 @@ class Tensor : public mindspore::tensor::MetaTensor { int32_t ElementsC4Num() const; - int DataSize() const { return this->ElementsNum(); } - - size_t Size() const { + size_t Size() const override { size_t size = 0; switch (this->data_type_) { case kNumberTypeFloat64: @@ -113,8 +135,8 @@ class Tensor : public mindspore::tensor::MetaTensor { MS_LOG(ERROR) << "Not support the type: " << this->data_type_; return 0; } - size *= (format_ == schema::Format_NC4HW4 || format_ == schema::Format_NHWC4) ? ElementsC4Num() - : MetaTensor::ElementsNum(); + size *= (format_ == schema::Format::Format_NC4HW4 || format_ == schema::Format::Format_NHWC4) ? 
ElementsC4Num() + : ElementsNum(); return size; } @@ -156,11 +178,21 @@ class Tensor : public mindspore::tensor::MetaTensor { return 0; } - void *Data() { return data_; } + void *MutableData() override { + if (this->data_ == nullptr) { + auto ret = this->MallocData(); + if (ret != 0) { + MS_LOG(WARNING) << "Malloc data failed"; + } + } + return this->data_; + } + + void *data_c() const { return data_; } void SetData(void *data) { this->data_ = data; } - schema::NodeType TensorType() { return this->tensorType; } + Category category() { return this->category_; } void SetFormat(schema::Format format) { this->format_ = format; } @@ -172,11 +204,11 @@ class Tensor : public mindspore::tensor::MetaTensor { void decRefCount() { this->refCount--; } - std::string ToString() const override; + std::string ToString() const; - void AddQuantParam(const tensor::QuantArg &quant_arg); + void AddQuantParam(const QuantArg &quant_arg); - std::vector<tensor::QuantArg> GetQuantParams() const; + std::vector<QuantArg> GetQuantParams() const; void Prepare() { if (allocator_ != nullptr) { @@ -187,52 +219,24 @@ protected: void *data_ = nullptr; void *device_data_ = nullptr; + TypeId data_type_; + std::vector<int> shape_; schema::Format format_; - schema::NodeType tensorType; + Category category_; size_t refCount = 0; - std::vector<tensor::QuantArg> quant_params_; + std::vector<QuantArg> quant_params_; mindspore::lite::Allocator *allocator_ = nullptr; }; -class LiteTensor : public mindspore::tensor::MSTensor { - public: - LiteTensor(); - - LiteTensor(TypeId data_type, const std::vector<int> &shape); - - explicit LiteTensor(tensor::Tensor *tensor_ptr); - - ~LiteTensor() override; - - TypeId data_type() const override; - - TypeId set_data_type(TypeId data_type) override; - - std::vector<int> shape() const override; - - size_t set_shape(const std::vector<int> &shape) override; - - int DimensionSize(size_t index) const override; - - int ElementsNum() const override; - - std::size_t hash() const override; - - tensor::Tensor *tensor() const; - - size_t Size() const override; - - void *MutableData() const override; - - void SetTensorImpl(tensor::Tensor *tensor); - - protected: - tensor::Tensor *tensor_impl_; -}; - -using TensorPtr = std::shared_ptr<Tensor>; -} // namespace tensor +inline Tensor::Category TensorCategory(const schema::Tensor *tensor) { + return (tensor->nodeType() == schema::NodeType::NodeType_ValueNode) ? Tensor::Category::CONST : Tensor::Category::VAR; +} +inline Tensor::Category TensorCategory(const schema::NodeType type) { + return (type == schema::NodeType::NodeType_ValueNode) ? Tensor::Category::CONST : Tensor::Category::VAR; +} +std::vector<mindspore::tensor::MSTensor *> TensorVectorCast(const std::vector<Tensor *> &src); } // namespace lite } // namespace mindspore +using TensorPtr = std::shared_ptr<mindspore::lite::Tensor>; #endif // MINDSPORE_LITE_SRC_IR_TENSOR_H_ 
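The Category enum above replaces the schema NodeType tag on tensors: value nodes carry weights, everything else is a mutable activation. A small standalone sketch of the mapping the TensorCategory helpers perform (the NodeType enum below is a hand-written stand-in for the generated schema type):

// Stand-in NodeType -> Category mapping, mirroring the inline helpers above.
namespace schema {
enum NodeType : int { NodeType_ValueNode = 0, NodeType_Parameter = 1, NodeType_CNode = 2 };
}  // namespace schema

enum class Category { CONST, VAR };  // CONST: weight tensor, VAR: activation tensor

inline Category TensorCategory(schema::NodeType type) {
  return (type == schema::NodeType_ValueNode) ? Category::CONST : Category::VAR;
}

int main() {
  // Weights originate from value nodes; parameters and op outputs are VAR.
  return TensorCategory(schema::NodeType_ValueNode) == Category::CONST ? 0 : 1;
}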
diff --git a/mindspore/lite/src/train/loss_kernel.h b/mindspore/lite/src/train/loss_kernel.h index 70ac705771..5205cb072c 100644 --- a/mindspore/lite/src/train/loss_kernel.h +++ b/mindspore/lite/src/train/loss_kernel.h @@ -21,10 +21,9 @@ namespace mindspore::kernel { class LossKernel : public LiteKernel { public: - LossKernel() = default; - explicit LossKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs, - const std::vector<lite::tensor::Tensor *> &outputs, - const lite::Context *ctx, + LossKernel() = default; + explicit LossKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx, const lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~LossKernel() = default; diff --git a/mindspore/lite/src/train/train_populate_parameter.cc b/mindspore/lite/src/train/train_populate_parameter.cc index b8725f4fe2..b2766fd991 100644 --- a/mindspore/lite/src/train/train_populate_parameter.cc +++ b/mindspore/lite/src/train/train_populate_parameter.cc @@ -146,10 +146,10 @@ OpParameter *PopulateConvolutionGradFilterParameter(const mindspore::lite::Primi auto convg_primitive = reinterpret_cast<mindspore::lite::Conv2DGradFilter *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - param->kernel_h_ = convg_primitive->GetKernelH(); - param->kernel_w_ = convg_primitive->GetKernelW(); - param->stride_h_ = convg_primitive->GetStrideH(); - param->stride_w_ = convg_primitive->GetStrideW(); + param->kernel_h_ = convg_primitive->GetKernelH(); + param->kernel_w_ = convg_primitive->GetKernelW(); + param->stride_h_ = convg_primitive->GetStrideH(); + param->stride_w_ = convg_primitive->GetStrideW(); param->dilation_h_ = convg_primitive->GetDilateH(); param->dilation_w_ = convg_primitive->GetDilateW(); param->pad_u_ = convg_primitive->GetPadUp(); @@ -187,10 +187,10 @@ OpParameter *PopulateConvolutionGradInputParameter(const mindspore::lite::Primit auto convg_primitive = reinterpret_cast<mindspore::lite::Conv2DGradInput *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - param->kernel_h_ = convg_primitive->GetKernelH(); - param->kernel_w_ = convg_primitive->GetKernelW(); - param->stride_h_ = convg_primitive->GetStrideH(); - param->stride_w_ = convg_primitive->GetStrideW(); + param->kernel_h_ = convg_primitive->GetKernelH(); + param->kernel_w_ = convg_primitive->GetKernelW(); + param->stride_h_ = convg_primitive->GetStrideH(); + param->stride_w_ = convg_primitive->GetStrideW(); param->dilation_h_ = convg_primitive->GetDilateH(); param->dilation_w_ = convg_primitive->GetDilateW(); param->pad_u_ = convg_primitive->GetPadUp(); diff --git a/mindspore/lite/src/train/train_populate_parameter.h b/mindspore/lite/src/train/train_populate_parameter.h index 3c187850f0..0829efbe4f 100644 --- a/mindspore/lite/src/train/train_populate_parameter.h +++ b/mindspore/lite/src/train/train_populate_parameter.h @@ -21,8 +21,7 @@ namespace mindspore::kernel { - void PopulateTrainParameters(); - +void PopulateTrainParameters(); } // namespace mindspore::kernel #endif // MINDSPORE_LITE_SRC_TRAIN_TRAIN_POPULATE_PARAMETER_H_ diff --git a/mindspore/lite/src/train/train_session.cc b/mindspore/lite/src/train/train_session.cc index 408599209e..7d49c2518e 100644 --- a/mindspore/lite/src/train/train_session.cc +++ b/mindspore/lite/src/train/train_session.cc @@ -19,7 +19,7 @@ #include "utils/log_adapter.h" #include "include/context.h" #include "src/common/utils.h" -#include 
"mindspore/lite/src/ir/tensor.h" +#include "mindspore/lite/src/tensor.h" #include "src/train/loss_kernel.h" #include "src/train/train_populate_parameter.h" #include "src/runtime/runtime_api.h" @@ -43,28 +43,24 @@ int TrainSession::CompileGraph(lite::Model *model) { return LiteSession::CompileGraph(model); } -void* TrainSession::ExportToBuf(void* buf, size_t *len) const { -// auto train_model_impl = (dynamic_cast(model_->model_impl())); -// return train_model_impl->ExportToBuf(buf, len); +void *TrainSession::ExportToBuf(void *buf, size_t *len) const { + // auto train_model_impl = (dynamic_cast(model_->model_impl())); + // return train_model_impl->ExportToBuf(buf, len); return nullptr; } - int TrainSession::RunGraph(const session::KernelCallBack &before, const session::KernelCallBack &after) { - auto ms_output_tensors = GetOutputs(); + auto ms_output_tensors = GetOutputMap(); this->outputs_.clear(); for (auto ms_tensors : ms_output_tensors) - for (auto ms_tensor : ms_tensors.second) - this->outputs_.push_back((dynamic_cast(ms_tensor))->tensor()); - if (train_mode_) - return LiteSession::RunGraph(before, after); + for (auto ms_tensor : ms_tensors.second) this->outputs_.push_back((dynamic_cast(ms_tensor))); + if (train_mode_) return LiteSession::RunGraph(before, after); // object is expected to run only inference part of graph // prepare a lit of kernels till the loss function -- temporary solution std::vector infference_kernels; for (auto kernel : this->kernels_) { - if (dynamic_cast(kernel) != nullptr) - break; + if (dynamic_cast(kernel) != nullptr) break; infference_kernels.push_back(kernel); } @@ -76,8 +72,8 @@ int TrainSession::RunGraph(const session::KernelCallBack &before, const session: if (before == nullptr && after == nullptr) { return executor.Run(this->inputs_, this->outputs_, infference_kernels, this->context_->allocator.get()); } else { - return executor.Run(this->inputs_, this->outputs_, infference_kernels, this->context_->allocator.get(), - before, after); + return executor.Run(this->inputs_, this->outputs_, infference_kernels, this->context_->allocator.get(), before, + after); } } @@ -89,8 +85,8 @@ void TrainSession::train() { train_mode_ = true; ext_output_map_.clear(); for (auto kernel : this->kernels_) { - if (dynamic_cast(kernel) != nullptr) { - auto *ms_tensor = new lite::tensor::LiteTensor(kernel->out_tensors().at(0)); + if (dynamic_cast(kernel) != nullptr) { + auto *ms_tensor = new lite::Tensor(*kernel->out_tensors().at(0)); ext_output_map_[kernel->name()].emplace_back(ms_tensor); } } @@ -102,26 +98,24 @@ void TrainSession::eval() { kernel->eval(); } train_mode_ = false; - kernel::LiteKernel* last_kernel = nullptr; + kernel::LiteKernel *last_kernel = nullptr; // We should get in_kernels and then get all last kernels ext_output_map_ = output_node_map_; for (auto kernel : this->kernels_) { - if ((dynamic_cast(kernel) != nullptr) && - (last_kernel != nullptr)) { - auto *ms_tensor = new lite::tensor::LiteTensor(last_kernel->out_tensors().at(0)); + if ((dynamic_cast(kernel) != nullptr) && (last_kernel != nullptr)) { + auto *ms_tensor = new lite::Tensor(*last_kernel->out_tensors().at(0)); ext_output_map_[last_kernel->name()].emplace_back(ms_tensor); } last_kernel = kernel; } } -std::unordered_map> TrainSession::GetOutputs() const { +std::unordered_map> TrainSession::GetOutputMap() const { return ext_output_map_; } std::vector TrainSession::GetOutputsByName(const std::string &name) const { auto ret_vect = LiteSession::GetOutputsByNodeName(name); // TODO(emir): 
GetOutputsByTensorName? - if (ret_vect.size() > 0) - return ret_vect; + if (ret_vect.size() > 0) return ret_vect; auto ret = ext_output_map_.find(name); if (ret == ext_output_map_.end()) { MS_LOG(WARNING) << "Node " << name << " is not an output node"; @@ -131,6 +125,4 @@ std::vector TrainSession::GetOutputsByName(const std::string return ret->second; } - - } // namespace mindspore::session diff --git a/mindspore/lite/test/CMakeLists.txt b/mindspore/lite/test/CMakeLists.txt index f0011c79b8..e33c31429e 100644 --- a/mindspore/lite/test/CMakeLists.txt +++ b/mindspore/lite/test/CMakeLists.txt @@ -10,79 +10,44 @@ string(REPLACE " -Werror " " " CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") string(REPLACE " -Werror " " " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") STRING(REPLACE " -fvisibility=hidden " " -fvisibility=default " CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") STRING(REPLACE " -fvisibility=hidden " " -fvisibility=default " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") -### anf src -set(ANF_SRC - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/meta_tensor.cc - ${CORE_DIR}/gvar/logging_level.cc - ${CORE_DIR}/gvar/typeid_manager.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/base/base.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/utils/log_adapter.cc - ) + if(BUILD_CONVERTER) - set(ANF_SRC - ${ANF_SRC} - # core/base - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/base/base_ref.cc - # core/ir - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/anf.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/anf_extends.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/meta_func_graph.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/func_graph.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/graph_utils.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/func_graph_cloner.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/func_graph_extends.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/manager.cc +file (GLOB_RECURSE CORE_SRC + ${CMAKE_CURRENT_SOURCE_DIR}/../../core/*.cc + ) +set(CCSRC_SRC + ## ccsrc + ${CCSRC_DIR}/backend/optimizer/common/pattern_engine.cc + ${CCSRC_DIR}/backend/optimizer/common/visit.cc + ${CCSRC_DIR}/backend/optimizer/common/optimizer.cc + ) +else(BUILD_CONVERTER) + set(CORE_SRC + ${CMAKE_CURRENT_SOURCE_DIR}/../../core/abstract/utils.cc + ${CMAKE_CURRENT_SOURCE_DIR}/../../core/abstract/dshape.cc + ${CMAKE_CURRENT_SOURCE_DIR}/../../core/abstract/abstract_value.cc + ${CMAKE_CURRENT_SOURCE_DIR}/../../core/abstract/abstract_function.cc ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/primitive.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/tensor.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/visitor.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/meta_tensor_extends.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/dtype.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/dtype_extends.cc ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/named.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/scope.cc ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/value.cc ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/value_extends.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/dtype/ref.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/dtype/tensor_type.cc + ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/dtype.cc + ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/dtype_extends.cc ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/dtype/container.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/dtype/empty.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/dtype/number.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/dtype/ref.cc ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/dtype/type.cc + ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/dtype/empty.cc + 
${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/dtype/tensor_type.cc ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/dtype/type_extends.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/utils/any.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/utils/symbolic.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/utils/misc.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/utils/trace_base.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/utils/trace_info.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/utils/label.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/utils/info.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/utils/profile.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/utils/ms_context.cc - # core/abstract - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/abstract/abstract_function.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/abstract/analysis_context.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/abstract/param_validator.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/abstract/abstract_value.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/abstract/dshape.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../core/abstract/utils.cc - ## ccsrc - ${CCSRC_DIR}/debug/draw.cc - ${CCSRC_DIR}/pybind_api/export_flags.cc - ${CCSRC_DIR}/utils/context/context_extends.cc - ${CCSRC_DIR}/frontend/parallel/costmodel_context.cc - ${CCSRC_DIR}/backend/optimizer/common/pattern_engine.cc - ${CCSRC_DIR}/backend/optimizer/common/visit.cc - ${CCSRC_DIR}/backend/optimizer/common/optimizer.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../src/common/graph_utils_extends.cc - ) -else() - set(ANF_SRC - ${ANF_SRC} - ${CMAKE_CURRENT_SOURCE_DIR}/../src/ir/meta_tensor_extends.cc + ${CMAKE_CURRENT_SOURCE_DIR}/../../core/ir/dtype/number.cc + ${CMAKE_CURRENT_SOURCE_DIR}/../../core/gvar/logging_level.cc + ${CMAKE_CURRENT_SOURCE_DIR}/../../core/gvar/typeid_manager.cc + ${CMAKE_CURRENT_SOURCE_DIR}/../../core/base/base.cc + ${CMAKE_CURRENT_SOURCE_DIR}/../../core/base/base_ref.cc + ${CMAKE_CURRENT_SOURCE_DIR}/../../core/utils/log_adapter.cc ) endif() + ### cpu kernel file(GLOB KERNEL_OP_SRC ${LITE_DIR}/src/runtime/kernel/arm/base/*.cc @@ -176,7 +141,8 @@ endif() ### runtime framework file(GLOB_RECURSE OPS_SRC ${LITE_DIR}/src/ops/*.cc) set(TEST_LITE_SRC - ${ANF_SRC} + ${CORE_SRC} + ${CCSRC_SRC} ${OPS_SRC} ${KERNEL_OP_SRC} ${LITE_DIR}/src/runtime/allocator.cc @@ -184,9 +150,7 @@ set(TEST_LITE_SRC ${LITE_DIR}/src/runtime/thread_pool.c ${LITE_DIR}/src/runtime/workspace_pool.cc ${LITE_DIR}/src/runtime/parallel_executor.cc - ${LITE_DIR}/src/ir/tensor.cc -# ${LITE_DIR}/src/ir/primitive_t_value.cc - ${LITE_DIR}/src/context.cc + ${LITE_DIR}/src/tensor.cc ${LITE_DIR}/src/executor.cc ${LITE_DIR}/src/kernel_registry.cc ${LITE_DIR}/src/lite_kernel.cc @@ -198,7 +162,6 @@ set(TEST_LITE_SRC ${LITE_DIR}/src/common/file_utils.cc ${LITE_DIR}/src/common/file_utils_ext.cc ${LITE_DIR}/src/common/utils.cc - ${LITE_DIR}/src/common/ms_tensor_utils.cc ${LITE_DIR}/tools/common/graph_util.cc ${LITE_DIR}/tools/common/tensor_util.cc ${LITE_DIR}/tools/common/node_util.cc @@ -312,6 +275,7 @@ set(TEST_SRC ${TEST_DIR}/ut/src/runtime/kernel/arm/common/pack_tests.cc ${TEST_DIR}/ut/src/infer_test.cc ${TEST_DIR}/ut/src/utils_test.cc + #${TEST_DIR}/ut/internal/infer_test.cc ) if (SUPPORT_TRAIN) @@ -362,6 +326,10 @@ endif () add_executable(lite-test ${TEST_SRC}) target_link_libraries(lite-test dl ${GTEST_LIBRARY}) +if (PLATFORM_ARM64) + target_link_libraries(lite-test mslite_internal) +endif() + if (BUILD_MINDDATA STREQUAL "lite") target_link_libraries(lite-test minddata-lite diff --git a/mindspore/lite/test/main.cc b/mindspore/lite/test/main.cc index 
40ede795c7..e936ceccaa 100644 --- a/mindspore/lite/test/main.cc +++ b/mindspore/lite/test/main.cc @@ -21,7 +21,7 @@ namespace mindspore { extern void InitSubModulesLogLevel(); } -GTEST_API_ int main(int argc, char** argv) { +GTEST_API_ int main(int argc, char **argv) { mindspore::InitSubModulesLogLevel(); testing::InitGoogleTest(&argc, argv); int ret = RUN_ALL_TESTS(); diff --git a/mindspore/lite/test/st/benchmark_test.cc b/mindspore/lite/test/st/benchmark_test.cc index 99c2e42666..93ea4bb6b3 100644 --- a/mindspore/lite/test/st/benchmark_test.cc +++ b/mindspore/lite/test/st/benchmark_test.cc @@ -27,48 +27,42 @@ class BenchmarkTest : public mindspore::CommonTest { TEST_F(BenchmarkTest, TestVideo) { const char *argv[] = {"./benchmark", "--modelPath=./hiai/hiai_label_and_video.ms", - "--inDataPath=./hiai/hiai_label_and_video.bin", - "--calibDataPath=./hiai/hiai_label_and_video.txt"}; + "--inDataPath=./hiai/hiai_label_and_video.bin", + "--calibDataPath=./hiai/hiai_label_and_video.txt"}; auto status = RunBenchmark(4, argv); ASSERT_EQ(status, RET_OK); } TEST_F(BenchmarkTest, TestOCR_02) { const char *argv[] = {"./benchmark", "--modelPath=./hiai/hiai_cv_focusShootOCRMOdel_02.ms", - "--inDataPath=./hiai/hiai_cv_focusShootOCRMOdel_02.bin", - "--calibDataPath=./hiai/hiai_cv_focusShootOCRMOdel_02.txt"}; + "--inDataPath=./hiai/hiai_cv_focusShootOCRMOdel_02.bin", + "--calibDataPath=./hiai/hiai_cv_focusShootOCRMOdel_02.txt"}; auto status = RunBenchmark(4, argv); ASSERT_EQ(status, RET_OK); } TEST_F(BenchmarkTest, TestOCR_02_GPU) { -const char *argv[] = {"./benchmark", "--modelPath=./hiai/model_02.ms", - "--inDataPath=./hiai/model_02_in.bin", - "--calibDataPath=./hiai/model_02_out.bin", - "--device=GPU"}; -auto status = RunBenchmark(5, argv); -ASSERT_EQ(status, RET_OK); + const char *argv[] = {"./benchmark", "--modelPath=./hiai/model_02.ms", "--inDataPath=./hiai/model_02_in.bin", + "--calibDataPath=./hiai/model_02_out.bin", "--device=GPU"}; + auto status = RunBenchmark(5, argv); + ASSERT_EQ(status, RET_OK); } TEST_F(BenchmarkTest, TestOCR_02_GPU_PERF) { -const char *argv[] = {"./benchmark", "--modelPath=./hiai/model_02.ms", - "--inDataPath=./hiai/model_02_in.bin", - "--device=GPU"}; -auto status = RunBenchmark(4, argv); -ASSERT_EQ(status, RET_OK); + const char *argv[] = {"./benchmark", "--modelPath=./hiai/model_02.ms", "--inDataPath=./hiai/model_02_in.bin", + "--device=GPU"}; + auto status = RunBenchmark(4, argv); + ASSERT_EQ(status, RET_OK); } TEST_F(BenchmarkTest, Test_MV2_GPU) { -const char *argv[] = {"./benchmark", "--modelPath=./hiai/mobilenet_v2.ms", - "--inDataPath=./hiai/mobilenet_v2_in.bin", - "--calibDataPath=./hiai/mobilenet_v2_out.bin", - "--device=GPU"}; -auto status = RunBenchmark(5, argv); -ASSERT_EQ(status, RET_OK); + const char *argv[] = {"./benchmark", "--modelPath=./hiai/mobilenet_v2.ms", "--inDataPath=./hiai/mobilenet_v2_in.bin", + "--calibDataPath=./hiai/mobilenet_v2_out.bin", "--device=GPU"}; + auto status = RunBenchmark(5, argv); + ASSERT_EQ(status, RET_OK); } TEST_F(BenchmarkTest, Test_MV2_GPU_PERF) { - const char *argv[] = {"./benchmark", "--modelPath=./hiai/mobilenet_v2.ms", - "--inDataPath=./hiai/mobilenet_v2_in.bin", + const char *argv[] = {"./benchmark", "--modelPath=./hiai/mobilenet_v2.ms", "--inDataPath=./hiai/mobilenet_v2_in.bin", "--device=GPU"}; auto status = RunBenchmark(4, argv); ASSERT_EQ(status, RET_OK); @@ -76,11 +70,10 @@ TEST_F(BenchmarkTest, Test_MV2_GPU_PERF) { TEST_F(BenchmarkTest, TestHebing) { const char *argv[] = {"./benchmark", 
"--modelPath=./hiai/model_hebing_3branch.ms", - "--inDataPath=./hiai/model_hebing_3branch.bin", - "--calibDataPath=./hiai/model_hebing_3branch.txt"}; + "--inDataPath=./hiai/model_hebing_3branch.bin", + "--calibDataPath=./hiai/model_hebing_3branch.txt"}; auto status = RunBenchmark(4, argv); ASSERT_EQ(status, RET_OK); } } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/test/st/converter_test.cc b/mindspore/lite/test/st/converter_test.cc index 31a6669103..b02729f8cf 100644 --- a/mindspore/lite/test/st/converter_test.cc +++ b/mindspore/lite/test/st/converter_test.cc @@ -55,4 +55,3 @@ TEST_F(ConverterTest, TestHebing) { } } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/test/ut/internal/infer_test.cc b/mindspore/lite/test/ut/internal/infer_test.cc new file mode 100644 index 0000000000..f7f7ae121b --- /dev/null +++ b/mindspore/lite/test/ut/internal/infer_test.cc @@ -0,0 +1,63 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include "common/common_test.h" +#include "internal/include/model.h" +#include "internal/include/lite_session.h" +#include "internal/include/context.h" +#include "internal/include/errorcode.h" +#include "internal/include/ms_tensor.h" +#include "nnacl/conv_parameter.h" + +namespace mindspore { +class InferTest : public mindspore::CommonTest { + public: + InferTest() {} +}; + +TEST_F(InferTest, TestSession) { +// Model model; +// Node *node = (Node *)malloc(sizeof(Node)); +// node->name_ = "conv2d"; +// uint32_t index = model.all_tensors_.size(); +// node->input_indices_ = {index}; +// MSTensor *in = CreateTensor(kNumberTypeFloat32, {3, 3, 24, 24}); +// model.all_tensors_.emplace_back(in); +// +// index = model.all_tensors_.size(); +// node->output_indices_ = {index}; +// MSTensor *out = CreateTensor(kNumberTypeFloat32, {3, 3, 24, 24}); +// model.all_tensors_.emplace_back(out); +// +// ConvParameter *param = (ConvParameter *)malloc(sizeof(ConvParameter)); +// param->kernel_w_ = 3; +// // todo: fill other param fields +// node->primitive_ = (PrimitiveC *)param; +// model.nodes_.push_back(node); +// +// LiteSession session; +// session.CompileGraph(&model); +// TensorPtrVector invec = session.GetInputs(); +// ASSERT_EQ(invec.size(), 1); +// // todo: fill inputs data +// session.RunGraph(); +// TensorPtrVector outvec = session.GetOutputs(); +// ASSERT_EQ(outvec.size(), 1); +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc b/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc index 3a0323d448..21248e6dd5 100644 --- a/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc +++ b/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc @@ -21,7 +21,7 @@ #include "dataset/core/tensor.h" #include "dataset/core/cv_tensor.h" #include "dataset/core/data_type.h" -#include "mindspore/lite/src/ir/tensor.h" +#include "mindspore/lite/src/tensor.h" using MSTensor = mindspore::tensor::MSTensor; using DETensor = 
diff --git a/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc b/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc
index 3a0323d448..21248e6dd5 100644
--- a/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc
+++ b/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc
@@ -21,7 +21,7 @@
 #include "dataset/core/tensor.h"
 #include "dataset/core/cv_tensor.h"
 #include "dataset/core/data_type.h"
-#include "mindspore/lite/src/ir/tensor.h"
+#include "mindspore/lite/src/tensor.h"
 
 using MSTensor = mindspore::tensor::MSTensor;
 using DETensor = mindspore::tensor::DETensor;
@@ -44,10 +44,10 @@ TEST_F(MindDataTestTensorDE, MSTensorBasic) {
 TEST_F(MindDataTestTensorDE, MSTensorConvertToLiteTensor) {
   std::shared_ptr<Tensor> t = std::make_shared<Tensor>(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32));
   auto ms_tensor = std::shared_ptr<MSTensor>(new DETensor(t));
-  std::shared_ptr<MSTensor> lite_ms_tensor = std::shared_ptr<MSTensor>(
-    std::dynamic_pointer_cast<DETensor>(ms_tensor)->ConvertToLiteTensor());
+  std::shared_ptr<MSTensor> lite_ms_tensor =
+    std::shared_ptr<MSTensor>(std::dynamic_pointer_cast<DETensor>(ms_tensor)->ConvertToLiteTensor());
   // check if the lite_ms_tensor is the derived LiteTensor
-  LiteTensor * lite_tensor = static_cast<LiteTensor *>(lite_ms_tensor.get());
+  LiteTensor *lite_tensor = static_cast<LiteTensor *>(lite_ms_tensor.get());
   ASSERT_EQ(lite_tensor != nullptr, true);
 }
 
@@ -84,7 +84,7 @@ TEST_F(MindDataTestTensorDE, MSTensorMutableData) {
   std::shared_ptr<Tensor> t;
   Tensor::CreateFromVector(x, TensorShape({2, 2}), &t);
   auto ms_tensor = std::shared_ptr<MSTensor>(new DETensor(t));
-  float *data = static_cast<float*>(ms_tensor->MutableData());
+  float *data = static_cast<float *>(ms_tensor->MutableData());
   std::vector<float> tensor_vec(data, data + ms_tensor->ElementsNum());
   ASSERT_EQ(x == tensor_vec, true);
 }
@@ -105,4 +105,3 @@ TEST_F(MindDataTestTensorDE, MSTensorCreateFromMemory) {
   auto ms_tensor = std::shared_ptr<MSTensor>(new DETensor(t));
   ASSERT_EQ(ms_tensor->hash() == mem_tensor->hash(), true);
 }
-
diff --git a/mindspore/lite/test/ut/src/dataset/eager_test.cc b/mindspore/lite/test/ut/src/dataset/eager_test.cc
index a4c31ac8e2..cf3d02a040 100644
--- a/mindspore/lite/test/ut/src/dataset/eager_test.cc
+++ b/mindspore/lite/test/ut/src/dataset/eager_test.cc
@@ -44,7 +44,8 @@ TEST_F(MindDataTestEager, Test1) {
   Path base_dir = Path(in_dir);
   MS_LOG(WARNING) << base_dir.toString() << ".";
   if (!base_dir.IsDirectory() || !base_dir.Exists()) {
-    MS_LOG(INFO) << "Input dir is not a directory or doesn't exist" << ".";
+    MS_LOG(INFO) << "Input dir is not a directory or doesn't exist"
+                 << ".";
   }
   auto t_start = std::chrono::high_resolution_clock::now();
   // check if output_dir exists and create it if it does not exist
@@ -66,6 +67,6 @@ TEST_F(MindDataTestEager, Test1) {
     EXPECT_EQ(image->DimensionSize(1), 224);
   }
   auto t_end = std::chrono::high_resolution_clock::now();
-  double elapsed_time_ms = std::chrono::duration<double, std::milli>(t_end-t_start).count();
+  double elapsed_time_ms = std::chrono::duration<double, std::milli>(t_end - t_start).count();
   MS_LOG(INFO) << "duration: " << elapsed_time_ms << " ms\n";
 }
diff --git a/mindspore/lite/test/ut/src/infer_test.cc b/mindspore/lite/test/ut/src/infer_test.cc
index 9de1a2dde1..1811233c54 100644
--- a/mindspore/lite/test/ut/src/infer_test.cc
+++ b/mindspore/lite/test/ut/src/infer_test.cc
@@ -107,7 +107,7 @@ TEST_F(InferTest, TestConvNode) {
   content = nullptr;
   auto context = new lite::Context;
   context->cpu_bind_mode_ = lite::NO_BIND;
-  context->device_ctx_.type = lite::DT_CPU;
+  context->device_type_ = lite::DT_CPU;
   context->thread_num_ = 4;
   auto session = session::LiteSession::CreateSession(context);
   ASSERT_NE(nullptr, session);
@@ -130,10 +130,9 @@ TEST_F(InferTest, TestConvNode) {
   memcpy(data, input_data, input_size);
   ret = session->RunGraph();
   ASSERT_EQ(lite::RET_OK, ret);
-  auto outputs = session->GetOutputMapByNode();
+  auto outputs = session->GetOutputs();
   ASSERT_EQ(outputs.size(), 1);
-  ASSERT_EQ(outputs.begin()->second.size(), 1);
-  auto outTensor = outputs.begin()->second.front();
+  auto outTensor = outputs.begin()->second;
   ASSERT_NE(nullptr, outTensor);
   ASSERT_EQ(28 * 28 * 32, outTensor->ElementsNum());
   ASSERT_EQ(TypeId::kNumberTypeFloat32, outTensor->data_type());
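
These infer_test updates all track the same interface change in LiteSession (see include/lite_session.h earlier in this patch): GetOutputMapByNode(), whose map values were vectors of tensors per node, is replaced by GetOutputs(), which yields a single tensor per map entry. A minimal before/after sketch of calling code; only what the assertions in these tests rely on is shown, the exact map key semantics are not taken from the header:

    // before this patch: a node could map to several output tensors
    auto node_outputs = session->GetOutputMapByNode();
    auto out_old = node_outputs.begin()->second.front();

    // after this patch: one output tensor per map entry
    auto outputs = session->GetOutputs();
    auto out_new = outputs.begin()->second;
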
@@ -206,7 +205,7 @@ TEST_F(InferTest, TestAddNode) {
   content = nullptr;
   auto context = new lite::Context;
   context->cpu_bind_mode_ = lite::NO_BIND;
-  context->device_ctx_.type = lite::DT_CPU;
+  context->device_type_ = lite::DT_CPU;
   context->thread_num_ = 4;
   auto session = session::LiteSession::CreateSession(context);
   ASSERT_NE(nullptr, session);
@@ -222,10 +221,9 @@ TEST_F(InferTest, TestAddNode) {
   (void)inTensor1->MutableData();
   ret = session->RunGraph();
   ASSERT_EQ(lite::RET_OK, ret);
-  auto outputs = session->GetOutputMapByNode();
+  auto outputs = session->GetOutputs();
   ASSERT_EQ(outputs.size(), 1);
-  ASSERT_EQ(outputs.begin()->second.size(), 1);
-  auto outTensor = outputs.begin()->second.front();
+  auto outTensor = outputs.begin()->second;
   ASSERT_NE(nullptr, outTensor);
   ASSERT_EQ(28 * 28 * 3, outTensor->ElementsNum());
   ASSERT_EQ(TypeId::kNumberTypeFloat32, outTensor->data_type());
@@ -308,7 +306,7 @@ TEST_F(InferTest, TestParallelExecutor) {
   content = nullptr;
   auto context = new lite::Context;
   context->cpu_bind_mode_ = lite::NO_BIND;
-  context->device_ctx_.type = lite::DT_CPU;
+  context->device_type_ = lite::DT_CPU;
   context->thread_num_ = 4;
   auto session = new SessionWithParallelExecutor();
   session->Init(context);
@@ -325,10 +323,9 @@ TEST_F(InferTest, TestParallelExecutor) {
   (void)inTensor1->MutableData();
   ret = session->RunGraph();
   ASSERT_EQ(lite::RET_OK, ret);
-  auto outputs = session->GetOutputMapByNode();
+  auto outputs = session->GetOutputs();
   ASSERT_EQ(outputs.size(), 1);
-  ASSERT_EQ(outputs.begin()->second.size(), 1);
-  auto outTensor = outputs.begin()->second.front();
+  auto outTensor = outputs.begin()->second;
   ASSERT_NE(nullptr, outTensor);
   ASSERT_EQ(28 * 28 * 3, outTensor->ElementsNum());
   ASSERT_EQ(TypeId::kNumberTypeFloat32, outTensor->data_type());
@@ -349,7 +346,7 @@ TEST_F(InferTest, TestModel) {
   delete[] buf[0];
   auto context = new lite::Context;
   context->cpu_bind_mode_ = lite::NO_BIND;
-  context->device_ctx_.type = lite::DT_CPU;
+  context->device_type_ = lite::DT_CPU;
   context->thread_num_ = 4;
   auto session = session::LiteSession::CreateSession(context);
   ASSERT_NE(nullptr, session);
@@ -362,7 +359,7 @@ TEST_F(InferTest, TestModel) {
   (void)inTensor->MutableData();
   ret = session->RunGraph();
   ASSERT_EQ(lite::RET_OK, ret);
-  auto outputs = session->GetOutputMapByNode();
+  auto outputs = session->GetOutputs();
   MS_LOG(INFO) << "Passed";
 }
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/common/strided_slice_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/common/strided_slice_tests.cc
index 4ef77d515c..02b08216d4 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/common/strided_slice_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/common/strided_slice_tests.cc
@@ -45,14 +45,14 @@ void InitStridedSliceParam(StridedSliceParameter *strided_slice_param) {
 }
 
 TEST_F(TestStridedSlice, StridedSlice) {
-  lite::tensor::Tensor in_tensor(kNumberTypeFloat32, {1, 2, 4});
-  lite::tensor::Tensor out_tensor(kNumberTypeFloat32, {1, 1, 2});
+  lite::Tensor in_tensor(kNumberTypeFloat32, {1, 2, 4});
+  lite::Tensor out_tensor(kNumberTypeFloat32, {1, 1, 2});
   float input_data[] = {0.2390374, 0.92039955, 0.05051243, 0.49574447, 0.8355223, 0.02647042, 0.08811307, 0.4566604};
   float output_data[2] = {0};
   in_tensor.SetData(input_data);
   out_tensor.SetData(output_data);
-  std::vector<lite::tensor::Tensor *> inputs = {&in_tensor};
-  std::vector<lite::tensor::Tensor *> outputs = {&out_tensor};
+  std::vector<lite::Tensor *> inputs = {&in_tensor};
+  std::vector<lite::Tensor *> outputs = {&out_tensor};
 
   StridedSliceParameter parameter = {0};
  InitStridedSliceParam(&parameter);
@@ -77,15 +77,14 @@ TEST_F(TestStridedSlice, StridedSlice) {
 }
 
 TEST_F(TestStridedSlice, StridedSliceInt8) {
-  lite::tensor::Tensor in_tensor(kNumberTypeInt8, {2, 3, 4});
-  lite::tensor::Tensor out_tensor(kNumberTypeInt8, {2, 3, 4});
-  int8_t input_data[] = {-12, -11, -10, -9, -8, -7, -6, -5, -4, -3, -2, -1,
-                         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
+  lite::Tensor in_tensor(kNumberTypeInt8, {2, 3, 4});
+  lite::Tensor out_tensor(kNumberTypeInt8, {2, 3, 4});
+  int8_t input_data[] = {-12, -11, -10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
   int8_t output_data[4] = {0};
   in_tensor.SetData(input_data);
   out_tensor.SetData(output_data);
-  std::vector<lite::tensor::Tensor *> inputs = {&in_tensor};
-  std::vector<lite::tensor::Tensor *> outputs = {&out_tensor};
+  std::vector<lite::Tensor *> inputs = {&in_tensor};
+  std::vector<lite::Tensor *> outputs = {&out_tensor};
 
   StridedSliceParameter parameter = {0};
   parameter.begins_[0] = 0;
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp16/convolution_fp16_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp16/convolution_fp16_tests.cc
index bad7728b9e..95cdaf1628 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp16/convolution_fp16_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp16/convolution_fp16_tests.cc
@@ -144,7 +144,7 @@ TEST_F(TestConvolutionFp16, ConvTest1) {
     fp16_input_data[i] = static_cast<float16_t>(input_data[i]);
   }
 
-  auto nhwc4_input_data = reinterpret_cast<float16_t *>(malloc(i_h * i_w * ic4 * C4NUM* sizeof(float16_t)));
+  auto nhwc4_input_data = reinterpret_cast<float16_t *>(malloc(i_h * i_w * ic4 * C4NUM * sizeof(float16_t)));
   PackNHWCToNHWC4Fp32(fp16_input_data, nhwc4_input_data, 1, i_h * i_w, in_channel);
 
   std::cout << "==============fp16 input data===========" << std::endl;
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp16/reduce_fp16_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp16/reduce_fp16_tests.cc
index afba6425a4..4cb045ad54 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp16/reduce_fp16_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp16/reduce_fp16_tests.cc
@@ -31,10 +31,10 @@ class TestReduceFp16 : public mindspore::CommonTest {
  public:
   float err_tol = 1e-5;
-  lite::tensor::Tensor in_tensor_;
-  lite::tensor::Tensor out_tensor_;
-  std::vector<lite::tensor::Tensor *> inputs_{&in_tensor_};
-  std::vector<lite::tensor::Tensor *> outputs_{&out_tensor_};
+  lite::Tensor in_tensor_;
+  lite::Tensor out_tensor_;
+  std::vector<lite::Tensor *> inputs_{&in_tensor_};
+  std::vector<lite::Tensor *> outputs_{&out_tensor_};
   ReduceParameter param_ = {{}};
   kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat16, schema::PrimitiveType_Reduce};
   lite::Context ctx_ = lite::Context();
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/activation_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/activation_fp32_test.cc
index 9b1c8fcd99..024c286dea 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/activation_fp32_test.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/activation_fp32_test.cc
@@ -85,8 +85,8 @@ TEST_F(TestActivationFp32, TanhFp32) {
 }
 
 TEST_F(TestActivationFp32, HSwishFp32) {
-  std::vector<lite::tensor::Tensor *> inputs_tensor;
-  std::vector<lite::tensor::Tensor *> outputs_tensor;
+  std::vector<lite::Tensor *> inputs_tensor;
+  std::vector<lite::Tensor *> outputs_tensor;
 
   ActivationParameter op_param;
   op_param.op_parameter_.type_ = schema::PrimitiveType_Activation;
@@ -96,7 +96,7 @@ TEST_F(TestActivationFp32, HSwishFp32) {
   std::vector<float> input = {-3.0, -2.0, -1.0, 0.0, 1.0, 5.0, 6.0, 7.0};
   std::vector<int> in_shape = {8};
-  lite::tensor::Tensor input0_tensor;
+  lite::Tensor 
input0_tensor; inputs_tensor.push_back(&input0_tensor); input0_tensor.SetData(input.data()); input0_tensor.set_shape(in_shape); @@ -104,7 +104,7 @@ TEST_F(TestActivationFp32, HSwishFp32) { std::vector output(8); std::vector output_shape = {8}; - lite::tensor::Tensor output0_tensor; + lite::Tensor output0_tensor; outputs_tensor.push_back(&output0_tensor); output0_tensor.SetData(output.data()); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/argminmax_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/argminmax_fp32_test.cc index d27402733b..03b4fb0af6 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/argminmax_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/argminmax_fp32_test.cc @@ -27,9 +27,7 @@ class TestArgMinMaxTestFp32 : public mindspore::CommonTest { }; TEST_F(TestArgMinMaxTestFp32, ArgMaxTest1) { - std::vector in = {10, 20, 30, 40, 90, - 20, 11, 15, 1, 50, - 30, 45, 25, 50, 30}; + std::vector in = {10, 20, 30, 40, 90, 20, 11, 15, 1, 50, 30, 45, 25, 50, 30}; std::vector except_out = {2, 2, 0, 2, 0}; std::vector shape = {3, 5}; float out[5]; @@ -50,9 +48,7 @@ TEST_F(TestArgMinMaxTestFp32, ArgMaxTest1) { } TEST_F(TestArgMinMaxTestFp32, ArgMaxTest1_keep_dim) { - std::vector in = {10, 20, 30, 40, 90, - 20, 11, 15, 1, 50, - 30, 45, 25, 50, 30}; + std::vector in = {10, 20, 30, 40, 90, 20, 11, 15, 1, 50, 30, 45, 25, 50, 30}; std::vector except_out = {2, 2, 0, 2, 0}; std::vector shape = {3, 5}; float out[5]; @@ -77,16 +73,8 @@ TEST_F(TestArgMinMaxTestFp32, ArgMaxTest1_keep_dim) { } TEST_F(TestArgMinMaxTestFp32, ArgMaxTest_axis2_keep_dim) { - std::vector in = {10, 20, 30, - 11, 15, 10, - 5, 10, 12, - 10, 20, 30, - 11, 15, 10, - 5, 10, 12, - 10, 20, 30, - 11, 15, 10, - 5, 10, 12 - }; + std::vector in = {10, 20, 30, 11, 15, 10, 5, 10, 12, 10, 20, 30, 11, 15, + 10, 5, 10, 12, 10, 20, 30, 11, 15, 10, 5, 10, 12}; std::vector except_out = {1, 0, 0, 1, 0, 0, 1, 0, 0}; std::vector shape = {1, 3, 3, 3}; float out[9]; @@ -111,9 +99,7 @@ TEST_F(TestArgMinMaxTestFp32, ArgMaxTest_axis2_keep_dim) { } TEST_F(TestArgMinMaxTestFp32, ArgMaxTest2) { - std::vector in = {10, 20, 30, 40, 90, - 20, 11, 15, 1, 50, - 30, 45, 25, 50, 30}; + std::vector in = {10, 20, 30, 40, 90, 20, 11, 15, 1, 50, 30, 45, 25, 50, 30}; std::vector except_out = {30, 45, 30, 50, 90}; std::vector shape = {3, 5}; float out[5]; @@ -130,9 +116,7 @@ TEST_F(TestArgMinMaxTestFp32, ArgMaxTest2) { } TEST_F(TestArgMinMaxTestFp32, ArgMinTest2) { - std::vector in = {10, 20, 30, 40, 90, - 20, 11, 15, 1, 50, - 30, 45, 25, 50, 30}; + std::vector in = {10, 20, 30, 40, 90, 20, 11, 15, 1, 50, 30, 45, 25, 50, 30}; std::vector except_out = {10, 11, 15, 1, 30}; std::vector shape = {3, 5}; float out[5]; @@ -149,9 +133,7 @@ TEST_F(TestArgMinMaxTestFp32, ArgMinTest2) { } TEST_F(TestArgMinMaxTestFp32, ArgMaxTest3_axis2_out_data) { - std::vector in = {10, 20, 30, 40, 90, - 20, 11, 15, 1, 50, - 30, 45, 25, 50, 30}; + std::vector in = {10, 20, 30, 40, 90, 20, 11, 15, 1, 50, 30, 45, 25, 50, 30}; std::vector except_out = {30, 45, 30, 50, 90, 20, 20, 25, 40, 50}; ArgMinMaxParameter param; param.axis_ = 2; @@ -168,9 +150,7 @@ TEST_F(TestArgMinMaxTestFp32, ArgMaxTest3_axis2_out_data) { } TEST_F(TestArgMinMaxTestFp32, ArgMaxTest3_axis2_out_index) { - std::vector in = {10, 20, 30, 40, 90, - 20, 11, 15, 1, 50, - 30, 45, 25, 50, 30}; + std::vector in = {10, 20, 30, 40, 90, 20, 11, 15, 1, 50, 30, 45, 25, 50, 30}; std::vector except_out = {2, 2, 0, 2, 0, 1, 0, 2, 0, 1}; ArgMinMaxParameter param; param.axis_ = 
2; @@ -187,12 +167,8 @@ TEST_F(TestArgMinMaxTestFp32, ArgMaxTest3_axis2_out_index) { } TEST_F(TestArgMinMaxTestFp32, ArgMaxTest4_axis3_out_data) { - std::vector in = {10, 20, 30, 40, 90, - 20, 11, 15, 1, 50, - 30, 45, 25, 50, 30}; - std::vector except_out = {90, 40, - 50, 20, - 50, 45}; + std::vector in = {10, 20, 30, 40, 90, 20, 11, 15, 1, 50, 30, 45, 25, 50, 30}; + std::vector except_out = {90, 40, 50, 20, 50, 45}; ArgMinMaxParameter param; param.axis_ = 3; std::vector in_shape = {1, 1, 3, 5}; @@ -208,12 +184,8 @@ TEST_F(TestArgMinMaxTestFp32, ArgMaxTest4_axis3_out_data) { } TEST_F(TestArgMinMaxTestFp32, ArgMaxTest4_axis3_out_index) { - std::vector in = {10, 20, 30, 40, 90, - 20, 11, 15, 1, 50, - 30, 45, 25, 50, 30}; - std::vector except_out = {4, 3, - 4, 0, - 3, 1}; + std::vector in = {10, 20, 30, 40, 90, 20, 11, 15, 1, 50, 30, 45, 25, 50, 30}; + std::vector except_out = {4, 3, 4, 0, 3, 1}; ArgMinMaxParameter param; param.axis_ = 3; std::vector in_shape = {1, 1, 3, 5}; @@ -229,26 +201,9 @@ TEST_F(TestArgMinMaxTestFp32, ArgMaxTest4_axis3_out_index) { } TEST_F(TestArgMinMaxTestFp32, ArgMaxTest5_axis1_out_index) { - std::vector in = {100, 2, 300, - 4, 50, 6, - 11, 12, 13, - 34, 35, 36, - 9, 6, 17, - 10, 20, 30, - 10, 20, 30, - 40, 5, 60, - 7, 80, 90, - 10, 11, 120, - 18, 5, 16, - 9, 22, 23}; - std::vector except_out = {0, 1, 0, - 1, 0, 1, - 1, 2, 2, - 2, 1, 2, - 2, 1, 1, - 0, 2, 1, - 0, 0, 0, - 1, 1, 0}; + std::vector in = {100, 2, 300, 4, 50, 6, 11, 12, 13, 34, 35, 36, 9, 6, 17, 10, 20, 30, + 10, 20, 30, 40, 5, 60, 7, 80, 90, 10, 11, 120, 18, 5, 16, 9, 22, 23}; + std::vector except_out = {0, 1, 0, 1, 0, 1, 1, 2, 2, 2, 1, 2, 2, 1, 1, 0, 2, 1, 0, 0, 0, 1, 1, 0}; ArgMinMaxParameter param; param.axis_ = 1; std::vector in_shape = {2, 3, 2, 3}; @@ -264,26 +219,10 @@ TEST_F(TestArgMinMaxTestFp32, ArgMaxTest5_axis1_out_index) { } TEST_F(TestArgMinMaxTestFp32, ArgMaxTest5_axis1_out_data) { - std::vector in = {100, 2, 300, - 4, 50, 6, - 11, 12, 13, - 34, 35, 36, - 9, 6, 17, - 10, 20, 30, - 10, 20, 30, - 40, 5, 60, - 7, 80, 90, - 10, 11, 120, - 18, 5, 16, - 9, 22, 23}; - std::vector except_out = {100, 12, 300, - 34, 50, 36, - 11, 6, 17, - 10, 35, 30, - 18, 80, 90, - 40, 22, 120, - 10, 20, 30, - 10, 11, 60}; + std::vector in = {100, 2, 300, 4, 50, 6, 11, 12, 13, 34, 35, 36, 9, 6, 17, 10, 20, 30, + 10, 20, 30, 40, 5, 60, 7, 80, 90, 10, 11, 120, 18, 5, 16, 9, 22, 23}; + std::vector except_out = {100, 12, 300, 34, 50, 36, 11, 6, 17, 10, 35, 30, + 18, 80, 90, 40, 22, 120, 10, 20, 30, 10, 11, 60}; ArgMinMaxParameter param; param.axis_ = 1; std::vector in_shape = {2, 3, 2, 3}; @@ -299,26 +238,8 @@ TEST_F(TestArgMinMaxTestFp32, ArgMaxTest5_axis1_out_data) { } TEST_F(TestArgMinMaxTestFp32, ArgMaxTest6_axis0_out_index) { - std::vector in = {100, 2, - 4, 50, - 11, 12, - 34, 35, - 10, 20, - 40, 5, - 7, 80, - 10, 11, - 55, 25, - 5, 15, - 18, 8, - 15, 16}; - std::vector except_out = {0, 2, - 1, 0, - 2, 1, - 0, 0, - 2, 1, - 2, 2, - 0, 0, - 2, 2}; + std::vector in = {100, 2, 4, 50, 11, 12, 34, 35, 10, 20, 40, 5, 7, 80, 10, 11, 55, 25, 5, 15, 18, 8, 15, 16}; + std::vector except_out = {0, 2, 1, 0, 2, 1, 0, 0, 2, 1, 2, 2, 0, 0, 2, 2}; ArgMinMaxParameter param; param.axis_ = 1; std::vector in_shape = {3, 2, 2, 2}; @@ -334,26 +255,8 @@ TEST_F(TestArgMinMaxTestFp32, ArgMaxTest6_axis0_out_index) { } TEST_F(TestArgMinMaxTestFp32, ArgMaxTest6_axis0_out_data) { - std::vector in = {100, 2, - 4, 50, - 11, 12, - 34, 35, - 10, 20, - 40, 5, - 7, 80, - 10, 11, - 55, 25, - 5, 15, - 18, 8, - 15, 16}; - std::vector except_out = 
{100, 25, - 40, 50, - 18, 80, - 34, 35, - 55, 20, - 5, 15, - 11, 12, - 15, 16}; + std::vector in = {100, 2, 4, 50, 11, 12, 34, 35, 10, 20, 40, 5, 7, 80, 10, 11, 55, 25, 5, 15, 18, 8, 15, 16}; + std::vector except_out = {100, 25, 40, 50, 18, 80, 34, 35, 55, 20, 5, 15, 11, 12, 15, 16}; ArgMinMaxParameter param; param.axis_ = 1; std::vector in_shape = {3, 2, 2, 2}; @@ -369,12 +272,8 @@ TEST_F(TestArgMinMaxTestFp32, ArgMaxTest6_axis0_out_data) { } TEST_F(TestArgMinMaxTestFp32, ArgMinTest1_axis3_out_data) { - std::vector in = {10, 20, 30, 40, 90, - 20, 11, 15, 1, 50, - 30, 45, 25, 50, 30}; - std::vector except_out = {10, 20, - 1, 11, - 25, 30}; + std::vector in = {10, 20, 30, 40, 90, 20, 11, 15, 1, 50, 30, 45, 25, 50, 30}; + std::vector except_out = {10, 20, 1, 11, 25, 30}; ArgMinMaxParameter param; param.axis_ = 3; std::vector in_shape = {1, 1, 3, 5}; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/arithmetic_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/arithmetic_fp32_tests.cc index b66894ac88..249c639264 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/arithmetic_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/arithmetic_fp32_tests.cc @@ -160,7 +160,7 @@ TEST_F(TestArithmeticTestFp32, DivTest) { TEST_F(TestArithmeticTestFp32, DivTest2) { std::vector in0 = {10, 20, 30, 40, 50, 60, 70, 80, 90, 100}; - std::vector in1 = {5, 10, 2, 8, 2, 3, 7, 80, 45, 20}; + std::vector in1 = {5, 10, 2, 8, 2, 3, 7, 80, 45, 20}; std::vector correct_out = {2, 2, 15, 5, 25, 20, 10, 1, 2, 5}; constexpr int kOutSize = 10; float out[kOutSize]; @@ -457,8 +457,8 @@ TEST_F(TestArithmeticTestFp32, SquaredDifferenceTest) { } TEST_F(TestArithmeticTestFp32, MulFp32) { - std::vector inputs_tensor; - std::vector outputs_tensor; + std::vector inputs_tensor; + std::vector outputs_tensor; ArithmeticParameter mul_param; mul_param.broadcasting_ = true; @@ -486,8 +486,8 @@ TEST_F(TestArithmeticTestFp32, MulFp32) { std::vector input1 = {0.16771512, 0.7336843, 0.6768286, 0.4453379}; std::vector input1_shape = {1, 1, 1, 4}; - lite::tensor::Tensor input0_tensor; - lite::tensor::Tensor input1_tensor; + lite::Tensor input0_tensor; + lite::Tensor input1_tensor; input0_tensor.set_data_type(kNumberTypeFloat32); input0_tensor.SetData(input0.data()); input1_tensor.SetData(input1.data()); @@ -499,7 +499,7 @@ TEST_F(TestArithmeticTestFp32, MulFp32) { std::vector output(24); std::vector output_shape = {1, 2, 3, 4}; - lite::tensor::Tensor output0_tensor; + lite::Tensor output0_tensor; outputs_tensor.push_back(&output0_tensor); output0_tensor.SetData(output.data()); output0_tensor.set_shape(output_shape); @@ -529,8 +529,8 @@ TEST_F(TestArithmeticTestFp32, MulFp32) { } TEST_F(TestArithmeticTestFp32, MulReluFp32) { - std::vector inputs_tensor; - std::vector outputs_tensor; + std::vector inputs_tensor; + std::vector outputs_tensor; ArithmeticParameter mul_param; mul_param.broadcasting_ = true; @@ -559,8 +559,8 @@ TEST_F(TestArithmeticTestFp32, MulReluFp32) { std::vector input1 = {0.16771512, 0.7336843, 0.6768286, 0.4453379}; std::vector input1_shape = {1, 1, 1, 4}; - lite::tensor::Tensor input0_tensor; - lite::tensor::Tensor input1_tensor; + lite::Tensor input0_tensor; + lite::Tensor input1_tensor; input0_tensor.set_data_type(kNumberTypeFloat32); input0_tensor.SetData(input0.data()); input1_tensor.SetData(input1.data()); @@ -572,7 +572,7 @@ TEST_F(TestArithmeticTestFp32, MulReluFp32) { std::vector output(24); std::vector output_shape = {1, 2, 3, 4}; - lite::tensor::Tensor 
output0_tensor; + lite::Tensor output0_tensor; outputs_tensor.push_back(&output0_tensor); output0_tensor.SetData(output.data()); output0_tensor.set_shape(output_shape); @@ -602,8 +602,8 @@ TEST_F(TestArithmeticTestFp32, MulReluFp32) { } TEST_F(TestArithmeticTestFp32, MulRelu6Fp32) { - std::vector inputs_tensor; - std::vector outputs_tensor; + std::vector inputs_tensor; + std::vector outputs_tensor; ArithmeticParameter mul_param; mul_param.broadcasting_ = true; @@ -632,8 +632,8 @@ TEST_F(TestArithmeticTestFp32, MulRelu6Fp32) { std::vector input1 = {0.16771512, 0.7336843, 0.6768286, 0.4453379}; std::vector input1_shape = {1, 1, 1, 4}; - lite::tensor::Tensor input0_tensor; - lite::tensor::Tensor input1_tensor; + lite::Tensor input0_tensor; + lite::Tensor input1_tensor; input0_tensor.set_data_type(kNumberTypeFloat32); input0_tensor.SetData(input0.data()); input1_tensor.SetData(input1.data()); @@ -645,7 +645,7 @@ TEST_F(TestArithmeticTestFp32, MulRelu6Fp32) { std::vector output(24); std::vector output_shape = {1, 2, 3, 4}; - lite::tensor::Tensor output0_tensor; + lite::Tensor output0_tensor; outputs_tensor.push_back(&output0_tensor); output0_tensor.SetData(output.data()); output0_tensor.set_shape(output_shape); @@ -674,8 +674,8 @@ TEST_F(TestArithmeticTestFp32, MulRelu6Fp32) { } TEST_F(TestArithmeticTestFp32, AddReluFp32) { - std::vector inputs_tensor; - std::vector outputs_tensor; + std::vector inputs_tensor; + std::vector outputs_tensor; ArithmeticParameter add_param; add_param.broadcasting_ = true; @@ -704,8 +704,8 @@ TEST_F(TestArithmeticTestFp32, AddReluFp32) { std::vector input1 = {0.9035316, 0.022212252, 0.3038014, 0.3478275}; std::vector input1_shape = {1, 1, 1, 4}; - lite::tensor::Tensor input0_tensor; - lite::tensor::Tensor input1_tensor; + lite::Tensor input0_tensor; + lite::Tensor input1_tensor; input0_tensor.set_data_type(kNumberTypeFloat32); input0_tensor.SetData(input0.data()); input1_tensor.SetData(input1.data()); @@ -717,7 +717,7 @@ TEST_F(TestArithmeticTestFp32, AddReluFp32) { std::vector output(24); std::vector output_shape = {1, 2, 3, 4}; - lite::tensor::Tensor output0_tensor; + lite::Tensor output0_tensor; outputs_tensor.push_back(&output0_tensor); output0_tensor.SetData(output.data()); output0_tensor.set_shape(output_shape); @@ -746,8 +746,8 @@ TEST_F(TestArithmeticTestFp32, AddReluFp32) { } TEST_F(TestArithmeticTestFp32, AddRelu6Fp32) { - std::vector inputs_tensor; - std::vector outputs_tensor; + std::vector inputs_tensor; + std::vector outputs_tensor; ArithmeticParameter add_param; add_param.broadcasting_ = true; @@ -776,8 +776,8 @@ TEST_F(TestArithmeticTestFp32, AddRelu6Fp32) { std::vector input1 = {0.9035316, 0.022212252, 0.3038014, 0.3478275}; std::vector input1_shape = {1, 1, 1, 4}; - lite::tensor::Tensor input0_tensor; - lite::tensor::Tensor input1_tensor; + lite::Tensor input0_tensor; + lite::Tensor input1_tensor; input0_tensor.set_data_type(kNumberTypeFloat32); input0_tensor.SetData(input0.data()); input1_tensor.SetData(input1.data()); @@ -789,7 +789,7 @@ TEST_F(TestArithmeticTestFp32, AddRelu6Fp32) { std::vector output(24); std::vector output_shape = {1, 2, 3, 4}; - lite::tensor::Tensor output0_tensor; + lite::Tensor output0_tensor; outputs_tensor.push_back(&output0_tensor); output0_tensor.SetData(output.data()); output0_tensor.set_shape(output_shape); @@ -817,8 +817,8 @@ TEST_F(TestArithmeticTestFp32, AddRelu6Fp32) { } TEST_F(TestArithmeticTestFp32, DivReluFp32) { - std::vector inputs_tensor; - std::vector outputs_tensor; + std::vector inputs_tensor; + 
std::vector outputs_tensor; ArithmeticParameter div_param; div_param.broadcasting_ = true; @@ -847,8 +847,8 @@ TEST_F(TestArithmeticTestFp32, DivReluFp32) { std::vector input1 = {1.6771512, -7.336843, 0.6768286, 4.453379}; std::vector input1_shape = {1, 1, 1, 4}; - lite::tensor::Tensor input0_tensor; - lite::tensor::Tensor input1_tensor; + lite::Tensor input0_tensor; + lite::Tensor input1_tensor; input0_tensor.set_data_type(kNumberTypeFloat32); input0_tensor.SetData(input0.data()); input1_tensor.SetData(input1.data()); @@ -860,7 +860,7 @@ TEST_F(TestArithmeticTestFp32, DivReluFp32) { std::vector output(24); std::vector output_shape = {1, 2, 3, 4}; - lite::tensor::Tensor output0_tensor; + lite::Tensor output0_tensor; outputs_tensor.push_back(&output0_tensor); output0_tensor.SetData(output.data()); output0_tensor.set_shape(output_shape); @@ -890,8 +890,8 @@ TEST_F(TestArithmeticTestFp32, DivReluFp32) { } TEST_F(TestArithmeticTestFp32, DivRelu6Fp32) { - std::vector inputs_tensor; - std::vector outputs_tensor; + std::vector inputs_tensor; + std::vector outputs_tensor; ArithmeticParameter div_param; div_param.broadcasting_ = true; @@ -920,8 +920,8 @@ TEST_F(TestArithmeticTestFp32, DivRelu6Fp32) { std::vector input1 = {1.6771512, -7.336843, 0.6768286, 4.453379}; std::vector input1_shape = {1, 1, 1, 4}; - lite::tensor::Tensor input0_tensor; - lite::tensor::Tensor input1_tensor; + lite::Tensor input0_tensor; + lite::Tensor input1_tensor; input0_tensor.set_data_type(kNumberTypeFloat32); input0_tensor.SetData(input0.data()); input1_tensor.SetData(input1.data()); @@ -933,7 +933,7 @@ TEST_F(TestArithmeticTestFp32, DivRelu6Fp32) { std::vector output(24); std::vector output_shape = {1, 2, 3, 4}; - lite::tensor::Tensor output0_tensor; + lite::Tensor output0_tensor; outputs_tensor.push_back(&output0_tensor); output0_tensor.SetData(output.data()); output0_tensor.set_shape(output_shape); @@ -961,8 +961,8 @@ TEST_F(TestArithmeticTestFp32, DivRelu6Fp32) { } TEST_F(TestArithmeticTestFp32, EqualFp32) { - std::vector inputs_tensor; - std::vector outputs_tensor; + std::vector inputs_tensor; + std::vector outputs_tensor; ArithmeticParameter equal_param; equal_param.broadcasting_ = true; @@ -990,8 +990,8 @@ TEST_F(TestArithmeticTestFp32, EqualFp32) { std::vector input1 = {0.16771512, 3.3466918, 0.6768286, 3.2008505}; std::vector input1_shape = {1, 1, 1, 4}; - lite::tensor::Tensor input0_tensor; - lite::tensor::Tensor input1_tensor; + lite::Tensor input0_tensor; + lite::Tensor input1_tensor; input0_tensor.set_data_type(kNumberTypeFloat32); input0_tensor.SetData(input0.data()); input1_tensor.SetData(input1.data()); @@ -1003,7 +1003,7 @@ TEST_F(TestArithmeticTestFp32, EqualFp32) { std::vector output(24); std::vector output_shape = {1, 2, 3, 4}; - lite::tensor::Tensor output0_tensor; + lite::Tensor output0_tensor; outputs_tensor.push_back(&output0_tensor); output0_tensor.SetData(output.data()); output0_tensor.set_shape(output_shape); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batch_to_space_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batch_to_space_fp32_test.cc index 70875856fe..51e15c65e4 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batch_to_space_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batch_to_space_fp32_test.cc @@ -25,7 +25,6 @@ class BatchToSpaceTestFp32 : public mindspore::CommonTest { BatchToSpaceTestFp32() = default; }; - TEST_F(BatchToSpaceTestFp32, BatchToSpaceTest1) { float input[12] = {10, 30, 90, 2, 20, 120, 5, 50, 
150, 6, 16, 160}; constexpr int kOutSize = 12; @@ -84,8 +83,7 @@ TEST_F(BatchToSpaceTestFp32, BatchToSpaceTest_crop_2) { float input[32] = {1, 10, 3, 30, 9, 90, 11, 110, 2, 20, 4, 40, 10, 100, 12, 120, 5, 50, 7, 70, 13, 130, 15, 150, 6, 60, 8, 80, 14, 140, 16, 160}; constexpr int kOutSize = 12; - float expect_out[kOutSize] = {6, 60, 7, 70, 8, 80, - 10, 100, 11, 110, 12, 120}; + float expect_out[kOutSize] = {6, 60, 7, 70, 8, 80, 10, 100, 11, 110, 12, 120}; float output[kOutSize]; int in_shape[4] = {4, 2, 2, 2}; @@ -129,8 +127,7 @@ TEST_F(BatchToSpaceTestFp32, BatchToSpaceTest_crop_3) { 21, 10, 23, 30, 29, 90, 211, 110, 22, 20, 24, 40, 210, 100, 212, 120, 25, 50, 27, 70, 213, 130, 215, 150, 26, 60, 28, 80, 214, 140, 216, 160}; constexpr int kOutSize = 16; - float expect_out[kOutSize] = {9, 90, 13, 130, 29, 90, 213, 130, - 10, 100, 14, 140, 210, 100, 214, 140}; + float expect_out[kOutSize] = {9, 90, 13, 130, 29, 90, 213, 130, 10, 100, 14, 140, 210, 100, 214, 140}; float output[kOutSize]; int in_shape[4] = {8, 2, 2, 2}; @@ -177,9 +174,8 @@ TEST_F(BatchToSpaceTestFp32, BatchToSpaceTest_crop_4) { 214, 140, 216, 160, 31, 10, 33, 30, 39, 90, 311, 110, 32, 20, 34, 40, 310, 100, 312, 120, 35, 50, 37, 70, 313, 130, 315, 150, 36, 60, 38, 80, 314, 140, 316, 160}; constexpr int kOutSize = 24; - float expect_out[kOutSize] = { - 25, 50, 23, 30, 35, 50, 33, 30, - 13, 130, 11, 110, 26, 60, 24, 40, 36, 60, 34, 40, 14, 140, 12, 120}; + float expect_out[kOutSize] = {25, 50, 23, 30, 35, 50, 33, 30, 13, 130, 11, 110, + 26, 60, 24, 40, 36, 60, 34, 40, 14, 140, 12, 120}; float output[kOutSize]; int in_shape[4] = {12, 2, 2, 2}; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batchnorm_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batchnorm_fp32_tests.cc index a211458759..b19d6edd0c 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batchnorm_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batchnorm_fp32_tests.cc @@ -36,21 +36,21 @@ TEST_F(TestBatchnormFp32, BNTest) { op_param.op_parameter_.type_ = schema::PrimitiveType_BatchNorm; op_param.epsilon_ = 0.001f; - lite::tensor::Tensor input0_tensor(kNumberTypeFloat32, {1, 2, 2, 3}); - lite::tensor::Tensor input1_tensor(kNumberTypeFloat32, {3}); - lite::tensor::Tensor input2_tensor(kNumberTypeFloat32, {3}); + lite::Tensor input0_tensor(kNumberTypeFloat32, {1, 2, 2, 3}); + lite::Tensor input1_tensor(kNumberTypeFloat32, {3}); + lite::Tensor input2_tensor(kNumberTypeFloat32, {3}); input0_tensor.SetData(in_data.data()); input1_tensor.SetData(in_data1.data()); input2_tensor.SetData(in_data2.data()); - std::vector inputs_tensor = {&input0_tensor, &input1_tensor, &input2_tensor}; + std::vector inputs_tensor = {&input0_tensor, &input1_tensor, &input2_tensor}; std::vector output(12); std::vector corr_out = {-6.1533737, 7.4904885, -0.8563998, -0.289212, -9.356432, 0.13245535, -3.5422924, -14.005781, -2.3525476, -6.7113695, -16.396551, -1.4275324}; - lite::tensor::Tensor output0_tensor(kNumberTypeFloat32, {1, 2, 2, 3}); + lite::Tensor output0_tensor(kNumberTypeFloat32, {1, 2, 2, 3}); output0_tensor.SetData(output.data()); - std::vector outputs_tensor = {&output0_tensor}; + std::vector outputs_tensor = {&output0_tensor}; kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_BatchNorm}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); @@ -88,25 +88,25 @@ TEST_F(TestBatchnormFp32, FusedBNTest) { op_param.op_parameter_.type_ = 
schema::PrimitiveType_BatchNorm; op_param.epsilon_ = 0.001f; - lite::tensor::Tensor input0(kNumberTypeFloat32, {1, 2, 2, 3}); - lite::tensor::Tensor input1(kNumberTypeFloat32, {3}); - lite::tensor::Tensor input2(kNumberTypeFloat32, {3}); - lite::tensor::Tensor input3(kNumberTypeFloat32, {3}); - lite::tensor::Tensor input4(kNumberTypeFloat32, {3}); + lite::Tensor input0(kNumberTypeFloat32, {1, 2, 2, 3}); + lite::Tensor input1(kNumberTypeFloat32, {3}); + lite::Tensor input2(kNumberTypeFloat32, {3}); + lite::Tensor input3(kNumberTypeFloat32, {3}); + lite::Tensor input4(kNumberTypeFloat32, {3}); input0.SetData(in_data.data()); input1.SetData(scale.data()); input2.SetData(offset.data()); input3.SetData(mean.data()); input4.SetData(var.data()); - std::vector inputs_tensor = {&input0, &input1, &input2, &input3, &input4}; + std::vector inputs_tensor = {&input0, &input1, &input2, &input3, &input4}; std::vector output(12); std::vector corr_out = {-195.5765, 67.03745, -4.243883, -42.028015, 74.37044, 9.075897, 5.1857452, 56.60399, -77.215096, -181.18402, 49.81066, -59.204563}; - lite::tensor::Tensor output0(kNumberTypeFloat32, {1, 2, 2, 3}); + lite::Tensor output0(kNumberTypeFloat32, {1, 2, 2, 3}); output0.SetData(output.data()); - std::vector outputs_tensor = {&output0}; + std::vector outputs_tensor = {&output0}; kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_FusedBatchNorm}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); @@ -142,21 +142,21 @@ TEST_F(TestBatchnormFp32, easyTest) { op_param.op_parameter_.type_ = schema::PrimitiveType_BatchNorm; op_param.epsilon_ = 0.001f; - lite::tensor::Tensor input0(kNumberTypeFloat32, {1, 1, 6, 2}); - lite::tensor::Tensor input1(kNumberTypeFloat32, {2}); - lite::tensor::Tensor input2(kNumberTypeFloat32, {2}); + lite::Tensor input0(kNumberTypeFloat32, {1, 1, 6, 2}); + lite::Tensor input1(kNumberTypeFloat32, {2}); + lite::Tensor input2(kNumberTypeFloat32, {2}); input0.SetData(in_data.data()); input1.SetData(in_data1.data()); input2.SetData(in_data2.data()); - std::vector inputs_tensor = {&input0, &input1, &input2}; + std::vector inputs_tensor = {&input0, &input1, &input2}; std::vector output(12); std::vector corr_out = {0.519529, 1.69979, 1.09678, 2.19973, 1.67404, 2.69966, -0.63498, -2.29971, -1.21223, -2.79965, -1.78949, -3.29959}; - lite::tensor::Tensor output0(kNumberTypeFloat32, {1, 1, 6, 2}); + lite::Tensor output0(kNumberTypeFloat32, {1, 1, 6, 2}); output0.SetData(output.data()); - std::vector outputs_tensor = {&output0}; + std::vector outputs_tensor = {&output0}; kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_BatchNorm}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/constant_of_shape_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/constant_of_shape_fp32_test.cc index cf23d4e87c..d8534fbef8 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/constant_of_shape_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/constant_of_shape_fp32_test.cc @@ -25,20 +25,18 @@ class TestConstantOfShapeFp32 : public mindspore::CommonTest { TestConstantOfShapeFp32() {} }; -int ConstantOfShapeTestInit(std::vector *inputs_, std::vector *outputs_, - float *a_ptr, std::vector a_shape) { - auto in_t = - new lite::tensor::Tensor(kNumberTypeInt32, a_shape, schema::Format_NHWC, static_cast(1)); +int ConstantOfShapeTestInit(std::vector 
*inputs_, std::vector *outputs_, float *a_ptr, + std::vector a_shape) { + auto in_t = new lite::Tensor(kNumberTypeInt32, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST); in_t->MallocData(); - memcpy(in_t->Data(), a_ptr, sizeof(float) * in_t->ElementsNum()); + memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum()); inputs_->push_back(in_t); std::vector c_shape(in_t->ElementsNum()); for (int i = 0; i < c_shape.size(); ++i) { c_shape[i] = a_ptr[i]; } - auto out_t = - new lite::tensor::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, static_cast(1)); + auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST); out_t->MallocData(); outputs_->push_back(out_t); @@ -46,8 +44,8 @@ int ConstantOfShapeTestInit(std::vector *inputs_, std::v } TEST_F(TestConstantOfShapeFp32, Simple) { - std::vector inputs_; - std::vector outputs_; + std::vector inputs_; + std::vector outputs_; auto param = new ConstantOfShapeParameter(); param->value_ = 1; float a[] = {1, 2, 3, 4}; @@ -61,10 +59,10 @@ TEST_F(TestConstantOfShapeFp32, Simple) { op->Init(); op->Run(); float correct[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; - float *output = reinterpret_cast(outputs_[0]->Data()); + float *output = reinterpret_cast(outputs_[0]->MutableData()); for (int i = 0; i < 8; ++i) printf("%f ", output[i]); printf("\n"); - CompareOutputData(reinterpret_cast(outputs_[0]->Data()), correct, total_size, 0.0001); + CompareOutputData(reinterpret_cast(outputs_[0]->MutableData()), correct, total_size, 0.0001); delete op; for (auto t : inputs_) delete t; for (auto t : outputs_) delete t; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/conv1x1_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/conv1x1_fp32_tests.cc index 4fad7b355f..0ce67aed3a 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/conv1x1_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/conv1x1_fp32_tests.cc @@ -23,7 +23,7 @@ #include "src/runtime/kernel/arm/fp32/convolution_1x1.h" namespace mindspore { -using mindspore::lite::tensor::Tensor; +using mindspore::lite::Tensor; class TestConv1x1Fp32 : public mindspore::CommonTest { public: @@ -243,34 +243,34 @@ TEST_F(TestConv1x1Fp32, PostConvFuncC4Test2) { CompareOutputData(out, corr, 40, 0.0001); } -int Conv1x1TestInit1(std::vector *inputs_, std::vector *outputs_, +int Conv1x1TestInit1(std::vector *inputs_, std::vector *outputs_, ConvParameter *conv_param, float **correct) { - lite::tensor::Tensor *in_t = - new lite::tensor::Tensor(kNumberTypeFloat, {1, 2, 3, 4}, schema::Format_NHWC, static_cast(1)); + lite::Tensor *in_t = new lite::Tensor(kNumberTypeFloat, {1, 2, 3, 4}, schema::Format_NHWC, + lite::TensorCategory(static_cast(1))); in_t->MallocData(); float in[] = {12.216284, 3.3466918, 15.327419, 5.234958, 0.804376, 9.952188, 14.727955, -8.080715, 13.71383, 8.055829, 6.5845337, -9.25232, -4.24519, 11.550042, 9.262012, 1.2780352, 6.7263746, -3.9301445, 3.764492, -8.602078, -3.3558068, 13.619035, -2.6694393, 3.2008505}; - memcpy(in_t->Data(), in, sizeof(float) * 24); + memcpy(in_t->MutableData(), in, sizeof(float) * 24); inputs_->push_back(in_t); - lite::tensor::Tensor *weight_t = - new lite::tensor::Tensor(kNumberTypeFloat, {3, 1, 1, 4}, schema::Format_NHWC, static_cast(1)); + lite::Tensor *weight_t = new lite::Tensor(kNumberTypeFloat, {3, 1, 1, 4}, schema::Format_NHWC, + lite::TensorCategory(static_cast(1))); weight_t->MallocData(); float weight[] 
= {-0.7308652, 0.5257509, -0.87825793, -1.123181, -1.2206168, 0.562695, 1.5382664, -0.5020635, 0.8591602, -0.26410004, 1.1262615, 0.073132955}; /* nhwc */ - memcpy(weight_t->Data(), weight, sizeof(float) * 12); + memcpy(weight_t->MutableData(), weight, sizeof(float) * 12); inputs_->push_back(weight_t); - lite::tensor::Tensor *bias_t = - new lite::tensor::Tensor(kNumberTypeFloat, {3}, schema::Format_NHWC, static_cast(1)); + lite::Tensor *bias_t = new lite::Tensor(kNumberTypeFloat, {3}, schema::Format_NHWC, + lite::TensorCategory(static_cast(1))); bias_t->MallocData(); float bias[] = {2, 2, 2}; - memcpy(bias_t->Data(), bias, sizeof(float) * 3); + memcpy(bias_t->MutableData(), bias, sizeof(float) * 3); inputs_->push_back(bias_t); - lite::tensor::Tensor *out_t = - new lite::tensor::Tensor(kNumberTypeFloat, {1, 2, 3, 3}, schema::Format_NHWC, static_cast(1)); + lite::Tensor *out_t = new lite::Tensor(kNumberTypeFloat, {1, 2, 3, 3}, schema::Format_NHWC, + lite::TensorCategory(static_cast(1))); out_t->MallocData(); outputs_->push_back(out_t); @@ -287,8 +287,8 @@ int Conv1x1TestInit1(std::vector *inputs_, std::vector inputs_; - std::vector outputs_; + std::vector inputs_; + std::vector outputs_; auto conv_param = new ConvParameter(); lite::Context *ctx = new lite::Context(); ctx->thread_num_ = 1; @@ -300,7 +300,7 @@ TEST_F(TestConv1x1Fp32, Conv1x1Test1) { conv1x1->Init(); conv1x1->Run(); - CompareOutputData(reinterpret_cast(outputs_[0]->Data()), correct, total_size, 0.0001); + CompareOutputData(reinterpret_cast(outputs_[0]->MutableData()), correct, total_size, 0.0001); delete conv_param; delete conv1x1; for (auto t : inputs_) delete t; @@ -308,35 +308,35 @@ TEST_F(TestConv1x1Fp32, Conv1x1Test1) { free(correct); } -int Conv1x1TestInit2(std::vector *inputs_, std::vector *outputs_, +int Conv1x1TestInit2(std::vector *inputs_, std::vector *outputs_, ConvParameter *conv_param, float **correct) { size_t buffer_size; - lite::tensor::Tensor *in_t = new lite::tensor::Tensor(kNumberTypeFloat, {1, 300, 300, 24}, schema::Format_NHWC, - static_cast(1)); + lite::Tensor *in_t = new lite::Tensor(kNumberTypeFloat, {1, 300, 300, 24}, schema::Format_NHWC, + lite::TensorCategory(static_cast(1))); in_t->MallocData(); std::string input_path = "./conv/conv1x1fp32_input1_nhwc.bin"; auto in = reinterpret_cast(mindspore::lite::ReadFile(input_path.c_str(), &buffer_size)); - memcpy(in_t->Data(), in, buffer_size); + memcpy(in_t->MutableData(), in, buffer_size); inputs_->push_back(in_t); - lite::tensor::Tensor *weight_t = - new lite::tensor::Tensor(kNumberTypeFloat, {40, 1, 1, 24}, schema::Format_NHWC, static_cast(1)); + lite::Tensor *weight_t = new lite::Tensor(kNumberTypeFloat, {40, 1, 1, 24}, schema::Format_NHWC, + lite::TensorCategory(static_cast(1))); weight_t->MallocData(); std::string weight_path = "./conv/conv1x1fp32_weight1_nhwc.bin"; auto weight = reinterpret_cast(mindspore::lite::ReadFile(weight_path.c_str(), &buffer_size)); - memcpy(weight_t->Data(), weight, buffer_size); + memcpy(weight_t->MutableData(), weight, buffer_size); inputs_->push_back(weight_t); - lite::tensor::Tensor *bias_t = - new lite::tensor::Tensor(kNumberTypeFloat, {40}, schema::Format_NHWC, static_cast(1)); + lite::Tensor *bias_t = new lite::Tensor(kNumberTypeFloat, {40}, schema::Format_NHWC, + lite::TensorCategory(static_cast(1))); bias_t->MallocData(); std::string bias_path = "./conv/conv1x1fp32_bias1_nhwc.bin"; auto bias = mindspore::lite::ReadFile(bias_path.c_str(), &buffer_size); - memcpy(bias_t->Data(), bias, buffer_size); + 
memcpy(bias_t->MutableData(), bias, buffer_size); inputs_->push_back(bias_t); - lite::tensor::Tensor *out_t = new lite::tensor::Tensor(kNumberTypeFloat, {1, 300, 300, 40}, schema::Format_NHWC, - static_cast(1)); + lite::Tensor *out_t = new lite::Tensor(kNumberTypeFloat, {1, 300, 300, 40}, schema::Format_NHWC, + lite::TensorCategory(static_cast(1))); out_t->MallocData(); outputs_->push_back(out_t); @@ -354,8 +354,8 @@ int Conv1x1TestInit2(std::vector *inputs_, std::vector inputs_; - std::vector outputs_; + std::vector inputs_; + std::vector outputs_; auto conv_param = new ConvParameter(); lite::Context *ctx = new lite::Context(); ctx->thread_num_ = 2; @@ -366,7 +366,7 @@ TEST_F(TestConv1x1Fp32, Conv1x1Test2) { conv1x1->Init(); conv1x1->Run(); - CompareOutputData(reinterpret_cast(outputs_[0]->Data()), correct, total_size, 0.0001); + CompareOutputData(reinterpret_cast(outputs_[0]->MutableData()), correct, total_size, 0.0001); /* running warm up */ for (int i = 0; i < 0; i++) { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32_tests.cc index becf047a69..66199ffa75 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32_tests.cc @@ -51,49 +51,49 @@ void InitConvDwParam(ConvParameter *conv_param) { conv_param->pad_l_ = 1; } -void InitConvDwCreator(std::vector *inputs, std::vector *outputs, +void InitConvDwCreator(std::vector *inputs, std::vector *outputs, const ConvParameter *conv_param) { // prepare input, format NHWC size_t input_size; std::string input_path = "./test_data/convDw/convDwfp32_input.bin"; auto input_data = reinterpret_cast(mindspore::lite::ReadFile(input_path.c_str(), &input_size)); - auto *input = new lite::tensor::Tensor; + auto *input = new lite::Tensor; input->set_data_type(kNumberTypeFloat32); input->SetFormat(schema::Format_NHWC); input->set_shape({conv_param->input_batch_, conv_param->input_h_, conv_param->input_w_, conv_param->input_channel_}); input->MallocData(); - memcpy(input->Data(), input_data, input_size); + memcpy(input->MutableData(), input_data, input_size); // prepare weight, format co kh kw ci, ci = 1 size_t weight_size; std::string weight_path = "./test_data/convDw/convDwfp32_weight.bin"; auto weight_data = reinterpret_cast(mindspore::lite::ReadFile(weight_path.c_str(), &weight_size)); - auto *weight = new lite::tensor::Tensor; + auto *weight = new lite::Tensor; weight->set_data_type(kNumberTypeFloat32); weight->set_shape({conv_param->output_channel_, conv_param->kernel_h_, conv_param->kernel_w_, 1}); weight->MallocData(); - memcpy(weight->Data(), weight_data, weight_size); + memcpy(weight->MutableData(), weight_data, weight_size); // prepare bias - auto *bias = new lite::tensor::Tensor; + auto *bias = new lite::Tensor; bias->set_data_type(kNumberTypeFloat32); bias->set_shape({conv_param->output_channel_}); bias->MallocData(); - memset(bias->Data(), 0, bias->ElementsNum() * sizeof(float)); + memset(bias->MutableData(), 0, bias->ElementsNum() * sizeof(float)); inputs->push_back(input); inputs->push_back(weight); inputs->push_back(bias); - auto *output = new lite::tensor::Tensor; + auto *output = new lite::Tensor; output->set_data_type(kNumberTypeFloat32); output->set_shape( {conv_param->output_batch_, conv_param->output_h_, conv_param->output_w_, conv_param->output_channel_}); 
output->SetFormat(schema::Format_NHWC); output->MallocData(); - memset(output->Data(), 0, output->ElementsNum() * sizeof(float)); + memset(output->MutableData(), 0, output->ElementsNum() * sizeof(float)); outputs->push_back(output); } @@ -107,22 +107,22 @@ TEST_F(TestConvolutionDwFp32, ConvDwFp32Accuracy) { ctx->thread_num_ = 4; // init tensor - std::vector inputs; - std::vector outputs; + std::vector inputs; + std::vector outputs; InitConvDwCreator(&inputs, &outputs, conv_param); // register op kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_DepthwiseConv2D}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - kernel::LiteKernel *kernel = creator(inputs, outputs, reinterpret_cast(conv_param), ctx, desc, - nullptr); + kernel::LiteKernel *kernel = + creator(inputs, outputs, reinterpret_cast(conv_param), ctx, desc, nullptr); ASSERT_NE(kernel, nullptr); // op run kernel->Run(); std::cout << "==================output data=================" << std::endl; - auto output_ptr = reinterpret_cast(outputs[0]->Data()); + auto output_ptr = reinterpret_cast(outputs[0]->MutableData()); for (int i = 0; i < 20; i++) { std::cout << output_ptr[i] << ", "; } @@ -158,16 +158,16 @@ TEST_F(TestConvolutionDwFp32, ConvDwFp32Performance) { ctx->thread_num_ = 1; // init tensor - std::vector inputs; - std::vector outputs; + std::vector inputs; + std::vector outputs; InitConvDwCreator(&inputs, &outputs, conv_param); // register op kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_DepthwiseConv2D}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - kernel::LiteKernel *kernel = creator(inputs, outputs, reinterpret_cast(conv_param), ctx, desc, - nullptr); + kernel::LiteKernel *kernel = + creator(inputs, outputs, reinterpret_cast(conv_param), ctx, desc, nullptr); ASSERT_NE(kernel, nullptr); /* running warm up */ diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc index d8220719c5..fcabf82e37 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc @@ -172,9 +172,7 @@ TEST_F(CropTestFp32, CropTest7) { } TEST_F(CropTestFp32, CropTest8) { - float input[27] = {1, 2, 3, 4, 5, 6, 7, 8, 9, - 11, 12, 13, 14, 15, 16, 17, 18, 19, - 21, 22, 23, 24, 25, 26, 27, 28, 29}; + float input[27] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 27, 28, 29}; const int kOutSize = 4; float expect_out[kOutSize] = {15, 16, 18, 19}; @@ -197,7 +195,7 @@ TEST_F(CropTestFp32, CropTest8) { } TEST_F(CropTestFp32, CropTest9) { - float input[64] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + float input[64] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 11, 12, 13, 14, 15, 16, 17, 18, 19, 110, 111, 112, 113, 114, 115, 116, 21, 22, 23, 24, 25, 26, 27, 28, 29, 210, 211, 212, 213, 214, 215, 216, 31, 32, 33, 34, 35, 36, 37, 38, 39, 310, 311, 312, 313, 314, 315, 316}; @@ -223,21 +221,11 @@ TEST_F(CropTestFp32, CropTest9) { } TEST_F(CropTestFp32, CropTest10) { - float input[50] = {1, 2, 3, 4, 5, - 6, 7, 8, 9, 10, - 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, - 21, 22, 23, 24, 25, - 26, 27, 28, 29, 30, - 31, 32, 33, 34, 35, - 36, 37, 38, 39, 40, - 41, 42, 43, 44, 45, - 46, 47, 48, 49, 50}; + float input[50] = {1, 2, 
3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50}; const int kOutSize = 8; - float expect_out[kOutSize] = {1, 2, - 6, 7, - 26, 27, - 31, 32}; + float expect_out[kOutSize] = {1, 2, 6, 7, 26, 27, 31, 32}; float output[kOutSize]; int in_shape[4] = {1, 2, 5, 5}; @@ -257,48 +245,24 @@ TEST_F(CropTestFp32, CropTest10) { } TEST_F(CropTestFp32, CropTest11) { - float input[100] = {1, 2, 3, 4, 5, - 6, 7, 8, 9, 10, - 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, - 21, 22, 23, 24, 25, - 26, 27, 28, 29, 30, - 31, 32, 33, 34, 35, - 36, 37, 38, 39, 40, - 41, 42, 43, 44, 45, - 46, 47, 48, 49, 50, - 1, 2, 3, 4, 5, - 6, 7, 8, 9, 10, - 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, - 21, 22, 23, 24, 25, - 26, 27, 28, 29, 30, - 31, 32, 33, 34, 35, - 36, 37, 38, 39, 40, - 41, 42, 43, 44, 45, - 46, 47, 48, 49, 50}; + float input[100] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50}; const int kOutSize = 16; - float expect_out[kOutSize] = {1, 2, - 6, 7, - 26, 27, - 31, 32, - 1, 2, - 6, 7, - 26, 27, - 31, 32}; + float expect_out[kOutSize] = {1, 2, 6, 7, 26, 27, 31, 32, 1, 2, 6, 7, 26, 27, 31, 32}; std::vector in_shape = {1, 4, 5, 5}; std::vector out_shape = {1, 4, 2, 2}; - std::vector inputs; - std::vector outputs; - auto in_t = - new lite::tensor::Tensor(kNumberTypeFloat, in_shape, schema::Format_NHWC, static_cast(1)); + std::vector inputs; + std::vector outputs; + auto in_t = new lite::Tensor(kNumberTypeFloat, in_shape, schema::Format_NHWC, lite::Tensor::Category::CONST); in_t->MallocData(); - memcpy(in_t->Data(), input, sizeof(float) * in_t->ElementsNum()); + memcpy(in_t->MutableData(), input, sizeof(float) * in_t->ElementsNum()); inputs.push_back(in_t); - auto out_t = - new lite::tensor::Tensor(kNumberTypeFloat, out_shape, schema::Format_NHWC, static_cast(1)); + auto out_t = new lite::Tensor(kNumberTypeFloat, out_shape, schema::Format_NHWC, lite::Tensor::Category::CONST); out_t->MallocData(); outputs.push_back(out_t); @@ -312,7 +276,7 @@ TEST_F(CropTestFp32, CropTest11) { kernel->Init(); kernel->Run(); - float *output = reinterpret_cast(outputs[0]->Data()); + float *output = reinterpret_cast(outputs[0]->MutableData()); for (int i = 0; i < kOutSize; ++i) { std::cout << output[i] << " "; } diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc index 4549f6d641..1497218c2d 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc @@ -319,11 +319,11 @@ TEST_F(TestDeConvolutionFp32, PostConvFuncC8Test8_8) { CompareOutputData(out, no, 64, 0.0001); } -int DeConvTestInit1(std::vector *inputs_, std::vector *outputs_, +int DeConvTestInit1(std::vector *inputs_, std::vector *outputs_, ConvParameter *conv_param, float **correct) { std::vector in_dims_nhwc = {1, 5, 7, 2}; - lite::tensor::Tensor *in_t = - new lite::tensor::Tensor(kNumberTypeFloat, in_dims_nhwc, schema::Format_NHWC, static_cast(1)); + lite::Tensor *in_t = + new 
lite::Tensor(kNumberTypeFloat, in_dims_nhwc, schema::Format_NHWC, lite::Tensor::Category::CONST);
   in_t->MallocData();
   float in_nchw[] = {
     0.39451003, 0.15045597, 0.5367726, 0.62690735, 0.113554195, 0.5402554, 0.5522764, 0.044319753, 0.25721782,
@@ -334,12 +334,12 @@ int DeConvTestInit1(std::vector<lite::tensor::Tensor *> *inputs_, std::vector<l
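The same constructor rewrite repeats through every init helper below, so the rule is worth stating once: the trailing `schema::NodeType` argument (almost always `static_cast<schema::NodeType>(1)`, i.e. `NodeType_Parameter`) becomes an explicit `lite::Tensor::Category`. A minimal before/after sketch, assuming only the post-patch `src/tensor.h`; dtype, shape, and format are illustrative:

  #include "mindspore/lite/src/tensor.h"

  // Old: the category rode along as a casted schema::NodeType.
  //   auto *t = new lite::tensor::Tensor(kNumberTypeFloat, {1, 5, 7, 2}, schema::Format_NHWC,
  //                                      static_cast<schema::NodeType>(1));
  // New: the same intent, spelled out with a dedicated enum.
  auto *t = new lite::Tensor(kNumberTypeFloat, {1, 5, 7, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST);
  t->MallocData();  // allocation stays a separate, explicit step
  delete t;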
-  PackNCHWToNHWCFp32(in_nchw, in_t->Data(), in_t->Batch(), in_t->Width() * in_t->Height(), in_t->Channel());
+  PackNCHWToNHWCFp32(in_nchw, in_t->MutableData(), in_t->Batch(), in_t->Width() * in_t->Height(), in_t->Channel());
   inputs_->push_back(in_t);
   std::vector<int> weight_dims_nhwc = {2, 3, 3, 6};
-  lite::tensor::Tensor *weight_t =
-    new lite::tensor::Tensor(kNumberTypeFloat, weight_dims_nhwc, schema::Format_NHWC, static_cast<schema::NodeType>(1));
+  lite::Tensor *weight_t =
+    new lite::Tensor(kNumberTypeFloat, weight_dims_nhwc, schema::Format_NHWC, lite::Tensor::Category::CONST);
   weight_t->MallocData();
   float weight_nchw[] = {
     0.061163727, -0.06261389, 0.07708351, -0.019354159, -0.3859104, -0.082844816, -0.21268463, -0.15746808,
@@ -356,20 +356,19 @@ int DeConvTestInit1(std::vector<lite::tensor::Tensor *> *inputs_, std::vector<l
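The DeConv fixtures are authored in NCHW order and repacked into the tensor's NHWC layout at init time. A sketch of the repack call as these hunks now use it; `in_nchw` and `in_t` are the names from the hunk above, and the batch / H*W plane / channel decomposition mirrors those call sites:

  // NCHW -> NHWC: geometry is read off the destination tensor so the source
  // array and the tensor's declared shape cannot drift apart.
  PackNCHWToNHWCFp32(in_nchw, in_t->MutableData(), in_t->Batch(),
                     in_t->Width() * in_t->Height(), in_t->Channel());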
-  PackNCHWToNHWCFp32(weight_nchw, weight_t->Data(), weight_t->Batch(), weight_t->Width() * weight_t->Height(),
+  PackNCHWToNHWCFp32(weight_nchw, weight_t->MutableData(), weight_t->Batch(), weight_t->Width() * weight_t->Height(),
                      weight_t->Channel());
   inputs_->push_back(weight_t);
-  lite::tensor::Tensor *bias_t =
-    new lite::tensor::Tensor(kNumberTypeFloat, {6}, schema::Format_NHWC, static_cast<schema::NodeType>(1));
+  lite::Tensor *bias_t = new lite::Tensor(kNumberTypeFloat, {6}, schema::Format_NHWC, lite::Tensor::Category::CONST);
   bias_t->MallocData();
   float bias[] = {-0.19064677, -0.0034778118, 0.63741624, -1.0311537, -1.0288948, 0.71384084};
-  memcpy(bias_t->Data(), bias, sizeof(float) * 6);
+  memcpy(bias_t->MutableData(), bias, sizeof(float) * 6);
   inputs_->push_back(bias_t);
   std::vector<int> output_nhwc_dims = {1, 9, 13, 6};
-  lite::tensor::Tensor *out_t =
-    new lite::tensor::Tensor(kNumberTypeFloat, output_nhwc_dims, schema::Format_NHWC, static_cast<schema::NodeType>(1));
+  lite::Tensor *out_t =
+    new lite::Tensor(kNumberTypeFloat, output_nhwc_dims, schema::Format_NHWC, lite::Tensor::Category::CONST);
   out_t->MallocData();
   outputs_->push_back(out_t);
@@ -473,8 +472,8 @@ int DeConvTestInit1(std::vector<lite::tensor::Tensor *> *inputs_, std::vector<l
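Beyond the rename, every raw-buffer touch in these tests now goes through `MutableData()`. A self-contained sketch of the write/read round trip, assuming `MutableData()` still returns `void *` as the surrounding `memcpy` call sites imply:

  #include <cstring>
  #include "mindspore/lite/src/tensor.h"

  float src[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f};
  auto *t = new lite::Tensor(kNumberTypeFloat32, {6}, schema::Format_NHWC, lite::Tensor::Category::CONST);
  t->MallocData();
  // Write path: fill the freshly allocated backing buffer.
  memcpy(t->MutableData(), src, sizeof(float) * t->ElementsNum());
  // Read path: the same accessor, cast to the element type for inspection.
  auto *view = reinterpret_cast<float *>(t->MutableData());
  float first = view[0];  // 1.0f
  delete t;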
-  std::vector<lite::tensor::Tensor *> inputs_;
-  std::vector<lite::tensor::Tensor *> outputs_;
+  std::vector<lite::Tensor *> inputs_;
+  std::vector<lite::Tensor *> outputs_;
   ConvParameter *deconv_param = new ConvParameter();
   lite::Context *ctx = new lite::Context();
   ctx->thread_num_ = 1;
@@ -486,7 +485,7 @@ TEST_F(TestDeConvolutionFp32, DeConvTest1) {
   deconv->Init();
   deconv->Run();
-  CompareOutputData(reinterpret_cast<float *>(outputs_[0]->Data()), correct, total_size, 0.0001);
+  CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001);
   delete deconv_param;
   delete deconv;
   for (auto t : inputs_) delete t;
@@ -494,19 +493,17 @@ TEST_F(TestDeConvolutionFp32, DeConvTest1) {
   free(correct);
 }
-int DeConvTestInit2(std::vector<lite::tensor::Tensor *> *inputs_, std::vector<lite::tensor::Tensor *> *outputs_,
+int DeConvTestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
                     ConvParameter *conv_param, float **correct) {
-  auto *in_t =
-    new lite::tensor::Tensor(kNumberTypeFloat, {1, 4, 2, 3}, schema::Format_NHWC, static_cast<schema::NodeType>(1));
+  auto *in_t = new lite::Tensor(kNumberTypeFloat, {1, 4, 2, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST);
   in_t->MallocData();
   float in[] = {7.7566547, 19.250782, 17.923292, 13.584222, 3.3293908, 9.734102, 18.83455, -1.5142503,
                 -0.29382008, 18.686155, 0.087307654, 4.2010098, -2.2539594, 4.1795673, 13.142356, -3.5939367,
                 16.505789, 19.899279, 8.556229, 19.969376, -6.2355065, -2.3804698, -9.027744, 9.5542}; /* nhwc */
-  memcpy(in_t->Data(), in, sizeof(float) * in_t->ElementsNum());
+  memcpy(in_t->MutableData(), in, sizeof(float) * in_t->ElementsNum());
   inputs_->push_back(in_t);
-  auto *weight_t =
-    new lite::tensor::Tensor(kNumberTypeFloat, {3, 3, 3, 2}, schema::Format_NHWC, static_cast<schema::NodeType>(1));
+  auto *weight_t = new lite::Tensor(kNumberTypeFloat, {3, 3, 3, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST);
   weight_t->MallocData();
   float weight[] = {-0.39557076, 0.15087655, 0.35216075, -0.20893791, 0.28683448, 0.08006268, 0.9830812,
                     0.27212173, 0.5171944, -0.0014505, 0.78694165, 0.25425306, 0.16605458, -0.06127124,
@@ -516,12 +513,11 @@ int DeConvTestInit2(std::vector<lite::tensor::Tensor *> *inputs_, std::vector<l
-  memcpy(weight_t->Data(), weight, sizeof(float) * weight_t->ElementsNum());
+  memcpy(weight_t->MutableData(), weight, sizeof(float) * weight_t->ElementsNum());
   inputs_->push_back(weight_t);
   std::vector<int> out_nhwc_dims = {1, 7, 3, 2};
-  auto *out_t =
-    new lite::tensor::Tensor(kNumberTypeFloat, out_nhwc_dims, schema::Format_NHWC, static_cast<schema::NodeType>(1));
+  auto *out_t = new lite::Tensor(kNumberTypeFloat, out_nhwc_dims, schema::Format_NHWC, lite::Tensor::Category::CONST);
   out_t->MallocData();
   outputs_->push_back(out_t);
@@ -542,8 +538,8 @@ int DeConvTestInit2(std::vector<lite::tensor::Tensor *> *inputs_, std::vector<l
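Each DeConvTest owns everything it allocates, and the teardown sequence is identical across the four cases. A condensed sketch of that contract; `correct` is the malloc'ed expectation buffer each init helper hands back through its out-parameter:

  // The kernel, every tensor pushed into the two vectors, and the expected-
  // output buffer are all released by the test body itself.
  delete deconv;
  for (auto t : inputs_) delete t;
  for (auto t : outputs_) delete t;
  free(correct);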
-  std::vector<lite::tensor::Tensor *> inputs_;
-  std::vector<lite::tensor::Tensor *> outputs_;
+  std::vector<lite::Tensor *> inputs_;
+  std::vector<lite::Tensor *> outputs_;
   auto deconv_param = new ConvParameter();
   float *correct;
   int total_size = DeConvTestInit2(&inputs_, &outputs_, deconv_param, &correct);
@@ -554,7 +550,7 @@ TEST_F(TestDeConvolutionFp32, DeConvTest2) {
   deconv->Init();
   deconv->Run();
-  CompareOutputData(reinterpret_cast<float *>(outputs_[0]->Data()), correct, total_size, 0.0001);
+  CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001);
   delete deconv;
   for (auto t : inputs_) delete t;
@@ -562,33 +558,32 @@ TEST_F(TestDeConvolutionFp32, DeConvTest2) {
   free(correct);
 }
-int DeConvTestInit3(std::vector<lite::tensor::Tensor *> *inputs_, std::vector<lite::tensor::Tensor *> *outputs_,
+int DeConvTestInit3(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
                     ConvParameter *conv_param, float **correct) {
   std::vector<int> in_dims_nhwc = {1, 3, 3, 2};
-  auto *in_t =
-    new lite::tensor::Tensor(kNumberTypeFloat, in_dims_nhwc, schema::Format_NHWC, static_cast<schema::NodeType>(1));
+  auto *in_t = new lite::Tensor(kNumberTypeFloat, in_dims_nhwc, schema::Format_NHWC, lite::Tensor::Category::CONST);
   in_t->MallocData();
   float in_nchw[] = {0.10411751, 0.24034509, 0.71456534, 0.75286126, 0.9778457, 0.21043599,
                      0.26498786, 0.6701024, 0.9744634, 0.49075702, 0.03877404, 0.48646277,
                      0.5473929, 0.32438126, 0.87553847, 0.75820315, 0.86666644, 0.4852329};
-  PackNCHWToNHWCFp32(in_nchw, reinterpret_cast<float *>(in_t->Data()), in_t->Batch(), in_t->Width() * in_t->Height(),
-                     in_t->Channel());
+  PackNCHWToNHWCFp32(in_nchw, reinterpret_cast<float *>(in_t->MutableData()), in_t->Batch(),
+                     in_t->Width() * in_t->Height(), in_t->Channel());
   inputs_->push_back(in_t);
   std::vector<int> w_dims_nhwc = {2, 2, 2, 2};
-  auto *weight_t =
-    new lite::tensor::Tensor(kNumberTypeFloat, w_dims_nhwc, schema::Format_NHWC, schema::NodeType_Parameter);
+  auto *weight_t = new lite::Tensor(kNumberTypeFloat, w_dims_nhwc, schema::Format_NHWC,
+                                    lite::TensorCategory(schema::NodeType_Parameter));
   weight_t->MallocData();
   float w_nchw[] = {-0.108016446, -0.44254777, 0.29249913, 0.18764605, 1.1250675, 0.29441583, -0.34362152, 0.7557833,
                     0.16503833, 0.2418737, -0.26612744, 0.5072577, -0.4284475, 0.2215941, 0.9273913, 0.34634787};
-  PackNCHWToNHWCFp32(w_nchw, weight_t->Data(), weight_t->Batch(), weight_t->Width() * weight_t->Height(),
+  PackNCHWToNHWCFp32(w_nchw, weight_t->MutableData(), weight_t->Batch(), weight_t->Width() * weight_t->Height(),
                      weight_t->Channel());
   inputs_->push_back(weight_t);
   std::vector<int> out_dims_nhwc = {1, 9, 9, 2};
-  auto *out_t =
-    new lite::tensor::Tensor(kNumberTypeFloat, out_dims_nhwc, schema::Format_NC4HW4, schema::NodeType_Parameter);
+  auto *out_t = new lite::Tensor(kNumberTypeFloat, out_dims_nhwc, schema::Format_NC4HW4,
+                                 lite::TensorCategory(schema::NodeType_Parameter));
   out_t->MallocData();
   outputs_->push_back(out_t);
@@ -621,8 +616,8 @@ int DeConvTestInit3(std::vector<lite::tensor::Tensor *> *inputs_, std::vector<l
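DeConvTestInit3 shows the patch's second replacement style: where the old code already passed a real `schema::NodeType` rather than a casted literal, it is routed through `lite::TensorCategory(...)` instead of hard-coding a category. The two spellings below should coincide for parameter tensors, since elsewhere in this patch `static_cast<schema::NodeType>(1)` (`NodeType_Parameter`) is rewritten directly to `CONST`; that equivalence is inferred from the paired hunks, not from the helper's definition:

  // Style 1: convert a legacy schema::NodeType at the call site.
  auto *w1 = new lite::Tensor(kNumberTypeFloat, {2, 2, 2, 2}, schema::Format_NHWC,
                              lite::TensorCategory(schema::NodeType_Parameter));
  // Style 2: name the category directly.
  auto *w2 = new lite::Tensor(kNumberTypeFloat, {2, 2, 2, 2}, schema::Format_NHWC,
                              lite::Tensor::Category::CONST);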
-  std::vector<lite::tensor::Tensor *> inputs_;
-  std::vector<lite::tensor::Tensor *> outputs_;
+  std::vector<lite::Tensor *> inputs_;
+  std::vector<lite::Tensor *> outputs_;
   auto deconv_param = new ConvParameter();
   float *correct;
   int total_size = DeConvTestInit3(&inputs_, &outputs_, deconv_param, &correct);
@@ -633,7 +628,7 @@ TEST_F(TestDeConvolutionFp32, DeConvTest3) {
   deconv->Init();
   deconv->Run();
-  CompareOutputData(reinterpret_cast<float *>(outputs_[0]->Data()), correct, total_size, 0.0001);
+  CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001);
   delete deconv;
   for (auto t : inputs_) delete t;
@@ -641,39 +636,35 @@ TEST_F(TestDeConvolutionFp32, DeConvTest3) {
   free(correct);
 }
-int DeConvTestInit4(std::vector<lite::tensor::Tensor *> *inputs_, std::vector<lite::tensor::Tensor *> *outputs_,
+int DeConvTestInit4(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
                     ConvParameter *conv_param, float **correct) {
   size_t buffer_size;
   std::vector<int> in_nhwc_dims = {1, 300, 300, 30};
-  auto *in_t =
-    new lite::tensor::Tensor(kNumberTypeFloat, in_nhwc_dims, schema::Format_NHWC, static_cast<schema::NodeType>(1));
+  auto *in_t = new lite::Tensor(kNumberTypeFloat, in_nhwc_dims, schema::Format_NHWC, lite::Tensor::Category::CONST);
   in_t->MallocData();
   std::string in_nhwc_path = "./deconv/deconv_fp32_nhwc_input1.bin";
   auto in_nhwc = reinterpret_cast<float *>(mindspore::lite::ReadFile(in_nhwc_path.c_str(), &buffer_size));
-  memcpy(in_t->Data(), in_nhwc, buffer_size);
+  memcpy(in_t->MutableData(), in_nhwc, buffer_size);
   inputs_->push_back(in_t);
   std::vector<int> w_nhwc_dims = {30, 3, 3, 40};
-  auto *weight_t =
-    new lite::tensor::Tensor(kNumberTypeFloat, w_nhwc_dims, schema::Format_NHWC, static_cast<schema::NodeType>(1));
+  auto *weight_t = new lite::Tensor(kNumberTypeFloat, w_nhwc_dims, schema::Format_NHWC, lite::Tensor::Category::CONST);
   weight_t->MallocData();
   std::string weight_path = "./deconv/deconv_fp32_nchw_weight1.bin";
   auto weight_nchw = reinterpret_cast<float *>(mindspore::lite::ReadFile(weight_path.c_str(), &buffer_size));
-  PackNCHWToNHWCFp32(weight_nchw, weight_t->Data(), weight_t->Batch(), weight_t->Width() * weight_t->Height(),
+  PackNCHWToNHWCFp32(weight_nchw, weight_t->MutableData(), weight_t->Batch(), weight_t->Width() * weight_t->Height(),
                      weight_t->Channel());
   inputs_->push_back(weight_t);
-  auto *bias_t =
-    new lite::tensor::Tensor(kNumberTypeFloat, {40}, schema::Format_NHWC, static_cast<schema::NodeType>(1));
+  auto *bias_t = new lite::Tensor(kNumberTypeFloat, {40}, schema::Format_NHWC, lite::Tensor::Category::CONST);
   bias_t->MallocData();
   std::string bias_path = "./deconv/deconv_fp32_nchw_bias1.bin";
   auto bias = mindspore::lite::ReadFile(bias_path.c_str(), &buffer_size);
-  memcpy(bias_t->Data(), bias, buffer_size);
+  memcpy(bias_t->MutableData(), bias, buffer_size);
   inputs_->push_back(bias_t);
   std::vector<int> out_nhwc_dims = {1, 302, 302, 40};
-  auto *out_t =
-    new lite::tensor::Tensor(kNumberTypeFloat, out_nhwc_dims, schema::Format_NHWC, static_cast<schema::NodeType>(1));
+  auto *out_t = new lite::Tensor(kNumberTypeFloat, out_nhwc_dims, schema::Format_NHWC, lite::Tensor::Category::CONST);
   out_t->MallocData();
   outputs_->push_back(out_t);
@@ -692,8 +683,8 @@ int DeConvTestInit4(std::vector<lite::tensor::Tensor *> *inputs_, std::vector<l
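The large DeConvTest4 case swaps inline arrays for .bin fixtures on disk. A sketch of the load pattern shared by input, weight, and bias; the path is the one from the hunk above, and `ReadFile`'s buffer-and-size contract is taken from these call sites:

  size_t buffer_size;
  auto *in_t = new lite::Tensor(kNumberTypeFloat, {1, 300, 300, 30}, schema::Format_NHWC,
                                lite::Tensor::Category::CONST);
  in_t->MallocData();
  // ReadFile returns a heap buffer and reports the byte count it read
  // through the out-parameter.
  auto in_nhwc = reinterpret_cast<float *>(
      mindspore::lite::ReadFile("./deconv/deconv_fp32_nhwc_input1.bin", &buffer_size));
  memcpy(in_t->MutableData(), in_nhwc, buffer_size);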
-  std::vector<lite::tensor::Tensor *> inputs_;
-  std::vector<lite::tensor::Tensor *> outputs_;
+  std::vector<lite::Tensor *> inputs_;
+  std::vector<lite::Tensor *> outputs_;
   auto deconv_param = new ConvParameter();
   float *correct;
   int total_size = DeConvTestInit4(&inputs_, &outputs_, deconv_param, &correct);
@@ -704,7 +695,7 @@ TEST_F(TestDeConvolutionFp32, DeConvTest4) {
   deconv->Init();
   deconv->Run();
-  CompareOutputData(reinterpret_cast<float *>(outputs_[0]->Data()), correct, total_size, 0.0001);
+  CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001);
   /* running warm up */
   for (int i = 0; i < 0; i++) {
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/detection_post_process_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/detection_post_process_test.cc
index 782e01439c..d637ec1d03 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/detection_post_process_test.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/detection_post_process_test.cc
@@ -27,38 +27,38 @@ class TestDetectionPostProcessFp32 : public mindspore::CommonTest {
   TestDetectionPostProcessFp32() {}
 };
-void DetectionPostProcessTestInit(std::vector<lite::tensor::Tensor *> *inputs_,
-                                  std::vector<lite::tensor::Tensor *> *outputs_, DetectionPostProcessParameter *param) {
+void DetectionPostProcessTestInit(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
+                                  DetectionPostProcessParameter *param) {
   std::string input_boxes_path = "./test_data/detectionPostProcess/input_boxes.bin";
   size_t input_boxes_size;
   auto input_boxes_data =
     reinterpret_cast<float *>(mindspore::lite::ReadFile(input_boxes_path.c_str(), &input_boxes_size));
-  auto *input_boxes = new lite::tensor::Tensor;
+  auto *input_boxes = new lite::Tensor;
   input_boxes->set_data_type(kNumberTypeFloat32);
   input_boxes->SetFormat(schema::Format_NHWC);
   input_boxes->set_shape({1, 1917, 4});
   input_boxes->MallocData();
-  memcpy(input_boxes->Data(), input_boxes_data, input_boxes_size);
+  memcpy(input_boxes->MutableData(), input_boxes_data, input_boxes_size);
   inputs_->push_back(input_boxes);
   std::string input_scores_path = "./test_data/detectionPostProcess/input_scores.bin";
   size_t input_scores_size;
   auto input_scores_data =
     reinterpret_cast<float *>(mindspore::lite::ReadFile(input_scores_path.c_str(), &input_scores_size));
-  auto *input_scores = new lite::tensor::Tensor;
+  auto *input_scores = new lite::Tensor;
   input_scores->set_data_type(kNumberTypeFloat32);
   input_scores->SetFormat(schema::Format_NHWC);
   input_scores->set_shape({1, 1917, 91});
   input_scores->MallocData();
-  memcpy(input_scores->Data(), input_scores_data, input_scores_size);
+  memcpy(input_scores->MutableData(), input_scores_data, input_scores_size);
   inputs_->push_back(input_scores);
   std::string input_anchors_path = "./test_data/detectionPostProcess/input_anchors.bin";
   size_t input_anchors_size;
   auto input_anchors_data =
     reinterpret_cast<float *>(mindspore::lite::ReadFile(input_anchors_path.c_str(), &input_anchors_size));
-  auto *input_anchors = new lite::tensor::Tensor;
-  lite::tensor::QuantArg quant_arg;
+  auto *input_anchors = new lite::Tensor;
+  lite::QuantArg quant_arg;
   quant_arg.zeroPoint = 0;
   quant_arg.scale = 0.00645306;
   input_anchors->AddQuantParam(quant_arg);
@@ -66,36 +66,36 @@ void DetectionPostProcessTestInit(std::vector<lite::tensor::Tensor *> *inputs_,
   input_anchors->SetFormat(schema::Format_NHWC);
   input_anchors->set_shape({1917, 4});
   input_anchors->MallocData();
-  memcpy(input_anchors->Data(), input_anchors_data, input_anchors_size);
+  memcpy(input_anchors->MutableData(), input_anchors_data, input_anchors_size);
   inputs_->push_back(input_anchors);
-  auto *output_boxes = new lite::tensor::Tensor;
+  auto *output_boxes =
new lite::Tensor; output_boxes->set_data_type(kNumberTypeFloat32); output_boxes->set_shape({1, 10, 4}); output_boxes->SetFormat(schema::Format_NHWC); output_boxes->MallocData(); - memset(output_boxes->Data(), 0, output_boxes->ElementsNum() * sizeof(float)); + memset(output_boxes->MutableData(), 0, output_boxes->ElementsNum() * sizeof(float)); - auto *output_classes = new lite::tensor::Tensor; + auto *output_classes = new lite::Tensor; output_classes->set_data_type(kNumberTypeFloat32); output_classes->set_shape({1, 10}); output_classes->SetFormat(schema::Format_NHWC); output_classes->MallocData(); - memset(output_classes->Data(), 0, output_classes->ElementsNum() * sizeof(float)); + memset(output_classes->MutableData(), 0, output_classes->ElementsNum() * sizeof(float)); - auto *output_scores = new lite::tensor::Tensor; + auto *output_scores = new lite::Tensor; output_scores->set_data_type(kNumberTypeFloat32); output_scores->set_shape({1, 10}); output_scores->SetFormat(schema::Format_NHWC); output_scores->MallocData(); - memset(output_scores->Data(), 0, output_scores->ElementsNum() * sizeof(float)); + memset(output_scores->MutableData(), 0, output_scores->ElementsNum() * sizeof(float)); - auto *output_num_det = new lite::tensor::Tensor; + auto *output_num_det = new lite::Tensor; output_num_det->set_data_type(kNumberTypeFloat32); output_num_det->set_shape({1}); output_num_det->SetFormat(schema::Format_NHWC); output_num_det->MallocData(); - memset(output_num_det->Data(), 0, output_num_det->ElementsNum() * sizeof(float)); + memset(output_num_det->MutableData(), 0, output_num_det->ElementsNum() * sizeof(float)); outputs_->push_back(output_boxes); outputs_->push_back(output_classes); @@ -117,8 +117,8 @@ void DetectionPostProcessTestInit(std::vector *inputs_, } TEST_F(TestDetectionPostProcessFp32, Fast) { - std::vector inputs_; - std::vector outputs_; + std::vector inputs_; + std::vector outputs_; auto param = new DetectionPostProcessParameter(); DetectionPostProcessTestInit(&inputs_, &outputs_, param); auto ctx = new lite::Context; @@ -128,28 +128,28 @@ TEST_F(TestDetectionPostProcessFp32, Fast) { op->Init(); op->Run(); - float *output_boxes = reinterpret_cast(outputs_[0]->Data()); + float *output_boxes = reinterpret_cast(outputs_[0]->MutableData()); size_t output_boxes_size; std::string output_boxes_path = "./test_data/detectionPostProcess/output_0.bin"; auto correct_boxes = reinterpret_cast(mindspore::lite::ReadFile(output_boxes_path.c_str(), &output_boxes_size)); CompareOutputData(output_boxes, correct_boxes, outputs_[0]->ElementsNum(), 0.0001); - float *output_classes = reinterpret_cast(outputs_[1]->Data()); + float *output_classes = reinterpret_cast(outputs_[1]->MutableData()); size_t output_classes_size; std::string output_classes_path = "./test_data/detectionPostProcess/output_1.bin"; auto correct_classes = reinterpret_cast(mindspore::lite::ReadFile(output_classes_path.c_str(), &output_classes_size)); CompareOutputData(output_classes, correct_classes, outputs_[1]->ElementsNum(), 0.0001); - float *output_scores = reinterpret_cast(outputs_[2]->Data()); + float *output_scores = reinterpret_cast(outputs_[2]->MutableData()); size_t output_scores_size; std::string output_scores_path = "./test_data/detectionPostProcess/output_2.bin"; auto correct_scores = reinterpret_cast(mindspore::lite::ReadFile(output_scores_path.c_str(), &output_scores_size)); CompareOutputData(output_scores, correct_scores, outputs_[2]->ElementsNum(), 0.0001); - float *output_num_det = reinterpret_cast(outputs_[3]->Data()); + 
float *output_num_det = reinterpret_cast(outputs_[3]->MutableData()); size_t output_num_det_size; std::string output_num_det_path = "./test_data/detectionPostProcess/output_3.bin"; auto correct_num_det = diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/elu_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/elu_fp32_test.cc index e1856ae126..195da4ca28 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/elu_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/elu_fp32_test.cc @@ -22,7 +22,7 @@ #include "utils/log_adapter.h" namespace mindspore { -using mindspore::lite::tensor::Tensor; +using mindspore::lite::Tensor; class TestEluFp32 : public mindspore::CommonTest { public: @@ -30,13 +30,13 @@ class TestEluFp32 : public mindspore::CommonTest { }; void EluTestInit(std::vector *inputs_, std::vector *outputs_, EluParameter *elu_param) { - Tensor *in_t_first = new Tensor(kNumberTypeFloat32, {6, 2}, schema::Format_NHWC, static_cast(1)); + Tensor *in_t_first = new Tensor(kNumberTypeFloat32, {6, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST); in_t_first->MallocData(); float in_first[] = {-1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 0}; - memcpy(in_t_first->Data(), in_first, sizeof(float) * in_t_first->ElementsNum()); + memcpy(in_t_first->MutableData(), in_first, sizeof(float) * in_t_first->ElementsNum()); inputs_->push_back(in_t_first); - Tensor *outputs_t = new Tensor(kNumberTypeFloat32, {6, 2}, schema::Format_NHWC, static_cast(1)); + Tensor *outputs_t = new Tensor(kNumberTypeFloat32, {6, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST); outputs_t->MallocData(); outputs_->push_back(outputs_t); @@ -62,7 +62,7 @@ TEST_F(TestEluFp32, EluTest) { std::cout << outputs_.front()->shape()[i] << ' '; } std::cout << std::endl; - float *out = reinterpret_cast(outputs_.front()->Data()); + float *out = reinterpret_cast(outputs_.front()->MutableData()); for (int i = 0; i < outputs_.front()->ElementsNum(); ++i) { std::cout << out[i] << ' '; } diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/embedding_lookup_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/embedding_lookup_fp32_test.cc index ff9ec8c554..484c33907d 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/embedding_lookup_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/embedding_lookup_fp32_test.cc @@ -22,7 +22,7 @@ #include "utils/log_adapter.h" namespace mindspore { -using mindspore::lite::tensor::Tensor; +using mindspore::lite::Tensor; class TestEmbeddingLookupFp32 : public mindspore::CommonTest { public: @@ -31,25 +31,25 @@ class TestEmbeddingLookupFp32 : public mindspore::CommonTest { void ElTestInit(std::vector *inputs_, std::vector *outputs_, EmbeddingLookupParameter *embedding_lookup_param) { - Tensor *in_t_first = new Tensor(kNumberTypeFloat32, {6, 2}, schema::Format_NHWC, static_cast(1)); + Tensor *in_t_first = new Tensor(kNumberTypeFloat32, {6, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST); in_t_first->MallocData(); float in_first[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; - memcpy(in_t_first->Data(), in_first, sizeof(float) * in_t_first->ElementsNum()); + memcpy(in_t_first->MutableData(), in_first, sizeof(float) * in_t_first->ElementsNum()); inputs_->push_back(in_t_first); - Tensor *in_t_second = new Tensor(kNumberTypeFloat32, {4, 2}, schema::Format_NHWC, static_cast(1)); + Tensor *in_t_second = new Tensor(kNumberTypeFloat32, {4, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST); 
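  // Note on the *TestInit helpers in this file and the ones that follow: each
  // helper news every tensor, calls MallocData() on it, pushes it into the
  // caller's inputs_/outputs_ vectors, and returns out_t->ElementsNum() so the
  // test can later call CompareOutputData over exactly that many elements.
  // Ownership stays with the test body, which deletes every tensor after
  // Run(). (Contract summarized from the helpers in this patch, not from
  // separate documentation.)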
in_t_second->MallocData(); float in_second[] = {1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8}; - memcpy(in_t_second->Data(), in_second, sizeof(float) * in_t_second->ElementsNum()); + memcpy(in_t_second->MutableData(), in_second, sizeof(float) * in_t_second->ElementsNum()); inputs_->push_back(in_t_second); - Tensor *ids_t = new Tensor(kNumberTypeFloat32, {2, 3}, schema::Format_NHWC, static_cast(1)); + Tensor *ids_t = new Tensor(kNumberTypeFloat32, {2, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST); ids_t->MallocData(); int ids[] = {1, 9, 2, 4, 6, 7}; - memcpy(ids_t->Data(), ids, sizeof(int) * ids_t->ElementsNum()); + memcpy(ids_t->MutableData(), ids, sizeof(int) * ids_t->ElementsNum()); inputs_->push_back(ids_t); - Tensor *outputs_t = new Tensor(kNumberTypeInt32, {2, 3, 2}, schema::Format_NHWC, static_cast(1)); + Tensor *outputs_t = new Tensor(kNumberTypeInt32, {2, 3, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST); outputs_t->MallocData(); outputs_->push_back(outputs_t); @@ -75,7 +75,7 @@ TEST_F(TestEmbeddingLookupFp32, ElTest) { std::cout << outputs_.front()->shape()[i] << ' '; } std::cout << std::endl; - float *out = reinterpret_cast(outputs_.front()->Data()); + float *out = reinterpret_cast(outputs_.front()->MutableData()); for (int i = 0; i < outputs_.front()->ElementsNum(); ++i) { std::cout << out[i] << ' '; } diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/fullconnection_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/fullconnection_fp32_tests.cc index f4da518bd2..fb2efa1e69 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/fullconnection_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/fullconnection_fp32_tests.cc @@ -24,38 +24,38 @@ #include "nnacl/fp32/matmul.h" namespace mindspore { -using mindspore::lite::tensor::Tensor; +using mindspore::lite::Tensor; class TestFcFp32 : public mindspore::CommonTest { public: TestFcFp32() {} }; -int FcTestInit1(std::vector *inputs_, std::vector *outputs_, +int FcTestInit1(std::vector *inputs_, std::vector *outputs_, MatMulParameter *matmal_param, float **correct) { - Tensor *in_t = new Tensor(kNumberTypeFloat, {2, 2, 2, 2}, schema::Format_NHWC, static_cast(1)); + Tensor *in_t = new Tensor(kNumberTypeFloat, {2, 2, 2, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST); in_t->MallocData(); float in[] = {-3.2366564, -4.7733846, -7.8329225, 16.146885, 5.060793, -6.1471, -1.7680453, -6.5721383, 17.87506, -5.1192183, 10.742863, 1.4536934, 19.693445, 19.45783, 5.063163, 0.5234792}; - memcpy(in_t->Data(), in, sizeof(float) * in_t->ElementsNum()); + memcpy(in_t->MutableData(), in, sizeof(float) * in_t->ElementsNum()); inputs_->push_back(in_t); - Tensor *weight_t = new Tensor(kNumberTypeFloat, {3, 8}, schema::Format_NHWC, static_cast(1)); + Tensor *weight_t = new Tensor(kNumberTypeFloat, {3, 8}, schema::Format_NHWC, lite::Tensor::Category::CONST); weight_t->MallocData(); float weight[] = {-0.0024438887, 0.0006738146, -0.008169129, 0.0021510671, -0.012470592, -0.0053063435, 0.006050155, 0.008656233, 0.012911413, -0.0028635843, -0.00034080597, -0.0010622552, -0.012254699, -0.01312836, 0.0025241964, -0.004706142, 0.002451482, -0.009558459, 0.004481974, 0.0033251503, -0.011705584, -0.001720293, -0.0039410214, -0.0073637343}; - memcpy(weight_t->Data(), weight, sizeof(float) * weight_t->ElementsNum()); + memcpy(weight_t->MutableData(), weight, sizeof(float) * weight_t->ElementsNum()); inputs_->push_back(weight_t); - Tensor *bias_t = new Tensor(kNumberTypeFloat, {3}, 
schema::Format_NHWC, static_cast(1)); + Tensor *bias_t = new Tensor(kNumberTypeFloat, {3}, schema::Format_NHWC, lite::Tensor::Category::CONST); bias_t->MallocData(); float bias[] = {1.6103756, -0.9872417, 0.546849}; - memcpy(bias_t->Data(), bias, sizeof(float) * bias_t->ElementsNum()); + memcpy(bias_t->MutableData(), bias, sizeof(float) * bias_t->ElementsNum()); inputs_->push_back(bias_t); - Tensor *out_t = new Tensor(kNumberTypeFloat, {2, 3}, schema::Format_NHWC, static_cast(1)); + Tensor *out_t = new Tensor(kNumberTypeFloat, {2, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST); out_t->MallocData(); outputs_->push_back(out_t); @@ -71,8 +71,8 @@ int FcTestInit1(std::vector *inputs_, std::vector inputs_; - std::vector outputs_; + std::vector inputs_; + std::vector outputs_; auto matmul_param = new MatMulParameter(); float *correct; int total_size = FcTestInit1(&inputs_, &outputs_, matmul_param, &correct); @@ -83,35 +83,35 @@ TEST_F(TestFcFp32, FcTest1) { fc->Init(); fc->Run(); - CompareOutputData(reinterpret_cast(outputs_[0]->Data()), correct, total_size, 0.0001); + CompareOutputData(reinterpret_cast(outputs_[0]->MutableData()), correct, total_size, 0.0001); } -int FcTestInit2(std::vector *inputs_, std::vector *outputs_, +int FcTestInit2(std::vector *inputs_, std::vector *outputs_, MatMulParameter *matmal_param, float **correct) { size_t buffer_size; - Tensor *in_t = new Tensor(kNumberTypeFloat, {20, 4, 2, 10}, schema::Format_NCHW, static_cast(1)); + Tensor *in_t = new Tensor(kNumberTypeFloat, {20, 4, 2, 10}, schema::Format_NCHW, lite::Tensor::Category::CONST); in_t->MallocData(); std::string in_path = "./matmul/FcFp32_input1.bin"; auto in_data = mindspore::lite::ReadFile(in_path.c_str(), &buffer_size); - memcpy(in_t->Data(), in_data, buffer_size); + memcpy(in_t->MutableData(), in_data, buffer_size); inputs_->push_back(in_t); - Tensor *weight_t = new Tensor(kNumberTypeFloat, {30, 80}, schema::Format_NCHW, static_cast(1)); + Tensor *weight_t = new Tensor(kNumberTypeFloat, {30, 80}, schema::Format_NCHW, lite::Tensor::Category::CONST); weight_t->MallocData(); std::string weight_path = "./matmul/FcFp32_weight1.bin"; auto w_data = mindspore::lite::ReadFile(weight_path.c_str(), &buffer_size); - memcpy(weight_t->Data(), w_data, buffer_size); + memcpy(weight_t->MutableData(), w_data, buffer_size); inputs_->push_back(weight_t); - Tensor *bias_t = new Tensor(kNumberTypeFloat, {30}, schema::Format_NCHW, static_cast(1)); + Tensor *bias_t = new Tensor(kNumberTypeFloat, {30}, schema::Format_NCHW, lite::Tensor::Category::CONST); bias_t->MallocData(); std::string bias_path = "./matmul/FcFp32_bias1.bin"; auto bias_data = mindspore::lite::ReadFile(bias_path.c_str(), &buffer_size); - memcpy(bias_t->Data(), bias_data, buffer_size); + memcpy(bias_t->MutableData(), bias_data, buffer_size); inputs_->push_back(bias_t); - Tensor *out_t = new Tensor(kNumberTypeFloat, {20, 30}, schema::Format_NCHW, static_cast(1)); + Tensor *out_t = new Tensor(kNumberTypeFloat, {20, 30}, schema::Format_NCHW, lite::Tensor::Category::CONST); out_t->MallocData(); outputs_->push_back(out_t); @@ -128,8 +128,8 @@ int FcTestInit2(std::vector *inputs_, std::vector inputs_; - std::vector outputs_; + std::vector inputs_; + std::vector outputs_; auto matmul_param = new MatMulParameter(); float *correct; int total_size = FcTestInit2(&inputs_, &outputs_, matmul_param, &correct); @@ -140,6 +140,6 @@ TEST_F(TestFcFp32, FcTest2) { fc->Init(); fc->Run(); - CompareOutputData(reinterpret_cast(outputs_[0]->Data()), correct, total_size, 0.0001); + 
CompareOutputData(reinterpret_cast(outputs_[0]->MutableData()), correct, total_size, 0.0001); } } // namespace mindspore diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc index 108815e73e..32ee1a425e 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc @@ -34,27 +34,27 @@ void InitLstmParam(LstmParameter *lstm_param) { lstm_param->bidirectional_ = false; } -void InitLstmForwardCreator(std::vector *inputs, std::vector *outputs, +void InitLstmForwardCreator(std::vector *inputs, std::vector *outputs, const LstmParameter *lstm_param) { // prepare input std::vector input_data = {1.3889, -0.3006, -0.1787, 2.1504, -0.3181, 0.4945, -0.4758, -0.8187}; - auto *input = new lite::tensor::Tensor; + auto *input = new lite::Tensor; input->set_data_type(kNumberTypeFloat32); input->set_shape({lstm_param->seq_len_, lstm_param->batch_, lstm_param->input_size_}); input->MallocData(); - memcpy(input->Data(), input_data.data(), input_data.size() * sizeof(float)); + memcpy(input->MutableData(), input_data.data(), input_data.size() * sizeof(float)); // prepare weight_i std::vector weight_i_data = {0.21368974, -0.3778776, 0.05025542, 0.09011161, 0.18355745, 0.5491228, -0.14186832, -0.4655916, 0.49541366, -0.44039622, 0.5625571, 0.23325664, 0.3449825, -0.42750397, 0.01911497, -0.4125802, -0.56690466, 0.50593233, -0.29129684, -0.27841482, 0.01964372, -0.42543447, 0.41720617, -0.30054367}; - auto *weight_i = new lite::tensor::Tensor; + auto *weight_i = new lite::Tensor; weight_i->set_data_type(kNumberTypeFloat32); weight_i->SetFormat(schema::Format_NHWC); weight_i->set_shape({1, lstm_param->hidden_size_ * 4, lstm_param->input_size_}); weight_i->MallocData(); - memcpy(weight_i->Data(), weight_i_data.data(), weight_i_data.size() * sizeof(float)); + memcpy(weight_i->MutableData(), weight_i_data.data(), weight_i_data.size() * sizeof(float)); // prepare weight_r std::vector weight_h_data = { @@ -62,33 +62,33 @@ void InitLstmForwardCreator(std::vector *inputs, std::ve 0.09828371, 0.33358777, 0.53381383, -0.39431244, -0.06005383, -0.3520246, 0.42687547, 0.5772828, 0.5380008, -0.16130409, -0.24737108, 0.42409766, -0.50648475, 0.48223662, -0.5221103, -0.49216837, -0.29084128, 0.3408438, 0.34080023, 0.49467337, 0.23473483, 0.01759732, 0.04691631, 0.45574808, -0.29481018, 0.29442167, -0.36718}; - auto *weight_h = new lite::tensor::Tensor; + auto *weight_h = new lite::Tensor; weight_h->set_data_type(kNumberTypeFloat32); weight_h->SetFormat(schema::Format_NHWC); weight_h->set_shape({1, lstm_param->hidden_size_ * 4, lstm_param->hidden_size_}); weight_h->MallocData(); - memcpy(weight_h->Data(), weight_h_data.data(), weight_h_data.size() * sizeof(float)); + memcpy(weight_h->MutableData(), weight_h_data.data(), weight_h_data.size() * sizeof(float)); // prepare bias std::vector bias_data = {-0.00207639, 0.16391152, -0.00069344, -0.32945693, -0.367423, 0.28301108, -0.17930457, 0.5278388, 0.12598747, -0.53130764, 0.1479364, 0.16695255, -0.00708795, -0.46417096, -0.23966661, -0.17496741, -0.19166365, -0.50466555, -0.23593256, -0.3911457, 0.51128435, 0.5128727, 0.253451, -0.51891875}; - auto *bias = new lite::tensor::Tensor; + auto *bias = new lite::Tensor; bias->set_data_type(kNumberTypeFloat32); bias->SetFormat(schema::Format_NHWC); bias->set_shape({1, lstm_param->hidden_size_ * 4 * 2}); bias->MallocData(); - memcpy(bias->Data(), 
bias_data.data(), bias_data.size() * sizeof(float)); + memcpy(bias->MutableData(), bias_data.data(), bias_data.size() * sizeof(float)); // prepare state std::vector state_data = {0, 0, 0}; - auto *state = new lite::tensor::Tensor; + auto *state = new lite::Tensor; state->set_data_type(kNumberTypeFloat32); state->SetFormat(schema::Format_NHWC); state->set_shape({1, lstm_param->batch_, lstm_param->hidden_size_}); state->MallocData(); - memcpy(state->Data(), state_data.data(), state_data.size() * sizeof(float)); + memcpy(state->MutableData(), state_data.data(), state_data.size() * sizeof(float)); inputs->push_back(input); inputs->push_back(weight_i); @@ -98,39 +98,40 @@ void InitLstmForwardCreator(std::vector *inputs, std::ve inputs->push_back(state); // malloc output buffer, for arm cpu, format: N C4 H W 4 - auto *output = new lite::tensor::Tensor; + auto *output = new lite::Tensor; output->set_data_type(kNumberTypeFloat32); output->set_shape({lstm_param->seq_len_, lstm_param->batch_, lstm_param->hidden_size_}); output->SetFormat(schema::Format_NHWC); output->MallocData(); - memset(output->Data(), 0, output->ElementsNum() * sizeof(float)); + memset(output->MutableData(), 0, output->ElementsNum() * sizeof(float)); - auto *cell_state = new lite::tensor::Tensor; + auto *cell_state = new lite::Tensor; cell_state->set_data_type(kNumberTypeFloat32); cell_state->set_shape({1, lstm_param->batch_, lstm_param->hidden_size_}); cell_state->SetFormat(schema::Format_NHWC); cell_state->MallocData(); - memset(cell_state->Data(), 0, cell_state->ElementsNum() * sizeof(float)); + memset(cell_state->MutableData(), 0, cell_state->ElementsNum() * sizeof(float)); - auto *hidden_state = new lite::tensor::Tensor; + auto *hidden_state = new lite::Tensor; hidden_state->set_data_type(kNumberTypeFloat32); hidden_state->set_shape({1, lstm_param->batch_, lstm_param->hidden_size_}); hidden_state->SetFormat(schema::Format_NHWC); hidden_state->MallocData(); - memset(hidden_state->Data(), 0, hidden_state->ElementsNum() * sizeof(float)); + memset(hidden_state->MutableData(), 0, hidden_state->ElementsNum() * sizeof(float)); outputs->push_back(output); outputs->push_back(cell_state); outputs->push_back(hidden_state); } -void CompareOutput(lite::tensor::Tensor *output, std::vector data) { +void CompareOutput(lite::Tensor *output, std::vector data) { for (int i = 0; i < output->ElementsNum(); i++) { - std::cout << reinterpret_cast(output->Data())[i] << ", "; + std::cout << reinterpret_cast(output->MutableData())[i] << ", "; } std::cout << std::endl; - CommonTest::CompareOutputData(reinterpret_cast(output->Data()), data.data(), output->ElementsNum(), 0.0001); + CommonTest::CompareOutputData(reinterpret_cast(output->MutableData()), data.data(), output->ElementsNum(), + 0.0001); } TEST_F(LstmFp32, LstmForwardFp32Accuracy) { @@ -143,16 +144,16 @@ TEST_F(LstmFp32, LstmForwardFp32Accuracy) { ctx->thread_num_ = 1; // init tensor - std::vector inputs; - std::vector outputs; + std::vector inputs; + std::vector outputs; InitLstmForwardCreator(&inputs, &outputs, lstm_param); // register op kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, mindspore::schema::PrimitiveType_Lstm}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - kernel::LiteKernel *kernel = creator(inputs, outputs, reinterpret_cast(lstm_param), ctx, desc, - nullptr); + kernel::LiteKernel *kernel = + creator(inputs, outputs, reinterpret_cast(lstm_param), ctx, desc, nullptr); ASSERT_NE(kernel, nullptr); // 
op run kernel->Run(); @@ -179,15 +180,15 @@ TEST_F(LstmFp32, LstmForwardFp32Accuracy) { MS_LOG(INFO) << "LstmFp32 forward accuracy passed"; } -void InitLstmBackwardCreator(std::vector *inputs, std::vector *outputs, +void InitLstmBackwardCreator(std::vector *inputs, std::vector *outputs, const LstmParameter *lstm_param) { // prepare input std::vector input_data = {1.4305, 0.5342, -0.9221, 0.0527, 2.3770, -0.3697, -0.2833, -2.1285}; - auto *input = new lite::tensor::Tensor; + auto *input = new lite::Tensor; input->set_data_type(kNumberTypeFloat32); input->set_shape({lstm_param->seq_len_, lstm_param->batch_, lstm_param->input_size_}); input->MallocData(); - memcpy(input->Data(), input_data.data(), input_data.size() * sizeof(float)); + memcpy(input->MutableData(), input_data.data(), input_data.size() * sizeof(float)); // prepare weight_i std::vector weight_i_data = { @@ -197,12 +198,12 @@ void InitLstmBackwardCreator(std::vector *inputs, std::v 0.5512280, -0.2763766, -0.3593936, -0.5181975, 0.3469863, -0.38533931, 0.010202527, -0.46598294, -0.5740513, 0.06127524, -0.03960543, 0.2478809, -0.17296993, 0.19159525, -0.4976995, 0.05985528, 0.3653409, 0.386924, 0.3170289, -0.08830952, -0.31105759, 0.3110240, 0.15174299, 0.287579894}; - auto *weight_i = new lite::tensor::Tensor; + auto *weight_i = new lite::Tensor; weight_i->set_data_type(kNumberTypeFloat32); weight_i->SetFormat(schema::Format_NHWC); weight_i->set_shape({2, lstm_param->hidden_size_ * 4, lstm_param->input_size_}); weight_i->MallocData(); - memcpy(weight_i->Data(), weight_i_data.data(), weight_i_data.size() * sizeof(float)); + memcpy(weight_i->MutableData(), weight_i_data.data(), weight_i_data.size() * sizeof(float)); // prepare weight_r std::vector weight_h_data = { @@ -215,12 +216,12 @@ void InitLstmBackwardCreator(std::vector *inputs, std::v 0.0121276974, -0.53553336, 0.121099889, 0.060554087, 0.46259057, -0.49666053, 0.090806663, 0.20542401, -0.38674920, -0.23874849, -0.5222138, 0.57537007, 0.113343358, -0.35233467, -0.25532332, 0.159506142, 0.35996592, -0.201961308, -0.16323345, 0.119177639, -0.12677872, -0.175229549, -0.160024613, -0.21058899}; - auto *weight_h = new lite::tensor::Tensor; + auto *weight_h = new lite::Tensor; weight_h->set_data_type(kNumberTypeFloat32); weight_h->SetFormat(schema::Format_NHWC); weight_h->set_shape({2, lstm_param->hidden_size_ * 4, lstm_param->hidden_size_}); weight_h->MallocData(); - memcpy(weight_h->Data(), weight_h_data.data(), weight_h_data.size() * sizeof(float)); + memcpy(weight_h->MutableData(), weight_h_data.data(), weight_h_data.size() * sizeof(float)); // prepare bias std::vector bias_data = { @@ -230,21 +231,21 @@ void InitLstmBackwardCreator(std::vector *inputs, std::v 0.48683023, 0.282384872, 0.13399660, -0.382526844, -0.23370727, -0.184681564, 0.45679104, -0.339453905, 0.452010273, 0.0552094578, 0.328843057, 0.127738714, -0.127084732, -0.334061294, -0.46742400, -0.401568055, 0.23712641, -0.052937567, 0.272351622, 0.42767739, 0.303884744, -0.46025499, -0.43985402, 0.256422877}; - auto *bias = new lite::tensor::Tensor; + auto *bias = new lite::Tensor; bias->set_data_type(kNumberTypeFloat32); bias->SetFormat(schema::Format_NHWC); bias->set_shape({2, lstm_param->hidden_size_ * 4 * 2}); bias->MallocData(); - memcpy(bias->Data(), bias_data.data(), bias_data.size() * sizeof(float)); + memcpy(bias->MutableData(), bias_data.data(), bias_data.size() * sizeof(float)); // prepare state std::vector state_data = {0, 0, 0, 0, 0, 0}; - auto *state = new lite::tensor::Tensor; + auto *state = new 
lite::Tensor; state->set_data_type(kNumberTypeFloat32); state->SetFormat(schema::Format_NHWC); state->set_shape({2, lstm_param->batch_, lstm_param->hidden_size_}); state->MallocData(); - memcpy(state->Data(), state_data.data(), state_data.size() * sizeof(float)); + memcpy(state->MutableData(), state_data.data(), state_data.size() * sizeof(float)); inputs->push_back(input); inputs->push_back(weight_i); @@ -254,26 +255,26 @@ void InitLstmBackwardCreator(std::vector *inputs, std::v inputs->push_back(state); // malloc output buffer, for arm cpu, format: N C4 H W 4 - auto *output = new lite::tensor::Tensor; + auto *output = new lite::Tensor; output->set_data_type(kNumberTypeFloat32); output->set_shape({lstm_param->seq_len_, 2, lstm_param->batch_, lstm_param->hidden_size_}); output->SetFormat(schema::Format_NHWC); output->MallocData(); - memset(output->Data(), 0, output->ElementsNum() * sizeof(float)); + memset(output->MutableData(), 0, output->ElementsNum() * sizeof(float)); - auto *cell_state = new lite::tensor::Tensor; + auto *cell_state = new lite::Tensor; cell_state->set_data_type(kNumberTypeFloat32); cell_state->set_shape({2, lstm_param->batch_, lstm_param->hidden_size_}); cell_state->SetFormat(schema::Format_NHWC); cell_state->MallocData(); - memset(cell_state->Data(), 0, cell_state->ElementsNum() * sizeof(float)); + memset(cell_state->MutableData(), 0, cell_state->ElementsNum() * sizeof(float)); - auto *hidden_state = new lite::tensor::Tensor; + auto *hidden_state = new lite::Tensor; hidden_state->set_data_type(kNumberTypeFloat32); hidden_state->set_shape({2, lstm_param->batch_, lstm_param->hidden_size_}); hidden_state->SetFormat(schema::Format_NHWC); hidden_state->MallocData(); - memset(hidden_state->Data(), 0, hidden_state->ElementsNum() * sizeof(float)); + memset(hidden_state->MutableData(), 0, hidden_state->ElementsNum() * sizeof(float)); outputs->push_back(output); outputs->push_back(cell_state); @@ -291,16 +292,16 @@ TEST_F(LstmFp32, LstmBackwardFp32Accuracy) { ctx->thread_num_ = 1; // init tensor - std::vector inputs; - std::vector outputs; + std::vector inputs; + std::vector outputs; InitLstmBackwardCreator(&inputs, &outputs, lstm_param); // register op kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, mindspore::schema::PrimitiveType_Lstm}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - kernel::LiteKernel *kernel = creator(inputs, outputs, reinterpret_cast(lstm_param), ctx, desc, - nullptr); + kernel::LiteKernel *kernel = + creator(inputs, outputs, reinterpret_cast(lstm_param), ctx, desc, nullptr); ASSERT_NE(kernel, nullptr); // op run kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc index d9c2d011d6..444c5ce9db 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc @@ -179,52 +179,44 @@ TEST_F(TestMatMulFp32, Row8x82RowTest4) { CompareOutputData(out, co, 64, 0.0001); } -int MMTestInit(std::vector *inputs_, std::vector *outputs_, - float *a_ptr, float *b_ptr, std::vector a_shape, std::vector b_shape, - std::vector c_shape) { - auto in_t = - new lite::tensor::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, static_cast(1)); +int MMTestInit(std::vector *inputs_, std::vector *outputs_, float *a_ptr, float *b_ptr, + std::vector a_shape, std::vector b_shape, std::vector 
c_shape) { + auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST); in_t->MallocData(); - memcpy(in_t->Data(), a_ptr, sizeof(float) * in_t->ElementsNum()); + memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum()); inputs_->push_back(in_t); - auto weight_t = - new lite::tensor::Tensor(kNumberTypeFloat, b_shape, schema::Format_NHWC, static_cast(1)); + auto weight_t = new lite::Tensor(kNumberTypeFloat, b_shape, schema::Format_NHWC, lite::Tensor::Category::CONST); weight_t->MallocData(); - memcpy(weight_t->Data(), b_ptr, sizeof(float) * weight_t->ElementsNum()); + memcpy(weight_t->MutableData(), b_ptr, sizeof(float) * weight_t->ElementsNum()); inputs_->push_back(weight_t); - auto out_t = - new lite::tensor::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, static_cast(1)); + auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST); out_t->MallocData(); outputs_->push_back(out_t); return out_t->ElementsNum(); } -int MMTestInit2(std::vector *inputs_, std::vector *outputs_, - float *a_ptr, float *b_ptr, float *bias_ptr, std::vector a_shape, std::vector b_shape, - std::vector bias_shape, std::vector c_shape) { - auto in_t = - new lite::tensor::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, static_cast(1)); +int MMTestInit2(std::vector *inputs_, std::vector *outputs_, float *a_ptr, float *b_ptr, + float *bias_ptr, std::vector a_shape, std::vector b_shape, std::vector bias_shape, + std::vector c_shape) { + auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST); in_t->MallocData(); - memcpy(in_t->Data(), a_ptr, sizeof(float) * in_t->ElementsNum()); + memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum()); inputs_->push_back(in_t); - auto weight_t = - new lite::tensor::Tensor(kNumberTypeFloat, b_shape, schema::Format_NHWC, static_cast(1)); + auto weight_t = new lite::Tensor(kNumberTypeFloat, b_shape, schema::Format_NHWC, lite::Tensor::Category::CONST); weight_t->MallocData(); - memcpy(weight_t->Data(), b_ptr, sizeof(float) * weight_t->ElementsNum()); + memcpy(weight_t->MutableData(), b_ptr, sizeof(float) * weight_t->ElementsNum()); inputs_->push_back(weight_t); - auto bias_t = - new lite::tensor::Tensor(kNumberTypeFloat, bias_shape, schema::Format_NHWC, static_cast(1)); + auto bias_t = new lite::Tensor(kNumberTypeFloat, bias_shape, schema::Format_NHWC, lite::Tensor::Category::CONST); bias_t->MallocData(); - memcpy(bias_t->Data(), bias_ptr, sizeof(float) * bias_t->ElementsNum()); + memcpy(bias_t->MutableData(), bias_ptr, sizeof(float) * bias_t->ElementsNum()); inputs_->push_back(bias_t); - auto out_t = - new lite::tensor::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, static_cast(1)); + auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST); out_t->MallocData(); outputs_->push_back(out_t); @@ -232,8 +224,8 @@ int MMTestInit2(std::vector *inputs_, std::vector inputs_; - std::vector outputs_; + std::vector inputs_; + std::vector outputs_; auto matmul_param = new MatMulParameter(); matmul_param->a_transpose_ = false; matmul_param->b_transpose_ = false; @@ -255,15 +247,15 @@ TEST_F(TestMatMulFp32, simple) { mm->Run(); float correct[] = {-0.1256939023733139, -0.07744802534580231, 0.07410638779401779, -0.3049793541431427, -0.027687929570674896, -0.18109679222106934}; - CompareOutputData(reinterpret_cast(outputs_[0]->Data()), correct, total_size, 
0.0001); + CompareOutputData(reinterpret_cast(outputs_[0]->MutableData()), correct, total_size, 0.0001); delete mm; for (auto t : inputs_) delete t; for (auto t : outputs_) delete t; } TEST_F(TestMatMulFp32, simple_bias) { - std::vector inputs_; - std::vector outputs_; + std::vector inputs_; + std::vector outputs_; auto matmul_param = new MatMulParameter(); matmul_param->a_transpose_ = false; matmul_param->b_transpose_ = false; @@ -287,15 +279,15 @@ TEST_F(TestMatMulFp32, simple_bias) { mm->Run(); float correct[] = {-0.1256939023733139 + 1, -0.07744802534580231 + 2, 0.07410638779401779 + 3, -0.3049793541431427 + 1, -0.027687929570674896 + 2, -0.18109679222106934 + 3}; - CompareOutputData(reinterpret_cast(outputs_[0]->Data()), correct, total_size, 0.0001); + CompareOutputData(reinterpret_cast(outputs_[0]->MutableData()), correct, total_size, 0.0001); delete mm; for (auto t : inputs_) delete t; for (auto t : outputs_) delete t; } TEST_F(TestMatMulFp32, simple2) { - std::vector inputs_; - std::vector outputs_; + std::vector inputs_; + std::vector outputs_; auto matmul_param = new MatMulParameter(); matmul_param->a_transpose_ = false; matmul_param->b_transpose_ = false; @@ -377,15 +369,15 @@ TEST_F(TestMatMulFp32, simple2) { 346, 486, 451, 451, 490, 475, 339, 319, 409, 315, 324, 367, 493, 286, 348, 185, 240, 287, 214, 312, 265, 237, 218, 261, 316, 279, 186, 377, 319, 279, 304, 281, 207, 261, 209, 287, 270, 415, 378, 312, 388, 423, 273, 230, 294, 239, 243, 319, 346}; - CompareOutputData(reinterpret_cast(outputs_[0]->Data()), correct, total_size, 0.0001); + CompareOutputData(reinterpret_cast(outputs_[0]->MutableData()), correct, total_size, 0.0001); delete mm; for (auto t : inputs_) delete t; for (auto t : outputs_) delete t; } TEST_F(TestMatMulFp32, simple_transb) { - std::vector inputs_; - std::vector outputs_; + std::vector inputs_; + std::vector outputs_; auto matmul_param = new MatMulParameter(); matmul_param->a_transpose_ = false; matmul_param->b_transpose_ = true; @@ -406,15 +398,15 @@ TEST_F(TestMatMulFp32, simple_transb) { mm->Init(); mm->Run(); float correct[] = {0.00533547, 0.002545945, 0.062974121, -0.445441471, -0.246223617, -0.142070031}; - CompareOutputData(reinterpret_cast(outputs_[0]->Data()), correct, total_size, 0.0001); + CompareOutputData(reinterpret_cast(outputs_[0]->MutableData()), correct, total_size, 0.0001); delete mm; for (auto t : inputs_) delete t; for (auto t : outputs_) delete t; } TEST_F(TestMatMulFp32, batch) { - std::vector inputs_; - std::vector outputs_; + std::vector inputs_; + std::vector outputs_; auto matmul_param = new MatMulParameter(); matmul_param->a_transpose_ = false; matmul_param->b_transpose_ = true; @@ -459,7 +451,7 @@ TEST_F(TestMatMulFp32, batch) { -17.63555145263672, -8.490625381469727, 5.317771911621094, -14.561882019042969, -7.251564025878906, -2.508212089538574, 5.86458683013916, -3.466249465942383, 8.869029998779297, 25.034008026123047}; - CompareOutputData(reinterpret_cast(outputs_[0]->Data()), correct, total_size, 0.0001); + CompareOutputData(reinterpret_cast(outputs_[0]->MutableData()), correct, total_size, 0.0001); delete mm; for (auto t : inputs_) delete t; for (auto t : outputs_) delete t; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/power_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/power_fp32_tests.cc index 1e53abf055..1ec026b87d 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/power_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/power_fp32_tests.cc @@ 
-25,39 +25,33 @@ class TestPowerFp32 : public mindspore::CommonTest {
   TestPowerFp32() {}
 };
 
-int PowerTestInit(std::vector<lite::tensor::Tensor *> *inputs_, std::vector<lite::tensor::Tensor *> *outputs_,
-                  float *a_ptr, float *b_ptr, std::vector<int> a_shape, std::vector<int> b_shape,
-                  std::vector<int> c_shape) {
-  auto in_t =
-    new lite::tensor::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, static_cast<schema::NodeType>(1));
+int PowerTestInit(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_, float *a_ptr,
+                  float *b_ptr, std::vector<int> a_shape, std::vector<int> b_shape, std::vector<int> c_shape) {
+  auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
   in_t->MallocData();
-  memcpy(in_t->Data(), a_ptr, sizeof(float) * in_t->ElementsNum());
+  memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum());
   inputs_->push_back(in_t);
 
-  auto weight_t =
-    new lite::tensor::Tensor(kNumberTypeFloat, b_shape, schema::Format_NHWC, static_cast<schema::NodeType>(1));
+  auto weight_t = new lite::Tensor(kNumberTypeFloat, b_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
   weight_t->MallocData();
-  memcpy(weight_t->Data(), b_ptr, sizeof(float) * weight_t->ElementsNum());
+  memcpy(weight_t->MutableData(), b_ptr, sizeof(float) * weight_t->ElementsNum());
   inputs_->push_back(weight_t);
 
-  auto out_t =
-    new lite::tensor::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, static_cast<schema::NodeType>(1));
+  auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
   out_t->MallocData();
   outputs_->push_back(out_t);
 
   return out_t->ElementsNum();
 }
 
-int PowerTestInit2(std::vector<lite::tensor::Tensor *> *inputs_, std::vector<lite::tensor::Tensor *> *outputs_,
-                   float *a_ptr, std::vector<int> a_shape, std::vector<int> c_shape) {
-  auto in_t =
-    new lite::tensor::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, static_cast<schema::NodeType>(1));
+int PowerTestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_, float *a_ptr,
+                   std::vector<int> a_shape, std::vector<int> c_shape) {
+  auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
   in_t->MallocData();
-  memcpy(in_t->Data(), a_ptr, sizeof(float) * in_t->ElementsNum());
+  memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum());
   inputs_->push_back(in_t);
 
-  auto out_t =
-    new lite::tensor::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, static_cast<schema::NodeType>(1));
+  auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
   out_t->MallocData();
   outputs_->push_back(out_t);
 
@@ -65,8 +59,8 @@ int PowerTestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_, float *a_ptr,
 }
 
 TEST_F(TestPowerFp32, Simple) {
-  std::vector<lite::tensor::Tensor *> inputs_;
-  std::vector<lite::tensor::Tensor *> outputs_;
+  std::vector<lite::Tensor *> inputs_;
+  std::vector<lite::Tensor *> outputs_;
   auto param = new PowerParameter();
   param->scale_ = 1;
   param->shift_ = 0;
@@ -83,16 +77,16 @@ TEST_F(TestPowerFp32, Simple) {
   op->Init();
   op->Run();
   float correct[] = {1, 64, 2187, 65536};
-  float *output = reinterpret_cast<float *>(outputs_[0]->Data());
-  CompareOutputData(reinterpret_cast<float *>(outputs_[0]->Data()), correct, total_size, 0.0001);
+  float *output = reinterpret_cast<float *>(outputs_[0]->MutableData());
+  CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001);
   delete op;
   for (auto t : inputs_) delete t;
   for (auto t : outputs_) delete t;
 }
 
 TEST_F(TestPowerFp32, Broadcast) {
-  std::vector<lite::tensor::Tensor *> inputs_;
-  std::vector<lite::tensor::Tensor *> outputs_;
+  std::vector<lite::Tensor *> inputs_;
+  std::vector<lite::Tensor *> outputs_;
   auto param = new PowerParameter();
   param->power_ = 2;
   param->scale_ = 1;
@@ -108,7 +102,7 @@ TEST_F(TestPowerFp32, Broadcast) {
   op->Init();
   op->Run();
   float correct[] = {1, 4, 9, 16};
-  CompareOutputData(reinterpret_cast<float *>(outputs_[0]->Data()), correct, total_size, 0.0001);
+  CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001);
   delete op;
   for (auto t : inputs_) delete t;
   for (auto t : outputs_) delete t;
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_bilinear_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_bilinear_fp32_tests.cc
index c252afd199..e8235408a2 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_bilinear_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_bilinear_fp32_tests.cc
@@ -15,7 +15,7 @@
  */
 #include
 #include "mindspore/lite/src/lite_kernel.h"
-#include "mindspore/lite/src/ir/tensor.h"
+#include "mindspore/lite/src/tensor.h"
 #include "common/common_test.h"
 #include "nnacl/resize_parameter.h"
 #include "mindspore/lite/src/kernel_registry.h"
@@ -34,10 +34,10 @@ class TestResizeBilinearFp32 : public mindspore::CommonTest {
  public:
   float err_tol = 1e-5;
-  lite::tensor::Tensor in_tensor_;
-  lite::tensor::Tensor out_tensor_;
-  std::vector<lite::tensor::Tensor *> inputs_{&in_tensor_};
-  std::vector<lite::tensor::Tensor *> outputs_{&out_tensor_};
+  lite::Tensor in_tensor_;
+  lite::Tensor out_tensor_;
+  std::vector<lite::Tensor *> inputs_{&in_tensor_};
+  std::vector<lite::Tensor *> outputs_{&out_tensor_};
   ResizeParameter param_ = {{}};
   kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Resize};
   lite::Context ctx_ = lite::Context();
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_nearest_neighbor_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_nearest_neighbor_fp32_tests.cc
index b1fae684ed..2b6b983dcd 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_nearest_neighbor_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_nearest_neighbor_fp32_tests.cc
@@ -30,10 +30,10 @@ class TestResizeNearestNeighborFp32 : public mindspore::CommonTest {
  public:
   float err_tol = 1e-5;
-  lite::tensor::Tensor in_tensor_;
-  lite::tensor::Tensor out_tensor_;
-  std::vector<lite::tensor::Tensor *> inputs_{&in_tensor_};
-  std::vector<lite::tensor::Tensor *> outputs_{&out_tensor_};
+  lite::Tensor in_tensor_;
+  lite::Tensor out_tensor_;
+  std::vector<lite::Tensor *> inputs_{&in_tensor_};
+  std::vector<lite::Tensor *> outputs_{&out_tensor_};
   ResizeParameter param_ = {{}};
   kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Resize};
   lite::Context ctx_ = lite::Context();
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reverse_sequence_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reverse_sequence_fp32_tests.cc
index a948947f5c..3f2e4512af 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reverse_sequence_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reverse_sequence_fp32_tests.cc
@@ -27,9 +27,9 @@ class TestReverseSequenceFp32 : public mindspore::CommonTest {
 };
 
 TEST_F(TestReverseSequenceFp32, BatchLessSeq) {
-  lite::tensor::Tensor in_tensor0(kNumberTypeFloat32, {2, 3, 4, 2});
-  lite::tensor::Tensor in_tensor1(kNumberTypeInt32, {3});
-  lite::tensor::Tensor out_tensor(kNumberTypeFloat32, {2, 3, 4, 2});
+  lite::Tensor in_tensor0(kNumberTypeFloat32, {2, 3, 4, 2});
+  lite::Tensor in_tensor1(kNumberTypeInt32, {3});
+  lite::Tensor out_tensor(kNumberTypeFloat32, {2, 3, 4, 2});
   float input_data0[] = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15,
                          16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
                          32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47};
@@ -38,8 +38,8 @@ TEST_F(TestReverseSequenceFp32, BatchLessSeq) {
   in_tensor0.SetData(input_data0);
   in_tensor1.SetData(input_data1);
   out_tensor.SetData(output_data);
-  std::vector<lite::tensor::Tensor *> inputs = {&in_tensor0, &in_tensor1};
-  std::vector<lite::tensor::Tensor *> outputs = {&out_tensor};
+  std::vector<lite::Tensor *> inputs = {&in_tensor0, &in_tensor1};
+  std::vector<lite::Tensor *> outputs = {&out_tensor};
 
   ReverseSequenceParameter parameter = {0};
   parameter.batch_axis_ = 1;
@@ -70,9 +70,9 @@ TEST_F(TestReverseSequenceFp32, BatchLessSeq) {
 }
 
 TEST_F(TestReverseSequenceFp32, BatchGreaterSeq) {
-  lite::tensor::Tensor in_tensor0(kNumberTypeFloat32, {2, 3, 4, 2});
-  lite::tensor::Tensor in_tensor1(kNumberTypeInt32, {4});
-  lite::tensor::Tensor out_tensor(kNumberTypeFloat32, {2, 3, 4, 2});
+  lite::Tensor in_tensor0(kNumberTypeFloat32, {2, 3, 4, 2});
+  lite::Tensor in_tensor1(kNumberTypeInt32, {4});
+  lite::Tensor out_tensor(kNumberTypeFloat32, {2, 3, 4, 2});
   float input_data0[] = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15,
                          16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
                          32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47};
@@ -81,8 +81,8 @@ TEST_F(TestReverseSequenceFp32, BatchGreaterSeq) {
   in_tensor0.SetData(input_data0);
   in_tensor1.SetData(input_data1);
   out_tensor.SetData(output_data);
-  std::vector<lite::tensor::Tensor *> inputs = {&in_tensor0, &in_tensor1};
-  std::vector<lite::tensor::Tensor *> outputs = {&out_tensor};
+  std::vector<lite::Tensor *> inputs = {&in_tensor0, &in_tensor1};
+  std::vector<lite::Tensor *> outputs = {&out_tensor};
 
   ReverseSequenceParameter parameter = {0};
   parameter.batch_axis_ = 2;
@@ -113,9 +113,9 @@ TEST_F(TestReverseSequenceFp32, BatchGreaterSeq) {
 }
 
 TEST_F(TestReverseSequenceFp32, BatchSeqNotAdjacent) {
-  lite::tensor::Tensor in_tensor0(kNumberTypeFloat32, {2, 3, 4, 2});
-  lite::tensor::Tensor in_tensor1(kNumberTypeInt32, {2});
-  lite::tensor::Tensor out_tensor(kNumberTypeFloat32, {2, 3, 4, 2});
+  lite::Tensor in_tensor0(kNumberTypeFloat32, {2, 3, 4, 2});
+  lite::Tensor in_tensor1(kNumberTypeInt32, {2});
+  lite::Tensor out_tensor(kNumberTypeFloat32, {2, 3, 4, 2});
   float input_data0[] = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15,
                          16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
                          32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47};
@@ -124,8 +124,8 @@ TEST_F(TestReverseSequenceFp32, BatchSeqNotAdjacent) {
   in_tensor0.SetData(input_data0);
   in_tensor1.SetData(input_data1);
   out_tensor.SetData(output_data);
-  std::vector<lite::tensor::Tensor *> inputs = {&in_tensor0, &in_tensor1};
-  std::vector<lite::tensor::Tensor *> outputs = {&out_tensor};
+  std::vector<lite::Tensor *> inputs = {&in_tensor0, &in_tensor1};
+  std::vector<lite::Tensor *> outputs = {&out_tensor};
 
   ReverseSequenceParameter parameter = {0};
   parameter.batch_axis_ = 0;
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/roi_pooling_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/roi_pooling_fp32_tests.cc
index 44256f12c4..de7af7555f 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/roi_pooling_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/roi_pooling_fp32_tests.cc
@@ -25,23 +25,19 @@ class TestROIPoolingFp32 : public mindspore::CommonTest {
   TestROIPoolingFp32() {}
 };
 
-int ROIPoolingTestInit(std::vector<lite::tensor::Tensor *> *inputs_, std::vector<lite::tensor::Tensor *> *outputs_,
-                       float *a_ptr, float *b_ptr, std::vector<int> a_shape, std::vector<int> b_shape,
-                       std::vector<int> c_shape) {
-  auto in_t =
-    new lite::tensor::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, static_cast<schema::NodeType>(1));
+int ROIPoolingTestInit(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_, float *a_ptr,
+                       float *b_ptr, std::vector<int> a_shape, std::vector<int> b_shape, std::vector<int> c_shape) {
+  auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
   in_t->MallocData();
-  memcpy(in_t->Data(), a_ptr, sizeof(float) * in_t->ElementsNum());
+  memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum());
   inputs_->push_back(in_t);
 
-  auto roi_t =
-    new lite::tensor::Tensor(kNumberTypeFloat, b_shape, schema::Format_NHWC, static_cast<schema::NodeType>(1));
+  auto roi_t = new lite::Tensor(kNumberTypeFloat, b_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
   roi_t->MallocData();
-  memcpy(roi_t->Data(), b_ptr, sizeof(float) * roi_t->ElementsNum());
+  memcpy(roi_t->MutableData(), b_ptr, sizeof(float) * roi_t->ElementsNum());
   inputs_->push_back(roi_t);
 
-  auto out_t =
-    new lite::tensor::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, static_cast<schema::NodeType>(1));
+  auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
   out_t->MallocData();
   outputs_->push_back(out_t);
 
@@ -49,8 +45,8 @@ int ROIPoolingTestInit(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_, float *a_ptr,
 }
 
 TEST_F(TestROIPoolingFp32, Simple) {
-  std::vector<lite::tensor::Tensor *> inputs_;
-  std::vector<lite::tensor::Tensor *> outputs_;
+  std::vector<lite::Tensor *> inputs_;
+  std::vector<lite::Tensor *> outputs_;
   auto param = new ROIPoolingParameter();
   param->scale_ = 1;
   param->pooledW_ = 2;
@@ -69,10 +65,10 @@ TEST_F(TestROIPoolingFp32, Simple) {
   op->Init();
   op->Run();
   float correct[] = {25, 31, 34, 35, 25, 31, 34, 35};
-  float *output = reinterpret_cast<float *>(outputs_[0]->Data());
+  float *output = reinterpret_cast<float *>(outputs_[0]->MutableData());
   for (int i = 0; i < 8; ++i) printf("%f ", output[i]);
   printf("\n");
-  CompareOutputData(reinterpret_cast<float *>(outputs_[0]->Data()), correct, total_size, 0.0001);
+  CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001);
   delete op;
   for (auto t : inputs_) delete t;
   for (auto t : outputs_) delete t;
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_batch_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_batch_fp32_tests.cc
index 59eed62a15..dbacfd1d72 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_batch_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_batch_fp32_tests.cc
@@ -29,11 +29,9 @@ class SpaceToBatchTestFp32 : public mindspore::CommonTest {
 };
 
 TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest4) {
-  std::vector<float> input = {1, 2, 3, 4, 5, 6, 7, 8,
-                              9, 10, 11, 12, 13, 14, 15, 16};
+  std::vector<float> input = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
   const size_t kOutSize = 16;
-  std::vector<float> expect_out = {1, 2, 3, 4, 9, 10, 11, 12,
-                                   5, 6, 7, 8, 13, 14, 15, 16};
+  std::vector<float> expect_out = {1, 2, 3, 4, 9, 10, 11, 12, 5, 6, 7, 8, 13, 14, 15, 16};
   float out[kOutSize];
   std::vector<int> in_shape = {1, 4, 4, 1};
   std::vector<int> out_shape = {2, 2, 4, 1};
@@ -49,11 +47,9 @@ TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest4) {
 }
 
 TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest5) {
-  std::vector<float> input = {1, 2, 3, 4, 5, 6, 7, 8,
-                              9, 10, 11, 12, 13, 14, 15, 16};
+  std::vector<float> input = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
   size_t kOutSize = 16;
-  std::vector<float> expect_out = {1, 3, 5, 7, 9, 11, 13, 15,
-                                   2, 4, 6, 8, 10, 12, 14, 16};
+  std::vector<float> expect_out = {1, 3, 5, 7, 9, 11, 13, 15, 2, 4, 6, 8, 10, 12, 14, 16};
   float out[kOutSize];
   std::vector<int> in_shape = {1, 4, 4, 1};
   std::vector<int> out_shape = {2, 4, 2, 1};
@@ -69,11 +65,9 @@ TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest5) {
 }
 
 TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest6) {
-  std::vector<float>
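// Note on the pattern above: PowerTestInit and ROIPoolingTestInit now reduce to one
// idiom under the post-patch tensor API -- construct a lite::Tensor tagged
// Category::CONST, allocate with MallocData(), and fill through MutableData(), which
// replaces the removed Data() accessor. A minimal sketch (the helper name
// MakeConstFloatTensor is illustrative, not part of this patch):
//
//   lite::Tensor *MakeConstFloatTensor(const std::vector<int> &shape, const float *src) {
//     auto t = new lite::Tensor(kNumberTypeFloat, shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
//     t->MallocData();  // allocates ElementsNum() elements
//     memcpy(t->MutableData(), src, sizeof(float) * t->ElementsNum());
//     return t;  // caller owns it, hence the tests' `for (auto t : inputs_) delete t;` loops
//   }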
input = {1, 2, 3, 4, 5, 6, 7, 8, - 9, 10, 11, 12, 13, 14, 15, 16}; + std::vector input = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; size_t kOutSize = 16; - std::vector expect_out = {1, 3, 9, 11, 2, 4, 10, 12, - 5, 7, 13, 15, 6, 8, 14, 16}; + std::vector expect_out = {1, 3, 9, 11, 2, 4, 10, 12, 5, 7, 13, 15, 6, 8, 14, 16}; float out[kOutSize]; std::vector in_shape = {1, 4, 4, 1}; std::vector out_shape = {4, 2, 2, 1}; @@ -89,19 +83,13 @@ TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest6) { } TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest7) { - std::vector input = {1, 11, 2, 12, 3, 13, 4, 14, - 5, 15, 6, 16, 7, 17, 8, 18, - 9, 19, 10, 110, 11, 111, 12, 112, - 10, 11, 20, 12, 30, 13, 40, 14, - 50, 15, 60, 16, 70, 17, 80, 18, - 13, 113, 14, 114, 15, 115, 16, 116}; + std::vector input = {1, 11, 2, 12, 3, 13, 4, 14, 5, 15, 6, 16, 7, 17, 8, 18, + 9, 19, 10, 110, 11, 111, 12, 112, 10, 11, 20, 12, 30, 13, 40, 14, + 50, 15, 60, 16, 70, 17, 80, 18, 13, 113, 14, 114, 15, 115, 16, 116}; size_t kOutSize = 48; - std::vector expect_out = {1, 11, 3, 13, 9, 19, 11, 111, - 50, 15, 70, 17, 2, 12, 4, 14, - 10, 110, 12, 112, 60, 16, 80, 18, - 5, 15, 7, 17, 10, 11, 30, 13, - 13, 113, 15, 115, 6, 16, 8, 18, - 20, 12, 40, 14, 14, 114, 16, 116}; + std::vector expect_out = {1, 11, 3, 13, 9, 19, 11, 111, 50, 15, 70, 17, 2, 12, 4, 14, + 10, 110, 12, 112, 60, 16, 80, 18, 5, 15, 7, 17, 10, 11, 30, 13, + 13, 113, 15, 115, 6, 16, 8, 18, 20, 12, 40, 14, 14, 114, 16, 116}; float out[kOutSize]; std::vector in_shape = {1, 6, 4, 2}; std::vector out_shape = {4, 3, 2, 2}; @@ -117,11 +105,11 @@ TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest7) { } TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest8) { - std::vector input = {1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 7, -7, 8, -8, + std::vector input = {1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 7, -7, 8, -8, 9, -9, 10, -10, 11, -11, 12, -12, 13, -13, 14, -14, 15, -15, 16, -16}; - std::vector expect_out = {1, -1, 2, -2, 3, -3, 4, -4, 0, 0, 5, -5, 6, -6, 7, -7, 8, -8, 0, 0, - 9, -9, 10, -10, 11, -11, 12, -12, 0, 0, 13, -13, 14, -14, 15, -15, 16, -16, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + std::vector expect_out = {1, -1, 2, -2, 3, -3, 4, -4, 0, 0, 5, -5, 6, -6, 7, -7, 8, + -8, 0, 0, 9, -9, 10, -10, 11, -11, 12, -12, 0, 0, 13, -13, 14, -14, + 15, -15, 16, -16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; size_t kOutSize = 50; float out[kOutSize]; std::vector in_shape = {1, 4, 4, 2}; @@ -139,14 +127,12 @@ TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest8) { } TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest9) { - std::vector input = {1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 7, -7, 8, -8, + std::vector input = {1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 7, -7, 8, -8, 9, -9, 10, -10, 11, -11, 12, -12, 13, -13, 14, -14, 15, -15, 16, -16}; - std::vector expect_out = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, -1, 2, -2, 3, -3, 4, -4, 0, 0, - 0, 0, 5, -5, 6, -6, 7, -7, 8, -8, 0, 0, - 0, 0, 9, -9, 10, -10, 11, -11, 12, -12, 0, 0, - 0, 0, 13, -13, 14, -14, 15, -15, 16, -16, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + std::vector expect_out = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 2, -2, + 3, -3, 4, -4, 0, 0, 0, 0, 5, -5, 6, -6, 7, -7, 8, -8, 0, 0, + 0, 0, 9, -9, 10, -10, 11, -11, 12, -12, 0, 0, 0, 0, 13, -13, 14, -14, + 15, -15, 16, -16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; size_t kOutSize = 72; float out[kOutSize]; std::vector in_shape = {1, 4, 4, 2}; @@ -164,25 +150,17 @@ TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest9) { } TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest10) { - 
std::vector input = {1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 7, -7, 8, -8, + std::vector input = {1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 7, -7, 8, -8, 9, -9, 10, -10, 11, -11, 12, -12, 13, -13, 14, -14, 15, -15, 16, -16}; - std::vector expect_out = {0, 0, 0, 0, 0, 0, - 0, 0, 6, -6, 8, -8, - 0, 0, 14, -14, 16, -16, - 0, 0, 0, 0, 0, 0, - 5, -5, 7, -7, 0, 0, - 13, -13, 15, -15, 0, 0, - 0, 0, 2, -2, 4, -4, - 0, 0, 10, -10, 12, -12, - 0, 0, 0, 0, 0, 0, - 1, -1, 3, -3, 0, 0, - 9, -9, 11, -11, 0, 0, - 0, 0, 0, 0, 0, 0}; + std::vector expect_out = {0, 0, 0, 0, 0, 0, 0, 0, 6, -6, 8, -8, 0, 0, 14, -14, 16, -16, + 0, 0, 0, 0, 0, 0, 5, -5, 7, -7, 0, 0, 13, -13, 15, -15, 0, 0, + 0, 0, 2, -2, 4, -4, 0, 0, 10, -10, 12, -12, 0, 0, 0, 0, 0, 0, + 1, -1, 3, -3, 0, 0, 9, -9, 11, -11, 0, 0, 0, 0, 0, 0, 0, 0}; size_t kOutSize = 72; float out[kOutSize]; float pedding_out[kOutSize]; std::vector in_shape = {1, 4, 4, 2}; - std::vector pedding_out_shape = {1, 6, 6, 2};; + std::vector pedding_out_shape = {1, 6, 6, 2}; std::vector out_shape = {4, 3, 3, 2}; std::vector padding = {1, 1, 1, 1}; std::vector pedding_h(12, 0); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc index 0444d8bd3a..bf8afb1157 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc @@ -50,24 +50,24 @@ TEST_F(SpaceToDepthTestFp32, SpaceToDepthTest1) { TEST_F(SpaceToDepthTestFp32, SpaceToDepthTest2) { std::vector input = {1, 2, 5, 6, 10, 20, 3, 8, 18, 10, 3, 4, 11, 55, 15, 25}; std::vector in_shape = {1, 4, 4, 1}; - lite::tensor::Tensor input_tensor; + lite::Tensor input_tensor; input_tensor.SetData(input.data()); input_tensor.set_shape(in_shape); input_tensor.SetFormat(schema::Format_NHWC); input_tensor.set_data_type(kNumberTypeFloat32); - std::vector inputs_tensor; + std::vector inputs_tensor; inputs_tensor.push_back(&input_tensor); const int out_size = 16; float expect_out[16] = {1, 2, 10, 20, 5, 6, 3, 8, 18, 10, 11, 55, 3, 4, 15, 25}; std::vector output(16); std::vector out_shape = {1, 2, 2, 4}; - lite::tensor::Tensor output_tensor; + lite::Tensor output_tensor; output_tensor.SetData(output.data()); output_tensor.set_shape(out_shape); output_tensor.SetFormat(schema::Format_NHWC); output_tensor.set_data_type(kNumberTypeFloat32); - std::vector outputs_tensor; + std::vector outputs_tensor; outputs_tensor.push_back(&output_tensor); SpaceToDepthParameter op_param; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/tile_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/tile_fp32_tests.cc index f8985a6ec1..5582a8b560 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/tile_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/tile_fp32_tests.cc @@ -27,14 +27,14 @@ class TestTileFp32 : public mindspore::CommonTest { }; TEST_F(TestTileFp32, Tile) { - lite::tensor::Tensor in_tensor(kNumberTypeFloat32, {2, 2}); - lite::tensor::Tensor out_tensor(kNumberTypeFloat32, {4, 6}); + lite::Tensor in_tensor(kNumberTypeFloat32, {2, 2}); + lite::Tensor out_tensor(kNumberTypeFloat32, {4, 6}); float input_data[] = {1, 2, 3, 4}; float output_data[24] = {0}; in_tensor.SetData(input_data); out_tensor.SetData(output_data); - std::vector inputs = {&in_tensor}; - std::vector outputs = {&out_tensor}; + std::vector inputs = {&in_tensor}; + std::vector outputs = 
{&out_tensor}; TileParameter parameter = {0}; parameter.in_dim_ = 2; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/topk_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/topk_fp32_tests.cc index 61123741e5..4bc5417e49 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/topk_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/topk_fp32_tests.cc @@ -27,17 +27,17 @@ class TestTopKFp32 : public mindspore::CommonTest { }; TEST_F(TestTopKFp32, TopK) { - lite::tensor::Tensor in_tensor(kNumberTypeFloat32, {2, 2, 3}); - lite::tensor::Tensor out_tensor0(kNumberTypeFloat32, {2, 2, 2}); - lite::tensor::Tensor out_tensor1(kNumberTypeInt32, {2, 2, 2}); - float input_data[] = {1, 2, 3, 6, 5, 4, 9, 8, 7, 10, 12, 11}; + lite::Tensor in_tensor(kNumberTypeFloat32, {2, 2, 3}); + lite::Tensor out_tensor0(kNumberTypeFloat32, {2, 2, 2}); + lite::Tensor out_tensor1(kNumberTypeInt32, {2, 2, 2}); + float input_data[] = {1, 2, 3, 6, 5, 4, 9, 8, 7, 10, 12, 11}; float output_data0[8] = {0}; int32_t output_data1[8] = {0}; in_tensor.SetData(input_data); out_tensor0.SetData(output_data0); out_tensor1.SetData(output_data1); - std::vector inputs = {&in_tensor}; - std::vector outputs = {&out_tensor0, &out_tensor1}; + std::vector inputs = {&in_tensor}; + std::vector outputs = {&out_tensor0, &out_tensor1}; TopkParameter parameter = {{}, 3, 4, 2, true}; kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_TopK}; @@ -52,8 +52,8 @@ TEST_F(TestTopKFp32, TopK) { auto ret = kernel->Run(); EXPECT_EQ(0, ret); - float expect0[] = {3, 2, 6, 5, 9, 8, 12, 11}; - int32_t expect1[] = {2, 1, 0, 1, 0, 1, 1, 2}; + float expect0[] = {3, 2, 6, 5, 9, 8, 12, 11}; + int32_t expect1[] = {2, 1, 0, 1, 0, 1, 1, 2}; for (int i = 0; i < 8; ++i) { EXPECT_EQ(output_data0[i], expect0[i]); EXPECT_EQ(output_data1[i], expect1[i]); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/transpose_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/transpose_fp32_tests.cc index 5af17aaa93..b5f5357c59 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/transpose_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/transpose_fp32_tests.cc @@ -182,20 +182,20 @@ TEST_F(TestTransposeFp32, TransposeFp32_test5) { param.out_strides_[i] = out_strides[i]; } - lite::tensor::Tensor input_tensor; + lite::Tensor input_tensor; input_tensor.SetData(input.data()); input_tensor.set_shape(input_shape); input_tensor.SetFormat(schema::Format_NHWC); input_tensor.set_data_type(kNumberTypeFloat32); - std::vector inputs_tensor; + std::vector inputs_tensor; inputs_tensor.emplace_back(&input_tensor); - lite::tensor::Tensor output_tensor; + lite::Tensor output_tensor; output_tensor.SetData(output.data()); output_tensor.set_shape(output_shape); output_tensor.SetFormat(schema::Format_NHWC); output_tensor.set_data_type(kNumberTypeFloat32); - std::vector outputs_tensor; + std::vector outputs_tensor; outputs_tensor.emplace_back(&output_tensor); lite::Context ctx; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unique_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unique_fp32_tests.cc index 420912e578..3125d35b0d 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unique_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unique_fp32_tests.cc @@ -27,17 +27,17 @@ class TestUniqueFp32 : public mindspore::CommonTest { }; TEST_F(TestUniqueFp32, Unique) { - 
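// Each of these tests resolves its kernel the same way: build a kernel::KernelKey,
// fetch the creator from the KernelRegistry, instantiate, and Run(). A sketch of
// that flow -- the creator argument list here is an assumption pieced together from
// the surrounding call sites, not a verbatim signature:
//
//   kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_TopK};
//   auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
//   auto *kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), &ctx, desc, nullptr);
//   auto ret = kernel->Run();  // the tests then EXPECT_EQ(0, ret)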
lite::tensor::Tensor in_tensor(kNumberTypeFloat32, {9}); - lite::tensor::Tensor out_tensor0(kNumberTypeFloat32, {9}); - lite::tensor::Tensor out_tensor1(kNumberTypeInt32, {9}); + lite::Tensor in_tensor(kNumberTypeFloat32, {9}); + lite::Tensor out_tensor0(kNumberTypeFloat32, {9}); + lite::Tensor out_tensor1(kNumberTypeInt32, {9}); float input_data[] = {1, 1, 2, 4, 4, 4, 7, 8, 8}; float output_data0[9] = {0}; int output_data1[9] = {0}; in_tensor.SetData(input_data); out_tensor0.SetData(output_data0); out_tensor1.SetData(output_data1); - std::vector inputs = {&in_tensor}; - std::vector outputs = {&out_tensor0, &out_tensor1}; + std::vector inputs = {&in_tensor}; + std::vector outputs = {&out_tensor0, &out_tensor1}; OpParameter parameter = {0}; kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Unique}; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unstack_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unstack_fp32_tests.cc index f9d7fb83db..26dc00f64a 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unstack_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unstack_fp32_tests.cc @@ -27,11 +27,11 @@ class TestUnstackFp32 : public mindspore::CommonTest { }; TEST_F(TestUnstackFp32, Unstack) { - lite::tensor::Tensor in_tensor(kNumberTypeFloat32, {3, 4, 2}); - lite::tensor::Tensor out_tensor0(kNumberTypeFloat32, {3, 2}); - lite::tensor::Tensor out_tensor1(kNumberTypeFloat32, {3, 2}); - lite::tensor::Tensor out_tensor2(kNumberTypeFloat32, {3, 2}); - lite::tensor::Tensor out_tensor3(kNumberTypeFloat32, {3, 2}); + lite::Tensor in_tensor(kNumberTypeFloat32, {3, 4, 2}); + lite::Tensor out_tensor0(kNumberTypeFloat32, {3, 2}); + lite::Tensor out_tensor1(kNumberTypeFloat32, {3, 2}); + lite::Tensor out_tensor2(kNumberTypeFloat32, {3, 2}); + lite::Tensor out_tensor3(kNumberTypeFloat32, {3, 2}); float input_data[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}; float output_data0[6] = {0}; float output_data1[6] = {0}; @@ -42,8 +42,8 @@ TEST_F(TestUnstackFp32, Unstack) { out_tensor1.SetData(output_data1); out_tensor2.SetData(output_data2); out_tensor3.SetData(output_data3); - std::vector inputs = {&in_tensor}; - std::vector outputs = {&out_tensor0, &out_tensor1, &out_tensor2, &out_tensor3}; + std::vector inputs = {&in_tensor}; + std::vector outputs = {&out_tensor0, &out_tensor1, &out_tensor2, &out_tensor3}; UnstackParameter parameter = {{}, 4, -2, 3, 4, 2}; kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Unstack}; @@ -77,10 +77,10 @@ TEST_F(TestUnstackFp32, Unstack) { } TEST_F(TestUnstackFp32, Unstack2) { - lite::tensor::Tensor in_tensor(kNumberTypeFloat32, {3, 4, 2}); - lite::tensor::Tensor out_tensor0(kNumberTypeFloat32, {4, 2}); - lite::tensor::Tensor out_tensor1(kNumberTypeFloat32, {4, 2}); - lite::tensor::Tensor out_tensor2(kNumberTypeFloat32, {4, 2}); + lite::Tensor in_tensor(kNumberTypeFloat32, {3, 4, 2}); + lite::Tensor out_tensor0(kNumberTypeFloat32, {4, 2}); + lite::Tensor out_tensor1(kNumberTypeFloat32, {4, 2}); + lite::Tensor out_tensor2(kNumberTypeFloat32, {4, 2}); float input_data[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}; float output_data0[8] = {0}; float output_data1[8] = {0}; @@ -89,8 +89,8 @@ TEST_F(TestUnstackFp32, Unstack2) { out_tensor0.SetData(output_data0); out_tensor1.SetData(output_data1); out_tensor2.SetData(output_data2); - 
std::vector inputs = {&in_tensor}; - std::vector outputs = {&out_tensor0, &out_tensor1, &out_tensor2}; + std::vector inputs = {&in_tensor}; + std::vector outputs = {&out_tensor0, &out_tensor1, &out_tensor2}; UnstackParameter parameter = {{}, 3, 0, 1, 3, 8}; kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Unstack}; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/activation_grad_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/activation_grad_fp32_tests.cc index 65a885b095..5f8a6fe6ed 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/activation_grad_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/activation_grad_fp32_tests.cc @@ -23,7 +23,7 @@ #include "src/common/file_utils.h" #include "src/common/file_utils_ext.h" #include "mindspore/lite/src/kernel_registry.h" -#include "mindspore/lite/src/ir/tensor.h" +#include "mindspore/lite/src/tensor.h" #include "mindspore/lite/src/lite_kernel.h" #include "mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.h" #include "nnacl/fp32_grad/activation_grad.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/bias_grad_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/bias_grad_fp32_tests.cc index 241ebdfcdd..f6d595ad5b 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/bias_grad_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/bias_grad_fp32_tests.cc @@ -36,15 +36,15 @@ TEST_F(TestBiasGradFp32, BiasGradFp32) { std::string input_path = "./test_data/operators/biasgradfp32_1_dy_10_28_28_7.bin"; auto input_data = reinterpret_cast(mindspore::lite::ReadFile(input_path.c_str(), &input_size)); std::vector dim_dy({10, 28, 28, 7}); - lite::tensor::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy); + lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy); dy_tensor.SetData(input_data); - std::vector inputs = {&dy_tensor}; + std::vector inputs = {&dy_tensor}; auto output_data = new float[7]; std::vector dim_dw = {7}; - lite::tensor::Tensor dw_tensor(TypeId::kNumberTypeFloat32, dim_dw); + lite::Tensor dw_tensor(TypeId::kNumberTypeFloat32, dim_dw); dw_tensor.SetData(output_data); - std::vector outputs = {&dw_tensor}; + std::vector outputs = {&dw_tensor}; kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_BiasGrad}; @@ -61,7 +61,7 @@ TEST_F(TestBiasGradFp32, BiasGradFp32) { std::string output_path = "./test_data/operators/biasgradfp32_1_db_7.bin"; lite::CompareOutput(output_data, output_path); - delete [] input_data; + delete[] input_data; delete[] output_data; // delete bias_param; dy_tensor.SetData(nullptr); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/bn_grad_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/bn_grad_fp32_test.cc index 7a29177034..83fb12095f 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/bn_grad_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/bn_grad_fp32_test.cc @@ -29,13 +29,13 @@ namespace mindspore { class TestBNGradFp32 : public mindspore::CommonTest { public: TestBNGradFp32() {} - lite::tensor::Tensor *CreateInTensor(std::string file_name, std::vector dim); + lite::Tensor *CreateInTensor(std::string file_name, std::vector dim); }; -lite::tensor::Tensor *TestBNGradFp32::CreateInTensor(std::string file_name, std::vector dim) { +lite::Tensor 
*TestBNGradFp32::CreateInTensor(std::string file_name, std::vector dim) { size_t input_size = 0; auto input_data = reinterpret_cast(mindspore::lite::ReadFile(file_name.c_str(), &input_size)); - auto tensor = new lite::tensor::Tensor(TypeId::kNumberTypeFloat32, dim); + auto tensor = new lite::Tensor(TypeId::kNumberTypeFloat32, dim); tensor->SetData(input_data); EXPECT_EQ(input_size, tensor->Size()); return tensor; @@ -57,15 +57,15 @@ TEST_F(TestBNGradFp32, BNGradFp32) { auto mean_tensor = CreateInTensor("./test_data/bngrad/save_mean_3.bin", {1, 1, 1, channels}); auto var_tensor = CreateInTensor("././test_data/bngrad/save_var_3.bin", {1, 1, 1, channels}); // prepare output tensors - lite::tensor::Tensor dx_tensor(TypeId::kNumberTypeFloat32, {batch, height, width, channels}); + lite::Tensor dx_tensor(TypeId::kNumberTypeFloat32, {batch, height, width, channels}); dx_tensor.MallocData(); - lite::tensor::Tensor dscale_tensor(TypeId::kNumberTypeFloat32, {1, 1, 1, channels}); + lite::Tensor dscale_tensor(TypeId::kNumberTypeFloat32, {1, 1, 1, channels}); dscale_tensor.MallocData(); - lite::tensor::Tensor dbias_tensor(TypeId::kNumberTypeFloat32, {1, 1, 1, channels}); + lite::Tensor dbias_tensor(TypeId::kNumberTypeFloat32, {1, 1, 1, channels}); dbias_tensor.MallocData(); - std::vector inputs = {dy_tensor, x_tensor, scale_tensor, mean_tensor, var_tensor}; - std::vector outputs = {&dx_tensor, &dscale_tensor, &dbias_tensor}; + std::vector inputs = {dy_tensor, x_tensor, scale_tensor, mean_tensor, var_tensor}; + std::vector outputs = {&dx_tensor, &dscale_tensor, &dbias_tensor}; kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_BNGrad}; @@ -86,22 +86,22 @@ TEST_F(TestBNGradFp32, BNGradFp32) { auto time_avg = cost / loop_count; std::cout << "single thread running time : " << time_avg << "us\n"; std::cout << "==========dx==========\n"; - auto dx = reinterpret_cast(outputs[0]->Data()); + auto dx = reinterpret_cast(outputs[0]->MutableData()); for (int i = 0; i < 7; i++) std::cout << dx[i] << " "; std::cout << "\n=======dscale=======\n"; - auto dscale = reinterpret_cast(outputs[1]->Data()); + auto dscale = reinterpret_cast(outputs[1]->MutableData()); for (int i = 0; i < channels; i++) std::cout << dscale[i] << " "; std::cout << "\n"; int res = mindspore::lite::CompareRelativeOutput(dscale, "./test_data/bngrad/output_dscale_3.bin"); EXPECT_EQ(res, 0); std::cout << "==========dbias==========\n"; - auto dbias = reinterpret_cast(outputs[2]->Data()); + auto dbias = reinterpret_cast(outputs[2]->MutableData()); for (int i = 0; i < 3; i++) std::cout << dbias[i] << " "; std::cout << "\n"; res = mindspore::lite::CompareRelativeOutput(dscale, "./test_data/bngrad/output_dscale_3.bin"); EXPECT_EQ(res, 0); for (auto v : inputs) { - delete[] reinterpret_cast(v->Data()); + delete[] reinterpret_cast(v->MutableData()); v->SetData(nullptr); // delete v; } diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/convolution_grad_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/convolution_grad_fp32_tests.cc index 81c8abe2e8..64337817ef 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/convolution_grad_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/convolution_grad_fp32_tests.cc @@ -28,7 +28,7 @@ #include "mindspore/lite/src/kernel_registry.h" namespace mindspore { -class TestConvolutionGradFp32 : public mindspore::CommonTest { +class TestConvolutionGradFp32 : public mindspore::CommonTest { public: 
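// Ownership discipline worth noting in the grad tests above: SetData() lends the
// tensor a caller-owned buffer, so each test detaches with SetData(nullptr) before
// the tensor is destroyed and frees the buffer itself. A behavioural sketch
// inferred from the usage in this patch (not from the Tensor class body):
//
//   auto data = new float[7];
//   lite::Tensor dw_tensor(TypeId::kNumberTypeFloat32, {7});
//   dw_tensor.SetData(data);     // tensor borrows the buffer; it did not allocate it
//   /* ... create kernel, Run(), compare output ... */
//   dw_tensor.SetData(nullptr);  // detach so the destructor does not free it
//   delete[] data;               // the test, not the tensor, releases the memory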
TestConvolutionGradFp32() {} }; @@ -84,7 +84,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32FilterGrad) { std::string dy_path = "./test_data/conv/convfp32_dy_1_28_28_32.bin"; auto dy_data = reinterpret_cast(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size)); std::vector dim_dy({1, 28, 28, 32}); - lite::tensor::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy); + lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy); dy_tensor.SetData(dy_data); // runtime part @@ -97,15 +97,15 @@ TEST_F(TestConvolutionGradFp32, ConvFp32FilterGrad) { std::string input_path = "./test_data/conv/convfp32_x_1_28_28_3.bin"; auto input_data = reinterpret_cast(mindspore::lite::ReadFile(input_path.c_str(), &input_size)); std::vector dim_x({1, 28, 28, 3}); - lite::tensor::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x); + lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x); x_tensor.SetData(input_data); auto dw_data = new float[output_data_size]; std::vector dim_dw({32, 3, 3, 3}); - lite::tensor::Tensor dw_tensor(TypeId::kNumberTypeFloat32, dim_dw); + lite::Tensor dw_tensor(TypeId::kNumberTypeFloat32, dim_dw); dw_tensor.SetData(dw_data); - std::vector inputs = {&dy_tensor, &x_tensor}; - std::vector outputs = {&dw_tensor}; + std::vector inputs = {&dy_tensor, &x_tensor}; + std::vector outputs = {&dw_tensor}; kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_Conv2DGradFilter}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); @@ -131,9 +131,9 @@ TEST_F(TestConvolutionGradFp32, ConvFp32FilterGrad) { EXPECT_EQ(res, 0); - delete [] input_data; - delete [] dy_data; - delete [] dw_data; + delete[] input_data; + delete[] dy_data; + delete[] dw_data; delete kernel; // delete conv_param; dw_tensor.SetData(nullptr); @@ -151,25 +151,25 @@ TEST_F(TestConvolutionGradFp32, ConvFp32InputGrad) { std::string dy_path = "./test_data/conv/convfp32_dy_1_28_28_32.bin"; auto dy_data = reinterpret_cast(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size)); std::vector dim_dy({1, 28, 28, 32}); - lite::tensor::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy); + lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy); dy_tensor.SetData(dy_data); size_t w_size; std::string w_path = "./test_data/conv/convfp32_w_32_3_3_3.bin"; auto w_data = reinterpret_cast(mindspore::lite::ReadFile(w_path.c_str(), &w_size)); std::vector dim_dw({32, 3, 3, 3}); - lite::tensor::Tensor w_tensor(TypeId::kNumberTypeFloat32, dim_dw); + lite::Tensor w_tensor(TypeId::kNumberTypeFloat32, dim_dw); w_tensor.SetData(w_data); size_t output_data_size = conv_param->input_batch_ * conv_param->input_h_ * conv_param->input_w_ * conv_param->input_channel_; auto dx_data = new float[output_data_size]; std::vector dim_dx({1, 28, 28, 3}); - lite::tensor::Tensor dx_tensor(TypeId::kNumberTypeFloat32, dim_dx); + lite::Tensor dx_tensor(TypeId::kNumberTypeFloat32, dim_dx); dx_tensor.SetData(dx_data); - std::vector inputs = {&dy_tensor, &w_tensor}; - std::vector outputs = {&dx_tensor}; + std::vector inputs = {&dy_tensor, &w_tensor}; + std::vector outputs = {&dx_tensor}; // runtime part printf("Calculating runtime cost...\n"); @@ -197,9 +197,9 @@ TEST_F(TestConvolutionGradFp32, ConvFp32InputGrad) { std::string output_path = "./test_data/conv/convfp32_dx_1_28_28_3.bin"; auto res = lite::CompareRelativeOutput(dx_data, output_path); EXPECT_EQ(res, 0); - delete [] dx_data; - delete [] w_data; - delete [] dy_data; + delete[] dx_data; + delete[] w_data; + delete[] dy_data; w_tensor.SetData(nullptr); 
dy_tensor.SetData(nullptr); dx_tensor.SetData(nullptr); @@ -218,7 +218,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupFilterGrad) { std::string dy_path = "./test_data/conv/convfp32_dy_g3_1_28_28_18.bin"; auto dy_data = reinterpret_cast(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size)); std::vector dim_dy({1, 28, 28, 18}); - lite::tensor::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy); + lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy); dy_tensor.SetData(dy_data); // runtime part @@ -231,15 +231,15 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupFilterGrad) { std::string input_path = "./test_data/conv/convfp32_x_g3_1_28_28_3.bin"; auto input_data = reinterpret_cast(mindspore::lite::ReadFile(input_path.c_str(), &input_size)); std::vector dim_x({1, 28, 28, 3}); - lite::tensor::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x); + lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x); x_tensor.SetData(input_data); auto dw_data = new float[output_data_size]; std::vector dim_dw({18, 3, 3, 1}); - lite::tensor::Tensor dw_tensor(TypeId::kNumberTypeFloat32, dim_dw); + lite::Tensor dw_tensor(TypeId::kNumberTypeFloat32, dim_dw); dw_tensor.SetData(dw_data); - std::vector inputs = {&dy_tensor, &x_tensor}; - std::vector outputs = {&dw_tensor}; + std::vector inputs = {&dy_tensor, &x_tensor}; + std::vector outputs = {&dw_tensor}; kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_Conv2DGradFilter}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); @@ -264,9 +264,9 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupFilterGrad) { auto res = lite::CompareRelativeOutput(dw_data, output_path); EXPECT_EQ(res, 0); - delete [] input_data; - delete [] dy_data; - delete [] dw_data; + delete[] input_data; + delete[] dy_data; + delete[] dw_data; dw_tensor.SetData(nullptr); x_tensor.SetData(nullptr); dy_tensor.SetData(nullptr); @@ -284,25 +284,25 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupInputGrad) { std::string dy_path = "./test_data/conv/convfp32_dy_g3_1_28_28_18.bin"; auto dy_data = reinterpret_cast(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size)); std::vector dim_dy({1, 28, 28, 18}); - lite::tensor::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy); + lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy); dy_tensor.SetData(dy_data); size_t w_size; std::string w_path = "./test_data/conv/convfp32_w_g3_18_3_3_3.bin"; auto w_data = reinterpret_cast(mindspore::lite::ReadFile(w_path.c_str(), &w_size)); std::vector dim_dw({18, 3, 3, 1}); - lite::tensor::Tensor w_tensor(TypeId::kNumberTypeFloat32, dim_dw); + lite::Tensor w_tensor(TypeId::kNumberTypeFloat32, dim_dw); w_tensor.SetData(w_data); size_t output_data_size = conv_param->input_batch_ * conv_param->input_h_ * conv_param->input_w_ * conv_param->input_channel_; auto dx_data = new float[output_data_size]; std::vector dim_dx({1, 28, 28, 3}); - lite::tensor::Tensor dx_tensor(TypeId::kNumberTypeFloat32, dim_dx); + lite::Tensor dx_tensor(TypeId::kNumberTypeFloat32, dim_dx); dx_tensor.SetData(dx_data); - std::vector inputs = {&dy_tensor, &w_tensor}; - std::vector outputs = {&dx_tensor}; + std::vector inputs = {&dy_tensor, &w_tensor}; + std::vector outputs = {&dx_tensor}; // runtime part printf("Calculating runtime cost...\n"); @@ -330,9 +330,9 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupInputGrad) { std::string output_path = "./test_data/conv/convfp32_dx_g3_1_28_28_3.bin"; auto res = lite::CompareRelativeOutput(dx_data, output_path); EXPECT_EQ(res, 0); - delete [] 
dx_data; - delete [] w_data; - delete [] dy_data; + delete[] dx_data; + delete[] w_data; + delete[] dy_data; dx_tensor.SetData(nullptr); w_tensor.SetData(nullptr); dy_tensor.SetData(nullptr); @@ -352,7 +352,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupDilationFilterGrad) { std::string dy_path = "./test_data/conv/convfp32_dy_g3_d2_1_26_26_18.bin"; auto dy_data = reinterpret_cast(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size)); std::vector dim_dy({1, 26, 26, 18}); - lite::tensor::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy); + lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy); dy_tensor.SetData(dy_data); // runtime part @@ -365,15 +365,15 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupDilationFilterGrad) { std::string input_path = "./test_data/conv/convfp32_x_g3_d2_1_28_28_3.bin"; auto input_data = reinterpret_cast(mindspore::lite::ReadFile(input_path.c_str(), &input_size)); std::vector dim_x({1, 28, 28, 3}); - lite::tensor::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x); + lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x); x_tensor.SetData(input_data); auto dw_data = new float[output_data_size]; std::vector dim_dw({18, 3, 3, 1}); - lite::tensor::Tensor dw_tensor(TypeId::kNumberTypeFloat32, dim_dw); + lite::Tensor dw_tensor(TypeId::kNumberTypeFloat32, dim_dw); dw_tensor.SetData(dw_data); - std::vector inputs = {&dy_tensor, &x_tensor}; - std::vector outputs = {&dw_tensor}; + std::vector inputs = {&dy_tensor, &x_tensor}; + std::vector outputs = {&dw_tensor}; kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_Conv2DGradFilter}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); @@ -397,9 +397,9 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupDilationFilterGrad) { std::string output_path = "./test_data/conv/convfp32_dw_g3_d2_18_3_3_3.bin"; auto res = lite::CompareRelativeOutput(dw_data, output_path); EXPECT_EQ(res, 0); - delete [] input_data; - delete [] dy_data; - delete [] dw_data; + delete[] input_data; + delete[] dy_data; + delete[] dw_data; dw_tensor.SetData(nullptr); dy_tensor.SetData(nullptr); x_tensor.SetData(nullptr); @@ -417,25 +417,25 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupDilationInputGrad) { std::string dy_path = "./test_data/conv/convfp32_dy_g3_d2_1_26_26_18.bin"; auto dy_data = reinterpret_cast(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size)); std::vector dim_dy({1, 26, 26, 18}); - lite::tensor::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy); + lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy); dy_tensor.SetData(dy_data); size_t w_size; std::string w_path = "./test_data/conv/convfp32_w_g3_d2_18_3_3_3.bin"; auto w_data = reinterpret_cast(mindspore::lite::ReadFile(w_path.c_str(), &w_size)); std::vector dim_w({18, 3, 3, 1}); - lite::tensor::Tensor w_tensor(TypeId::kNumberTypeFloat32, dim_w); + lite::Tensor w_tensor(TypeId::kNumberTypeFloat32, dim_w); w_tensor.SetData(w_data); size_t output_data_size = conv_param->input_batch_ * conv_param->input_h_ * conv_param->input_w_ * conv_param->input_channel_; auto dx_data = new float[output_data_size]; std::vector dim_dx({1, 28, 28, 3}); - lite::tensor::Tensor dx_tensor(TypeId::kNumberTypeFloat32, dim_dx); + lite::Tensor dx_tensor(TypeId::kNumberTypeFloat32, dim_dx); dx_tensor.SetData(dx_data); - std::vector inputs = {&dy_tensor, &w_tensor}; - std::vector outputs = {&dx_tensor}; + std::vector inputs = {&dy_tensor, &w_tensor}; + std::vector outputs = {&dx_tensor}; // runtime part printf("Calculating runtime 
cost...\n"); @@ -463,9 +463,9 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupDilationInputGrad) { std::string output_path = "./test_data/conv/convfp32_dx_g3_d2_1_28_28_3.bin"; auto res = lite::CompareRelativeOutput(dx_data, output_path); EXPECT_EQ(res, 0); - delete [] dx_data; - delete [] w_data; - delete [] dy_data; + delete[] dx_data; + delete[] w_data; + delete[] dy_data; dx_tensor.SetData(nullptr); dy_tensor.SetData(nullptr); w_tensor.SetData(nullptr); @@ -483,37 +483,36 @@ TEST_F(TestConvolutionGradFp32, ConvGroupDilation) { std::string x_path = "./test_data/conv/convfp32_x_g3_d2_1_28_28_3.bin"; auto x_data = reinterpret_cast(mindspore::lite::ReadFile(x_path.c_str(), &x_size)); std::vector dim_x({1, 28, 28, 3}); - lite::tensor::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x); + lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x); x_tensor.SetData(x_data); size_t w_size; std::string w_path = "./test_data/conv/convfp32_w_g3_d2_18_3_3_3.bin"; auto w_data = reinterpret_cast(mindspore::lite::ReadFile(w_path.c_str(), &w_size)); std::vector dim_w({18, 3, 3, 1}); - lite::tensor::Tensor w_tensor(TypeId::kNumberTypeFloat32, dim_w); + lite::Tensor w_tensor(TypeId::kNumberTypeFloat32, dim_w); w_tensor.SetData(w_data); size_t output_data_size = conv_param->output_batch_ * conv_param->output_h_ * conv_param->output_w_ * conv_param->output_channel_; auto y_data = new float[output_data_size]; std::vector dim_y({1, 26, 26, 18}); - lite::tensor::Tensor y_tensor(TypeId::kNumberTypeFloat32, dim_y); + lite::Tensor y_tensor(TypeId::kNumberTypeFloat32, dim_y); y_tensor.SetData(y_data); - std::vector inputs = {&x_tensor, &w_tensor}; - std::vector outputs = {&y_tensor}; + std::vector inputs = {&x_tensor, &w_tensor}; + std::vector outputs = {&y_tensor}; // runtime part printf("Calculating runtime cost...\n"); uint64_t time_avg = 0; lite::Context context; - context.device_ctx_.type = lite::DT_CPU; + context.device_type_ = lite::DT_CPU; context.thread_num_ = 1; - - auto *kernel = new mindspore::kernel::ConvolutionTrainCPUKernel(reinterpret_cast(conv_param), - inputs, outputs, &context, 0); + auto *kernel = new mindspore::kernel::ConvolutionTrainCPUKernel(reinterpret_cast(conv_param), inputs, + outputs, &context, 0); kernel->Init(); // kernel::KernelKey desc = {kernel::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Conv2D}; // auto creator = lite::KernelRegistry::GetInstance()->GetKernelCreator(desc); @@ -541,9 +540,9 @@ TEST_F(TestConvolutionGradFp32, ConvGroupDilation) { auto res = lite::CompareRelativeOutput(y_data, output_path); EXPECT_EQ(res, 0); - delete [] y_data; - delete [] x_data; - delete [] w_data; + delete[] y_data; + delete[] x_data; + delete[] w_data; x_tensor.SetData(nullptr); y_tensor.SetData(nullptr); w_tensor.SetData(nullptr); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/network_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/network_test.cc index 765a53c32c..69142bd69f 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/network_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/network_test.cc @@ -39,7 +39,6 @@ class NetworkTest : public mindspore::CommonTest { NetworkTest() {} }; - // INPUT(0) // V // +-------------+ @@ -75,7 +74,6 @@ class NetworkTest : public mindspore::CommonTest { // V dw(9) | // +-----------Update-----+ - TEST_F(NetworkTest, tuning_layer) { const int BATCH_SIZE = 32; const int NUM_CLASSES = 10; @@ -369,7 +367,7 @@ TEST_F(NetworkTest, tuning_layer) { meta_graph.reset(); content = 
nullptr; auto context = new lite::Context; - context->device_ctx_.type = lite::DT_CPU; + context->device_type_ = lite::DT_CPU; context->cpu_bind_mode_ = lite::NO_BIND; context->thread_num_ = 1; auto session = new session::TrainSession(); @@ -446,24 +444,20 @@ TEST_F(NetworkTest, tuning_layer) { } int32_t fileIterator(mindspore::session::TrainSession *session, const std::string &path, - std::function cb) { + std::function cb) { int32_t res = 0; if (auto dir = opendir(path.c_str())) { while (auto f = readdir(dir)) { if (f->d_name[0] == '.') continue; if (f->d_type == DT_DIR) fileIterator(session, path + f->d_name + "/", cb); - if (f->d_type == DT_REG) - res |= cb(session, path + f->d_name); + if (f->d_type == DT_REG) res |= cb(session, path + f->d_name); } closedir(dir); } return res; } -void replaceExt(const std::string &src, std::string *dst) { - *dst = src.substr(0, src.find_last_of('.')) + ".emb"; -} +void replaceExt(const std::string &src, std::string *dst) { *dst = src.substr(0, src.find_last_of('.')) + ".emb"; } int32_t runEffNet(mindspore::session::TrainSession *session, const std::string &in, const std::string &out) { // setup input @@ -474,7 +468,7 @@ int32_t runEffNet(mindspore::session::TrainSession *session, const std::string & float *data = reinterpret_cast(inTensor->MutableData()); size_t input_size; - float *in_buf = reinterpret_cast(lite::ReadFile(in.c_str(), &input_size)); + float *in_buf = reinterpret_cast(lite::ReadFile(in.c_str(), &input_size)); // ASSERT_NE(nullptr, data); auto input_data = reinterpret_cast(in_buf); // ASSERT_EQ(input_size, inTensor->Size()); @@ -484,7 +478,7 @@ int32_t runEffNet(mindspore::session::TrainSession *session, const std::string & session->RunGraph(); // compare outputs - auto outputs = session->GetOutputs(); + auto outputs = session->GetOutputMap(); auto output = ((outputs.begin())->second); float *output_data = reinterpret_cast(output.at(0)->MutableData()); @@ -498,11 +492,10 @@ TEST_F(NetworkTest, efficient_net) { ReadFile(net.c_str(), &net_size, &buf); auto model = lite::Model::Import(buf, net_size); auto context = new lite::Context; - context->device_ctx_.type = lite::DT_CPU; + context->device_type_ = lite::DT_CPU; context->cpu_bind_mode_ = lite::NO_BIND; context->thread_num_ = 1; - auto session = new mindspore::session::TrainSession(); ASSERT_NE(session, nullptr); auto ret = session->Init(context); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/pooling_grad_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/pooling_grad_fp32_tests.cc index 546f4580c9..3a9b9a2238 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/pooling_grad_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/pooling_grad_fp32_tests.cc @@ -120,22 +120,22 @@ TEST_F(TestPoolingGradFp32, AvgPoolingKernelGradFp32) { std::string input_path = "./test_data/pooling/avgpoolgradfp32_1_dy_1_28_28_3.bin"; auto input_data = reinterpret_cast(mindspore::lite::ReadFile(input_path.c_str(), &input_size)); std::vector dim_dy({1, 28, 28, 3}); - lite::tensor::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy); + lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy); dy_tensor.SetData(input_data); std::string input1_path = "./test_data/pooling/avgpoolgradfp32_1_x_1_28_28_3.bin"; auto input1_data = reinterpret_cast(mindspore::lite::ReadFile(input1_path.c_str(), &input_size)); std::vector dim_x({1, 28, 28, 3}); - lite::tensor::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x); + lite::Tensor 
x_tensor(TypeId::kNumberTypeFloat32, dim_x); x_tensor.SetData(input1_data); - std::vector inputs = {&dy_tensor, &x_tensor}; + std::vector inputs = {&dy_tensor, &x_tensor}; auto output_data = new float[output_data_size]; std::vector dim_dx({1, 28, 28, 3}); - lite::tensor::Tensor dx_tensor(TypeId::kNumberTypeFloat32, dim_dx); + lite::Tensor dx_tensor(TypeId::kNumberTypeFloat32, dim_dx); dx_tensor.SetData(output_data); - std::vector outputs = {&dx_tensor}; + std::vector outputs = {&dx_tensor}; kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_PoolingGrad}; @@ -182,22 +182,22 @@ TEST_F(TestPoolingGradFp32, AvgPoolingBatchGradFp32) { std::string input_path = "./test_data/pooling/avgpoolgradfp32_1_dy_3_28_28_3.bin"; auto input_data = reinterpret_cast(mindspore::lite::ReadFile(input_path.c_str(), &input_size)); std::vector dim_dy({1, 28, 28, 3}); - lite::tensor::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy); + lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy); dy_tensor.SetData(input_data); std::string input1_path = "./test_data/pooling/avgpoolgradfp32_1_x_3_28_28_3.bin"; auto input1_data = reinterpret_cast(mindspore::lite::ReadFile(input1_path.c_str(), &input_size)); std::vector dim_x({1, 28, 28, 3}); - lite::tensor::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x); + lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x); x_tensor.SetData(input1_data); - std::vector inputs = {&dy_tensor, &x_tensor}; + std::vector inputs = {&dy_tensor, &x_tensor}; auto output_data = new float[output_data_size]; std::vector dim_dx({1, 28, 28, 3}); - lite::tensor::Tensor dx_tensor(TypeId::kNumberTypeFloat32, dim_dx); + lite::Tensor dx_tensor(TypeId::kNumberTypeFloat32, dim_dx); dx_tensor.SetData(output_data); - std::vector outputs = {&dx_tensor}; + std::vector outputs = {&dx_tensor}; kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_PoolingGrad}; @@ -245,21 +245,21 @@ TEST_F(TestPoolingGradFp32, AvgPoolGradStride2Fp32) { auto x_data = reinterpret_cast( mindspore::lite::ReadFile("./test_data/pooling/avgpoolgradfp32_s2_x_3_28_28_3.bin", &input_size)); std::vector dim_x({pool->output_batch_, pool->input_h_, pool->input_w_, pool->input_channel_}); - lite::tensor::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x); + lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x); x_tensor.SetData(x_data); auto yt_data = reinterpret_cast( mindspore::lite::ReadFile("./test_data/pooling/avgpoolgradfp32_s2_dy_3_28_28_3.bin", &input_size)); std::vector dim_y({pool->output_batch_, pool->output_h_, pool->output_w_, pool->output_channel_}); - lite::tensor::Tensor yt_tensor(TypeId::kNumberTypeFloat32, dim_y); + lite::Tensor yt_tensor(TypeId::kNumberTypeFloat32, dim_y); yt_tensor.SetData(yt_data); auto out_data = new float[y_data_size]; - lite::tensor::Tensor out_tensor(TypeId::kNumberTypeFloat32, dim_x); + lite::Tensor out_tensor(TypeId::kNumberTypeFloat32, dim_x); out_tensor.SetData(out_data); - std::vector inputs = {&yt_tensor, &x_tensor}; - std::vector outputs = {&out_tensor}; + std::vector inputs = {&yt_tensor, &x_tensor}; + std::vector outputs = {&out_tensor}; // ---------------------------------------- kernel::KernelKey pool_desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_PoolingGrad}; auto pool_creator = lite::KernelRegistry::GetInstance()->GetCreator(pool_desc); @@ -308,21 +308,21 @@ TEST_F(TestPoolingGradFp32, AvgPoolGradStride3Fp32) { auto x_data = reinterpret_cast( 
mindspore::lite::ReadFile("./test_data/pooling/avgpoolgradfp32_s3_x_3_28_28_3.bin", &input_size)); std::vector dim_x({pool->output_batch_, pool->input_h_, pool->input_w_, pool->input_channel_}); - lite::tensor::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x); + lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x); x_tensor.SetData(x_data); auto yt_data = reinterpret_cast( mindspore::lite::ReadFile("./test_data/pooling/avgpoolgradfp32_s3_dy_3_28_28_3.bin", &input_size)); std::vector dim_y({pool->output_batch_, pool->output_h_, pool->output_w_, pool->output_channel_}); - lite::tensor::Tensor yt_tensor(TypeId::kNumberTypeFloat32, dim_y); + lite::Tensor yt_tensor(TypeId::kNumberTypeFloat32, dim_y); yt_tensor.SetData(yt_data); auto out_data = new float[y_data_size]; - lite::tensor::Tensor out_tensor(TypeId::kNumberTypeFloat32, dim_x); + lite::Tensor out_tensor(TypeId::kNumberTypeFloat32, dim_x); out_tensor.SetData(out_data); - std::vector inputs = {&yt_tensor, &x_tensor}; - std::vector outputs = {&out_tensor}; + std::vector inputs = {&yt_tensor, &x_tensor}; + std::vector outputs = {&out_tensor}; // ---------------------------------------- kernel::KernelKey pool_desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_PoolingGrad}; auto pool_creator = lite::KernelRegistry::GetInstance()->GetCreator(pool_desc); @@ -433,20 +433,20 @@ TEST_F(TestPoolingGradFp32, MaxPoolingKernelGradFp32) { auto x_data = reinterpret_cast( mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_2_x_1_30_30_3.bin", &input_size)); std::vector dim_x({1, 30, 30, 3}); - lite::tensor::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x); + lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x); x_tensor.SetData(x_data); - std::vector maxpool_inputs = {&x_tensor}; + std::vector maxpool_inputs = {&x_tensor}; auto y_data = new float[y_data_size]; std::vector dim_y({1, 10, 10, 3}); - lite::tensor::Tensor y_tensor(TypeId::kNumberTypeFloat32, dim_y); + lite::Tensor y_tensor(TypeId::kNumberTypeFloat32, dim_y); y_tensor.SetData(y_data); auto ind_data = new int[y_data_size]; - lite::tensor::Tensor ind_tensor(TypeId::kNumberTypeInt32, dim_y); + lite::Tensor ind_tensor(TypeId::kNumberTypeInt32, dim_y); ind_tensor.SetData(ind_data); - std::vector maxpool_outputs = {&y_tensor, &ind_tensor}; + std::vector maxpool_outputs = {&y_tensor, &ind_tensor}; kernel::KernelKey maxpool_desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_Pooling}; auto maxpool_creator = lite::KernelRegistry::GetInstance()->GetCreator(maxpool_desc); @@ -481,7 +481,7 @@ TEST_F(TestPoolingGradFp32, MaxPoolingKernelGradFp32) { auto dy_data = reinterpret_cast( mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_2_dy_1_10_10_3.bin", &input_size)); std::vector dim_dy({1, 3, 10, 10}); - lite::tensor::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy); + lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy); dy_tensor.SetData(dy_data); #if 0 @@ -491,17 +491,17 @@ TEST_F(TestPoolingGradFp32, MaxPoolingKernelGradFp32) { for (int i=0; i < output_data_size; i++) i_data[i] = static_cast(ill_data[i]); std::vector dim_ind({1, 3, 10, 10}); - lite::tensor::Tensor ind_tensor(TypeId::kNumberTypeInt32, dim_ind); + lite::Tensor ind_tensor(TypeId::kNumberTypeInt32, dim_ind); ind_tensor.SetData(i_data); #endif - std::vector inputs = {&dy_tensor, &ind_tensor}; + std::vector inputs = {&dy_tensor, &ind_tensor}; auto output_data = new float[output_data_size]; std::vector dim_dx({1, 3, 30, 30}); - lite::tensor::Tensor 
dx_tensor(TypeId::kNumberTypeFloat32, dim_dx); + lite::Tensor dx_tensor(TypeId::kNumberTypeFloat32, dim_dx); dx_tensor.SetData(output_data); - std::vector outputs = {&dx_tensor}; + std::vector outputs = {&dx_tensor}; kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_PoolingGrad}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); @@ -539,26 +539,26 @@ TEST_F(TestPoolingGradFp32, MaxPoolGradBatchFp32) { auto x_data = reinterpret_cast( mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_1_x_3_28_28_3.bin", &input_size)); std::vector dim_x({3, 28, 28, 3}); - lite::tensor::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x); + lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x); x_tensor.SetData(x_data); auto y_data = reinterpret_cast( mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_1_dx_3_28_28_3.bin", &input_size)); std::vector dim_y({3, 28, 28, 3}); - lite::tensor::Tensor y_tensor(TypeId::kNumberTypeFloat32, dim_y); + lite::Tensor y_tensor(TypeId::kNumberTypeFloat32, dim_y); y_tensor.SetData(y_data); auto yt_data = reinterpret_cast( mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_1_dy_3_28_28_3.bin", &input_size)); - lite::tensor::Tensor yt_tensor(TypeId::kNumberTypeFloat32, dim_y); + lite::Tensor yt_tensor(TypeId::kNumberTypeFloat32, dim_y); yt_tensor.SetData(yt_data); auto out_data = new float[y_data_size]; - lite::tensor::Tensor out_tensor(TypeId::kNumberTypeFloat32, dim_x); + lite::Tensor out_tensor(TypeId::kNumberTypeFloat32, dim_x); out_tensor.SetData(out_data); - std::vector maxpool_inputs = {&x_tensor, &y_tensor, &yt_tensor}; - std::vector maxpool_outputs = {&out_tensor}; + std::vector maxpool_inputs = {&x_tensor, &y_tensor, &yt_tensor}; + std::vector maxpool_outputs = {&out_tensor}; // ---------------------------------------- kernel::KernelKey maxpool_desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_PoolingGrad}; auto maxpool_creator = lite::KernelRegistry::GetInstance()->GetCreator(maxpool_desc); @@ -611,26 +611,26 @@ TEST_F(TestPoolingGradFp32, MaxPoolGradStride2Fp32) { auto x_data = reinterpret_cast( mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_s2_x_3_28_28_3.bin", &input_size)); std::vector dim_x({maxpool->output_batch_, maxpool->input_h_, maxpool->input_w_, maxpool->input_channel_}); - lite::tensor::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x); + lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x); x_tensor.SetData(x_data); auto y_data = reinterpret_cast( mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_s2_dx_3_28_28_3.bin", &input_size)); std::vector dim_y({maxpool->output_batch_, maxpool->output_h_, maxpool->output_w_, maxpool->output_channel_}); - lite::tensor::Tensor y_tensor(TypeId::kNumberTypeFloat32, dim_y); + lite::Tensor y_tensor(TypeId::kNumberTypeFloat32, dim_y); y_tensor.SetData(y_data); auto yt_data = reinterpret_cast( mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_s2_dy_3_28_28_3.bin", &input_size)); - lite::tensor::Tensor yt_tensor(TypeId::kNumberTypeFloat32, dim_y); + lite::Tensor yt_tensor(TypeId::kNumberTypeFloat32, dim_y); yt_tensor.SetData(yt_data); auto out_data = new float[y_data_size]; - lite::tensor::Tensor out_tensor(TypeId::kNumberTypeFloat32, dim_x); + lite::Tensor out_tensor(TypeId::kNumberTypeFloat32, dim_x); out_tensor.SetData(out_data); - std::vector maxpool_inputs = {&x_tensor, &y_tensor, &yt_tensor}; - std::vector maxpool_outputs = {&out_tensor}; + std::vector 
maxpool_inputs = {&x_tensor, &y_tensor, &yt_tensor}; + std::vector maxpool_outputs = {&out_tensor}; // ---------------------------------------- kernel::KernelKey maxpool_desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_PoolingGrad}; auto maxpool_creator = lite::KernelRegistry::GetInstance()->GetCreator(maxpool_desc); @@ -683,26 +683,26 @@ TEST_F(TestPoolingGradFp32, MaxPoolGradStride3Fp32) { auto x_data = reinterpret_cast( mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_s3_x_3_28_28_3.bin", &input_size)); std::vector dim_x({maxpool->output_batch_, maxpool->input_h_, maxpool->input_w_, maxpool->input_channel_}); - lite::tensor::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x); + lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x); x_tensor.SetData(x_data); auto y_data = reinterpret_cast( mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_s3_dx_3_28_28_3.bin", &input_size)); std::vector dim_y({maxpool->output_batch_, maxpool->output_h_, maxpool->output_w_, maxpool->output_channel_}); - lite::tensor::Tensor y_tensor(TypeId::kNumberTypeFloat32, dim_y); + lite::Tensor y_tensor(TypeId::kNumberTypeFloat32, dim_y); y_tensor.SetData(y_data); auto yt_data = reinterpret_cast( mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_s3_dy_3_28_28_3.bin", &input_size)); - lite::tensor::Tensor yt_tensor(TypeId::kNumberTypeFloat32, dim_y); + lite::Tensor yt_tensor(TypeId::kNumberTypeFloat32, dim_y); yt_tensor.SetData(yt_data); auto out_data = new float[y_data_size]; - lite::tensor::Tensor out_tensor(TypeId::kNumberTypeFloat32, dim_x); + lite::Tensor out_tensor(TypeId::kNumberTypeFloat32, dim_x); out_tensor.SetData(out_data); - std::vector maxpool_inputs = {&x_tensor, &y_tensor, &yt_tensor}; - std::vector maxpool_outputs = {&out_tensor}; + std::vector maxpool_inputs = {&x_tensor, &y_tensor, &yt_tensor}; + std::vector maxpool_outputs = {&out_tensor}; // ---------------------------------------- kernel::KernelKey maxpool_desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_PoolingGrad}; auto maxpool_creator = lite::KernelRegistry::GetInstance()->GetCreator(maxpool_desc); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_crossentropy_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_crossentropy_fp32_tests.cc index d3bc737393..cb4d2a421f 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_crossentropy_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_crossentropy_fp32_tests.cc @@ -36,7 +36,7 @@ TEST_F(TestSoftmaxCrossEntropyFp32, SoftmaxCrossEntropyFp32) { std::string input_path = "./test_data/operators/sce_fp32_1_y_6_4.bin"; auto input_data = reinterpret_cast(mindspore::lite::ReadFile(input_path.c_str(), &input_size)); std::vector dim_y({6, 4}); - lite::tensor::Tensor y_tensor(TypeId::kNumberTypeFloat32, dim_y); + lite::Tensor y_tensor(TypeId::kNumberTypeFloat32, dim_y); y_tensor.SetData(input_data); std::string label_path = "./test_data/operators/sce_fp32_1_l_6.bin"; @@ -45,19 +45,19 @@ TEST_F(TestSoftmaxCrossEntropyFp32, SoftmaxCrossEntropyFp32) { for (int i = 0; i < 6; i++) labels[i] = static_cast(ll_labels[i]); std::vector dim_l({6}); - lite::tensor::Tensor l_tensor(TypeId::kNumberTypeInt32, dim_l); + lite::Tensor l_tensor(TypeId::kNumberTypeInt32, dim_l); l_tensor.SetData(labels); - std::vector inputs = {&y_tensor, &l_tensor}; + std::vector inputs = {&y_tensor, &l_tensor}; auto loss = new float[1]; 
std::vector dim_dw({1}); - lite::tensor::Tensor loss_tensor(TypeId::kNumberTypeFloat32, dim_dw); + lite::Tensor loss_tensor(TypeId::kNumberTypeFloat32, dim_dw); loss_tensor.SetData(loss); auto grad = new float[24]; - lite::tensor::Tensor grad_tensor(TypeId::kNumberTypeFloat32, dim_y); + lite::Tensor grad_tensor(TypeId::kNumberTypeFloat32, dim_y); grad_tensor.SetData(grad); - std::vector outputs = {&loss_tensor, &grad_tensor}; + std::vector outputs = {&loss_tensor, &grad_tensor}; kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_SoftmaxCrossEntropy}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/add_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/add_int8_tests.cc index edb53248e7..c6be1af139 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/add_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/add_int8_tests.cc @@ -28,9 +28,9 @@ class TestQuantizedAdd : public mindspore::CommonTest { }; TEST_F(TestQuantizedAdd, Add) { - lite::tensor::Tensor in_tensor0(kNumberTypeInt8, {1, 1, 2, 5}); - lite::tensor::Tensor in_tensor1(kNumberTypeInt8, {1, 1, 2, 5}); - lite::tensor::Tensor out_tensor(kNumberTypeInt8, {1, 1, 2, 5}); + lite::Tensor in_tensor0(kNumberTypeInt8, {1, 1, 2, 5}); + lite::Tensor in_tensor1(kNumberTypeInt8, {1, 1, 2, 5}); + lite::Tensor out_tensor(kNumberTypeInt8, {1, 1, 2, 5}); int8_t input_data0[] = {-102, 25, -51, 89, -102, 25, -51, 89, -102, 25}; // -0.8 0.2 -0.4 0.7 int8_t input_data1[] = {38, 51, 64, -102, 38, 51, 64, -102, 38, 51}; // 0.3 0.4 0.5 -0.8 @@ -39,15 +39,15 @@ TEST_F(TestQuantizedAdd, Add) { in_tensor1.SetData(input_data1); out_tensor.SetData(output_data); - const lite::tensor::QuantArg quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255 - const lite::tensor::QuantArg quant_in1 = {0.00784314f, 0}; - const lite::tensor::QuantArg quant_out = {0.00784314f, 0}; + const lite::QuantArg quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255 + const lite::QuantArg quant_in1 = {0.00784314f, 0}; + const lite::QuantArg quant_out = {0.00784314f, 0}; in_tensor0.AddQuantParam(quant_in0); in_tensor1.AddQuantParam(quant_in1); out_tensor.AddQuantParam(quant_out); - std::vector inputs = {&in_tensor0, &in_tensor1}; - std::vector outputs = {&out_tensor}; + std::vector inputs = {&in_tensor0, &in_tensor1}; + std::vector outputs = {&out_tensor}; OpParameter parameter = {}; kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Add}; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc index 09ba8ca312..ff5026f3e2 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc @@ -21,7 +21,7 @@ #include "mindspore/lite/nnacl/arithmetic_self_parameter.h" #include "mindspore/lite/src/kernel_registry.h" #include "mindspore/lite/src/lite_kernel.h" -#include "mindspore/lite/src/ir/tensor.h" +#include "mindspore/lite/src/tensor.h" namespace mindspore { @@ -39,28 +39,28 @@ TEST_F(TestArithmeticSelfInt8, floor_quant0_thread2) { const int output_size = 12; int8_t output[12]; std::vector output_shape = {2, 3, 2}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 1.0; input_quant_arg.zeroPoint = 0; - 
lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.0; output_quant_arg.zeroPoint = 0; TypeId tid_int8 = kNumberTypeInt8; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - std::vector inputs_tensor(1); + std::vector inputs_tensor(1); inputs_tensor[0] = input_tensor1; - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); output0_tensor->set_data_type(tid_int8); - std::vector outputs_tensor(1); + std::vector outputs_tensor(1); outputs_tensor[0] = output0_tensor; ArithmeticSelfParameter op_param; @@ -98,28 +98,28 @@ TEST_F(TestArithmeticSelfInt8, floor_quant1_thread2) { const int output_size = 12; int8_t output[12]; std::vector output_shape = {2, 3, 2}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 0.8; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.5; output_quant_arg.zeroPoint = 0; TypeId tid_int8 = kNumberTypeInt8; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - std::vector inputs_tensor(1); + std::vector inputs_tensor(1); inputs_tensor[0] = input_tensor1; - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); output0_tensor->set_data_type(tid_int8); - std::vector outputs_tensor(1); + std::vector outputs_tensor(1); outputs_tensor[0] = output0_tensor; ArithmeticSelfParameter op_param; @@ -157,28 +157,28 @@ TEST_F(TestArithmeticSelfInt8, round_quant0_thread2) { const int output_size = 12; int8_t output[12]; std::vector output_shape = {2, 3, 2}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 1.0; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.0; output_quant_arg.zeroPoint = 0; TypeId tid_int8 = kNumberTypeInt8; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - std::vector inputs_tensor(1); + std::vector inputs_tensor(1); inputs_tensor[0] = input_tensor1; - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); output0_tensor->set_data_type(tid_int8); - std::vector outputs_tensor(1); + std::vector outputs_tensor(1); outputs_tensor[0] = output0_tensor; ArithmeticSelfParameter op_param; @@ -216,28 +216,28 @@ TEST_F(TestArithmeticSelfInt8, 
round_quant1_thread2) { const int output_size = 12; int8_t output[12]; std::vector output_shape = {2, 3, 2}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 0.8; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.5; output_quant_arg.zeroPoint = 0; TypeId tid_int8 = kNumberTypeInt8; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - std::vector inputs_tensor(1); + std::vector inputs_tensor(1); inputs_tensor[0] = input_tensor1; - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); output0_tensor->set_data_type(tid_int8); - std::vector outputs_tensor(1); + std::vector outputs_tensor(1); outputs_tensor[0] = output0_tensor; ArithmeticSelfParameter op_param; @@ -275,28 +275,28 @@ TEST_F(TestArithmeticSelfInt8, ceil_quant0_thread2) { const int output_size = 12; int8_t output[12]; std::vector output_shape = {2, 3, 2}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 1.0; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.0; output_quant_arg.zeroPoint = 0; TypeId tid_int8 = kNumberTypeInt8; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - std::vector inputs_tensor(1); + std::vector inputs_tensor(1); inputs_tensor[0] = input_tensor1; - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); output0_tensor->set_data_type(tid_int8); - std::vector outputs_tensor(1); + std::vector outputs_tensor(1); outputs_tensor[0] = output0_tensor; ArithmeticSelfParameter op_param; @@ -334,28 +334,28 @@ TEST_F(TestArithmeticSelfInt8, ceil_quant1_thread2) { const int output_size = 12; int8_t output[12]; std::vector output_shape = {2, 3, 2}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 0.8; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.5; output_quant_arg.zeroPoint = 0; TypeId tid_int8 = kNumberTypeInt8; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - std::vector inputs_tensor(1); + std::vector inputs_tensor(1); inputs_tensor[0] = input_tensor1; - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); 
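[Note: every TestArithmeticSelfInt8 case in this file repeats the same eight-line tensor setup. Not part of this patch, but the renamed API makes the duplication easy to factor; a sketch assuming only the members shown in these hunks (SetData, set_shape, set_data_type, AddQuantParam).]

namespace mindspore {
// Hypothetical helper (not in this patch) factoring the repeated int8 test setup above.
lite::Tensor *MakeQuantInt8Tensor(void *data, const std::vector<int> &shape, double scale,
                                  int zero_point) {
  auto *tensor = new lite::Tensor;  // was: new lite::tensor::Tensor
  tensor->SetData(data);            // caller keeps ownership of the backing buffer
  tensor->set_shape(shape);
  tensor->set_data_type(kNumberTypeInt8);
  lite::QuantArg quant_arg;         // was: lite::tensor::QuantArg
  quant_arg.scale = scale;
  quant_arg.zeroPoint = zero_point;
  tensor->AddQuantParam(quant_arg);
  return tensor;
}
}  // namespace mindspore

Each test body would then shrink to two MakeQuantInt8Tensor calls plus the ArithmeticSelfParameter setup.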
output0_tensor->AddQuantParam(output_quant_arg); output0_tensor->set_data_type(tid_int8); - std::vector outputs_tensor(1); + std::vector outputs_tensor(1); outputs_tensor[0] = output0_tensor; ArithmeticSelfParameter op_param; @@ -393,28 +393,28 @@ TEST_F(TestArithmeticSelfInt8, abs_quant0_thread0) { const int output_size = 12; int8_t output[12]; std::vector output_shape = {2, 3, 2}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 1.0; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.0; output_quant_arg.zeroPoint = 0; TypeId tid_int8 = kNumberTypeInt8; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - std::vector inputs_tensor(1); + std::vector inputs_tensor(1); inputs_tensor[0] = input_tensor1; - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); output0_tensor->set_data_type(tid_int8); - std::vector outputs_tensor(1); + std::vector outputs_tensor(1); outputs_tensor[0] = output0_tensor; ArithmeticSelfParameter op_param; @@ -452,28 +452,28 @@ TEST_F(TestArithmeticSelfInt8, abs_quant1_thread2) { const int output_size = 12; int8_t output[12]; std::vector output_shape = {2, 3, 2}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 0.8; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.5; output_quant_arg.zeroPoint = 0; TypeId tid_int8 = kNumberTypeInt8; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - std::vector inputs_tensor(1); + std::vector inputs_tensor(1); inputs_tensor[0] = input_tensor1; - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); output0_tensor->set_data_type(tid_int8); - std::vector outputs_tensor(1); + std::vector outputs_tensor(1); outputs_tensor[0] = output0_tensor; ArithmeticSelfParameter op_param; @@ -511,28 +511,28 @@ TEST_F(TestArithmeticSelfInt8, sin_quant0_thread2) { const int output_size = 4; int8_t output[4]; std::vector output_shape = {2, 2}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 1.0; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.0; output_quant_arg.zeroPoint = 0; TypeId tid_int8 = kNumberTypeInt8; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - std::vector inputs_tensor(1); + std::vector 
inputs_tensor(1); inputs_tensor[0] = input_tensor1; - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); output0_tensor->set_data_type(tid_int8); - std::vector outputs_tensor(1); + std::vector outputs_tensor(1); outputs_tensor[0] = output0_tensor; ArithmeticSelfParameter op_param; @@ -570,28 +570,28 @@ TEST_F(TestArithmeticSelfInt8, cos_quant0_thread2) { const int output_size = 4; int8_t output[4]; std::vector output_shape = {2, 2}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 1.0; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.0; output_quant_arg.zeroPoint = 0; TypeId tid_int8 = kNumberTypeInt8; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - std::vector inputs_tensor(1); + std::vector inputs_tensor(1); inputs_tensor[0] = input_tensor1; - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); output0_tensor->set_data_type(tid_int8); - std::vector outputs_tensor(1); + std::vector outputs_tensor(1); outputs_tensor[0] = output0_tensor; ArithmeticSelfParameter op_param; @@ -629,28 +629,28 @@ TEST_F(TestArithmeticSelfInt8, log_quant0_thread2) { const int output_size = 12; int8_t output[12]; std::vector output_shape = {2, 3, 2}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 1.0; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.0; output_quant_arg.zeroPoint = 0; TypeId tid_int8 = kNumberTypeInt8; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - std::vector inputs_tensor(1); + std::vector inputs_tensor(1); inputs_tensor[0] = input_tensor1; - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); output0_tensor->set_data_type(tid_int8); - std::vector outputs_tensor(1); + std::vector outputs_tensor(1); outputs_tensor[0] = output0_tensor; ArithmeticSelfParameter op_param; @@ -688,28 +688,28 @@ TEST_F(TestArithmeticSelfInt8, sqrt_quant0_thread2) { const int output_size = 12; int8_t output[12]; std::vector output_shape = {2, 3, 2}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 1.0; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.0; output_quant_arg.zeroPoint = 0; TypeId tid_int8 = kNumberTypeInt8; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor 
*input_tensor1 = new lite::Tensor; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - std::vector inputs_tensor(1); + std::vector inputs_tensor(1); inputs_tensor[0] = input_tensor1; - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); output0_tensor->set_data_type(tid_int8); - std::vector outputs_tensor(1); + std::vector outputs_tensor(1); outputs_tensor[0] = output0_tensor; ArithmeticSelfParameter op_param; @@ -747,28 +747,28 @@ TEST_F(TestArithmeticSelfInt8, rsqrt_quant0_thread2) { const int output_size = 12; int8_t output[12]; std::vector output_shape = {2, 3, 2}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 1.0; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.0; output_quant_arg.zeroPoint = 0; TypeId tid_int8 = kNumberTypeInt8; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - std::vector inputs_tensor(1); + std::vector inputs_tensor(1); inputs_tensor[0] = input_tensor1; - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); output0_tensor->set_data_type(tid_int8); - std::vector outputs_tensor(1); + std::vector outputs_tensor(1); outputs_tensor[0] = output0_tensor; ArithmeticSelfParameter op_param; @@ -806,28 +806,28 @@ TEST_F(TestArithmeticSelfInt8, square_quant0_thread2) { const int output_size = 12; int8_t output[12]; std::vector output_shape = {2, 3, 2}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 1.0; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.0; output_quant_arg.zeroPoint = 0; TypeId tid_int8 = kNumberTypeInt8; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - std::vector inputs_tensor(1); + std::vector inputs_tensor(1); inputs_tensor[0] = input_tensor1; - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); output0_tensor->set_data_type(tid_int8); - std::vector outputs_tensor(1); + std::vector outputs_tensor(1); outputs_tensor[0] = output0_tensor; ArithmeticSelfParameter op_param; @@ -865,28 +865,28 @@ TEST_F(TestArithmeticSelfInt8, square_quant1_thread2) { const int output_size = 12; int8_t output[12]; std::vector output_shape = {2, 3, 2}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 0.8; input_quant_arg.zeroPoint = 0; - 
lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.5; output_quant_arg.zeroPoint = 0; TypeId tid_int8 = kNumberTypeInt8; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - std::vector inputs_tensor(1); + std::vector inputs_tensor(1); inputs_tensor[0] = input_tensor1; - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); output0_tensor->set_data_type(tid_int8); - std::vector outputs_tensor(1); + std::vector outputs_tensor(1); outputs_tensor[0] = output0_tensor; ArithmeticSelfParameter op_param; @@ -924,28 +924,28 @@ TEST_F(TestArithmeticSelfInt8, logical_not_quant0_thread2) { const int output_size = 12; int8_t output[12]; std::vector output_shape = {2, 3, 2}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 1.0; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.0; output_quant_arg.zeroPoint = 0; TypeId tid_int8 = kNumberTypeInt8; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - std::vector inputs_tensor(1); + std::vector inputs_tensor(1); inputs_tensor[0] = input_tensor1; - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); output0_tensor->set_data_type(tid_int8); - std::vector outputs_tensor(1); + std::vector outputs_tensor(1); outputs_tensor[0] = output0_tensor; ArithmeticSelfParameter op_param; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/batchnorm_int8_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/batchnorm_int8_test.cc index b14eafba3a..3e58171fe0 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/batchnorm_int8_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/batchnorm_int8_test.cc @@ -34,8 +34,8 @@ TEST_F(TestBatchnormInt8, FusedTest) { std::vector in_data2 = {8, 33}; std::vector in_data3 = {35, 55}; std::vector in_data4 = {2, 3}; - std::vector inputs_tensor; - std::vector outputs_tensor; + std::vector inputs_tensor; + std::vector outputs_tensor; BatchNormParameter op_param; op_param.op_parameter_.type_ = schema::PrimitiveType_FusedBatchNorm; @@ -44,30 +44,30 @@ TEST_F(TestBatchnormInt8, FusedTest) { std::vector shape = {1, 1, 6, 2}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 0.1; input_quant_arg.zeroPoint = 1; - lite::tensor::QuantArg input_quant_arg_1; + lite::QuantArg input_quant_arg_1; input_quant_arg_1.scale = 0.5; input_quant_arg_1.zeroPoint = 2; - lite::tensor::QuantArg input_quant_arg_2; + lite::QuantArg input_quant_arg_2; input_quant_arg_2.scale = 0.02; input_quant_arg_2.zeroPoint = 3; - lite::tensor::QuantArg input_quant_arg_3; + lite::QuantArg 
input_quant_arg_3; input_quant_arg_3.scale = 0.5; input_quant_arg_3.zeroPoint = 15; - lite::tensor::QuantArg input_quant_arg_4; + lite::QuantArg input_quant_arg_4; input_quant_arg_4.scale = 0.25; input_quant_arg_4.zeroPoint = 1; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 0.8; output_quant_arg.zeroPoint = 0; - lite::tensor::Tensor input0_tensor; - lite::tensor::Tensor input1_tensor; - lite::tensor::Tensor input2_tensor; - lite::tensor::Tensor input3_tensor; - lite::tensor::Tensor input4_tensor; + lite::Tensor input0_tensor; + lite::Tensor input1_tensor; + lite::Tensor input2_tensor; + lite::Tensor input3_tensor; + lite::Tensor input4_tensor; inputs_tensor.push_back(&input0_tensor); inputs_tensor.push_back(&input1_tensor); inputs_tensor.push_back(&input2_tensor); @@ -92,7 +92,7 @@ TEST_F(TestBatchnormInt8, FusedTest) { std::vector output(12); // std::vector corr_out = {-18, -22, -16, -21, -14, -19, -22, -34, -24, -35, -26, -36 }; std::vector corr_out = {-22, -28, -20, -26, -17, -24, -28, -42, -30, -44, -33, -46}; - lite::tensor::Tensor output0_tensor; + lite::Tensor output0_tensor; outputs_tensor.push_back(&output0_tensor); output0_tensor.SetData(output.data()); output0_tensor.set_shape(shape); @@ -130,8 +130,8 @@ TEST_F(TestBatchnormInt8, BNTest) { std::vector in_data = {11, 41, 21, 51, 31, 61, -11, -41, -21, -51, -31, -61}; std::vector in_data1 = {4, 14}; std::vector in_data2 = {29, 39}; - std::vector inputs_tensor; - std::vector outputs_tensor; + std::vector inputs_tensor; + std::vector outputs_tensor; BatchNormParameter op_param; op_param.op_parameter_.type_ = schema::PrimitiveType_BatchNorm; @@ -140,22 +140,22 @@ TEST_F(TestBatchnormInt8, BNTest) { std::vector shape = {1, 1, 6, 2}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 0.1; input_quant_arg.zeroPoint = 1; - lite::tensor::QuantArg input_quant_arg_1; + lite::QuantArg input_quant_arg_1; input_quant_arg_1.scale = 0.05; input_quant_arg_1.zeroPoint = 2; - lite::tensor::QuantArg input_quant_arg_2; + lite::QuantArg input_quant_arg_2; input_quant_arg_2.scale = 0.1; input_quant_arg_2.zeroPoint = -1; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 0.5; output_quant_arg.zeroPoint = 0; - lite::tensor::Tensor input0_tensor; - lite::tensor::Tensor input1_tensor; - lite::tensor::Tensor input2_tensor; + lite::Tensor input0_tensor; + lite::Tensor input1_tensor; + lite::Tensor input2_tensor; inputs_tensor.push_back(&input0_tensor); inputs_tensor.push_back(&input1_tensor); inputs_tensor.push_back(&input2_tensor); @@ -172,7 +172,7 @@ TEST_F(TestBatchnormInt8, BNTest) { std::vector output(12); std::vector corr_out = {1, 3, 2, 4, 3, 5, -2, -5, -3, -6, -4, -7}; - lite::tensor::Tensor output0_tensor; + lite::Tensor output0_tensor; outputs_tensor.push_back(&output0_tensor); output0_tensor.SetData(output.data()); output0_tensor.set_shape(shape); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/bias_add_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/bias_add_int8_tests.cc index 6312d5f2f2..9b9c58980b 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/bias_add_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/bias_add_int8_tests.cc @@ -30,17 +30,17 @@ class TestBiasAddInt8 : public mindspore::CommonTest { }; TEST_F(TestBiasAddInt8, BiasAdd) { - lite::tensor::Tensor in_tensor0(kNumberTypeInt8, {1, 2, 3, 2}); - 
lite::tensor::Tensor in_tensor1(kNumberTypeInt8, {2}); - lite::tensor::Tensor out_tensor(kNumberTypeInt8, {1, 2, 3, 2}); + lite::Tensor in_tensor0(kNumberTypeInt8, {1, 2, 3, 2}); + lite::Tensor in_tensor1(kNumberTypeInt8, {2}); + lite::Tensor out_tensor(kNumberTypeInt8, {1, 2, 3, 2}); int8_t input_data0[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; int8_t input_data1[] = {1, 1}; int8_t output_data[12] = {0}; in_tensor0.SetData(input_data0); in_tensor1.SetData(input_data1); out_tensor.SetData(output_data); - std::vector inputs = {&in_tensor0, &in_tensor1}; - std::vector outputs = {&out_tensor}; + std::vector inputs = {&in_tensor0, &in_tensor1}; + std::vector outputs = {&out_tensor}; ArithmeticParameter parameter = {}; int dims[] = {1, 2, 3, 4}; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/concat_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/concat_int8_tests.cc index 9554a59c06..cb71c664a4 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/concat_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/concat_int8_tests.cc @@ -21,7 +21,7 @@ #include "mindspore/lite/nnacl/concat_parameter.h" #include "mindspore/lite/src/kernel_registry.h" #include "mindspore/lite/src/lite_kernel.h" -#include "mindspore/lite/src/ir/tensor.h" +#include "mindspore/lite/src/tensor.h" namespace mindspore { @@ -42,32 +42,32 @@ TEST_F(TestConcatInt8, Concat1_axis0) { int8_t output[12]; std::vector output_shape = {6, 2}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 1.0; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.0; output_quant_arg.zeroPoint = 0; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; TypeId tid_int8 = kNumberTypeInt8; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - lite::tensor::Tensor *input_tensor2 = new lite::tensor::Tensor; + lite::Tensor *input_tensor2 = new lite::Tensor; input_tensor2->SetData(input2.data()); input_tensor2->set_shape(shape2); input_tensor2->AddQuantParam(input_quant_arg); input_tensor2->set_data_type(tid_int8); - std::vector inputs_tensor(2); + std::vector inputs_tensor(2); inputs_tensor[0] = input_tensor1; inputs_tensor[1] = input_tensor2; - std::vector outputs_tensor(1); - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + std::vector outputs_tensor(1); + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); @@ -113,32 +113,32 @@ TEST_F(TestConcatInt8, Concat1_axis1_thread2) { int8_t output[16]; std::vector output_shape = {2, 4, 2}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 1.0; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.0; output_quant_arg.zeroPoint = 0; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; TypeId tid_int8 = kNumberTypeInt8; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - lite::tensor::Tensor *input_tensor2 = new 
lite::tensor::Tensor; + lite::Tensor *input_tensor2 = new lite::Tensor; input_tensor2->SetData(input2.data()); input_tensor2->set_shape(shape2); input_tensor2->AddQuantParam(input_quant_arg); input_tensor2->set_data_type(tid_int8); - std::vector inputs_tensor(2); + std::vector inputs_tensor(2); inputs_tensor[0] = input_tensor1; inputs_tensor[1] = input_tensor2; - std::vector outputs_tensor(1); - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + std::vector outputs_tensor(1); + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); @@ -185,32 +185,32 @@ TEST_F(TestConcatInt8, Concat1_axis1_thread2_quant1) { int8_t output[16]; std::vector output_shape = {2, 4, 2}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 1.0; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 2.0; output_quant_arg.zeroPoint = 0; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; TypeId tid_int8 = kNumberTypeInt8; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - lite::tensor::Tensor *input_tensor2 = new lite::tensor::Tensor; + lite::Tensor *input_tensor2 = new lite::Tensor; input_tensor2->SetData(input2.data()); input_tensor2->set_shape(shape2); input_tensor2->AddQuantParam(input_quant_arg); input_tensor2->set_data_type(tid_int8); - std::vector inputs_tensor(2); + std::vector inputs_tensor(2); inputs_tensor[0] = input_tensor1; inputs_tensor[1] = input_tensor2; - std::vector outputs_tensor(1); - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + std::vector outputs_tensor(1); + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/conv_1x1_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/conv_1x1_int8_tests.cc index 3fe09b3e93..0f11a371e0 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/conv_1x1_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/conv_1x1_int8_tests.cc @@ -22,7 +22,7 @@ #include "mindspore/lite/src/runtime/kernel/arm/int8/convolution_1x1_int8.h" namespace mindspore { -using lite::tensor::Tensor; +using lite::Tensor; class TestConv1x1Int8 : public mindspore::CommonTest { public: TestConv1x1Int8() {} @@ -69,37 +69,36 @@ TEST_F(TestConv1x1Int8, Input1x1PrePack2) { delete conv_param; } -int Conv1x1Int8TestInit1_perchannel(std::vector *inputs_, - std::vector *outputs_, ConvParameter *conv_param, - int8_t **correct) { - Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, schema::Format_NHWC, static_cast(1)); - auto in_quant_arg = new mindspore::lite::tensor::QuantArg(); +int Conv1x1Int8TestInit1_perchannel(std::vector *inputs_, std::vector *outputs_, + ConvParameter *conv_param, int8_t **correct) { + Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST); + auto in_quant_arg = new mindspore::lite::QuantArg(); in_quant_arg->zeroPoint = -42, in_quant_arg->scale = 0.117647; in_t->AddQuantParam(*in_quant_arg); in_t->MallocData(); int8_t in[] = {62, -14, 88, 2, -35, 43, 
83, -111, 75, 26, 14, -121, -78, 56, 37, -31, 15, -75, -10, -115, -71, 74, -65, -15}; - memcpy(in_t->Data(), in, in_t->ElementsNum() * sizeof(int8_t)); + memcpy(in_t->MutableData(), in, in_t->ElementsNum() * sizeof(int8_t)); inputs_->push_back(in_t); - Tensor *weight_t = new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, schema::Format_NHWC, static_cast(1)); + Tensor *weight_t = new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST); weight_t->MallocData(); - auto weight_quant_arg1 = new mindspore::lite::tensor::QuantArg(); + auto weight_quant_arg1 = new mindspore::lite::QuantArg(); weight_quant_arg1->zeroPoint = 66, weight_quant_arg1->scale = 0.96439215686275; - auto weight_quant_arg2 = new mindspore::lite::tensor::QuantArg(); + auto weight_quant_arg2 = new mindspore::lite::QuantArg(); weight_quant_arg2->zeroPoint = 33, weight_quant_arg2->scale = 0.76439215686275; - auto weight_quant_arg3 = new mindspore::lite::tensor::QuantArg(); + auto weight_quant_arg3 = new mindspore::lite::QuantArg(); weight_quant_arg3->zeroPoint = -20, weight_quant_arg3->scale = 0.99117647; weight_t->AddQuantParam(*weight_quant_arg1); weight_t->AddQuantParam(*weight_quant_arg2); weight_t->AddQuantParam(*weight_quant_arg3); int8_t weight[] = {65, 67, 65, 65, 32, 33, 34, 33, -19, -20, -19, -20}; - memcpy(weight_t->Data(), weight, weight_t->ElementsNum() * sizeof(int8_t)); + memcpy(weight_t->MutableData(), weight, weight_t->ElementsNum() * sizeof(int8_t)); inputs_->push_back(weight_t); - Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, schema::Format_NHWC, static_cast(1)); + Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST); out_t->MallocData(); - auto output_quant_arg = new mindspore::lite::tensor::QuantArg(); + auto output_quant_arg = new mindspore::lite::QuantArg(); output_quant_arg->zeroPoint = 7, output_quant_arg->scale = 0.294321233; out_t->AddQuantParam(*output_quant_arg); outputs_->push_back(out_t); @@ -117,8 +116,8 @@ int Conv1x1Int8TestInit1_perchannel(std::vector *inputs_ } TEST_F(TestConv1x1Int8, Conv1x1TestPerChannel) { - std::vector inputs_; - std::vector outputs_; + std::vector inputs_; + std::vector outputs_; auto conv_param = new ConvParameter(); int8_t *correct; auto ctx = new lite::Context; @@ -129,7 +128,7 @@ TEST_F(TestConv1x1Int8, Conv1x1TestPerChannel) { conv1x1->Init(); conv1x1->Run(); - CompareOutputData(reinterpret_cast(outputs_[0]->Data()), correct, total_size, 70); + CompareOutputData(reinterpret_cast(outputs_[0]->MutableData()), correct, total_size, 70); delete conv1x1; for (auto t : inputs_) delete t; @@ -137,10 +136,10 @@ TEST_F(TestConv1x1Int8, Conv1x1TestPerChannel) { free(correct); } -int Conv1x1Int8TestInit1(std::vector *inputs_, std::vector *outputs_, +int Conv1x1Int8TestInit1(std::vector *inputs_, std::vector *outputs_, ConvParameter *conv_param, int8_t **correct) { - Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, schema::Format_NHWC, static_cast(1)); - auto in_quant_arg = new mindspore::lite::tensor::QuantArg(); + Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST); + auto in_quant_arg = new mindspore::lite::QuantArg(); in_quant_arg->zeroPoint = -42, in_quant_arg->scale = 0.117647; in_t->AddQuantParam(*in_quant_arg); in_t->MallocData(); @@ -148,23 +147,23 @@ int Conv1x1Int8TestInit1(std::vector *inputs_, std::vect 13.71383, 8.055829, 6.5845337, -9.25232, -4.24519, 11.550042, 9.262012, 1.2780352, 6.7263746, -3.9301445, 
3.764492, -8.602078, -3.3558068, 13.619035, -2.6694393, 3.2008505}; Quantize(in, in_t->ElementsNum(), in_quant_arg->scale, in_quant_arg->zeroPoint, - reinterpret_cast(in_t->Data())); + reinterpret_cast(in_t->MutableData())); inputs_->push_back(in_t); - Tensor *weight_t = new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, schema::Format_NHWC, static_cast(1)); - auto weight_quant_arg = new mindspore::lite::tensor::QuantArg(); + Tensor *weight_t = new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST); + auto weight_quant_arg = new mindspore::lite::QuantArg(); weight_quant_arg->zeroPoint = 66, weight_quant_arg->scale = 0.036439215686275; weight_t->AddQuantParam(*weight_quant_arg); weight_t->MallocData(); float weight[] = {-0.7308652, 0.5257509, -0.87825793, -1.123181, -1.2206168, 0.562695, 1.5382664, -0.5020635, 0.8591602, -0.26410004, 1.1262615, 0.073132955}; Quantize(weight, weight_t->ElementsNum(), weight_quant_arg->scale, weight_quant_arg->zeroPoint, - reinterpret_cast(weight_t->Data())); + reinterpret_cast(weight_t->MutableData())); inputs_->push_back(weight_t); - Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, schema::Format_NHWC, static_cast(1)); + Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST); out_t->MallocData(); - auto output_quant_arg = new mindspore::lite::tensor::QuantArg(); + auto output_quant_arg = new mindspore::lite::QuantArg(); output_quant_arg->zeroPoint = 7, output_quant_arg->scale = 0.234321233; out_t->AddQuantParam(*output_quant_arg); outputs_->push_back(out_t); @@ -184,8 +183,8 @@ int Conv1x1Int8TestInit1(std::vector *inputs_, std::vect } TEST_F(TestConv1x1Int8, Conv1x1Int8Test1) { - std::vector inputs_; - std::vector outputs_; + std::vector inputs_; + std::vector outputs_; auto conv_param = new ConvParameter(); int8_t *correct; auto ctx = new lite::Context; @@ -196,7 +195,7 @@ TEST_F(TestConv1x1Int8, Conv1x1Int8Test1) { conv1x1->Init(); conv1x1->Run(); - CompareOutputData(reinterpret_cast(outputs_[0]->Data()), correct, total_size, 2); + CompareOutputData(reinterpret_cast(outputs_[0]->MutableData()), correct, total_size, 2); delete conv1x1; for (auto t : inputs_) delete t; @@ -204,42 +203,42 @@ TEST_F(TestConv1x1Int8, Conv1x1Int8Test1) { free(correct); } -int Conv1x1Int8TestInit2(std::vector *inputs_, std::vector *outputs_, +int Conv1x1Int8TestInit2(std::vector *inputs_, std::vector *outputs_, ConvParameter *conv_param, int8_t **correct) { size_t buffer_size; - Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, schema::Format_NHWC, static_cast(1)); - auto in_quant_arg = new mindspore::lite::tensor::QuantArg(); + Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST); + auto in_quant_arg = new mindspore::lite::QuantArg(); in_quant_arg->zeroPoint = -42, in_quant_arg->scale = 0.117647; in_t->AddQuantParam(*in_quant_arg); in_t->MallocData(); std::string input_path = "./input"; auto input = mindspore::lite::ReadFile(input_path.c_str(), &buffer_size); - memcpy(in_t->Data(), input, buffer_size); + memcpy(in_t->MutableData(), input, buffer_size); inputs_->push_back(in_t); delete[] input; - Tensor *weight_t = new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, schema::Format_NHWC, static_cast(1)); - auto weight_quant_arg = new mindspore::lite::tensor::QuantArg(); + Tensor *weight_t = new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST); + auto weight_quant_arg = new 
mindspore::lite::QuantArg(); weight_quant_arg->zeroPoint = 66, weight_quant_arg->scale = 0.036439215686275; weight_t->AddQuantParam(*weight_quant_arg); weight_t->MallocData(); std::string weight_path = "./weight"; auto weight = mindspore::lite::ReadFile(weight_path.c_str(), &buffer_size); - memcpy(weight_t->Data(), weight, buffer_size); + memcpy(weight_t->MutableData(), weight, buffer_size); inputs_->push_back(weight_t); delete[] weight; - Tensor *bias_t = new Tensor(kNumberTypeInt32, {4}, schema::Format_NHWC, static_cast(1)); + Tensor *bias_t = new Tensor(kNumberTypeInt32, {4}, schema::Format_NHWC, lite::Tensor::Category::CONST); weight_t->MallocData(); std::string bias_path = "./bias"; auto bias = mindspore::lite::ReadFile(bias_path.c_str(), &buffer_size); - memcpy(bias_t->Data(), bias, buffer_size); + memcpy(bias_t->MutableData(), bias, buffer_size); inputs_->push_back(bias_t); delete[] bias; - Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, schema::Format_NHWC, static_cast(1)); + Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST); out_t->MallocData(); - auto output_quant_arg = new mindspore::lite::tensor::QuantArg(); + auto output_quant_arg = new mindspore::lite::QuantArg(); output_quant_arg->zeroPoint = 7, output_quant_arg->scale = 0.234321233; out_t->AddQuantParam(*output_quant_arg); outputs_->push_back(out_t); @@ -259,8 +258,8 @@ int Conv1x1Int8TestInit2(std::vector *inputs_, std::vect } TEST_F(TestConv1x1Int8, Conv1x1Int8Test2) { - std::vector inputs_; - std::vector outputs_; + std::vector inputs_; + std::vector outputs_; auto conv_param = new ConvParameter(); int8_t *correct; auto ctx = new lite::Context; @@ -271,7 +270,7 @@ TEST_F(TestConv1x1Int8, Conv1x1Int8Test2) { conv1x1->Init(); conv1x1->Run(); - CompareOutputData(reinterpret_cast(outputs_[0]->Data()), correct, total_size, 2); + CompareOutputData(reinterpret_cast(outputs_[0]->MutableData()), correct, total_size, 2); delete conv1x1; for (auto t : inputs_) delete t; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/crop_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/crop_int8_tests.cc index f5ed79d72c..6d991674ca 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/crop_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/crop_int8_tests.cc @@ -21,7 +21,7 @@ #include "mindspore/lite/nnacl/crop_parameter.h" #include "mindspore/lite/src/kernel_registry.h" #include "mindspore/lite/src/lite_kernel.h" -#include "mindspore/lite/src/ir/tensor.h" +#include "mindspore/lite/src/tensor.h" namespace mindspore { @@ -39,25 +39,25 @@ TEST_F(TestCropInt8, crop_1d_axis0_offset0_quant0_thread2) { const int output_size = 7; int8_t output[7]; std::vector output_shape = {7}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 1.0; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.0; output_quant_arg.zeroPoint = 0; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; TypeId tid_int8 = kNumberTypeInt8; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - std::vector inputs_tensor(1); + std::vector inputs_tensor(1); inputs_tensor[0] = input_tensor1; - std::vector outputs_tensor(1); - lite::tensor::Tensor *output0_tensor 
= new lite::tensor::Tensor; + std::vector outputs_tensor(1); + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); @@ -102,25 +102,25 @@ TEST_F(TestCropInt8, crop_2d_axis1_offset0_quant0_thread2) { const int output_size = 14; int8_t output[14]; std::vector output_shape = {2, 7}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 1.0; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.0; output_quant_arg.zeroPoint = 0; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; TypeId tid_int8 = kNumberTypeInt8; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - std::vector inputs_tensor(1); + std::vector inputs_tensor(1); inputs_tensor[0] = input_tensor1; - std::vector outputs_tensor(1); - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + std::vector outputs_tensor(1); + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); @@ -165,25 +165,25 @@ TEST_F(TestCropInt8, crop_3d_axis1_offset0_quant0_thread0) { const int output_size = 2; int8_t output[2]; std::vector output_shape = {2, 1, 1}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 1.0; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.0; output_quant_arg.zeroPoint = 0; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; TypeId tid_int8 = kNumberTypeInt8; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - std::vector inputs_tensor(1); + std::vector inputs_tensor(1); inputs_tensor[0] = input_tensor1; - std::vector outputs_tensor(1); - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + std::vector outputs_tensor(1); + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); @@ -229,25 +229,25 @@ TEST_F(TestCropInt8, crop_3d_axis1_offset0_quant0_thread2) { const int output_size = 14; int8_t output[14]; std::vector output_shape = {2, 7, 1}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 1.0; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.0; output_quant_arg.zeroPoint = 0; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; TypeId tid_int8 = kNumberTypeInt8; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - std::vector inputs_tensor(1); + std::vector inputs_tensor(1); inputs_tensor[0] = input_tensor1; - std::vector outputs_tensor(1); - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + std::vector outputs_tensor(1); + lite::Tensor 
*output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); @@ -292,25 +292,25 @@ TEST_F(TestCropInt8, crop_4d_axis0_offset0_quant0_thread0) { const int output_size = 1; int8_t output[1]; std::vector output_shape = {1, 1, 1, 1}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 1.0; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.0; output_quant_arg.zeroPoint = 0; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; TypeId tid_int8 = kNumberTypeInt8; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - std::vector inputs_tensor(1); + std::vector inputs_tensor(1); inputs_tensor[0] = input_tensor1; - std::vector outputs_tensor(1); - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + std::vector outputs_tensor(1); + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); @@ -355,25 +355,25 @@ TEST_F(TestCropInt8, crop_4d_axis1_offset0_quant0_thread0) { const int output_size = 2; int8_t output[2]; std::vector output_shape = {2, 1, 1, 1}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 1.0; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.0; output_quant_arg.zeroPoint = 0; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; TypeId tid_int8 = kNumberTypeInt8; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - std::vector inputs_tensor(1); + std::vector inputs_tensor(1); inputs_tensor[0] = input_tensor1; - std::vector outputs_tensor(1); - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + std::vector outputs_tensor(1); + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); @@ -418,25 +418,25 @@ TEST_F(TestCropInt8, crop_4d_axis1_offset1_quant0_thread0) { const int output_size = 4; int8_t output[4]; std::vector output_shape = {1, 1, 2, 2}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 1.0; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.0; output_quant_arg.zeroPoint = 0; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; TypeId tid_int8 = kNumberTypeInt8; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - std::vector inputs_tensor(1); + std::vector inputs_tensor(1); inputs_tensor[0] = input_tensor1; - std::vector outputs_tensor(1); - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + std::vector outputs_tensor(1); + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); 
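[For reference while reading these hunks: every test resolves its kernel the same way, by building a kernel::KernelKey and asking the registry for a creator. A sketch of just that lookup, reusing the Div key from the hunk below; how the returned creator is invoked is not shown in this patch, so it is omitted here.]

// Registry lookup step shared by all tests in this patch (gtest context assumed).
kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Div};
auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
ASSERT_NE(creator, nullptr);  // a registered int8 kernel yields a non-null creator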
output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); @@ -484,25 +484,25 @@ TEST_F(TestCropInt8, crop_4d_axis1_offset1_quant1_thread0) { const int output_size = 4; int8_t output[4]; std::vector output_shape = {1, 1, 2, 2}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 1.0; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 2.0; output_quant_arg.zeroPoint = 0; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; TypeId tid_int8 = kNumberTypeInt8; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - std::vector inputs_tensor(1); + std::vector inputs_tensor(1); inputs_tensor[0] = input_tensor1; - std::vector outputs_tensor(1); - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + std::vector outputs_tensor(1); + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); @@ -552,25 +552,25 @@ TEST_F(TestCropInt8, crop_4d_axis0_offset0_quant0_thread2) { const int output_size = 7; int8_t output[7]; std::vector output_shape = {1, 7, 1, 1}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 1.0; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.0; output_quant_arg.zeroPoint = 0; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; TypeId tid_int8 = kNumberTypeInt8; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - std::vector inputs_tensor(1); + std::vector inputs_tensor(1); inputs_tensor[0] = input_tensor1; - std::vector outputs_tensor(1); - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + std::vector outputs_tensor(1); + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); output0_tensor->AddQuantParam(output_quant_arg); @@ -617,25 +617,25 @@ TEST_F(TestCropInt8, crop_4d_axis0_offset0_quant0_thread3) { const int output_size = 7; int8_t output[7]; std::vector output_shape = {1, 7, 1, 1}; - lite::tensor::QuantArg input_quant_arg; + lite::QuantArg input_quant_arg; input_quant_arg.scale = 1.0; input_quant_arg.zeroPoint = 0; - lite::tensor::QuantArg output_quant_arg; + lite::QuantArg output_quant_arg; output_quant_arg.scale = 1.0; output_quant_arg.zeroPoint = 0; - lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + lite::Tensor *input_tensor1 = new lite::Tensor; TypeId tid_int8 = kNumberTypeInt8; input_tensor1->SetData(input1.data()); input_tensor1->set_shape(shape1); input_tensor1->AddQuantParam(input_quant_arg); input_tensor1->set_data_type(tid_int8); - std::vector inputs_tensor(1); + std::vector inputs_tensor(1); inputs_tensor[0] = input_tensor1; - std::vector outputs_tensor(1); - lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + std::vector outputs_tensor(1); + lite::Tensor *output0_tensor = new lite::Tensor; output0_tensor->SetData(output); output0_tensor->set_shape(output_shape); 
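[The QuantArg {scale, zeroPoint} pairs set up above encode the affine mapping spelled out in the deconv test below (vq = (vi - zp) * s). A self-contained sketch of that round trip; the helper names are illustrative, and the Quantize() helper already in this test tree may differ.]

#include <cmath>
#include <cstdint>

// real = (q - zero_point) * scale;  q = round(real / scale) + zero_point
int8_t QuantizeToInt8(float real, float scale, int zero_point) {
  int q = static_cast<int>(std::round(real / scale)) + zero_point;
  if (q > INT8_MAX) q = INT8_MAX;  // clamp to the int8 range
  if (q < INT8_MIN) q = INT8_MIN;
  return static_cast<int8_t>(q);
}

float DequantizeInt8(int8_t q, float scale, int zero_point) {
  return (q - zero_point) * scale;
}

With the scale 0.00784314 and zeroPoint 0 used in the add and div tests, 0.7 quantizes to 89 and -0.8 to -102, matching inline comments such as "-102, 25, -51, 89 ... // -0.8 0.2 -0.4 0.7".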
   output0_tensor->AddQuantParam(output_quant_arg);
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc
index 9484e6e528..3147fc8aed 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc
@@ -28,8 +28,8 @@
 using mindspore::lite::DeviceType;
 
 namespace mindspore {
-using mindspore::lite::tensor::QuantArg;
-using mindspore::lite::tensor::Tensor;
+using mindspore::lite::QuantArg;
+using mindspore::lite::Tensor;
 using mindspore::schema::Format_NHWC;
 using mindspore::schema::NodeType_Parameter;
 class TestDeconvInt8 : public mindspore::CommonTest {
@@ -306,31 +306,31 @@ TEST_F(TestDeconvInt8, PostAddTest1) {
   CompareOutputData(out, co_relu6, 50, 1);
 }
 
-int DeConvInt8TestInit1(std::vector<lite::tensor::Tensor *> *inputs_, std::vector<lite::tensor::Tensor *> *outputs_,
+int DeConvInt8TestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
                         ConvParameter *conv_param, int8_t **correct) {
   /* float data from deconv fp32 testcase : DeConvTestInit2 */
   /*   vq = (vi - zp) * s     vi = vq / s + zp */
-  Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 4, 2, 3}, Format_NHWC, NodeType_Parameter);
+  Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 4, 2, 3}, Format_NHWC, lite::TensorCategory(NodeType_Parameter));
   in_t->MallocData();
   int8_t in[] = {6, 43, 38, 24, -8, 12, 41, -24, -20, 41, -19, -6, -26, -6, 23, -31, 34, 45, 8, 45, -39, -27, -48, 12};
-  memcpy(in_t->Data(), in, sizeof(int8_t) * in_t->ElementsNum());
+  memcpy(in_t->MutableData(), in, sizeof(int8_t) * in_t->ElementsNum());
   QuantArg *in_quant_arg = new QuantArg();
   in_quant_arg->zeroPoint = -19, in_quant_arg->scale = 0.31228156;
   in_t->AddQuantParam(*in_quant_arg);
   inputs_->push_back(in_t);
 
-  Tensor *weight_t = new Tensor(kNumberTypeInt8, {3, 3, 3, 2}, Format_NHWC, NodeType_Parameter);
+  Tensor *weight_t = new Tensor(kNumberTypeInt8, {3, 3, 3, 2}, Format_NHWC, lite::TensorCategory(NodeType_Parameter));
   weight_t->MallocData();
   int8_t weight[] = {66, 89, 98, 74,  95, 86, 125, 95, 105, 83, 116, 94, 90, 80, 86, 59, 72, 92,
                      64, 76, 92, 80,  90, 87, 106, 55, 105, 60, 75,  53, 81, 81, 98, 81, 86, 59,
                      74, 82, 97, 105, 71, 67, 79,  87, 72,  79, 80,  76, 96, 80, 83, 71, 61, 79};
-  memcpy(weight_t->Data(), weight, sizeof(int8_t) * weight_t->ElementsNum());
+  memcpy(weight_t->MutableData(), weight, sizeof(int8_t) * weight_t->ElementsNum());
   QuantArg *w_quant_arg = new QuantArg();
   w_quant_arg->zeroPoint = 83, w_quant_arg->scale = 0.023649725490196;
   weight_t->AddQuantParam(*w_quant_arg);
   inputs_->push_back(weight_t);
 
-  Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 7, 3, 2}, Format_NHWC, NodeType_Parameter);
+  Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 7, 3, 2}, Format_NHWC, lite::TensorCategory(NodeType_Parameter));
   out_t->MallocData();
   QuantArg *out_quant_arg = new QuantArg();
   out_quant_arg->zeroPoint = 31, out_quant_arg->scale = 0.3439215686275;
@@ -350,8 +350,8 @@ int DeConvInt8TestInit1(std::vector<lite::tensor::Tensor *> *inputs_, std::vecto
 }
 
 TEST_F(TestDeconvInt8, DeConvInt8Test1) {
-  std::vector<lite::tensor::Tensor *> inputs_;
-  std::vector<lite::tensor::Tensor *> outputs_;
+  std::vector<lite::Tensor *> inputs_;
+  std::vector<lite::Tensor *> outputs_;
   auto deconv_param = new ConvParameter();
   lite::Context *ctx = new lite::Context;
   ctx->thread_num_ = 1;
@@ -362,7 +362,7 @@ TEST_F(TestDeconvInt8, DeConvInt8Test1) {
   deconv->Init();
   deconv->Run();
-  CompareOutputData(reinterpret_cast<int8_t *>(outputs_[0]->Data()), correct, total_size, 3);
+  CompareOutputData(reinterpret_cast<int8_t *>(outputs_[0]->MutableData()), correct, total_size, 3);
 
   delete deconv_param;
   delete deconv;
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/div_int8_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/div_int8_test.cc
index 4cdb92440c..5a0973e953 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/div_int8_test.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/div_int8_test.cc
@@ -29,9 +29,9 @@ class TestDivInt8 : public mindspore::CommonTest {
 };
 
 TEST_F(TestDivInt8, DivInt8) {
-  lite::tensor::Tensor in_tensor0(kNumberTypeInt8, {1, 1, 2, 5});
-  lite::tensor::Tensor in_tensor1(kNumberTypeInt8, {1, 1, 2, 5});
-  lite::tensor::Tensor out_tensor(kNumberTypeInt8, {1, 1, 2, 5});
+  lite::Tensor in_tensor0(kNumberTypeInt8, {1, 1, 2, 5});
+  lite::Tensor in_tensor1(kNumberTypeInt8, {1, 1, 2, 5});
+  lite::Tensor out_tensor(kNumberTypeInt8, {1, 1, 2, 5});
   int8_t input_data0[] = {105, 35, -27, 0, -63, 99, 16, 45, 67, -49};
   int8_t input_data1[] = {126, -38, -115, 106, -98, 119, 103, 81, -114, 68};
@@ -40,15 +40,15 @@ TEST_F(TestDivInt8, DivInt8) {
   in_tensor1.SetData(input_data1);
   out_tensor.SetData(output_data);
 
-  const lite::tensor::QuantArg quant_in0 = {0.00784314f, 0};  // -1.0--1.0 -> 0--255
-  const lite::tensor::QuantArg quant_in1 = {0.00784314f, 0};
-  const lite::tensor::QuantArg quant_out = {0.00784314f, 0};
+  const lite::QuantArg quant_in0 = {0.00784314f, 0};  // -1.0--1.0 -> 0--255
+  const lite::QuantArg quant_in1 = {0.00784314f, 0};
+  const lite::QuantArg quant_out = {0.00784314f, 0};
   in_tensor0.AddQuantParam(quant_in0);
   in_tensor1.AddQuantParam(quant_in1);
   out_tensor.AddQuantParam(quant_out);
 
-  std::vector<lite::tensor::Tensor *> inputs = {&in_tensor0, &in_tensor1};
-  std::vector<lite::tensor::Tensor *> outputs = {&out_tensor};
+  std::vector<lite::Tensor *> inputs = {&in_tensor0, &in_tensor1};
+  std::vector<lite::Tensor *> outputs = {&out_tensor};
   OpParameter parameter = {};
   kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Div};
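Across these test files the change is mechanical: `lite::tensor::QuantArg` and `lite::tensor::Tensor` collapse to `lite::QuantArg` and `lite::Tensor`, and kernels receive flat `std::vector<lite::Tensor *>` lists. A minimal sketch of the post-patch idiom follows; it is illustrative only (not part of the diff) and assumes the relocated `src/tensor.h` header introduced by this patch.

```cpp
// Sketch of the post-patch test setup, under the flattened lite:: namespace.
#include <vector>
#include "src/tensor.h"

void BuildInt8Io(int8_t *in_data, int8_t *out_data) {
  lite::Tensor in(kNumberTypeInt8, {1, 1, 2, 5});
  lite::Tensor out(kNumberTypeInt8, {1, 1, 2, 5});
  in.SetData(in_data);   // tensors borrow caller-owned buffers in these tests
  out.SetData(out_data);

  // QuantArg now lives directly under lite::; scale 0.00784314 (~1/127.5)
  // maps roughly [-1.0, 1.0] onto the int8 range.
  const lite::QuantArg q = {0.00784314f, 0};
  in.AddQuantParam(q);
  out.AddQuantParam(q);

  // Kernel interfaces take plain vectors of lite::Tensor pointers.
  std::vector<lite::Tensor *> inputs = {&in};
  std::vector<lite::Tensor *> outputs = {&out};
  (void)inputs;
  (void)outputs;
}
```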
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/fullconnection_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/fullconnection_int8_tests.cc
index 9584d9fa33..8ca1db2dd3 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/fullconnection_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/fullconnection_int8_tests.cc
@@ -23,7 +23,7 @@
 #include "mindspore/lite/src/lite_kernel.h"
 
 namespace mindspore {
-using lite::tensor::Tensor;
+using lite::Tensor;
 class TestFcInt8 : public mindspore::CommonTest {
  public:
   TestFcInt8() {}
@@ -39,19 +39,18 @@ struct TensorInfo {
 };
 
 extern void QuantProcess(float *input, int len, float min, float max, float *scale, int *zero_point, int8_t *output);
-extern lite::tensor::Tensor *MakeQuantTensor(int8_t *data, int len, std::vector<int> *shape, float scale, int zp);
+extern lite::Tensor *MakeQuantTensor(int8_t *data, int len, std::vector<int> *shape, float scale, int zp);
 
-lite::tensor::Tensor *MakeIntTensor(int *data, int len, std::vector<int> *shape) {
-  auto tensor =
-    new lite::tensor::Tensor(kNumberTypeInt32, *shape, schema::Format_NHWC, static_cast<schema::NodeType>(1));
+lite::Tensor *MakeIntTensor(int *data, int len, std::vector<int> *shape) {
+  auto tensor = new lite::Tensor(kNumberTypeInt32, *shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
   tensor->MallocData();
-  auto tensor_ptr = reinterpret_cast<int *>(tensor->Data());
+  auto tensor_ptr = reinterpret_cast<int *>(tensor->MutableData());
   memcpy(tensor_ptr, data, len * sizeof(int));
   return tensor;
 }
 
-void FcInt8TestInit(std::vector<lite::tensor::Tensor *> *inputs, std::vector<lite::tensor::Tensor *> *outputs,
-                    TensorInfo *in, TensorInfo *weight, TensorInfo *bias, TensorInfo *out) {
+void FcInt8TestInit(std::vector<lite::Tensor *> *inputs, std::vector<lite::Tensor *> *outputs, TensorInfo *in,
+                    TensorInfo *weight, TensorInfo *bias, TensorInfo *out) {
   float in_scale, weight_scale, out_scale;
   int in_zp, weight_zp, out_zp;
   int8_t *in_data = new int8_t[in->len];
@@ -131,8 +130,8 @@ TEST_F(TestFcInt8, fctest1) {
   fc_param->b_transpose_ = true;
   fc_param->has_bias_ = true;
   fc_param->act_type_ = ActType_No;
-  std::vector<lite::tensor::Tensor *> inputs;
-  std::vector<lite::tensor::Tensor *> outputs;
+  std::vector<lite::Tensor *> inputs;
+  std::vector<lite::Tensor *> outputs;
   FcInt8TestInit(&inputs, &outputs, &in_params, &weight_params, &bias_params, &out_params);
   auto ctx = new lite::Context;
   ctx->thread_num_ = 2;
@@ -146,7 +145,7 @@ TEST_F(TestFcInt8, fctest1) {
   int out_zp;
   QuantProcess(correct, out_params.len, out_params.min, out_params.max, &out_scale, &out_zp, nullptr);
   float *out = new float[out_params.len];
-  Dequantize(reinterpret_cast<int8_t *>(outputs[0]->Data()), outputs[0]->ElementsNum(), out_scale, out_zp, out);
+  Dequantize(reinterpret_cast<int8_t *>(outputs[0]->MutableData()), outputs[0]->ElementsNum(), out_scale, out_zp, out);
   CompareOutputData(out, correct, 6, 0.3);
   delete fc;
   for (auto t : inputs) delete t;
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gatherNd_int8_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gatherNd_int8_test.cc
index 53ad94b4ea..265c127f93 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gatherNd_int8_test.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gatherNd_int8_test.cc
@@ -32,8 +32,8 @@ TEST_F(TestGatherNdInt8, GatherNdTest) {
   std::vector<int> in_data1 = {2, 4, 4, 2, 2, 4, 2, 4, 2};
   //  std::vector<int> in_data1 = {2, 2, 2, 4};
 
-  std::vector<lite::tensor::Tensor *> inputs_tensor;
-  std::vector<lite::tensor::Tensor *> outputs_tensor;
+  std::vector<lite::Tensor *> inputs_tensor;
+  std::vector<lite::Tensor *> outputs_tensor;
   GatherNdParameter op_param;
   op_param.op_parameter_.type_ = schema::PrimitiveType_GatherNd;
@@ -41,18 +41,18 @@ TEST_F(TestGatherNdInt8, GatherNdTest) {
   std::vector<int> shape = {1, 2, 2, 5};
   std::vector<int> out_shape = {1, 3, 5};
 
-  lite::tensor::QuantArg input_quant_arg;
+  lite::QuantArg input_quant_arg;
   input_quant_arg.scale = 0.5;
   input_quant_arg.zeroPoint = 1;
-  lite::tensor::QuantArg input_quant_arg_1;
+  lite::QuantArg input_quant_arg_1;
   input_quant_arg_1.scale = 0.5;
   input_quant_arg_1.zeroPoint = 2;
-  lite::tensor::QuantArg output_quant_arg;
+  lite::QuantArg output_quant_arg;
   output_quant_arg.scale = 1;
   output_quant_arg.zeroPoint = 0;
 
-  lite::tensor::Tensor input0_tensor;
-  lite::tensor::Tensor input1_tensor;
+  lite::Tensor input0_tensor;
+  lite::Tensor input1_tensor;
   inputs_tensor.push_back(&input0_tensor);
   inputs_tensor.push_back(&input1_tensor);
@@ -69,7 +69,7 @@ TEST_F(TestGatherNdInt8, GatherNdTest) {
   std::vector<int8_t> output(15);
   //  std::vector<int8_t> corr_out = {1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0};
   std::vector<int8_t> corr_out = {6, 7, 8, 9, 0, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5};
 
-  lite::tensor::Tensor output0_tensor;
+  lite::Tensor output0_tensor;
   outputs_tensor.push_back(&output0_tensor);
   output0_tensor.SetData(output.data());
   output0_tensor.set_shape(out_shape);
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gather_int8_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gather_int8_test.cc
index 27f955153b..1003b93471 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gather_int8_test.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gather_int8_test.cc
@@ -30,8 +30,8 @@ class TestGatherInt8 : public mindspore::CommonTest {
 TEST_F(TestGatherInt8, GatherTest) {
   std::vector<int8_t> in_data = {11, 41, 21, 51, 31, 61, -11, -41, -21, -51, -31, -61};
   std::vector<int8_t> in_data1 = {4, 2};
-  std::vector<lite::tensor::Tensor *> inputs_tensor;
-  std::vector<lite::tensor::Tensor *> outputs_tensor;
+  std::vector<lite::Tensor *> inputs_tensor;
+  std::vector<lite::Tensor *> outputs_tensor;
   GatherParameter op_param;
   op_param.op_parameter_.type_ = schema::PrimitiveType_Gather;
@@ -39,18 +39,18 @@ TEST_F(TestGatherInt8, GatherTest) {
   op_param.batchDims_ = 1;
   std::vector<int> shape = {2, 1, 3, 2};
 
-  lite::tensor::QuantArg input_quant_arg;
+  lite::QuantArg input_quant_arg;
   input_quant_arg.scale = 0.1;
   input_quant_arg.zeroPoint = 1;
-  lite::tensor::QuantArg input_quant_arg_1;
+  lite::QuantArg input_quant_arg_1;
   input_quant_arg_1.scale = 0.5;
   input_quant_arg_1.zeroPoint = 2;
-  lite::tensor::QuantArg output_quant_arg;
+  lite::QuantArg output_quant_arg;
   output_quant_arg.scale = 0.1;
   output_quant_arg.zeroPoint = 1;
 
-  lite::tensor::Tensor input0_tensor;
-  lite::tensor::Tensor input1_tensor;
+  lite::Tensor input0_tensor;
+  lite::Tensor input1_tensor;
   inputs_tensor.push_back(&input0_tensor);
   inputs_tensor.push_back(&input1_tensor);
@@ -67,7 +67,7 @@ TEST_F(TestGatherInt8, GatherTest) {
   std::vector<int8_t> output(12);
   //  std::vector<int8_t> corr_out = {-18, -22, -16, -21, -14, -19, -22, -34, -24, -35, -26, -36 };
   std::vector<int8_t> corr_out = {-11, -41, -21, -51, -31, -61, 11, 41, 21, 51, 31, 61};
 
-  lite::tensor::Tensor output0_tensor;
+  lite::Tensor output0_tensor;
   outputs_tensor.push_back(&output0_tensor);
   output0_tensor.SetData(output.data());
   output0_tensor.set_shape(shape);
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/hswish_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/hswish_int8_tests.cc
index b3df1eab89..46a74a1f2d 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/hswish_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/hswish_int8_tests.cc
@@ -31,21 +31,21 @@ class TestHSwishInt8 : public mindspore::CommonTest {
 };
 
 TEST_F(TestHSwishInt8, HSwish) {
-  lite::tensor::Tensor in_tensor(kNumberTypeInt8, {4, 4});
-  lite::tensor::Tensor out_tensor(kNumberTypeInt8, {4, 4});
+  lite::Tensor in_tensor(kNumberTypeInt8, {4, 4});
+  lite::Tensor out_tensor(kNumberTypeInt8, {4, 4});
   int8_t input_data[] = {-116, -105, -93, -35, 23, 35, 46, 104};  // -3.5f, -3.0f, -2.5f, 0.f, 2.5f, 3.0f, 3.5f, 6.0f
   int8_t output_data[8] = {0};
   in_tensor.SetData(input_data);
   out_tensor.SetData(output_data);
 
-  const lite::tensor::QuantArg quant_in = {0.0431373f, -35};   // -4.0 -- 7.0
-  const lite::tensor::QuantArg quant_out = {0.0392157f, -52};  // -3.0 -- 7.0
+  const lite::QuantArg quant_in = {0.0431373f, -35};   // -4.0 -- 7.0
+  const lite::QuantArg quant_out = {0.0392157f, -52};  // -3.0 -- 7.0
   in_tensor.AddQuantParam(quant_in);
   out_tensor.AddQuantParam(quant_out);
 
-  std::vector<lite::tensor::Tensor *> inputs = {&in_tensor};
-  std::vector<lite::tensor::Tensor *> outputs = {&out_tensor};
+  std::vector<lite::Tensor *> inputs = {&in_tensor};
+  std::vector<lite::Tensor *> outputs = {&out_tensor};
   ActivationParameter parameter = {0};
   parameter.op_parameter_.type_ = schema::PrimitiveType_Activation;
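The int8 expectations in all of these tests come from the asymmetric-quantization relation noted in the deconv file: vq = (vi - zp) * s, i.e. q = round(v / scale) + zeroPoint. `QuantProcess` itself is declared `extern` and its body is not shown in this patch; the sketch below is one common formulation of such a helper, written to match how the tests call it (passing `nullptr` when only scale and zero point are wanted).

```cpp
// One plausible QuantProcess-style helper (sketch, not the patch's source):
// derive scale/zero_point from a float range, then quantize to int8.
#include <algorithm>
#include <cmath>
#include <cstdint>

void QuantProcessSketch(const float *input, int len, float min, float max,
                        float *scale, int *zero_point, int8_t *output) {
  *scale = (max - min) / 255.0f;  // int8 spans 256 quantization steps
  *zero_point = static_cast<int>(std::round(-128.0f - min / *scale));
  if (output == nullptr) return;  // callers may only need scale/zero_point
  for (int i = 0; i < len; ++i) {
    int q = static_cast<int>(std::round(input[i] / *scale)) + *zero_point;
    output[i] = static_cast<int8_t>(std::min(127, std::max(-128, q)));
  }
}
```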
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc
index 49a93eafb0..9c53eaaea5 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc
@@ -46,23 +46,22 @@ void QuantProcess(float *input, int len, float min, float max, float *scale, int
   }
 }
 
-lite::tensor::Tensor *MakeQuantTensor(int8_t *data, int len, std::vector<int> *shape, float scale, int zp) {
-  auto tensor =
-    new lite::tensor::Tensor(kNumberTypeInt8, *shape, schema::Format_NHWC, static_cast<schema::NodeType>(1));
+lite::Tensor *MakeQuantTensor(int8_t *data, int len, std::vector<int> *shape, float scale, int zp) {
+  auto tensor = new lite::Tensor(kNumberTypeInt8, *shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
   tensor->MallocData();
   if (data) {
-    auto tensor_ptr = reinterpret_cast<int8_t *>(tensor->Data());
+    auto tensor_ptr = reinterpret_cast<int8_t *>(tensor->MutableData());
     memcpy(tensor_ptr, data, len * sizeof(int8_t));
   }
-  auto quant_arg = new mindspore::lite::tensor::QuantArg();
+  auto quant_arg = new mindspore::lite::QuantArg();
   quant_arg->zeroPoint = zp;
   quant_arg->scale = scale;
   tensor->AddQuantParam(*quant_arg);
   return tensor;
 }
 
-void MMInt8TestInit(std::vector<lite::tensor::Tensor *> *inputs, std::vector<lite::tensor::Tensor *> *outputs,
-                    TensorInfo *in, TensorInfo *weight, TensorInfo *out) {
+void MMInt8TestInit(std::vector<lite::Tensor *> *inputs, std::vector<lite::Tensor *> *outputs, TensorInfo *in,
+                    TensorInfo *weight, TensorInfo *out) {
   float in_scale, weight_scale, out_scale;
   int in_zp, weight_zp, out_zp;
   int8_t *in_data = new int8_t[in->len];
@@ -171,8 +170,8 @@ TEST_F(TestMatmulInt8, mmtest1) {
   matmul_param->a_transpose_ = false;
   matmul_param->b_transpose_ = true;
   matmul_param->has_bias_ = false;
-  std::vector<lite::tensor::Tensor *> inputs;
-  std::vector<lite::tensor::Tensor *> outputs;
+  std::vector<lite::Tensor *> inputs;
+  std::vector<lite::Tensor *> outputs;
   MMInt8TestInit(&inputs, &outputs, &in_params, &weight_params, &out_params);
   auto ctx = new lite::Context;
   ctx->thread_num_ = 1;
@@ -185,7 +184,7 @@ TEST_F(TestMatmulInt8, mmtest1) {
   int out_zp;
   QuantProcess(correct, out_params.len, out_params.min, out_params.max, &out_scale, &out_zp, nullptr);
   float *out = new float[out_params.len];
-  Dequantize(reinterpret_cast<int8_t *>(outputs[0]->Data()), outputs[0]->ElementsNum(), out_scale, out_zp, out);
+  Dequantize(reinterpret_cast<int8_t *>(outputs[0]->MutableData()), outputs[0]->ElementsNum(), out_scale, out_zp, out);
   CompareOutputData(out, correct, 6, 0.3);
   delete mm;
   for (auto t : inputs) delete t;
@@ -287,8 +286,8 @@ TEST_F(TestMatmulInt8, mmtest2) {
   matmul_param->a_transpose_ = false;
   matmul_param->b_transpose_ = false;
   matmul_param->has_bias_ = false;
-  std::vector<lite::tensor::Tensor *> inputs;
-  std::vector<lite::tensor::Tensor *> outputs;
+  std::vector<lite::Tensor *> inputs;
+  std::vector<lite::Tensor *> outputs;
   MMInt8TestInit(&inputs, &outputs, &in_params, &weight_params, &out_params);
   auto ctx = new lite::Context;
   ctx->thread_num_ = 2;
@@ -301,7 +300,7 @@ TEST_F(TestMatmulInt8, mmtest2) {
   int out_zp;
   QuantProcess(correct, out_params.len, out_params.min, out_params.max, &out_scale, &out_zp, nullptr);
   float *out = new float[out_params.len];
-  Dequantize(reinterpret_cast<int8_t *>(outputs[0]->Data()), outputs[0]->ElementsNum(), out_scale, out_zp, out);
+  Dequantize(reinterpret_cast<int8_t *>(outputs[0]->MutableData()), outputs[0]->ElementsNum(), out_scale, out_zp, out);
   CompareOutputData(out, correct, 6, 0.6);
   delete mm;
   for (auto t : inputs) delete t;
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/mul_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/mul_int8_tests.cc
index a818c536ba..161aab6cf1 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/mul_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/mul_int8_tests.cc
@@ -21,7 +21,7 @@
 #include "mindspore/lite/nnacl/mul_parameter.h"
 #include "mindspore/lite/src/kernel_registry.h"
 #include "mindspore/lite/src/lite_kernel.h"
-#include "mindspore/lite/src/ir/tensor.h"
+#include "mindspore/lite/src/tensor.h"
 
 namespace mindspore {
@@ -42,32 +42,32 @@ TEST_F(TestMulInt8, Mul_quant0) {
   int8_t output[12];
   std::vector<int> output_shape = {2, 3, 2};
-  lite::tensor::QuantArg input_quant_arg;
+  lite::QuantArg input_quant_arg;
   input_quant_arg.scale = 1.0;
   input_quant_arg.zeroPoint = 0;
-  lite::tensor::QuantArg output_quant_arg;
+  lite::QuantArg output_quant_arg;
   output_quant_arg.scale = 1.0;
   output_quant_arg.zeroPoint = 0;
 
-  lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor;
+  lite::Tensor *input_tensor1 = new lite::Tensor;
   TypeId tid_int8 = kNumberTypeInt8;
   input_tensor1->SetData(input1.data());
   input_tensor1->set_shape(shape1);
   input_tensor1->AddQuantParam(input_quant_arg);
   input_tensor1->set_data_type(tid_int8);
 
-  lite::tensor::Tensor *input_tensor2 = new lite::tensor::Tensor;
+  lite::Tensor *input_tensor2 = new lite::Tensor;
   input_tensor2->SetData(input2.data());
   input_tensor2->set_shape(shape2);
   input_tensor2->AddQuantParam(input_quant_arg);
   input_tensor2->set_data_type(tid_int8);
 
-  std::vector<lite::tensor::Tensor *> inputs_tensor(2);
+  std::vector<lite::Tensor *> inputs_tensor(2);
   inputs_tensor[0] = input_tensor1;
   inputs_tensor[1] = input_tensor2;
-  std::vector<lite::tensor::Tensor *> outputs_tensor(1);
-  lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor;
+  std::vector<lite::Tensor *> outputs_tensor(1);
+  lite::Tensor *output0_tensor = new lite::Tensor;
   output0_tensor->SetData(output);
   output0_tensor->set_shape(output_shape);
   output0_tensor->AddQuantParam(output_quant_arg);
@@ -112,32 +112,32 @@ TEST_F(TestMulInt8, Mul_quant0_thread0) {
   int8_t output[18];
   std::vector<int> output_shape = {2, 3, 3};
-  lite::tensor::QuantArg input_quant_arg;
+  lite::QuantArg input_quant_arg;
   input_quant_arg.scale = 1.0;
   input_quant_arg.zeroPoint = 0;
-  lite::tensor::QuantArg output_quant_arg;
+  lite::QuantArg output_quant_arg;
   output_quant_arg.scale = 1.0;
   output_quant_arg.zeroPoint = 0;
 
-  lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor;
+  lite::Tensor *input_tensor1 = new lite::Tensor;
   TypeId tid_int8 = kNumberTypeInt8;
   input_tensor1->SetData(input1.data());
   input_tensor1->set_shape(shape1);
   input_tensor1->AddQuantParam(input_quant_arg);
   input_tensor1->set_data_type(tid_int8);
 
-  lite::tensor::Tensor *input_tensor2 = new lite::tensor::Tensor;
+  lite::Tensor *input_tensor2 = new lite::Tensor;
   input_tensor2->SetData(input2.data());
   input_tensor2->set_shape(shape2);
   input_tensor2->AddQuantParam(input_quant_arg);
   input_tensor2->set_data_type(tid_int8);
 
-  std::vector<lite::tensor::Tensor *> inputs_tensor(2);
+  std::vector<lite::Tensor *> inputs_tensor(2);
   inputs_tensor[0] = input_tensor1;
   inputs_tensor[1] = input_tensor2;
-  std::vector<lite::tensor::Tensor *> outputs_tensor(1);
-  lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor;
+  std::vector<lite::Tensor *> outputs_tensor(1);
+  lite::Tensor *output0_tensor = new lite::Tensor;
   output0_tensor->SetData(output);
   output0_tensor->set_shape(output_shape);
   output0_tensor->AddQuantParam(output_quant_arg);
@@ -182,32 +182,32 @@ TEST_F(TestMulInt8, Mul_quant1) {
   int8_t output[12];
   std::vector<int> output_shape = {2, 3, 2};
-  lite::tensor::QuantArg input_quant_arg;
+  lite::QuantArg input_quant_arg;
   input_quant_arg.scale = 1.0;
   input_quant_arg.zeroPoint = 0;
-  lite::tensor::QuantArg output_quant_arg;
+  lite::QuantArg output_quant_arg;
   output_quant_arg.scale = 2.0;
   output_quant_arg.zeroPoint = 0;
 
-  lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor;
+  lite::Tensor *input_tensor1 = new lite::Tensor;
   TypeId tid_int8 = kNumberTypeInt8;
   input_tensor1->SetData(input1.data());
   input_tensor1->set_shape(shape1);
   input_tensor1->AddQuantParam(input_quant_arg);
   input_tensor1->set_data_type(tid_int8);
 
-  lite::tensor::Tensor *input_tensor2 = new lite::tensor::Tensor;
+  lite::Tensor *input_tensor2 = new lite::Tensor;
   input_tensor2->SetData(input2.data());
   input_tensor2->set_shape(shape2);
   input_tensor2->AddQuantParam(input_quant_arg);
   input_tensor2->set_data_type(tid_int8);
 
-  std::vector<lite::tensor::Tensor *> inputs_tensor(2);
+  std::vector<lite::Tensor *> inputs_tensor(2);
   inputs_tensor[0] = input_tensor1;
   inputs_tensor[1] = input_tensor2;
-  std::vector<lite::tensor::Tensor *> outputs_tensor(1);
-  lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor;
+  std::vector<lite::Tensor *> outputs_tensor(1);
+  lite::Tensor *output0_tensor = new lite::Tensor;
   output0_tensor->SetData(output);
   output0_tensor->set_shape(output_shape);
   output0_tensor->AddQuantParam(output_quant_arg);
@@ -252,32 +252,32 @@ TEST_F(TestMulInt8, Mul_quant1_thread1) {
   int8_t output[12];
   std::vector<int> output_shape = {2, 3, 2};
-  lite::tensor::QuantArg input_quant_arg;
+  lite::QuantArg input_quant_arg;
   input_quant_arg.scale = 1.0;
   input_quant_arg.zeroPoint = 0;
-  lite::tensor::QuantArg output_quant_arg;
+  lite::QuantArg output_quant_arg;
   output_quant_arg.scale = 2.0;
   output_quant_arg.zeroPoint = 0;
 
-  lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor;
+  lite::Tensor *input_tensor1 = new lite::Tensor;
   TypeId tid_int8 = kNumberTypeInt8;
   input_tensor1->SetData(input1.data());
   input_tensor1->set_shape(shape1);
   input_tensor1->AddQuantParam(input_quant_arg);
   input_tensor1->set_data_type(tid_int8);
 
-  lite::tensor::Tensor *input_tensor2 = new lite::tensor::Tensor;
+  lite::Tensor *input_tensor2 = new lite::Tensor;
   input_tensor2->SetData(input2.data());
   input_tensor2->set_shape(shape2);
   input_tensor2->AddQuantParam(input_quant_arg);
   input_tensor2->set_data_type(tid_int8);
 
-  std::vector<lite::tensor::Tensor *> inputs_tensor(2);
+  std::vector<lite::Tensor *> inputs_tensor(2);
   inputs_tensor[0] = input_tensor1;
   inputs_tensor[1] = input_tensor2;
-  std::vector<lite::tensor::Tensor *> outputs_tensor(1);
-  lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor;
+  std::vector<lite::Tensor *> outputs_tensor(1);
+  lite::Tensor *output0_tensor = new lite::Tensor;
   output0_tensor->SetData(output);
   output0_tensor->set_shape(output_shape);
   output0_tensor->AddQuantParam(output_quant_arg);
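The accessor rename from `Data()` to `MutableData()` recurs in every file; the pattern for filling a freshly allocated tensor is always the same. A self-contained sketch of that pattern under the new interface (illustrative; it only uses constructors and methods that appear in this patch):

```cpp
// Post-patch fill pattern: MallocData() allocates the backing buffer,
// MutableData() returns a writable pointer (Data() was the old accessor).
#include <cstring>
#include <vector>
#include "src/tensor.h"

lite::Tensor *MakeFilledInt8(const int8_t *src, const std::vector<int> &shape) {
  auto *t = new lite::Tensor(kNumberTypeInt8, shape);
  t->MallocData();
  memcpy(t->MutableData(), src, sizeof(int8_t) * t->ElementsNum());
  return t;  // caller owns the tensor and deletes it, as the tests do
}
```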
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/pad_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/pad_int8_tests.cc
index 373cafa471..ad4bc33b22 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/pad_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/pad_int8_tests.cc
@@ -17,16 +17,16 @@
 #include
 #include "schema/inner/model_generated.h"
 #include "include/context.h"
-#include "src/ir/tensor.h"
+#include "src/tensor.h"
 #include "common/common_test.h"
 #include "src/common/file_utils.h"
 #include "nnacl/pad_parameter.h"
 #include "src/runtime/kernel/arm/int8/pad_int8.h"
 
 namespace mindspore {
-using mindspore::lite::tensor::QuantArg;
-using mindspore::lite::tensor::Tensor;
-
+using mindspore::lite::QuantArg;
+using mindspore::lite::Tensor;
+using mindspore::schema::NodeType_Parameter;
 class TestPadInt8 : public mindspore::CommonTest {
  public:
   TestPadInt8() {}
@@ -34,16 +34,16 @@ class TestPadInt8 : public mindspore::CommonTest {
 
 int PadInt8TestInit1(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outputs_, PadParameter *pad_param,
                      int8_t **correct) {
-  Tensor *in_t = new Tensor(kNumberTypeInt8, {3}, schema::Format_NHWC, schema::NodeType_Parameter);
+  Tensor *in_t = new Tensor(kNumberTypeInt8, {3}, schema::Format_NHWC, lite::TensorCategory(NodeType_Parameter));
   in_t->MallocData();
   int8_t in[] = {1, 1, 1};
-  memcpy(in_t->Data(), in, sizeof(int8_t) * in_t->ElementsNum());
+  memcpy(in_t->MutableData(), in, sizeof(int8_t) * in_t->ElementsNum());
   QuantArg *in_quant_arg = new QuantArg();
   in_quant_arg->zeroPoint = 10, in_quant_arg->scale = 0.31228156;
   in_t->AddQuantParam(*in_quant_arg);
   inputs_->push_back(in_t);
 
-  Tensor *out_t = new Tensor(kNumberTypeInt8, {7}, schema::Format_NHWC, schema::NodeType_Parameter);
+  Tensor *out_t = new Tensor(kNumberTypeInt8, {7}, schema::Format_NHWC, lite::TensorCategory(NodeType_Parameter));
   out_t->MallocData();
   QuantArg *out_quant_arg = new QuantArg();
   out_quant_arg->zeroPoint = 10, out_quant_arg->scale = 0.31228156;
@@ -62,8 +62,8 @@ int PadInt8TestInit1(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outp
 }
 
 TEST_F(TestPadInt8, PadInt8Test1) {
-  std::vector<lite::tensor::Tensor *> inputs_;
-  std::vector<lite::tensor::Tensor *> outputs_;
+  std::vector<lite::Tensor *> inputs_;
+  std::vector<lite::Tensor *> outputs_;
   auto pad_param = new PadParameter();
   lite::Context *ctx = new lite::Context;
   int8_t *correct;
@@ -73,7 +73,7 @@ TEST_F(TestPadInt8, PadInt8Test1) {
   pad->Init();
   pad->Run();
 
-  CompareOutputData(reinterpret_cast<int8_t *>(outputs_[0]->Data()), correct, total_size, 0);
+  CompareOutputData(reinterpret_cast<int8_t *>(outputs_[0]->MutableData()), correct, total_size, 0);
 
   delete pad_param;
   delete pad;
@@ -84,16 +84,16 @@ TEST_F(TestPadInt8, PadInt8Test1) {
 
 int PadInt8TestInit2(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outputs_, PadParameter *pad_param,
                      int8_t **correct) {
-  Tensor *in_t = new Tensor(kNumberTypeInt8, {6, 2}, schema::Format_NHWC, schema::NodeType_Parameter);
+  Tensor *in_t = new Tensor(kNumberTypeInt8, {6, 2}, schema::Format_NHWC, lite::TensorCategory(NodeType_Parameter));
   in_t->MallocData();
   int8_t in[] = {18, 71, 99, -6, 5, -119, 86, 13, 15, -85, -41, -77};
-  memcpy(in_t->Data(), in, sizeof(int8_t) * in_t->ElementsNum());
+  memcpy(in_t->MutableData(), in, sizeof(int8_t) * in_t->ElementsNum());
   QuantArg *in_quant_arg = new QuantArg();
   in_quant_arg->zeroPoint = 10, in_quant_arg->scale = 0.31228156;
   in_t->AddQuantParam(*in_quant_arg);
   inputs_->push_back(in_t);
 
-  Tensor *out_t = new Tensor(kNumberTypeInt8, {10, 5}, schema::Format_NHWC, schema::NodeType_Parameter);
+  Tensor *out_t = new Tensor(kNumberTypeInt8, {10, 5}, schema::Format_NHWC, lite::TensorCategory(NodeType_Parameter));
   out_t->MallocData();
   QuantArg *out_quant_arg = new QuantArg();
   out_quant_arg->zeroPoint = 10, out_quant_arg->scale = 0.31228156;
@@ -114,8 +114,8 @@ int PadInt8TestInit2(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outp
 }
 
 TEST_F(TestPadInt8, PadInt8Test2) {
-  std::vector<lite::tensor::Tensor *> inputs_;
-  std::vector<lite::tensor::Tensor *> outputs_;
+  std::vector<lite::Tensor *> inputs_;
+  std::vector<lite::Tensor *> outputs_;
   auto pad_param = new PadParameter();
   lite::Context *ctx = new lite::Context;
   int8_t *correct;
@@ -125,7 +125,7 @@ TEST_F(TestPadInt8, PadInt8Test2) {
   pad->Init();
   pad->Run();
 
-  CompareOutputData(reinterpret_cast<int8_t *>(outputs_[0]->Data()), correct, total_size, 0);
+  CompareOutputData(reinterpret_cast<int8_t *>(outputs_[0]->MutableData()), correct, total_size, 0);
 
   delete pad_param;
   delete pad;
@@ -136,16 +136,18 @@ TEST_F(TestPadInt8, PadInt8Test2) {
 
 int PadInt8TestInit4(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outputs_, PadParameter *pad_param,
                      int8_t **correct) {
-  Tensor *in_t = new Tensor(kNumberTypeInt8, {2, 3, 2, 1}, schema::Format_NHWC, schema::NodeType_Parameter);
+  Tensor *in_t =
+    new Tensor(kNumberTypeInt8, {2, 3, 2, 1}, schema::Format_NHWC, lite::TensorCategory(NodeType_Parameter));
   in_t->MallocData();
   int8_t in[] = {73, 24, 7, -31, -109, -2, 69, -64, 51, -45, 38, 53};
-  memcpy(in_t->Data(), in, sizeof(int8_t) * in_t->ElementsNum());
+  memcpy(in_t->MutableData(), in, sizeof(int8_t) * in_t->ElementsNum());
   QuantArg *in_quant_arg = new QuantArg();
   in_quant_arg->zeroPoint = 10, in_quant_arg->scale = 0.31228156;
   in_t->AddQuantParam(*in_quant_arg);
   inputs_->push_back(in_t);
 
-  Tensor *out_t = new Tensor(kNumberTypeInt8, {6, 6, 4, 3}, schema::Format_NHWC, schema::NodeType_Parameter);
+  Tensor *out_t =
+    new Tensor(kNumberTypeInt8, {6, 6, 4, 3}, schema::Format_NHWC, lite::TensorCategory(NodeType_Parameter));
   out_t->MallocData();
   QuantArg *out_quant_arg = new QuantArg();
   out_quant_arg->zeroPoint = 10, out_quant_arg->scale = 0.31228156;
@@ -180,8 +182,8 @@ int PadInt8TestInit4(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outp
 }
 
 TEST_F(TestPadInt8, PadInt8TestInit4) {
-  std::vector<lite::tensor::Tensor *> inputs_;
-  std::vector<lite::tensor::Tensor *> outputs_;
+  std::vector<lite::Tensor *> inputs_;
+  std::vector<lite::Tensor *> outputs_;
   auto pad_param = new PadParameter();
   lite::Context *ctx = new lite::Context;
   ctx->thread_num_ = 2;
@@ -192,7 +194,7 @@ TEST_F(TestPadInt8, PadInt8TestInit4) {
   pad->Init();
   pad->Run();
 
-  CompareOutputData(reinterpret_cast<int8_t *>(outputs_[0]->Data()), correct, total_size, 0);
+  CompareOutputData(reinterpret_cast<int8_t *>(outputs_[0]->MutableData()), correct, total_size, 0);
 
   delete pad_param;
   delete pad;
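Tensor constructors no longer take a `schema::NodeType` directly: call sites either wrap the schema value with `lite::TensorCategory(...)` or pass a `lite::Tensor::Category` such as `CONST` explicitly. The patch shows only the call sites, not the helper's body (it lives in the new `src/tensor.h`); a hypothetical sketch of the mapping it presumably performs:

```cpp
// Hypothetical sketch of the TensorCategory adapter; the real implementation
// is in src/tensor.h, which is not part of the hunks shown here.
enum class Category { CONST, VAR };  // stand-in for lite::Tensor::Category

Category TensorCategorySketch(int node_type) {
  // The old code passed static_cast<schema::NodeType>(1) where the new code
  // passes CONST, suggesting NodeType_Parameter (value 1) maps to CONST and
  // other node types map to VAR.
  return node_type == 1 ? Category::CONST : Category::VAR;
}
```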
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/power_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/power_int8_tests.cc
index 91eaac5a40..895963e64e 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/power_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/power_int8_tests.cc
@@ -30,8 +30,8 @@ class TestPowerInt8 : public mindspore::CommonTest {
 };
 
 TEST_F(TestPowerInt8, PowerInt8) {
-  std::vector<lite::tensor::Tensor *> inputs_tensor;
-  std::vector<lite::tensor::Tensor *> outputs_tensor;
+  std::vector<lite::Tensor *> inputs_tensor;
+  std::vector<lite::Tensor *> outputs_tensor;
   PowerParameter op_param;
   op_param.op_parameter_.type_ = schema::PrimitiveType_Power;
@@ -39,17 +39,17 @@ TEST_F(TestPowerInt8, PowerInt8) {
   op_param.scale_ = 1;
   op_param.shift_ = 0;
 
-  lite::tensor::QuantArg input_quant_arg;
+  lite::QuantArg input_quant_arg;
   input_quant_arg.scale = 0.0156863;
   input_quant_arg.zeroPoint = -128;
 
-  lite::tensor::QuantArg output_quant_arg;
+  lite::QuantArg output_quant_arg;
   output_quant_arg.scale = 0.0627451;
   output_quant_arg.zeroPoint = -128;
 
   std::vector<int8_t> input = {-64, -1, 63, 127};
   std::vector<int> in_shape = {1, 1, 1, 4};
 
-  lite::tensor::Tensor input0_tensor;
+  lite::Tensor input0_tensor;
   TypeId tid_int8 = kNumberTypeInt8;
   inputs_tensor.push_back(&input0_tensor);
   input0_tensor.SetData(input.data());
@@ -60,7 +60,7 @@ TEST_F(TestPowerInt8, PowerInt8) {
   std::vector<int8_t> output(4);
   std::vector<int> output_shape = {1, 1, 1, 4};
 
-  lite::tensor::Tensor output0_tensor;
+  lite::Tensor output0_tensor;
   outputs_tensor.push_back(&output0_tensor);
   output0_tensor.SetData(output.data());
   output0_tensor.AddQuantParam(output_quant_arg);
@@ -85,23 +85,23 @@ TEST_F(TestPowerInt8, PowerInt8) {
 }
 
 TEST_F(TestPowerInt8, normal) {
-  std::vector<lite::tensor::Tensor *> inputs_tensor;
-  std::vector<lite::tensor::Tensor *> outputs_tensor;
+  std::vector<lite::Tensor *> inputs_tensor;
+  std::vector<lite::Tensor *> outputs_tensor;
   PowerParameter op_param;
   op_param.op_parameter_.type_ = schema::PrimitiveType_Power;
   op_param.scale_ = 1;
   op_param.shift_ = 0;
 
-  lite::tensor::QuantArg input_quant_arg;
+  lite::QuantArg input_quant_arg;
   input_quant_arg.scale = 0.0156863;
   input_quant_arg.zeroPoint = -128;
 
-  lite::tensor::QuantArg exp_quant_arg;
+  lite::QuantArg exp_quant_arg;
   exp_quant_arg.scale = 0.0156863;
   exp_quant_arg.zeroPoint = -128;
 
-  lite::tensor::QuantArg output_quant_arg;
+  lite::QuantArg output_quant_arg;
   output_quant_arg.scale = 0.0352941;
   output_quant_arg.zeroPoint = -128;
 
@@ -111,7 +111,7 @@ TEST_F(TestPowerInt8, normal) {
   std::vector<int8_t> input1 = {127, 63, -1, -64};
   std::vector<int> in_shape1 = {1, 1, 1, 4};
 
-  lite::tensor::Tensor input0_tensor, input1_tensor;
+  lite::Tensor input0_tensor, input1_tensor;
   TypeId tid_int8 = kNumberTypeInt8;
   inputs_tensor.push_back(&input0_tensor);
   inputs_tensor.push_back(&input1_tensor);
@@ -128,7 +128,7 @@ TEST_F(TestPowerInt8, normal) {
   std::vector<int8_t> output(4);
   std::vector<int> output_shape = {1, 1, 1, 4};
 
-  lite::tensor::Tensor output0_tensor;
+  lite::Tensor output0_tensor;
   outputs_tensor.push_back(&output0_tensor);
   output0_tensor.SetData(output.data());
   output0_tensor.AddQuantParam(output_quant_arg);
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/prelu_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/prelu_int8_tests.cc
index adbf65265d..7e6ea228d2 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/prelu_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/prelu_int8_tests.cc
@@ -21,7 +21,7 @@
 #include "mindspore/lite/nnacl/quantization/quantize.h"
 #include "mindspore/lite/src/kernel_registry.h"
 #include "mindspore/lite/src/lite_kernel.h"
-#include "mindspore/lite/src/ir/tensor.h"
+#include "mindspore/lite/src/tensor.h"
 
 namespace mindspore {
@@ -39,25 +39,25 @@ TEST_F(TestPreluInt8, prelu_1) {
   const int output_size = 8;
   int8_t output[8];
   std::vector<int> output_shape = {8};
-  lite::tensor::QuantArg input_quant_arg;
+  lite::QuantArg input_quant_arg;
   input_quant_arg.scale = 1.0;
   input_quant_arg.zeroPoint = 0;
-  lite::tensor::QuantArg output_quant_arg;
+  lite::QuantArg output_quant_arg;
   output_quant_arg.scale = 1.0;
   output_quant_arg.zeroPoint = 0;
 
-  lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor;
+  lite::Tensor *input_tensor1 = new lite::Tensor;
   TypeId tid_int8 = kNumberTypeInt8;
   input_tensor1->SetData(input1.data());
   input_tensor1->set_shape(shape1);
   input_tensor1->AddQuantParam(input_quant_arg);
   input_tensor1->set_data_type(tid_int8);
 
-  std::vector<lite::tensor::Tensor *> inputs_tensor(1);
+  std::vector<lite::Tensor *> inputs_tensor(1);
   inputs_tensor[0] = input_tensor1;
 
-  std::vector<lite::tensor::Tensor *> outputs_tensor(1);
-  lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor;
+  std::vector<lite::Tensor *> outputs_tensor(1);
+  lite::Tensor *output0_tensor = new lite::Tensor;
   output0_tensor->SetData(output);
   output0_tensor->set_shape(output_shape);
   output0_tensor->AddQuantParam(output_quant_arg);
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc
index 2fa92efaa6..4d80fb5ab1 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc
@@ -31,7 +31,7 @@ class QuantDTypeCastTestFp32 : public mindspore::CommonTest {
 };
 
 TEST_F(QuantDTypeCastTestFp32, QuantDTypeCastTest1) {
-  const lite::tensor::QuantArg quant_arg{0.21176, 5};
+  const lite::QuantArg quant_arg{0.21176, 5};
   QuantDTypeCastParameter param;
   param.srcT = kNumberTypeInt8;
   param.dstT = kNumberTypeFloat32;
@@ -39,14 +39,14 @@ TEST_F(QuantDTypeCastTestFp32, QuantDTypeCastTest1) {
   std::vector<int8_t> input = {10, 14, 29, 33, 52, 99, 19, 43, 90, 52, 19, 24, 57, 127, 76, 123};
   std::vector<int> in_shape = {1, 4, 4, 1};
 
-  lite::tensor::Tensor input_tensor;
+  lite::Tensor input_tensor;
   input_tensor.SetData(input.data());
   input_tensor.set_shape(in_shape);
   input_tensor.set_data_type(kNumberTypeInt8);
   input_tensor.SetFormat(schema::Format_NHWC);
   input_tensor.AddQuantParam(quant_arg);
 
-  std::vector<lite::tensor::Tensor *> inputs_tensor;
+  std::vector<lite::Tensor *> inputs_tensor;
   inputs_tensor.emplace_back(&input_tensor);
 
   const int out_size = 16;
@@ -54,12 +54,12 @@ TEST_F(QuantDTypeCastTestFp32, QuantDTypeCastTest1) {
                            20.1172, 12.07032, 5.082240, 6.14104, 13.12912, 27.95232, 17.15256, 27.10528};
   std::vector<float> output(16);
   std::vector<int> out_shape = {1, 4, 4, 1};
-  lite::tensor::Tensor output_tensor;
+  lite::Tensor output_tensor;
   output_tensor.SetData(output.data());
   output_tensor.set_shape(out_shape);
   output_tensor.set_data_type(kNumberTypeFloat32);
   //  output_tensor.SetFormat(schema::Format_NHWC);
-  std::vector<lite::tensor::Tensor *> outputs_tensor;
+  std::vector<lite::Tensor *> outputs_tensor;
   outputs_tensor.emplace_back(&output_tensor);
 
   lite::Context ctx;
@@ -80,32 +80,32 @@ TEST_F(QuantDTypeCastTestFp32, QuantDTypeCastTest1) {
 }
 
 TEST_F(QuantDTypeCastTestFp32, QuantDTypeCastTest2) {
-  const lite::tensor::QuantArg quant_arg = {0.3515625, -57};
+  const lite::QuantArg quant_arg = {0.3515625, -57};
   QuantDTypeCastParameter param;
   param.op_parameter_.type_ = schema::PrimitiveType_QuantDTypeCast;
   param.dstT = kNumberTypeInt8;
   param.srcT = kNumberTypeFloat32;
   std::vector<float> input = {1, 2, 5, 6, 10, -20, 3, 8, 18, 10, 3, 4, 11, 16, 15, 25};
   std::vector<int> in_shape = {1, 4, 4, 1};
-  lite::tensor::Tensor input_tensor;
+  lite::Tensor input_tensor;
   input_tensor.SetData(input.data());
   input_tensor.set_shape(in_shape);
   //  input_tensor.SetFormat(schema::Format_NHWC);
   input_tensor.set_data_type(kNumberTypeFloat32);
   input_tensor.AddQuantParam(quant_arg);
 
-  std::vector<lite::tensor::Tensor *> inputs_tensor;
+  std::vector<lite::Tensor *> inputs_tensor;
   inputs_tensor.emplace_back(&input_tensor);
 
   const int out_size = 16;
   int8_t expect_out[16] = {-54, -51, -43, -40, -29, -114, -48, -34, -6, -29, -48, -46, -26, -11, -14, 14};
   std::vector<int8_t> output(16);
   std::vector<int> out_shape = {1, 4, 4, 1};
-  lite::tensor::Tensor output_tensor;
+  lite::Tensor output_tensor;
   output_tensor.SetData(output.data());
   output_tensor.set_shape(out_shape);
   output_tensor.SetFormat(schema::Format_NHWC);
   output_tensor.set_data_type(kNumberTypeInt8);
-  std::vector<lite::tensor::Tensor *> outputs_tensor;
+  std::vector<lite::Tensor *> outputs_tensor;
   outputs_tensor.emplace_back(&output_tensor);
 
   lite::Context ctx;
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reduce_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reduce_int8_tests.cc
index 59e0f08032..4f4a189cdf 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reduce_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reduce_int8_tests.cc
@@ -17,13 +17,13 @@
 #include "schema/inner/model_generated.h"
 #include "utils/log_adapter.h"
 #include "common/common_test.h"
-#include "src/ir/tensor.h"
+#include "src/tensor.h"
 #include "mindspore/lite/src/kernel_registry.h"
 #include "nnacl/fp32/reduce.h"
 
 namespace mindspore {
-using mindspore::lite::tensor::QuantArg;
-using mindspore::lite::tensor::Tensor;
+using mindspore::lite::QuantArg;
+using mindspore::lite::Tensor;
 using mindspore::schema::ReduceMode;
 using mindspore::schema::ReduceMode_ReduceMax;
 using mindspore::schema::ReduceMode_ReduceMean;
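The QuantDTypeCast expectations follow directly from the relation noted earlier, v = scale * (q - zeroPoint): with scale 0.21176 and zeroPoint 5, input byte 10 dequantizes to 0.21176 * (10 - 5) = 1.0588, and in the reverse test q = round(v / 0.3515625) + (-57) turns 1.0f into -54, the first element of expect_out. A spot-check of that arithmetic (a standalone sketch, not test code):

```cpp
#include <cassert>
#include <cmath>
#include <cstdint>

int main() {
  // int8 -> float32: v = scale * (q - zp), with scale = 0.21176, zp = 5
  float v = 0.21176f * (10 - 5);
  assert(std::fabs(v - 1.0588f) < 1e-4f);

  // float32 -> int8: q = round(v / scale) + zp, with scale = 0.3515625, zp = -57
  int8_t q = static_cast<int8_t>(std::lround(1.0f / 0.3515625f) + (-57));
  assert(q == -54);  // matches expect_out[0]
  return 0;
}
```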
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/relux_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/relux_int8_tests.cc
index c584bfa310..d25c4501fd 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/relux_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/relux_int8_tests.cc
@@ -29,21 +29,21 @@ class TestReluXInt8 : public mindspore::CommonTest {
 };
 
 TEST_F(TestReluXInt8, Relu) {
-  lite::tensor::Tensor in_tensor(kNumberTypeInt8, {2, 2});
-  lite::tensor::Tensor out_tensor(kNumberTypeInt8, {2, 2});
+  lite::Tensor in_tensor(kNumberTypeInt8, {2, 2});
+  lite::Tensor out_tensor(kNumberTypeInt8, {2, 2});
   int8_t input_data[] = {-102, 25, -51, 89};  // -0.8 0.2 -0.4 0.7
   int8_t output_data[4] = {0};
   in_tensor.SetData(input_data);
   out_tensor.SetData(output_data);
 
-  const lite::tensor::QuantArg quant_in = {0.00784314f, 0};  // -1.0--1.0 ->
-  const lite::tensor::QuantArg quant_out = {0.00784314f, 0};
+  const lite::QuantArg quant_in = {0.00784314f, 0};  // -1.0--1.0 ->
+  const lite::QuantArg quant_out = {0.00784314f, 0};
   in_tensor.AddQuantParam(quant_in);
   out_tensor.AddQuantParam(quant_out);
 
-  std::vector<lite::tensor::Tensor *> inputs = {&in_tensor};
-  std::vector<lite::tensor::Tensor *> outputs = {&out_tensor};
+  std::vector<lite::Tensor *> inputs = {&in_tensor};
+  std::vector<lite::Tensor *> outputs = {&out_tensor};
   ActivationParameter parameter = {0};
   parameter.op_parameter_.type_ = schema::PrimitiveType_Activation;
@@ -71,8 +71,8 @@ TEST_F(TestReluXInt8, Relu) {
 }
 
 TEST_F(TestReluXInt8, Relu6) {
-  lite::tensor::Tensor in_tensor(kNumberTypeInt8, {2, 4});
-  lite::tensor::Tensor out_tensor(kNumberTypeInt8, {2, 4});
+  lite::Tensor in_tensor(kNumberTypeInt8, {2, 4});
+  lite::Tensor out_tensor(kNumberTypeInt8, {2, 4});
 
   // -2.5f, -1.5f, 1.25f, 3.0f, 4.5f, 6.0f, 6.5f, 9.0f
   int8_t input_data[] = {-118, -98, -44, -10, 19, 49, 59, 108};
@@ -80,13 +80,13 @@ TEST_F(TestReluXInt8, Relu6) {
   in_tensor.SetData(input_data);
   out_tensor.SetData(output_data);
 
-  const lite::tensor::QuantArg quant_in = {0.0509804f, -69};    // -3.0 -- 10.0
-  const lite::tensor::QuantArg quant_out = {0.0392157f, -128};  // 0.0 -- 10.0
+  const lite::QuantArg quant_in = {0.0509804f, -69};    // -3.0 -- 10.0
+  const lite::QuantArg quant_out = {0.0392157f, -128};  // 0.0 -- 10.0
   in_tensor.AddQuantParam(quant_in);
   out_tensor.AddQuantParam(quant_out);
 
-  std::vector<lite::tensor::Tensor *> inputs = {&in_tensor};
-  std::vector<lite::tensor::Tensor *> outputs = {&out_tensor};
+  std::vector<lite::Tensor *> inputs = {&in_tensor};
+  std::vector<lite::Tensor *> outputs = {&out_tensor};
   ActivationParameter parameter = {0};
   parameter.op_parameter_.type_ = schema::PrimitiveType_Activation;
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reshape_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reshape_int8_tests.cc
index a6313f8d19..7e3010da51 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reshape_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reshape_int8_tests.cc
@@ -21,7 +21,7 @@
 #include "mindspore/lite/nnacl/reshape_parameter.h"
 #include "mindspore/lite/src/kernel_registry.h"
 #include "mindspore/lite/src/lite_kernel.h"
-#include "mindspore/lite/src/ir/tensor.h"
+#include "mindspore/lite/src/tensor.h"
 
 namespace mindspore {
@@ -38,25 +38,25 @@ TEST_F(TestReshapeInt8, reshape_quant0) {
   int8_t output[12];
   std::vector<int> output_shape = {2, 6};
-  lite::tensor::QuantArg input_quant_arg;
+  lite::QuantArg input_quant_arg;
   input_quant_arg.scale = 1.0;
   input_quant_arg.zeroPoint = 0;
-  lite::tensor::QuantArg output_quant_arg;
+  lite::QuantArg output_quant_arg;
   output_quant_arg.scale = 1.0;
   output_quant_arg.zeroPoint = 0;
 
-  lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor;
+  lite::Tensor *input_tensor1 = new lite::Tensor;
   TypeId tid_int8 = kNumberTypeInt8;
   input_tensor1->SetData(input1.data());
   input_tensor1->set_shape(shape1);
   input_tensor1->AddQuantParam(input_quant_arg);
   input_tensor1->set_data_type(tid_int8);
 
-  std::vector<lite::tensor::Tensor *> inputs_tensor(1);
+  std::vector<lite::Tensor *> inputs_tensor(1);
   inputs_tensor[0] = input_tensor1;
 
-  std::vector<lite::tensor::Tensor *> outputs_tensor(1);
-  lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor;
+  std::vector<lite::Tensor *> outputs_tensor(1);
+  lite::Tensor *output0_tensor = new lite::Tensor;
   output0_tensor->SetData(output);
   output0_tensor->set_shape(output_shape);
   output0_tensor->AddQuantParam(output_quant_arg);
@@ -97,25 +97,25 @@ TEST_F(TestReshapeInt8, reshape_quant1_thread2) {
   int8_t output[12];
   std::vector<int> output_shape = {2, 6};
-  lite::tensor::QuantArg input_quant_arg;
+  lite::QuantArg input_quant_arg;
   input_quant_arg.scale = 1.0;
   input_quant_arg.zeroPoint = 0;
-  lite::tensor::QuantArg output_quant_arg;
+  lite::QuantArg output_quant_arg;
   output_quant_arg.scale = 2.0;
   output_quant_arg.zeroPoint = 1;
 
-  lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor;
+  lite::Tensor *input_tensor1 = new lite::Tensor;
   TypeId tid_int8 = kNumberTypeInt8;
   input_tensor1->SetData(input1.data());
   input_tensor1->set_shape(shape1);
   input_tensor1->AddQuantParam(input_quant_arg);
   input_tensor1->set_data_type(tid_int8);
 
-  std::vector<lite::tensor::Tensor *> inputs_tensor(1);
+  std::vector<lite::Tensor *> inputs_tensor(1);
   inputs_tensor[0] = input_tensor1;
 
-  std::vector<lite::tensor::Tensor *> outputs_tensor(1);
-  lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor;
+  std::vector<lite::Tensor *> outputs_tensor(1);
+  lite::Tensor *output0_tensor = new lite::Tensor;
   output0_tensor->SetData(output);
   output0_tensor->set_shape(output_shape);
   output0_tensor->AddQuantParam(output_quant_arg);
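reshape_quant1_thread2 is the interesting case above: the input grid is scale 1.0 / zeroPoint 0 but the output grid is scale 2.0 / zeroPoint 1, so the kernel cannot just copy bytes; it must requantize each value as out = round(in_scale * (in - in_zp) / out_scale) + out_zp. For example, an input byte of 6 becomes round(6 / 2.0) + 1 = 4. A sketch of that step (under the stated formula, not the kernel's actual source):

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

int8_t Requant(int8_t in, float in_scale, int in_zp, float out_scale, int out_zp) {
  float real = in_scale * (in - in_zp);                             // dequantize
  int q = static_cast<int>(std::round(real / out_scale)) + out_zp;  // requantize
  return static_cast<int8_t>(std::min(127, std::max(-128, q)));     // clamp
}
// With in_scale = 1.0, in_zp = 0, out_scale = 2.0, out_zp = 1:
// Requant(6, 1.0f, 0, 2.0f, 1) == 4.
```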
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_bilinear_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_bilinear_int8_tests.cc
index c7d8a2aaba..796c0115d6 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_bilinear_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_bilinear_int8_tests.cc
@@ -17,14 +17,14 @@
 #include
 #include "schema/inner/model_generated.h"
 #include "include/context.h"
-#include "src/ir/tensor.h"
+#include "src/tensor.h"
 #include "common/common_test.h"
 #include "mindspore/lite/src/kernel_registry.h"
 #include "nnacl/int8/resize.h"
 
 namespace mindspore {
-using mindspore::lite::tensor::QuantArg;
-using mindspore::lite::tensor::Tensor;
+using mindspore::lite::QuantArg;
+using mindspore::lite::Tensor;
 
 class TestResizeBilinearInt8 : public mindspore::CommonTest {
  public:
@@ -33,11 +33,11 @@ class TestResizeBilinearInt8 : public mindspore::CommonTest {
   void Prepare(const std::vector<int> &in_shape, const std::vector<int> &out_shape, int8_t *input_data,
                int8_t *output_data, const QuantArg quant_in, const QuantArg quant_out, const bool align_corners,
                const int thread_num);
-  std::vector<lite::tensor::Tensor *> inputs;
-  std::vector<lite::tensor::Tensor *> outputs;
+  std::vector<lite::Tensor *> inputs;
+  std::vector<lite::Tensor *> outputs;
   ResizeParameter param_ = {};
-  lite::tensor::Tensor in_tensor;
-  lite::tensor::Tensor out_tensor;
+  lite::Tensor in_tensor;
+  lite::Tensor out_tensor;
   kernel::KernelKey desc_ = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Resize};
   kernel::KernelCreator creator_ = nullptr;
@@ -84,8 +84,8 @@ TEST_F(TestResizeBilinearInt8, Bilinear0) {
   int8_t output_data[16] = {0};
   std::vector<int> in_shape = {1, 2, 2, 1};
   std::vector<int> out_shape = {1, 4, 4, 1};
-  const lite::tensor::QuantArg quant_in = {0.005f, 0};
-  const lite::tensor::QuantArg quant_out = {0.008f, 0};
+  const lite::QuantArg quant_in = {0.005f, 0};
+  const lite::QuantArg quant_out = {0.008f, 0};
   bool align_corners = false;
   int thread_num = 1;
   int8_t expect[16] = {0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 2, 2, 2};
@@ -103,8 +103,8 @@ TEST_F(TestResizeBilinearInt8, Bilinear1) {
   int8_t input_data[] = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
                          20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39};
   int8_t output_data[160] = {0};
 
-  const lite::tensor::QuantArg quant_in = {0.005f, 0};
-  const lite::tensor::QuantArg quant_out = {0.008f, 0};
+  const lite::QuantArg quant_in = {0.005f, 0};
+  const lite::QuantArg quant_out = {0.008f, 0};
   int thread_num = 1;
   bool align_corners = false;
   int8_t expect[160] = {0, 1, 1, 2, 2, 2, 2, 3, 3, 4, 3, 4, 4, 5, 6, 3, 4, 4, 5, 6, 3, 4, 4,
@@ -129,8 +129,8 @@ TEST_F(TestResizeBilinearInt8, Bilinear2) {
                          20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39};
   int8_t output_data[160] = {0};
 
-  const lite::tensor::QuantArg quant_in = {0.005f, 0};
-  const lite::tensor::QuantArg quant_out = {0.008f, 0};
+  const lite::QuantArg quant_in = {0.005f, 0};
+  const lite::QuantArg quant_out = {0.008f, 0};
   int thread_num = 2;
   bool align_corners = true;
   int8_t expect[160] = {0, 1, 1, 2, 2, 1, 2, 2, 3, 4, 2, 3, 3, 4, 5, 3, 4, 4, 5, 6, 2, 3, 3,
@@ -155,8 +155,8 @@ TEST_F(TestResizeBilinearInt8, Bilinear3) {
                          20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39};
   int8_t output_data[160] = {0};
 
-  const lite::tensor::QuantArg quant_in = {0.005f, 2};
-  const lite::tensor::QuantArg quant_out = {0.005f, 2};
+  const lite::QuantArg quant_in = {0.005f, 2};
+  const lite::QuantArg quant_out = {0.005f, 2};
   int thread_num = 2;
   bool align_corners = true;
   int8_t expect[160] = {0, 1, 2, 3, 4, 2, 3, 4, 5, 6, 3, 4, 5, 6, 7, 5, 6, 7, 8, 9, 3, 4, 5,
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_nearest_neighbor_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_nearest_neighbor_int8_tests.cc
index 5189cbb7e7..a2777e77d9 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_nearest_neighbor_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_nearest_neighbor_int8_tests.cc
@@ -17,14 +17,14 @@
 #include
 #include "schema/inner/model_generated.h"
 #include "include/context.h"
-#include "src/ir/tensor.h"
+#include "src/tensor.h"
 #include "common/common_test.h"
 #include "mindspore/lite/src/kernel_registry.h"
 #include "nnacl/int8/resize.h"
 
 namespace mindspore {
-using mindspore::lite::tensor::QuantArg;
-using mindspore::lite::tensor::Tensor;
+using mindspore::lite::QuantArg;
+using mindspore::lite::Tensor;
 
 class TestResizeNearestNeighborInt8 : public mindspore::CommonTest {
  public:
@@ -34,11 +34,11 @@ class TestResizeNearestNeighborInt8 : public mindspore::CommonTest {
                const int thread_num);
   void TearDown() override;
 
-  std::vector<lite::tensor::Tensor *> inputs;
-  std::vector<lite::tensor::Tensor *> outputs;
+  std::vector<lite::Tensor *> inputs;
+  std::vector<lite::Tensor *> outputs;
   ResizeParameter param_ = {};
-  lite::tensor::Tensor in_tensor;
-  lite::tensor::Tensor out_tensor;
+  lite::Tensor in_tensor;
+  lite::Tensor out_tensor;
   kernel::KernelKey desc_ = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Resize};
   kernel::KernelCreator creator_ = nullptr;
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sigmoid_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sigmoid_int8_tests.cc
index 152c2549ce..75cb7af07f 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sigmoid_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sigmoid_int8_tests.cc
@@ -28,21 +28,21 @@ class TestSigmoidInt8 : public mindspore::CommonTest {
 };
 
 TEST_F(TestSigmoidInt8, Sigmoid) {
-  lite::tensor::Tensor in_tensor(kNumberTypeInt8, {4, 4});
-  lite::tensor::Tensor out_tensor(kNumberTypeInt8, {4, 4});
+  lite::Tensor in_tensor(kNumberTypeInt8, {4, 4});
+  lite::Tensor out_tensor(kNumberTypeInt8, {4, 4});
   int8_t input_data[] = {0, 0, 0, 0, 1, 1, 1, 1};  // -3.5f, -3.0f, -2.5f, 0.f, 2.5f, 3.0f, 3.5f, 6.0f
   int8_t output_data[8] = {0};
   in_tensor.SetData(input_data);
   out_tensor.SetData(output_data);
 
-  const lite::tensor::QuantArg quant_in = {1.0, 0};   // -4.0 -- 7.0
-  const lite::tensor::QuantArg quant_out = {1.0, 0};  // -3.0 -- 7.0
+  const lite::QuantArg quant_in = {1.0, 0};   // -4.0 -- 7.0
+  const lite::QuantArg quant_out = {1.0, 0};  // -3.0 -- 7.0
   in_tensor.AddQuantParam(quant_in);
   out_tensor.AddQuantParam(quant_out);
 
-  std::vector<lite::tensor::Tensor *> inputs = {&in_tensor};
-  std::vector<lite::tensor::Tensor *> outputs = {&out_tensor};
+  std::vector<lite::Tensor *> inputs = {&in_tensor};
+  std::vector<lite::Tensor *> outputs = {&out_tensor};
   ActivationParameter parameter = {0};
   parameter.op_parameter_.type_ = schema::PrimitiveType_Activation;
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/slice_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/slice_int8_tests.cc
index 50857b264e..6940779e2c 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/slice_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/slice_int8_tests.cc
@@ -29,21 +29,21 @@ class TestSliceInt8 : public mindspore::CommonTest {
 };
 
 TEST_F(TestSliceInt8, SliceInt8) {
-  lite::tensor::Tensor in_tensor(kNumberTypeInt8, {1, 3, 2, 3});
-  lite::tensor::Tensor out_tensor(kNumberTypeInt8, {1, 1, 2, 3});
+  lite::Tensor in_tensor(kNumberTypeInt8, {1, 3, 2, 3});
+  lite::Tensor out_tensor(kNumberTypeInt8, {1, 1, 2, 3});
   int8_t input_data[] = {105, 35, -27, 0, -63, 99, 16, 45, 67, -49, -115, 106, -98, 119, 103, 81, -114, 68};
   int8_t output_data[12];
   in_tensor.SetData(input_data);
   out_tensor.SetData(output_data);
 
-  const lite::tensor::QuantArg quant_in0 = {0.00784314f, 0};  // -1.0--1.0 -> 0--255
-  const lite::tensor::QuantArg quant_out = {0.00784314f, 0};
+  const lite::QuantArg quant_in0 = {0.00784314f, 0};  // -1.0--1.0 -> 0--255
+  const lite::QuantArg quant_out = {0.00784314f, 0};
   in_tensor.AddQuantParam(quant_in0);
   out_tensor.AddQuantParam(quant_out);
 
-  std::vector<lite::tensor::Tensor *> inputs = {&in_tensor};
-  std::vector<lite::tensor::Tensor *> outputs = {&out_tensor};
+  std::vector<lite::Tensor *> inputs = {&in_tensor};
+  std::vector<lite::Tensor *> outputs = {&out_tensor};
   SliceParameter parameter;
   parameter.begin_[0] = 1;
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/softmax_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/softmax_int8_tests.cc
index e3363cb752..b69ee4d133 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/softmax_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/softmax_int8_tests.cc
@@ -30,8 +30,8 @@ class TestSoftmaxInt8 : public mindspore::CommonTest {
 };
 
 TEST_F(TestSoftmaxInt8, SoftmaxInt8) {
-  std::vector<lite::tensor::Tensor *> inputs_tensor;
-  std::vector<lite::tensor::Tensor *> outputs_tensor;
+  std::vector<lite::Tensor *> inputs_tensor;
+  std::vector<lite::Tensor *> outputs_tensor;
   SoftmaxParameter op_param;
   op_param.op_parameter_.type_ = schema::PrimitiveType_SoftMax;
@@ -42,10 +42,10 @@ TEST_F(TestSoftmaxInt8, SoftmaxInt8) {
   op_param.input_shape_[2] = 3;
   op_param.input_shape_[3] = 4;
 
-  lite::tensor::QuantArg input_quant_arg;
+  lite::QuantArg input_quant_arg;
   input_quant_arg.scale = 0.0352941;
   input_quant_arg.zeroPoint = -128;
-  lite::tensor::QuantArg output_quant_arg;
+  lite::QuantArg output_quant_arg;
   output_quant_arg.scale = 0.00392157;
   output_quant_arg.zeroPoint = -128;
 
@@ -53,7 +53,7 @@ TEST_F(TestSoftmaxInt8, SoftmaxInt8) {
                                -100, -71, -43, -15, 14, 42, 70, 99, 42, 70, 99, 127};
   std::vector<int> in_shape = {1, 2, 3, 4};
 
-  lite::tensor::Tensor input0_tensor;
+  lite::Tensor input0_tensor;
   TypeId tid_int8 = kNumberTypeInt8;
   inputs_tensor.push_back(&input0_tensor);
   input0_tensor.SetData(input.data());
@@ -64,7 +64,7 @@ TEST_F(TestSoftmaxInt8, SoftmaxInt8) {
   std::vector<int8_t> output(24);
   std::vector<int> output_shape = {1, 2, 3, 4};
 
-  lite::tensor::Tensor output0_tensor;
+  lite::Tensor output0_tensor;
   outputs_tensor.push_back(&output0_tensor);
   output0_tensor.SetData(output.data());
   output0_tensor.AddQuantParam(output_quant_arg);
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/split_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/split_int8_tests.cc
index 348ae0f0df..51f5446edf 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/split_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/split_int8_tests.cc
@@ -21,7 +21,7 @@
 #include "mindspore/lite/nnacl/split_parameter.h"
 #include "mindspore/lite/src/kernel_registry.h"
 #include "mindspore/lite/src/lite_kernel.h"
-#include "mindspore/lite/src/ir/tensor.h"
+#include "mindspore/lite/src/tensor.h"
 
 namespace mindspore {
@@ -43,33 +43,33 @@ TEST_F(TestSplitInt8, Split_quant0_thread2) {
   std::vector<int> output1_shape = {2, 1, 2};
   std::vector<int> output2_shape = {2, 2, 2};
 
-  lite::tensor::QuantArg input_quant_arg;
+  lite::QuantArg input_quant_arg;
   input_quant_arg.scale = 1.0;
   input_quant_arg.zeroPoint = 0;
-  lite::tensor::QuantArg output_quant_arg;
+  lite::QuantArg output_quant_arg;
   output_quant_arg.scale = 1.0;
   output_quant_arg.zeroPoint = 0;
 
   TypeId tid_int8 = kNumberTypeInt8;
-  lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor;
+  lite::Tensor *input_tensor1 = new lite::Tensor;
   input_tensor1->SetData(input1.data());
   input_tensor1->set_shape(shape1);
   input_tensor1->AddQuantParam(input_quant_arg);
   input_tensor1->set_data_type(tid_int8);
-  std::vector<lite::tensor::Tensor *> inputs_tensor(1);
+  std::vector<lite::Tensor *> inputs_tensor(1);
   inputs_tensor[0] = input_tensor1;
 
-  lite::tensor::Tensor *output1_tensor = new lite::tensor::Tensor;
+  lite::Tensor *output1_tensor = new lite::Tensor;
   output1_tensor->SetData(output1);
   output1_tensor->set_shape(output1_shape);
   output1_tensor->AddQuantParam(output_quant_arg);
   output1_tensor->set_data_type(tid_int8);
-  lite::tensor::Tensor *output2_tensor = new lite::tensor::Tensor;
+  lite::Tensor *output2_tensor = new lite::Tensor;
   output2_tensor->SetData(output2);
   output2_tensor->set_shape(output2_shape);
   output2_tensor->AddQuantParam(output_quant_arg);
   output2_tensor->set_data_type(tid_int8);
-  std::vector<lite::tensor::Tensor *> outputs_tensor(2);
+  std::vector<lite::Tensor *> outputs_tensor(2);
   outputs_tensor[0] = output1_tensor;
   outputs_tensor[1] = output2_tensor;
@@ -127,38 +127,38 @@ TEST_F(TestSplitInt8, Split_quant0_thread2_num) {
   std::vector<int> output2_shape = {2, 1, 2};
   std::vector<int> output3_shape = {2, 1, 2};
 
-  lite::tensor::QuantArg input_quant_arg;
+  lite::QuantArg input_quant_arg;
   input_quant_arg.scale = 1.0;
   input_quant_arg.zeroPoint = 0;
-  lite::tensor::QuantArg output_quant_arg;
+  lite::QuantArg output_quant_arg;
   output_quant_arg.scale = 1.0;
   output_quant_arg.zeroPoint = 0;
 
   TypeId tid_int8 = kNumberTypeInt8;
-  lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor;
+  lite::Tensor *input_tensor1 = new lite::Tensor;
   input_tensor1->SetData(input1.data());
   input_tensor1->set_shape(shape1);
   input_tensor1->AddQuantParam(input_quant_arg);
   input_tensor1->set_data_type(tid_int8);
-  std::vector<lite::tensor::Tensor *> inputs_tensor(1);
+  std::vector<lite::Tensor *> inputs_tensor(1);
   inputs_tensor[0] = input_tensor1;
 
-  lite::tensor::Tensor *output1_tensor = new lite::tensor::Tensor;
+  lite::Tensor *output1_tensor = new lite::Tensor;
   output1_tensor->SetData(output1);
   output1_tensor->set_shape(output1_shape);
   output1_tensor->AddQuantParam(output_quant_arg);
   output1_tensor->set_data_type(tid_int8);
-  lite::tensor::Tensor *output2_tensor = new lite::tensor::Tensor;
+  lite::Tensor *output2_tensor = new lite::Tensor;
   output2_tensor->SetData(output2);
   output2_tensor->set_shape(output2_shape);
   output2_tensor->AddQuantParam(output_quant_arg);
   output2_tensor->set_data_type(tid_int8);
-  lite::tensor::Tensor *output3_tensor = new lite::tensor::Tensor;
+  lite::Tensor *output3_tensor = new lite::Tensor;
   output3_tensor->SetData(output3);
   output3_tensor->set_shape(output3_shape);
   output3_tensor->AddQuantParam(output_quant_arg);
   output3_tensor->set_data_type(tid_int8);
-  std::vector<lite::tensor::Tensor *> outputs_tensor(3);
+  std::vector<lite::Tensor *> outputs_tensor(3);
   outputs_tensor[0] = output1_tensor;
   outputs_tensor[1] = output2_tensor;
   outputs_tensor[2] = output3_tensor;
@@ -223,38 +223,38 @@ TEST_F(TestSplitInt8, Split_quant1_thread2_num) {
   std::vector<int> output2_shape = {2, 1, 2};
   std::vector<int> output3_shape = {2, 1, 2};
 
-  lite::tensor::QuantArg input_quant_arg;
+  lite::QuantArg input_quant_arg;
   input_quant_arg.scale = 1.0;
   input_quant_arg.zeroPoint = 0;
-  lite::tensor::QuantArg output_quant_arg;
+  lite::QuantArg output_quant_arg;
   output_quant_arg.scale = 2.0;
   output_quant_arg.zeroPoint = 0;
 
   TypeId tid_int8 = kNumberTypeInt8;
-  lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor;
+  lite::Tensor *input_tensor1 = new lite::Tensor;
   input_tensor1->SetData(input1.data());
   input_tensor1->set_shape(shape1);
   input_tensor1->AddQuantParam(input_quant_arg);
   input_tensor1->set_data_type(tid_int8);
-  std::vector<lite::tensor::Tensor *> inputs_tensor(1);
+  std::vector<lite::Tensor *> inputs_tensor(1);
   inputs_tensor[0] = input_tensor1;
 
-  lite::tensor::Tensor *output1_tensor = new lite::tensor::Tensor;
+  lite::Tensor *output1_tensor = new lite::Tensor;
   output1_tensor->SetData(output1);
   output1_tensor->set_shape(output1_shape);
   output1_tensor->AddQuantParam(output_quant_arg);
   output1_tensor->set_data_type(tid_int8);
-  lite::tensor::Tensor *output2_tensor = new lite::tensor::Tensor;
+  lite::Tensor *output2_tensor = new lite::Tensor;
   output2_tensor->SetData(output2);
   output2_tensor->set_shape(output2_shape);
   output2_tensor->AddQuantParam(output_quant_arg);
   output2_tensor->set_data_type(tid_int8);
-  lite::tensor::Tensor *output3_tensor = new lite::tensor::Tensor;
+  lite::Tensor *output3_tensor = new lite::Tensor;
   output3_tensor->SetData(output3);
   output3_tensor->set_shape(output3_shape);
   output3_tensor->AddQuantParam(output_quant_arg);
   output3_tensor->set_data_type(tid_int8);
-  std::vector<lite::tensor::Tensor *> outputs_tensor(3);
+  std::vector<lite::Tensor *> outputs_tensor(3);
   outputs_tensor[0] = output1_tensor;
   outputs_tensor[1] = output2_tensor;
   outputs_tensor[2] = output3_tensor;
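Split is the one kernel in this batch with several outputs: the vector handed to the kernel simply grows, one pre-shaped `lite::Tensor` per slice, each carrying its own quant params. A sketch of that wiring (illustrative only; it uses just the default constructor and setters visible in the hunks above):

```cpp
#include <vector>
#include "src/tensor.h"

std::vector<lite::Tensor *> MakeSplitOutputs(const std::vector<std::vector<int>> &shapes,
                                             const lite::QuantArg &q) {
  std::vector<lite::Tensor *> outs;
  for (const auto &s : shapes) {
    auto *t = new lite::Tensor;     // default-constructed, configured below
    t->set_shape(s);
    t->set_data_type(kNumberTypeInt8);
    t->AddQuantParam(q);
    outs.push_back(t);              // the tests delete these explicitly later
  }
  return outs;
}
```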
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/squeeze_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/squeeze_int8_tests.cc
index fb52a780d8..330cf010bf 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/squeeze_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/squeeze_int8_tests.cc
@@ -21,7 +21,7 @@
 #include "mindspore/lite/nnacl/squeeze_parameter.h"
 #include "mindspore/lite/src/kernel_registry.h"
 #include "mindspore/lite/src/lite_kernel.h"
-#include "mindspore/lite/src/ir/tensor.h"
+#include "mindspore/lite/src/tensor.h"
 
 namespace mindspore {
@@ -39,25 +39,25 @@ TEST_F(TestSqueezeInt8, Squeeze_1d_axis0_offset0_quant0_thread2) {
   const int output_size = 8;
   int8_t output[8];
   std::vector<int> output_shape = {8};
-  lite::tensor::QuantArg input_quant_arg;
+  lite::QuantArg input_quant_arg;
   input_quant_arg.scale = 1.0;
   input_quant_arg.zeroPoint = 0;
-  lite::tensor::QuantArg output_quant_arg;
+  lite::QuantArg output_quant_arg;
   output_quant_arg.scale = 1.0;
   output_quant_arg.zeroPoint = 0;
 
-  lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor;
+  lite::Tensor *input_tensor1 = new lite::Tensor;
   TypeId tid_int8 = kNumberTypeInt8;
   input_tensor1->SetData(input1.data());
   input_tensor1->set_shape(shape1);
   input_tensor1->AddQuantParam(input_quant_arg);
   input_tensor1->set_data_type(tid_int8);
 
-  std::vector<lite::tensor::Tensor *> inputs_tensor(1);
+  std::vector<lite::Tensor *> inputs_tensor(1);
   inputs_tensor[0] = input_tensor1;
 
-  std::vector<lite::tensor::Tensor *> outputs_tensor(1);
-  lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor;
+  std::vector<lite::Tensor *> outputs_tensor(1);
+  lite::Tensor *output0_tensor = new lite::Tensor;
   output0_tensor->SetData(output);
   output0_tensor->set_shape(output_shape);
   output0_tensor->AddQuantParam(output_quant_arg);
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sub_int_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sub_int_tests.cc
index 9db4874978..a19b5e426b 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sub_int_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sub_int_tests.cc
@@ -29,9 +29,9 @@ class TestSubInt8 : public mindspore::CommonTest {
 };
 
 TEST_F(TestSubInt8, SubInt8) {
-  lite::tensor::Tensor in_tensor0(kNumberTypeInt8, {1, 1, 2, 5});
-  lite::tensor::Tensor in_tensor1(kNumberTypeInt8, {1, 1, 1, 5});
-  lite::tensor::Tensor out_tensor(kNumberTypeInt8, {1, 1, 2, 5});
+  lite::Tensor in_tensor0(kNumberTypeInt8, {1, 1, 2, 5});
+  lite::Tensor in_tensor1(kNumberTypeInt8, {1, 1, 1, 5});
+  lite::Tensor out_tensor(kNumberTypeInt8, {1, 1, 2, 5});
   int8_t input_data0[] = {105, 35, -27, 0, -63, 99, 16, 122, 67, -49};
   int8_t input_data1[] = {24, -38, -115, 106, -98};
@@ -40,15 +40,15 @@ TEST_F(TestSubInt8, SubInt8) {
   in_tensor1.SetData(input_data1);
   out_tensor.SetData(output_data);
 
-  const lite::tensor::QuantArg quant_in0 = {0.00784314f, 0};  // -1.0--1.0 -> 0--255
-  const lite::tensor::QuantArg quant_in1 = {0.00784314f, 0};
-  const lite::tensor::QuantArg quant_out = {0.00784314f, 0};
+  const lite::QuantArg quant_in0 = {0.00784314f, 0};  // -1.0--1.0 -> 0--255
+  const lite::QuantArg quant_in1 = {0.00784314f, 0};
+  const lite::QuantArg quant_out = {0.00784314f, 0};
   in_tensor0.AddQuantParam(quant_in0);
   in_tensor1.AddQuantParam(quant_in1);
   out_tensor.AddQuantParam(quant_out);
 
-  std::vector<lite::tensor::Tensor *> inputs = {&in_tensor0, &in_tensor1};
-  std::vector<lite::tensor::Tensor *> outputs = {&out_tensor};
+  std::vector<lite::Tensor *> inputs = {&in_tensor0, &in_tensor1};
+  std::vector<lite::Tensor *> outputs = {&out_tensor};
   OpParameter parameter = {};
   kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Sub};
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/topk_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/topk_int8_tests.cc
index 4ef7ba3631..541d25907d 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/topk_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/topk_int8_tests.cc
@@ -28,17 +28,17 @@ class TestTopKInt8 : public mindspore::CommonTest {
 };
 
 TEST_F(TestTopKInt8, TopK) {
-  lite::tensor::Tensor in_tensor(kNumberTypeInt8, {2, 2, 3});
-  lite::tensor::Tensor out_tensor0(kNumberTypeInt8, {2, 2, 2});
-  lite::tensor::Tensor out_tensor1(kNumberTypeInt32, {2, 2, 2});
-  int8_t input_data[] = {1, 2, 3, 6, 5, 4, 9, 8, 7, 10, 12, 11};
+  lite::Tensor in_tensor(kNumberTypeInt8, {2, 2, 3});
+  lite::Tensor out_tensor0(kNumberTypeInt8, {2, 2, 2});
+  lite::Tensor out_tensor1(kNumberTypeInt32, {2, 2, 2});
+  int8_t input_data[] = {1, 2, 3, 6, 5, 4, 9, 8, 7, 10, 12, 11};
   int8_t output_data0[8] = {0};
   int32_t output_data1[8] = {0};
   in_tensor.SetData(input_data);
   out_tensor0.SetData(output_data0);
   out_tensor1.SetData(output_data1);
-  std::vector<lite::tensor::Tensor *> inputs = {&in_tensor};
-  std::vector<lite::tensor::Tensor *> outputs = {&out_tensor0, &out_tensor1};
+  std::vector<lite::Tensor *> inputs = {&in_tensor};
+  std::vector<lite::Tensor *> outputs = {&out_tensor0, &out_tensor1};
   TopkParameter parameter = {{}, 3, 4, 2, true};
 
   kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_TopK};
@@ -52,8 +52,8 @@ TEST_F(TestTopKInt8, TopK) {
   auto ret = kernel->Run();
   EXPECT_EQ(0, ret);
 
-  int8_t expect0[] = {3, 2, 6, 5, 9, 8, 12, 11};
-  int32_t expect1[] = {2, 1, 0, 1, 0, 1, 1, 2};
+  int8_t expect0[] = {3, 2, 6, 5, 9, 8, 12, 11};
+  int32_t expect1[] = {2, 1, 0, 1, 0, 1, 1, 2};
   for (int i = 0; i < 8; ++i) {
     EXPECT_EQ(output_data0[i], expect0[i]);
     EXPECT_EQ(output_data1[i], expect1[i]);
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/unsqueeze_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/unsqueeze_int8_tests.cc
index 8a6d37e946..33e7ed0dd8 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/unsqueeze_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/unsqueeze_int8_tests.cc
@@ -21,7 +21,7 @@
 #include "mindspore/lite/nnacl/unsqueeze_parameter.h"
 #include "mindspore/lite/src/kernel_registry.h"
 #include "mindspore/lite/src/lite_kernel.h"
-#include "mindspore/lite/src/ir/tensor.h"
+#include "mindspore/lite/src/tensor.h"
 
 namespace mindspore {
@@ -39,25 +39,25 @@ TEST_F(TestUnsqueezeInt8, Unsqueeze_1) {
   const int output_size = 8;
   int8_t output[8];
   std::vector<int> output_shape = {8, 1};
-  lite::tensor::QuantArg input_quant_arg;
+  lite::QuantArg input_quant_arg;
   input_quant_arg.scale = 1.0;
   input_quant_arg.zeroPoint = 0;
-  lite::tensor::QuantArg output_quant_arg;
+  lite::QuantArg output_quant_arg;
   output_quant_arg.scale = 1.0;
   output_quant_arg.zeroPoint = 0;
 
-  lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor;
+  lite::Tensor *input_tensor1 = new lite::Tensor;
   TypeId tid_int8 = kNumberTypeInt8;
   input_tensor1->SetData(input1.data());
   input_tensor1->set_shape(shape1);
   input_tensor1->AddQuantParam(input_quant_arg);
   input_tensor1->set_data_type(tid_int8);
 
-  std::vector<lite::tensor::Tensor *> inputs_tensor(1);
+  std::vector<lite::Tensor *> inputs_tensor(1);
   inputs_tensor[0] = input_tensor1;
 
-  std::vector<lite::tensor::Tensor *> outputs_tensor(1);
-  lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor;
+  std::vector<lite::Tensor *> outputs_tensor(1);
+  lite::Tensor *output0_tensor = new lite::Tensor;
   output0_tensor->SetData(output);
   output0_tensor->set_shape(output_shape);
   output0_tensor->AddQuantParam(output_quant_arg);
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/activation_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/activation_tests.cc
index 19422bafe1..2f3dfb2c39 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/activation_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/activation_tests.cc
@@ -47,8 +47,8 @@ void LoadActivationData(void *dst, size_t dst_size, const std::string &file_path
 }
 
 template <typename T>
-void CompareRes(lite::tensor::Tensor *output_tensor, const std::string &standard_answer_file) {
-  auto *output_data = reinterpret_cast<T *>(output_tensor->Data());
+void CompareRes(lite::Tensor *output_tensor, const std::string
&standard_answer_file) { + auto *output_data = reinterpret_cast(output_tensor->MutableData()); size_t output_size = output_tensor->Size(); auto expect_data = reinterpret_cast(mindspore::lite::ReadFile(standard_answer_file.c_str(), &output_size)); constexpr float atol = 0.001; @@ -66,9 +66,9 @@ void CompareRes(lite::tensor::Tensor *output_tensor, const std::string &standard } template -void printf_tensor(const std::string &str, mindspore::lite::tensor::Tensor *in_data) { +void printf_tensor(const std::string &str, mindspore::lite::Tensor *in_data) { MS_LOG(INFO) << str; - auto input_data = reinterpret_cast(in_data->Data()); + auto input_data = reinterpret_cast(in_data->MutableData()); for (int i = 0; i < in_data->ElementsNum(); ++i) { printf("%f ", input_data[i]); } @@ -90,22 +90,22 @@ TEST_F(TestActivationOpenCL, ReluFp_dim4) { std::vector input_shape = {1, 9}; schema::Format format = schema::Format_NC; schema::Format op_format = schema::Format_NC4; - auto tensor_type = schema::NodeType_ValueNode; - auto *input_tensor = new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, format, tensor_type); + auto tensor_type = lite::TensorCategory(schema::NodeType_ValueNode); + auto *input_tensor = new (std::nothrow) lite::Tensor(data_type, input_shape, format, tensor_type); if (input_tensor == nullptr) { MS_LOG(ERROR) << "new input tensor error!"; return; } - auto *output_tensor = new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, format, tensor_type); + auto *output_tensor = new (std::nothrow) lite::Tensor(data_type, input_shape, format, tensor_type); if (output_tensor == nullptr) { MS_LOG(ERROR) << "new output tensor error!"; delete input_tensor; return; } - std::vector inputs{input_tensor}; - std::vector outputs{output_tensor}; + std::vector inputs{input_tensor}; + std::vector outputs{output_tensor}; inputs[0]->MallocData(allocator); - LoadActivationData(inputs[0]->Data(), inputs[0]->Size(), in_file); + LoadActivationData(inputs[0]->MutableData(), inputs[0]->Size(), in_file); if (enable_fp16) { printf_tensor("ReluFp16:--input data---", inputs[0]); } else { @@ -202,24 +202,24 @@ TEST_F(TestActivationOpenCL, Relu6Fp_dim4) { std::vector input_shape = {1, 9}; schema::Format format = schema::Format_NC; schema::Format op_format = schema::Format_NC4; - auto tensor_type = schema::NodeType_ValueNode; - auto *input_tensor = new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, format, tensor_type); + auto tensor_type = lite::TensorCategory(schema::NodeType_ValueNode); + auto *input_tensor = new (std::nothrow) lite::Tensor(data_type, input_shape, format, tensor_type); if (input_tensor == nullptr) { MS_LOG(ERROR) << "new input tensor error!"; return; } - auto *output_tensor = new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, format, tensor_type); + auto *output_tensor = new (std::nothrow) lite::Tensor(data_type, input_shape, format, tensor_type); if (output_tensor == nullptr) { MS_LOG(ERROR) << "new output tensor error!"; delete input_tensor; return; } - std::vector inputs{input_tensor}; - std::vector outputs{output_tensor}; + std::vector inputs{input_tensor}; + std::vector outputs{output_tensor}; auto allocator = ocl_runtime->GetAllocator(); inputs[0]->MallocData(allocator); MS_LOG(INFO) << "Initialize input data"; - LoadActivationData(inputs[0]->Data(), inputs[0]->Size(), in_file); + LoadActivationData(inputs[0]->MutableData(), inputs[0]->Size(), in_file); if (enable_fp16) { printf_tensor("Relu6:FP16--input data--", inputs[0]); } else { @@ -317,24 +317,24 @@ 
TEST_F(TestActivationOpenCL, SigmoidFp_dim4) { std::vector input_shape = {1, 9}; schema::Format format = schema::Format_NC; schema::Format op_format = schema::Format_NC4; - auto tensor_type = schema::NodeType_ValueNode; - auto *input_tensor = new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, format, tensor_type); + auto tensor_type = lite::TensorCategory(schema::NodeType_ValueNode); + auto *input_tensor = new (std::nothrow) lite::Tensor(data_type, input_shape, format, tensor_type); if (input_tensor == nullptr) { MS_LOG(ERROR) << "new input tensor error!"; return; } - auto *output_tensor = new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, format, tensor_type); + auto *output_tensor = new (std::nothrow) lite::Tensor(data_type, input_shape, format, tensor_type); if (output_tensor == nullptr) { MS_LOG(ERROR) << "new output tensor error!"; delete input_tensor; return; } - std::vector inputs{input_tensor}; - std::vector outputs{output_tensor}; + std::vector inputs{input_tensor}; + std::vector outputs{output_tensor}; auto allocator = ocl_runtime->GetAllocator(); inputs[0]->MallocData(allocator); MS_LOG(INFO) << "Initialize input data"; - LoadActivationData(inputs[0]->Data(), inputs[0]->Size(), in_file); + LoadActivationData(inputs[0]->MutableData(), inputs[0]->Size(), in_file); if (enable_fp16) { printf_tensor("Sigmoid:FP16--input data--", inputs[0]); } else { @@ -430,26 +430,26 @@ TEST_F(TestActivationOpenCL, LeakyReluFp_dim4) { MS_LOG(INFO) << "Init tensors."; std::vector input_shape = {1, 9}; // need modify - auto tensor_type = schema::NodeType_ValueNode; - schema::Format format = schema::Format_NC; // need modify + auto tensor_type = lite::TensorCategory(schema::NodeType_ValueNode); + schema::Format format = schema::Format_NC; // need modify schema::Format op_format = schema::Format_NHWC4; // need modify - auto *input_tensor = new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, format, tensor_type); + auto *input_tensor = new (std::nothrow) lite::Tensor(data_type, input_shape, format, tensor_type); if (input_tensor == nullptr) { MS_LOG(ERROR) << "new input tensor error!"; return; } - auto *output_tensor = new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, format, tensor_type); + auto *output_tensor = new (std::nothrow) lite::Tensor(data_type, input_shape, format, tensor_type); if (output_tensor == nullptr) { MS_LOG(ERROR) << "new output tensor error!"; delete input_tensor; return; } - std::vector inputs{input_tensor}; - std::vector outputs{output_tensor}; + std::vector inputs{input_tensor}; + std::vector outputs{output_tensor}; auto allocator = ocl_runtime->GetAllocator(); inputs[0]->MallocData(allocator); MS_LOG(INFO) << "Initialize input data"; - LoadActivationData(inputs[0]->Data(), inputs[0]->Size(), in_file); + LoadActivationData(inputs[0]->MutableData(), inputs[0]->Size(), in_file); if (enable_fp16) { printf_tensor("Leaky Relu:FP16--input data--", inputs[0]); } else { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/arithmetic_self_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/arithmetic_self_tests.cc index 1289b7f85e..00f4fa8889 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/arithmetic_self_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/arithmetic_self_tests.cc @@ -55,15 +55,15 @@ TEST_F(TestArithmeticSelfOpenCLfp16, ArithmeticSelfOpenCLFp16) { std::vector shape = {1, 19, 19, 96}; auto data_type = kNumberTypeFloat16; - auto tensor_type = schema::NodeType_ValueNode; - auto 
*input_tensor = new (std::nothrow) lite::tensor::Tensor(data_type, shape, schema::Format_NHWC, tensor_type); - auto *output_tensor = new (std::nothrow) lite::tensor::Tensor(data_type, shape, schema::Format_NHWC, tensor_type); + auto tensor_type = lite::TensorCategory(schema::NodeType_ValueNode); + auto *input_tensor = new (std::nothrow) lite::Tensor(data_type, shape, schema::Format_NHWC, tensor_type); + auto *output_tensor = new (std::nothrow) lite::Tensor(data_type, shape, schema::Format_NHWC, tensor_type); if (input_tensor == nullptr || output_tensor == nullptr) { MS_LOG(INFO) << " new input_tensor or output_tensor failed "; return; } - std::vector inputs{input_tensor}; - std::vector outputs{output_tensor}; + std::vector inputs{input_tensor}; + std::vector outputs{output_tensor}; MS_LOG(INFO) << " initialize param "; auto param = new (std::nothrow) ArithmeticSelfParameter(); @@ -114,11 +114,11 @@ TEST_F(TestArithmeticSelfOpenCLfp16, ArithmeticSelfOpenCLFp16) { } sub_graph->Init(); MS_LOG(INFO) << " initialize input data "; - memcpy(inputs[0]->Data(), input_data1, input1_size); + memcpy(inputs[0]->MutableData(), input_data1, input1_size); std::cout << "==================output data================" << std::endl; sub_graph->Run(); - auto *output_data_gpu = reinterpret_cast(output_tensor->Data()); + auto *output_data_gpu = reinterpret_cast(output_tensor->MutableData()); CompareOutputData1(input_data1, output_data_gpu, correctOutput, output_tensor->ElementsNum(), 0.000001); for (auto tensor : inputs) { delete tensor; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/arithmetic_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/arithmetic_tests.cc index a752594142..acf64ed405 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/arithmetic_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/arithmetic_tests.cc @@ -65,14 +65,14 @@ void TestCase(const std::vector &shape_a, const std::vector &shape_b) auto allocator = ocl_runtime->GetAllocator(); bool is_bias_add = shape_b.empty(); - auto tensorType = schema::NodeType_ValueNode; - - lite::tensor::Tensor *tensor_a = - new (std::nothrow) lite::tensor::Tensor(kNumberTypeFloat32, shape_a, schema::Format_NHWC4, tensorType); - lite::tensor::Tensor *tensor_b = - new (std::nothrow) lite::tensor::Tensor(kNumberTypeFloat32, shape_b, schema::Format_NHWC4, tensorType); - lite::tensor::Tensor *tensor_c = - new (std::nothrow) lite::tensor::Tensor(kNumberTypeFloat32, shape_a, schema::Format_NHWC4, tensorType); + auto tensorType = lite::TensorCategory(schema::NodeType_ValueNode); + + lite::Tensor *tensor_a = + new (std::nothrow) lite::Tensor(kNumberTypeFloat32, shape_a, schema::Format_NHWC4, tensorType); + lite::Tensor *tensor_b = + new (std::nothrow) lite::Tensor(kNumberTypeFloat32, shape_b, schema::Format_NHWC4, tensorType); + lite::Tensor *tensor_c = + new (std::nothrow) lite::Tensor(kNumberTypeFloat32, shape_a, schema::Format_NHWC4, tensorType); if (tensor_a == nullptr || tensor_b == nullptr || tensor_c == nullptr) { MS_LOG(ERROR) << "Create tensor failed!"; delete tensor_a; @@ -110,14 +110,14 @@ void TestCase(const std::vector &shape_a, const std::vector &shape_b) ElementAdd(data_a, data_b, data_c_cpu, element_num); } - std::vector inputs = {tensor_a}; + std::vector inputs = {tensor_a}; if (!is_bias_add) { inputs.push_back(tensor_b); } else { tensor_b->MallocData(); - memcpy(tensor_b->Data(), data_b, sizeof(float)); + memcpy(tensor_b->MutableData(), data_b, sizeof(float)); } - std::vector outputs = {tensor_c}; + 
std::vector outputs = {tensor_c}; ArithmeticParameter *param = new (std::nothrow) ArithmeticParameter(); if (param == nullptr) { @@ -134,7 +134,7 @@ void TestCase(const std::vector &shape_a, const std::vector &shape_b) param->ndim_ = 4; param->op_parameter_.type_ = PrimitiveType_Add; - std::vector arithmetic_inputs = {tensor_a, tensor_b}; + std::vector arithmetic_inputs = {tensor_a, tensor_b}; lite::Context ctx; auto *arith_kernel = new kernel::ArithmeticOpenCLKernel(reinterpret_cast(param), arithmetic_inputs, outputs, &ctx); @@ -170,19 +170,19 @@ void TestCase(const std::vector &shape_a, const std::vector &shape_b) } kernel->Init(); - memcpy(inputs[0]->Data(), data_a, sizeof(float) * element_num); + memcpy(inputs[0]->MutableData(), data_a, sizeof(float) * element_num); if (!is_bias_add) { - memcpy(inputs[1]->Data(), data_b, sizeof(float) * element_num_b); + memcpy(inputs[1]->MutableData(), data_b, sizeof(float) * element_num_b); } kernel->Run(); - memcpy(data_c_ocl, outputs[0]->Data(), sizeof(float) * element_num); + memcpy(data_c_ocl, outputs[0]->MutableData(), sizeof(float) * element_num); LogData(data_a, 10, "Data A : "); LogData(data_b, tensor_b->shape().empty() ? 1 : 10, "Data B : "); LogData(data_c_cpu, 10, "Expect compute : "); - LogData(outputs[0]->Data(), 10, "OpenCL compute : "); + LogData(outputs[0]->MutableData(), 10, "OpenCL compute : "); bool cmp = DataCompare(data_c_cpu, data_c_ocl, element_num); MS_LOG(INFO) << "Compare " << (cmp ? "success!" : "failed!"); EXPECT_EQ(true, cmp); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/avg_pooling_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/avg_pooling_tests.cc index 31fa220ca3..5dc3a76a7c 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/avg_pooling_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/avg_pooling_tests.cc @@ -75,23 +75,23 @@ void RunTestCaseAvgPooling(const std::vector &shape, void *input_data, void } InitAvgPoolingParam(param); std::vector input_shape = {n, h, w, c}; - auto tensor_x_ptr = std::make_unique( - TypeId(enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), input_shape, schema::Format_NHWC); + auto tensor_x_ptr = std::make_unique(TypeId(enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), + input_shape, schema::Format_NHWC); auto tensor_x = tensor_x_ptr.get(); if (tensor_x == nullptr) { MS_LOG(ERROR) << "tensor_x create error."; return; } std::vector out_shape = {n, oh, ow, c}; - auto tensor_out_ptr = std::make_unique( - TypeId(enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), out_shape, schema::Format_NHWC); + auto tensor_out_ptr = std::make_unique(TypeId(enable_fp16 ? 
kNumberTypeFloat16 : kNumberTypeFloat32), + out_shape, schema::Format_NHWC); auto tensor_out = tensor_out_ptr.get(); if (tensor_out == nullptr) { MS_LOG(ERROR) << "tensor_out create error."; return; } - std::vector inputs{tensor_x}; - std::vector outputs{tensor_out}; + std::vector inputs{tensor_x}; + std::vector outputs{tensor_out}; auto arith_kernel_ptr = std::make_unique(reinterpret_cast(param), inputs, outputs); auto arith_kernel = arith_kernel_ptr.get(); @@ -111,13 +111,14 @@ void RunTestCaseAvgPooling(const std::vector &shape, void *input_data, void return; } pGraph->Init(); - memcpy(inputs[0]->Data(), input_data, inputs[0]->ElementsNum() * dtype_size); + memcpy(inputs[0]->MutableData(), input_data, inputs[0]->ElementsNum() * dtype_size); pGraph->Run(); if (enable_fp16) { - CompareOutput(outputs[0]->Data(), output_data, outputs[0]->ElementsNum(), static_cast(1e-3), 2e-2); + CompareOutput(outputs[0]->MutableData(), output_data, outputs[0]->ElementsNum(), static_cast(1e-3), + 2e-2); } else { - CompareOutput(outputs[0]->Data(), output_data, outputs[0]->ElementsNum(), static_cast(1e-5)); + CompareOutput(outputs[0]->MutableData(), output_data, outputs[0]->ElementsNum(), static_cast(1e-5)); } inputs[0]->SetData(nullptr); outputs[0]->SetData(nullptr); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/batchnorm_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/batchnorm_tests.cc index e9c90cb776..cde15bc740 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/batchnorm_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/batchnorm_tests.cc @@ -43,7 +43,7 @@ TEST_F(TestBatchnormOpenCLfp16, Batchnormfp16input_dim4) { std::vector input_shape = {1, 256, 256, 48}; std::vector output_shape = {1, 256, 256, 48}; auto data_type = kNumberTypeFloat32; - auto tensor_type = schema::NodeType_ValueNode; + auto tensor_type = lite::TensorCategory(schema::NodeType_ValueNode); // get the input from .bin size_t input_size, output_size; @@ -62,23 +62,21 @@ TEST_F(TestBatchnormOpenCLfp16, Batchnormfp16input_dim4) { auto offset_data = reinterpret_cast(mindspore::lite::ReadFile(offset_path.c_str(), &offset_size)); MS_LOG(INFO) << " construct tensors "; - lite::tensor::Tensor *tensor_data = - new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC, tensor_type); - lite::tensor::Tensor *tensor_mean = - new (std::nothrow) lite::tensor::Tensor(data_type, {1, 1, 1, input_shape[3]}, schema::Format_NHWC, tensor_type); - lite::tensor::Tensor *tensor_var = - new (std::nothrow) lite::tensor::Tensor(data_type, {1, 1, 1, input_shape[3]}, schema::Format_NHWC, tensor_type); - lite::tensor::Tensor *tensor_scale = - new (std::nothrow) lite::tensor::Tensor(data_type, {1, 1, 1, input_shape[3]}, schema::Format_NHWC, tensor_type); - lite::tensor::Tensor *tensor_offset = - new (std::nothrow) lite::tensor::Tensor(data_type, {1, 1, 1, input_shape[3]}, schema::Format_NHWC, tensor_type); + lite::Tensor *tensor_data = new (std::nothrow) lite::Tensor(data_type, input_shape, schema::Format_NHWC, tensor_type); + lite::Tensor *tensor_mean = + new (std::nothrow) lite::Tensor(data_type, {1, 1, 1, input_shape[3]}, schema::Format_NHWC, tensor_type); + lite::Tensor *tensor_var = + new (std::nothrow) lite::Tensor(data_type, {1, 1, 1, input_shape[3]}, schema::Format_NHWC, tensor_type); + lite::Tensor *tensor_scale = + new (std::nothrow) lite::Tensor(data_type, {1, 1, 1, input_shape[3]}, schema::Format_NHWC, tensor_type); + lite::Tensor *tensor_offset = + new (std::nothrow) 
lite::Tensor(data_type, {1, 1, 1, input_shape[3]}, schema::Format_NHWC, tensor_type); if (tensor_data == nullptr || tensor_mean == nullptr || tensor_var == nullptr || tensor_scale == nullptr || tensor_offset == nullptr) { MS_LOG(INFO) << " init tensor failed "; return; } - auto *output_tensor = - new (std::nothrow) lite::tensor::Tensor(data_type, output_shape, schema::Format_NHWC4, tensor_type); + auto *output_tensor = new (std::nothrow) lite::Tensor(data_type, output_shape, schema::Format_NHWC4, tensor_type); if (output_tensor == nullptr) { MS_LOG(INFO) << " init tensor failed "; delete tensor_data; @@ -88,8 +86,8 @@ TEST_F(TestBatchnormOpenCLfp16, Batchnormfp16input_dim4) { delete tensor_offset; return; } - std::vector inputs = {tensor_data, tensor_scale, tensor_offset, tensor_mean, tensor_var}; - std::vector outputs{output_tensor}; + std::vector inputs = {tensor_data, tensor_scale, tensor_offset, tensor_mean, tensor_var}; + std::vector outputs{output_tensor}; MS_LOG(INFO) << " initialize tensors "; auto param = new (std::nothrow) BatchNormParameter(); @@ -132,15 +130,15 @@ TEST_F(TestBatchnormOpenCLfp16, Batchnormfp16input_dim4) { } sub_graph->Init(); MS_LOG(INFO) << " init tensors "; - memcpy(inputs[0]->Data(), input_data, input_size); - memcpy(inputs[1]->Data(), scale_data, scale_size); - memcpy(inputs[2]->Data(), offset_data, offset_size); - memcpy(inputs[3]->Data(), mean_data, mean_size); - memcpy(inputs[4]->Data(), var_data, var_size); + memcpy(inputs[0]->MutableData(), input_data, input_size); + memcpy(inputs[1]->MutableData(), scale_data, scale_size); + memcpy(inputs[2]->MutableData(), offset_data, offset_size); + memcpy(inputs[3]->MutableData(), mean_data, mean_size); + memcpy(inputs[4]->MutableData(), var_data, var_size); std::cout << "==================output data================" << std::endl; sub_graph->Run(); - auto *output_data_gpu = reinterpret_cast(output_tensor->Data()); + auto *output_data_gpu = reinterpret_cast(output_tensor->MutableData()); CompareOutputData(output_data_gpu, correct_data, output_tensor->ElementsNum(), 0.01); for (auto tensor : inputs) { delete tensor; @@ -162,7 +160,7 @@ TEST_F(TestBatchnormOpenCLfp32, Batchnormfp32input_dim4) { std::vector input_shape = {1, 256, 256, 47}; std::vector output_shape = {1, 256, 256, 47}; auto data_type = kNumberTypeFloat32; - auto tensor_type = schema::NodeType_ValueNode; + auto tensor_type = lite::TensorCategory(schema::NodeType_ValueNode); // get the input from .bin size_t input_size, output_size; @@ -181,23 +179,21 @@ TEST_F(TestBatchnormOpenCLfp32, Batchnormfp32input_dim4) { auto offset_data = reinterpret_cast(mindspore::lite::ReadFile(offset_path.c_str(), &offset_size)); MS_LOG(INFO) << " construct tensors "; - lite::tensor::Tensor *tensor_data = - new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC, tensor_type); - lite::tensor::Tensor *tensor_mean = - new (std::nothrow) lite::tensor::Tensor(data_type, {1, 1, 1, input_shape[3]}, schema::Format_NHWC, tensor_type); - lite::tensor::Tensor *tensor_var = - new (std::nothrow) lite::tensor::Tensor(data_type, {1, 1, 1, input_shape[3]}, schema::Format_NHWC, tensor_type); - lite::tensor::Tensor *tensor_scale = - new (std::nothrow) lite::tensor::Tensor(data_type, {1, 1, 1, input_shape[3]}, schema::Format_NHWC, tensor_type); - lite::tensor::Tensor *tensor_offset = - new (std::nothrow) lite::tensor::Tensor(data_type, {1, 1, 1, input_shape[3]}, schema::Format_NHWC, tensor_type); + lite::Tensor *tensor_data = new (std::nothrow) 
lite::Tensor(data_type, input_shape, schema::Format_NHWC, tensor_type); + lite::Tensor *tensor_mean = + new (std::nothrow) lite::Tensor(data_type, {1, 1, 1, input_shape[3]}, schema::Format_NHWC, tensor_type); + lite::Tensor *tensor_var = + new (std::nothrow) lite::Tensor(data_type, {1, 1, 1, input_shape[3]}, schema::Format_NHWC, tensor_type); + lite::Tensor *tensor_scale = + new (std::nothrow) lite::Tensor(data_type, {1, 1, 1, input_shape[3]}, schema::Format_NHWC, tensor_type); + lite::Tensor *tensor_offset = + new (std::nothrow) lite::Tensor(data_type, {1, 1, 1, input_shape[3]}, schema::Format_NHWC, tensor_type); if (tensor_data == nullptr || tensor_mean == nullptr || tensor_var == nullptr || tensor_scale == nullptr || tensor_offset == nullptr) { MS_LOG(INFO) << " init tensor failed "; return; } - auto *output_tensor = - new (std::nothrow) lite::tensor::Tensor(data_type, output_shape, schema::Format_NHWC, tensor_type); + auto *output_tensor = new (std::nothrow) lite::Tensor(data_type, output_shape, schema::Format_NHWC, tensor_type); if (output_tensor == nullptr) { MS_LOG(INFO) << " init tensor failed "; delete tensor_data; @@ -207,8 +203,8 @@ TEST_F(TestBatchnormOpenCLfp32, Batchnormfp32input_dim4) { delete tensor_offset; return; } - std::vector inputs = {tensor_data, tensor_scale, tensor_offset, tensor_mean, tensor_var}; - std::vector outputs{output_tensor}; + std::vector inputs = {tensor_data, tensor_scale, tensor_offset, tensor_mean, tensor_var}; + std::vector outputs{output_tensor}; MS_LOG(INFO) << " initialize tensors "; auto param = new (std::nothrow) BatchNormParameter(); @@ -251,15 +247,15 @@ TEST_F(TestBatchnormOpenCLfp32, Batchnormfp32input_dim4) { } sub_graph->Init(); MS_LOG(INFO) << " init tensors "; - memcpy(inputs[0]->Data(), input_data, input_size); - memcpy(inputs[1]->Data(), scale_data, scale_size); - memcpy(inputs[2]->Data(), offset_data, offset_size); - memcpy(inputs[3]->Data(), mean_data, mean_size); - memcpy(inputs[4]->Data(), var_data, var_size); + memcpy(inputs[0]->MutableData(), input_data, input_size); + memcpy(inputs[1]->MutableData(), scale_data, scale_size); + memcpy(inputs[2]->MutableData(), offset_data, offset_size); + memcpy(inputs[3]->MutableData(), mean_data, mean_size); + memcpy(inputs[4]->MutableData(), var_data, var_size); std::cout << "==================output data================" << std::endl; sub_graph->Run(); - auto *output_data_gpu = reinterpret_cast(output_tensor->Data()); + auto *output_data_gpu = reinterpret_cast(output_tensor->MutableData()); CompareOutputData(output_data_gpu, correct_data, output_tensor->ElementsNum(), 0.0001); for (auto tensor : inputs) { delete tensor; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/biasadd_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/biasadd_tests.cc index 67975ed934..ee281758b6 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/biasadd_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/biasadd_tests.cc @@ -41,9 +41,9 @@ void LoadDataBiasAdd(void *dst, size_t dst_size, const std::string &file_path) { } template -void CompareOutBiasAdd(lite::tensor::Tensor *output_tensor, const std::string &standard_answer_file) { +void CompareOutBiasAdd(lite::Tensor *output_tensor, const std::string &standard_answer_file) { size_t output_size = output_tensor->ElementsNum(); - auto output_data = reinterpret_cast(output_tensor->Data()); + auto output_data = reinterpret_cast(output_tensor->MutableData()); auto expect_data = 
reinterpret_cast(mindspore::lite::ReadFile(standard_answer_file.c_str(), &output_size)); constexpr float atol = 0.0002; for (int i = 0; i < output_tensor->ElementsNum(); ++i) { @@ -60,9 +60,9 @@ void CompareOutBiasAdd(lite::tensor::Tensor *output_tensor, const std::string &s } template -void printf_tensor_BiasAdd(const std::string log, mindspore::lite::tensor::Tensor *in_data, int size) { +void printf_tensor_BiasAdd(const std::string log, mindspore::lite::Tensor *in_data, int size) { MS_LOG(INFO) << log; - auto input_data = reinterpret_cast(in_data->Data()); + auto input_data = reinterpret_cast(in_data->MutableData()); for (int i = 0; i < size; ++i) { printf("%f ", input_data[i]); } @@ -81,7 +81,7 @@ TEST_F(TestBiasAddOpenCL, BiasAddFp32_dim4) { ocl_runtime->SetFp16Enable(data_type == kNumberTypeFloat16); std::vector input_shape = {1, 9}; // need modify std::vector output_shape = {1, 9}; // need modify - auto tensor_type = schema::NodeType_ValueNode; + auto tensor_type = lite::TensorCategory(schema::NodeType_ValueNode); schema::Format type = schema::Format_NC; // need modify schema::Format op_format = schema::Format_NC4; // need modify int weight_shape = 0; @@ -90,32 +90,32 @@ TEST_F(TestBiasAddOpenCL, BiasAddFp32_dim4) { } else { weight_shape = input_shape[1]; } - auto *input_tensor = new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, type, tensor_type); + auto *input_tensor = new (std::nothrow) lite::Tensor(data_type, input_shape, type, tensor_type); if (input_tensor == nullptr) { MS_LOG(ERROR) << "new input tensor error!"; return; } - auto *output_tensor = new (std::nothrow) lite::tensor::Tensor(data_type, output_shape, type, tensor_type); + auto *output_tensor = new (std::nothrow) lite::Tensor(data_type, output_shape, type, tensor_type); if (output_tensor == nullptr) { MS_LOG(ERROR) << "new output tensor error!"; delete input_tensor; return; } - auto *weight_tensor = new (std::nothrow) - lite::tensor::Tensor(data_type, std::vector{weight_shape}, schema::Format_NHWC, tensor_type); + auto *weight_tensor = + new (std::nothrow) lite::Tensor(data_type, std::vector{weight_shape}, schema::Format_NHWC, tensor_type); if (weight_tensor == nullptr) { MS_LOG(ERROR) << "new weight tensor error!"; delete output_tensor; delete input_tensor; return; } - std::vector inputs{input_tensor, weight_tensor}; - std::vector outputs{output_tensor}; + std::vector inputs{input_tensor, weight_tensor}; + std::vector outputs{output_tensor}; auto allocator = ocl_runtime->GetAllocator(); inputs[0]->MallocData(allocator); inputs[1]->MallocData(allocator); - LoadDataBiasAdd(input_tensor->Data(), input_tensor->Size(), in_file); - LoadDataBiasAdd(weight_tensor->Data(), weight_tensor->Size(), weight_file); + LoadDataBiasAdd(input_tensor->MutableData(), input_tensor->Size(), in_file); + LoadDataBiasAdd(weight_tensor->MutableData(), weight_tensor->Size(), weight_file); if (ocl_runtime->GetFp16Enable()) { printf_tensor_BiasAdd("BiasAdd:FP16--input data", inputs[0], input_tensor->ElementsNum()); printf_tensor_BiasAdd("BiasAdd:FP16--weight data", inputs[1], weight_tensor->ElementsNum()); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/concat_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/concat_tests.cc index 5c8830e26f..c8cefb6338 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/concat_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/concat_tests.cc @@ -64,18 +64,17 @@ TEST_F(TestConcatOpenCLfp16, ConcatFp16_2input_dim4_axis3) { std::vector{1, 19, 19, 96}}; 
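Alongside the rename, every accessor in these tests moves from Data() to MutableData(). A sketch of the readback-and-compare step the tests share (CompareWithTolerance is an illustrative name, not a helper added by this patch):

    #include <cmath>
    #include "mindspore/lite/src/tensor.h"

    bool CompareWithTolerance(mindspore::lite::Tensor *output, const float *expect, float atol) {
      // MutableData() replaces Data(): a writable pointer to the tensor's buffer.
      auto *out = reinterpret_cast<float *>(output->MutableData());
      for (int i = 0; i < output->ElementsNum(); ++i) {
        if (std::fabs(out[i] - expect[i]) > atol) {
          return false;
        }
      }
      return true;
    }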
std::vector output_shape = {2, 19, 19, 96}; auto data_type = kNumberTypeFloat16; - auto tensor_type = schema::NodeType_ValueNode; - std::vector inputs; + auto tensor_type = lite::TensorCategory(schema::NodeType_ValueNode); + std::vector inputs; for (auto &shape : input_shapes) { - auto input_temp = new (std::nothrow) lite::tensor::Tensor(data_type, shape, schema::Format_NHWC4, tensor_type); + auto input_temp = new (std::nothrow) lite::Tensor(data_type, shape, schema::Format_NHWC4, tensor_type); inputs.push_back(input_temp); if (input_temp == nullptr) { MS_LOG(INFO) << " new input_tensor failed "; return; } } - auto *output_tensor = - new (std::nothrow) lite::tensor::Tensor(data_type, output_shape, schema::Format_NHWC4, tensor_type); + auto *output_tensor = new (std::nothrow) lite::Tensor(data_type, output_shape, schema::Format_NHWC4, tensor_type); if (output_tensor == nullptr) { MS_LOG(INFO) << " new output_tensor failed "; for (auto tensor : inputs) { @@ -83,7 +82,7 @@ TEST_F(TestConcatOpenCLfp16, ConcatFp16_2input_dim4_axis3) { } return; } - std::vector outputs{output_tensor}; + std::vector outputs{output_tensor}; MS_LOG(INFO) << " input_shapes size =: " << input_shapes.size(); MS_LOG(INFO) << " initialize tensors "; @@ -135,19 +134,19 @@ TEST_F(TestConcatOpenCLfp16, ConcatFp16_2input_dim4_axis3) { sub_graph->Init(); MS_LOG(INFO) << " initialize input data "; if (inputs.size() == 2) { - memcpy(inputs[0]->Data(), input_data1, input1_size); - memcpy(inputs[1]->Data(), input_data2, input2_size); + memcpy(inputs[0]->MutableData(), input_data1, input1_size); + memcpy(inputs[1]->MutableData(), input_data2, input2_size); } else if (inputs.size() == 3) { - memcpy(inputs[0]->Data(), input_data1, input1_size); - memcpy(inputs[1]->Data(), input_data2, input2_size); - memcpy(inputs[2]->Data(), input_data3, input3_size); + memcpy(inputs[0]->MutableData(), input_data1, input1_size); + memcpy(inputs[1]->MutableData(), input_data2, input2_size); + memcpy(inputs[2]->MutableData(), input_data3, input3_size); } else { MS_LOG(ERROR) << " input size must be 2 or 3"; } std::cout << "==================output data================" << std::endl; sub_graph->Run(); - auto *output_data_gpu = reinterpret_cast(output_tensor->Data()); + auto *output_data_gpu = reinterpret_cast(output_tensor->MutableData()); CompareOutputData1(output_data_gpu, correctOutput, output_tensor->ElementsNum(), 0.000001); for (auto tensor : inputs) { delete tensor; @@ -183,18 +182,17 @@ TEST_F(TestConcatOpenCLfp32, ConcatFp32_2input_dim4_axis3) { std::vector{1, 16, 256, 80}, std::vector{1, 16, 256, 80}, std::vector{1, 16, 256, 80}}; std::vector output_shape = {1, 48, 256, 80}; auto data_type = kNumberTypeFloat32; - auto tensor_type = schema::NodeType_ValueNode; - std::vector inputs; + auto tensor_type = lite::TensorCategory(schema::NodeType_ValueNode); + std::vector inputs; for (auto &shape : input_shapes) { - auto input_temp = new (std::nothrow) lite::tensor::Tensor(data_type, shape, schema::Format_NHWC, tensor_type); + auto input_temp = new (std::nothrow) lite::Tensor(data_type, shape, schema::Format_NHWC, tensor_type); inputs.push_back(input_temp); if (input_temp == nullptr) { MS_LOG(INFO) << " new input_tensor failed "; return; } } - auto *output_tensor = - new (std::nothrow) lite::tensor::Tensor(data_type, output_shape, schema::Format_NHWC, tensor_type); + auto *output_tensor = new (std::nothrow) lite::Tensor(data_type, output_shape, schema::Format_NHWC, tensor_type); if (output_tensor == nullptr) { MS_LOG(INFO) << " new output_tensor 
failed "; for (auto tensor : inputs) { @@ -202,7 +200,7 @@ TEST_F(TestConcatOpenCLfp32, ConcatFp32_2input_dim4_axis3) { } return; } - std::vector outputs{output_tensor}; + std::vector outputs{output_tensor}; MS_LOG(INFO) << " input_shapes size=: " << input_shapes.size(); MS_LOG(INFO) << " initialize tensors "; @@ -255,19 +253,19 @@ TEST_F(TestConcatOpenCLfp32, ConcatFp32_2input_dim4_axis3) { sub_graph->Init(); MS_LOG(INFO) << " initialize input data "; if (inputs.size() == 2) { - memcpy(inputs[0]->Data(), input_data1, input1_size); - memcpy(inputs[1]->Data(), input_data2, input2_size); + memcpy(inputs[0]->MutableData(), input_data1, input1_size); + memcpy(inputs[1]->MutableData(), input_data2, input2_size); } else if (inputs.size() == 3) { - memcpy(inputs[0]->Data(), input_data1, input1_size); - memcpy(inputs[1]->Data(), input_data2, input2_size); - memcpy(inputs[2]->Data(), input_data3, input3_size); + memcpy(inputs[0]->MutableData(), input_data1, input1_size); + memcpy(inputs[1]->MutableData(), input_data2, input2_size); + memcpy(inputs[2]->MutableData(), input_data3, input3_size); } else { MS_LOG(ERROR) << " input size must be 2 or 3 "; } std::cout << "==================output data================" << std::endl; sub_graph->Run(); - auto *output_data_gpu = reinterpret_cast(output_tensor->Data()); + auto *output_data_gpu = reinterpret_cast(output_tensor->MutableData()); CompareOutputData1(output_data_gpu, correctOutput, output_tensor->ElementsNum(), 0.00001); for (auto tensor : inputs) { delete tensor; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/conv2d_transpose_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/conv2d_transpose_tests.cc index cf37a850e4..7983820064 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/conv2d_transpose_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/conv2d_transpose_tests.cc @@ -52,7 +52,7 @@ void RunTestCaseConv2dTranspose(const std::vector &shape, void *input_data, int ow = 2 * w - 1 + 2 * (kw - 1 - pad) - kw + 1; std::vector input_shape = {n, h, w, ci}; auto tensor_x_ptr = - std::make_unique(TypeId(enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), input_shape); + std::make_unique(TypeId(enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), input_shape); auto tensor_x = tensor_x_ptr.get(); if (tensor_x == nullptr) { MS_LOG(ERROR) << "tensor_x create error."; @@ -61,7 +61,7 @@ void RunTestCaseConv2dTranspose(const std::vector &shape, void *input_data, std::vector weight_shape = {co, kh, kw, ci}; auto tensor_w_ptr = - std::make_unique(TypeId(enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), weight_shape); + std::make_unique(TypeId(enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), weight_shape); auto tensor_w = tensor_w_ptr.get(); if (tensor_w == nullptr) { MS_LOG(ERROR) << "tensor_w create error."; @@ -71,7 +71,7 @@ void RunTestCaseConv2dTranspose(const std::vector &shape, void *input_data, std::vector bias_shape = {co}; auto tensor_bias_ptr = - std::make_unique(TypeId(enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), bias_shape); + std::make_unique(TypeId(enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), bias_shape); auto tensor_bias = tensor_bias_ptr.get(); if (tensor_bias == nullptr) { MS_LOG(ERROR) << "tensor_bias create error."; @@ -81,14 +81,14 @@ void RunTestCaseConv2dTranspose(const std::vector &shape, void *input_data, std::vector out_shape = {1, oh, ow, co}; auto tensor_out_ptr = - std::make_unique(TypeId(enable_fp16 ? 
kNumberTypeFloat16 : kNumberTypeFloat32), out_shape); + std::make_unique(TypeId(enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), out_shape); auto tensor_out = tensor_out_ptr.get(); if (tensor_out == nullptr) { MS_LOG(ERROR) << "tensor_out create error."; return; } - std::vector inputs{tensor_x, tensor_w, tensor_bias}; - std::vector outputs{tensor_out}; + std::vector inputs{tensor_x, tensor_w, tensor_bias}; + std::vector outputs{tensor_out}; auto opParameter_ptr = std::make_unique(); auto opParameter = opParameter_ptr.get(); if (opParameter == nullptr) { @@ -115,7 +115,7 @@ void RunTestCaseConv2dTranspose(const std::vector &shape, void *input_data, inputs[0]->MallocData(allocator); std::vector kernels{op_kernel}; - std::vector inputs_g{tensor_x}; + std::vector inputs_g{tensor_x}; auto pGraph_ptr = std::make_unique(inputs_g, outputs, kernels, kernels, kernels); auto pGraph = pGraph_ptr.get(); if (pGraph == nullptr) { @@ -124,12 +124,12 @@ void RunTestCaseConv2dTranspose(const std::vector &shape, void *input_data, } pGraph->Init(); - memcpy(inputs[0]->Data(), input_data, n * h * w * ci * dtype_size); + memcpy(inputs[0]->MutableData(), input_data, n * h * w * ci * dtype_size); pGraph->Run(); if (enable_fp16) { - CompareOutput(outputs[0]->Data(), output_data, n * oh * ow * co, static_cast(1e-3), 2e-2); + CompareOutput(outputs[0]->MutableData(), output_data, n * oh * ow * co, static_cast(1e-3), 2e-2); } else { - CompareOutput(outputs[0]->Data(), output_data, n * oh * ow * co, static_cast(1e-5)); + CompareOutput(outputs[0]->MutableData(), output_data, n * oh * ow * co, static_cast(1e-5)); } inputs[0]->SetData(nullptr); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/convolution_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/convolution_tests.cc index 8e72a0c935..d15798dd16 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/convolution_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/convolution_tests.cc @@ -26,7 +26,7 @@ using mindspore::kernel::ConvolutionOpenCLKernel; using mindspore::kernel::LiteKernel; using mindspore::kernel::SubGraphOpenCLKernel; -using mindspore::lite::tensor::Tensor; +using mindspore::lite::Tensor; using mindspore::schema::Format; using mindspore::schema::Format_KHWC; using mindspore::schema::Format_NC4HW4; @@ -42,12 +42,12 @@ class TestConvolutionOpenCL : public mindspore::CommonTest {}; void LoadData(Tensor *tensor, const float *src) { if (tensor->data_type() == kNumberTypeFloat16) { auto num = tensor->Size() / 2; - auto tensor_data = reinterpret_cast(tensor->Data()); + auto tensor_data = reinterpret_cast(tensor->MutableData()); for (int i = 0; i < num; ++i) { tensor_data[i] = Float32ToShort(src[i]); } } else { - memcpy(tensor->Data(), src, tensor->Size()); + memcpy(tensor->MutableData(), src, tensor->Size()); } } @@ -55,12 +55,12 @@ void CompareOutput(Tensor *output, const float *expect_data, const float atol) { auto num = (output->data_type() == kNumberTypeFloat16) ? 
output->Size() / 2 : output->Size() / 4; std::vector output_data(num); if (output->data_type() == kNumberTypeFloat16) { - auto output_data_fp16 = reinterpret_cast(output->Data()); + auto output_data_fp16 = reinterpret_cast(output->MutableData()); for (int i = 0; i < output_data.size(); ++i) { output_data[i] = ShortToFloat32((output_data_fp16[i])); } } else { - memcpy(output_data.data(), output->Data(), output->Size()); + memcpy(output_data.data(), output->MutableData(), output->Size()); } printf("output:"); @@ -163,10 +163,10 @@ void TEST_MAIN(const std::string &attr, Format input_format, Format output_forma std::vector weight_shape = {param->output_channel_, param->kernel_h_, param->kernel_w_, param->input_channel_}; std::vector bias_shape = {param->output_channel_}; std::vector output_shape = {param->output_batch_, param->output_h_, param->output_w_, param->output_channel_}; - auto input = Tensor(data_type, input_shape, input_format, NodeType_ValueNode); - auto weight = Tensor(data_type, weight_shape, Format_KHWC, NodeType_ValueNode); - auto bias = Tensor(data_type, bias_shape, Format_KHWC, NodeType_ValueNode); - auto output = Tensor(data_type, output_shape, output_format, NodeType_ValueNode); + auto input = Tensor(data_type, input_shape, input_format, lite::TensorCategory(NodeType_ValueNode)); + auto weight = Tensor(data_type, weight_shape, Format_KHWC, lite::TensorCategory(NodeType_ValueNode)); + auto bias = Tensor(data_type, bias_shape, Format_KHWC, lite::TensorCategory(NodeType_ValueNode)); + auto output = Tensor(data_type, output_shape, output_format, lite::TensorCategory(NodeType_ValueNode)); MS_LOG(DEBUG) << "allocate memory and initialize weight/bias"; weight.MallocData(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/depthwise_conv2d_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/depthwise_conv2d_tests.cc index 41470d90ba..9a09c6fa56 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/depthwise_conv2d_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/depthwise_conv2d_tests.cc @@ -81,12 +81,12 @@ void DepthWiseTestMain(ConvParameter *conv_param, T2 *input_data, T1 *weight_dat delete[] packed_input; return; } - auto tensor_a = lite::tensor::Tensor(TypeId(dtype), shape_in, format); - auto tensor_b = lite::tensor::Tensor(TypeId(dtype), shape_filter, schema::Format_NHWC); - auto tensor_c = lite::tensor::Tensor(TypeId(dtype), shape_bias, schema::Format_NHWC); - auto tensor_d = lite::tensor::Tensor(TypeId(dtype), shape_out, format); - std::vector inputs{&tensor_a, &tensor_b, &tensor_c}; - std::vector outputs{&tensor_d}; + auto tensor_a = lite::Tensor(TypeId(dtype), shape_in, format); + auto tensor_b = lite::Tensor(TypeId(dtype), shape_filter, schema::Format_NHWC); + auto tensor_c = lite::Tensor(TypeId(dtype), shape_bias, schema::Format_NHWC); + auto tensor_d = lite::Tensor(TypeId(dtype), shape_out, format); + std::vector inputs{&tensor_a, &tensor_b, &tensor_c}; + std::vector outputs{&tensor_d}; // freamework to do!!! inputs[1]->SetData(packed_weight); @@ -102,7 +102,7 @@ void DepthWiseTestMain(ConvParameter *conv_param, T2 *input_data, T1 *weight_dat pKernel->Init(); std::vector kernels{pKernel.get()}; - std::vector inputs_{&tensor_a}; + std::vector inputs_{&tensor_a}; auto pGraph = std::make_unique(inputs_, outputs, kernels, kernels, kernels); if (pGraph.get() == nullptr) { delete[] packed_input; @@ -112,11 +112,11 @@ void DepthWiseTestMain(ConvParameter *conv_param, T2 *input_data, T1 *weight_dat // freamework to do!!! 
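These tests attach caller-owned buffers with SetData() and, on the cleanup paths above, detach them with SetData(nullptr), presumably so the tensor destructor does not free memory the test still owns. A sketch of that pattern (buffer name and shape are illustrative):

    #include "mindspore/lite/src/tensor.h"

    void RunWithBorrowedBuffer() {
      int8_t buffer[8] = {0};
      mindspore::lite::Tensor tensor(mindspore::kNumberTypeInt8, {8});
      tensor.SetData(buffer);   // the tensor borrows the stack buffer
      // ... hand the tensor to a kernel and run it ...
      tensor.SetData(nullptr);  // detach before the tensor is destroyed
    }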
inputs[0]->MallocData(allocator); - memcpy(inputs[0]->Data(), packed_input, sizeof(T2) * pack_input_size); + memcpy(inputs[0]->MutableData(), packed_input, sizeof(T2) * pack_input_size); pGraph->Run(); if (is_compare) { - T2 *packed_output = reinterpret_cast(outputs[0]->Data()); + T2 *packed_output = reinterpret_cast(outputs[0]->MutableData()); auto packed_correct_data = std::make_unique(packed_output_size); if (packed_correct_data.get() == nullptr) { delete[] packed_input; @@ -552,7 +552,7 @@ TEST_F(TestConvolutionDwOpenCL, ProfilingMobilenetv2Fp32) { const size_t wt_size = 576 * 3 * 3; float *weight_data = new (std::nothrow) float[wt_size]; if (weight_data == nullptr) { - delete [] input_data; + delete[] input_data; return; } memset(weight_data, 0, wt_size); @@ -588,8 +588,8 @@ TEST_F(TestConvolutionDwOpenCL, ProfilingMobilenetv2Fp32) { kNumberTypeFloat32, false); } } - delete [] input_data; - delete [] weight_data; + delete[] input_data; + delete[] weight_data; lite::opencl::OpenCLRuntime::DeleteInstance(); } } // namespace mindspore diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/matmul_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/matmul_tests.cc index 5a6ea2ef89..8a09409592 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/matmul_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/matmul_tests.cc @@ -42,8 +42,8 @@ void RunTestCaseMatMul(const std::vector &shape, void *input_data, void *we int ci = shape[0]; int co = shape[1]; std::vector input_shape = {1, ci}; - auto tensor_x_ptr = std::make_unique( - TypeId(enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), input_shape, schema::Format_NC); + auto tensor_x_ptr = std::make_unique(TypeId(enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), + input_shape, schema::Format_NC); auto tensor_x = tensor_x_ptr.get(); if (tensor_x == nullptr) { MS_LOG(ERROR) << "tensor_x create error."; @@ -52,7 +52,7 @@ void RunTestCaseMatMul(const std::vector &shape, void *input_data, void *we std::vector w_shape = {co, ci}; auto tensor_w_ptr = - std::make_unique(TypeId(enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), w_shape); + std::make_unique(TypeId(enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), w_shape); auto tensor_w = tensor_w_ptr.get(); if (tensor_w == nullptr) { MS_LOG(ERROR) << "tensor_w create error."; @@ -61,15 +61,15 @@ void RunTestCaseMatMul(const std::vector &shape, void *input_data, void *we tensor_w->SetData(weight_data); std::vector out_shape = {1, co}; - auto tensor_out_ptr = std::make_unique( - TypeId(enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), out_shape, schema::Format_NC); + auto tensor_out_ptr = std::make_unique(TypeId(enable_fp16 ? 
kNumberTypeFloat16 : kNumberTypeFloat32), + out_shape, schema::Format_NC); auto tensor_out = tensor_out_ptr.get(); if (tensor_out == nullptr) { MS_LOG(ERROR) << "tensor_out create error."; return; } - std::vector inputs{tensor_x, tensor_w}; - std::vector outputs{tensor_out}; + std::vector inputs{tensor_x, tensor_w}; + std::vector outputs{tensor_out}; auto op_kernel_ptr = std::make_unique(nullptr, inputs, outputs, false); auto op_kernel = op_kernel_ptr.get(); if (op_kernel == nullptr) { @@ -81,7 +81,7 @@ void RunTestCaseMatMul(const std::vector &shape, void *input_data, void *we std::vector kernels{op_kernel}; - std::vector inputs_g{tensor_x}; + std::vector inputs_g{tensor_x}; auto pGraph_ptr = std::make_unique(inputs_g, outputs, kernels, kernels, kernels); auto pGraph = pGraph_ptr.get(); if (pGraph == nullptr) { @@ -89,12 +89,12 @@ void RunTestCaseMatMul(const std::vector &shape, void *input_data, void *we return; } pGraph->Init(); - memcpy(inputs[0]->Data(), input_data, ci * dtype_size); + memcpy(inputs[0]->MutableData(), input_data, ci * dtype_size); pGraph->Run(); if (enable_fp16) { - CompareOutput(outputs[0]->Data(), output_data, co, static_cast(1e-3), 2e-2); + CompareOutput(outputs[0]->MutableData(), output_data, co, static_cast(1e-3), 2e-2); } else { - CompareOutput(outputs[0]->Data(), output_data, co, static_cast(1e-5)); + CompareOutput(outputs[0]->MutableData(), output_data, co, static_cast(1e-5)); } tensor_x->SetData(nullptr); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/max_pooling_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/max_pooling_tests.cc index fe453a0ca1..1994d9b620 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/max_pooling_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/max_pooling_tests.cc @@ -75,23 +75,23 @@ void RunTestCaseMaxPooling(const std::vector &shape, void *input_data, void } InitMaxPoolingParam(param); std::vector input_shape = {n, h, w, c}; - auto tensor_x_ptr = std::make_unique( - TypeId(enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), input_shape, schema::Format_NHWC); + auto tensor_x_ptr = std::make_unique(TypeId(enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), + input_shape, schema::Format_NHWC); auto tensor_x = tensor_x_ptr.get(); if (tensor_x == nullptr) { MS_LOG(ERROR) << "tensor_x create error."; return; } std::vector out_shape = {n, oh, ow, c}; - auto tensor_out_ptr = std::make_unique( - TypeId(enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), out_shape, schema::Format_NHWC); + auto tensor_out_ptr = std::make_unique(TypeId(enable_fp16 ? 
kNumberTypeFloat16 : kNumberTypeFloat32), + out_shape, schema::Format_NHWC); auto tensor_out = tensor_out_ptr.get(); if (tensor_out == nullptr) { MS_LOG(ERROR) << "tensor_out create error."; return; } - std::vector inputs{tensor_x}; - std::vector outputs{tensor_out}; + std::vector inputs{tensor_x}; + std::vector outputs{tensor_out}; auto arith_kernel_ptr = std::make_unique(reinterpret_cast(param), inputs, outputs); auto arith_kernel = arith_kernel_ptr.get(); @@ -111,13 +111,14 @@ void RunTestCaseMaxPooling(const std::vector &shape, void *input_data, void return; } pGraph->Init(); - memcpy(inputs[0]->Data(), input_data, inputs[0]->ElementsNum() * dtype_size); + memcpy(inputs[0]->MutableData(), input_data, inputs[0]->ElementsNum() * dtype_size); pGraph->Run(); if (enable_fp16) { - CompareOutput(outputs[0]->Data(), output_data, outputs[0]->ElementsNum(), static_cast(1e-3), 2e-2); + CompareOutput(outputs[0]->MutableData(), output_data, outputs[0]->ElementsNum(), static_cast(1e-3), + 2e-2); } else { - CompareOutput(outputs[0]->Data(), output_data, outputs[0]->ElementsNum(), static_cast(1e-5)); + CompareOutput(outputs[0]->MutableData(), output_data, outputs[0]->ElementsNum(), static_cast(1e-5)); } inputs[0]->SetData(nullptr); outputs[0]->SetData(nullptr); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/prelu_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/prelu_tests.cc index 34bfd30689..0d9263393b 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/prelu_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/prelu_tests.cc @@ -43,8 +43,8 @@ void LoadDataPRelu(void *dst, size_t dst_size, const std::string &file_path) { } template -void CompareOutPRelu(lite::tensor::Tensor *output_tensor, const std::string &standard_answer_file) { - auto *output_data = reinterpret_cast(output_tensor->Data()); +void CompareOutPRelu(lite::Tensor *output_tensor, const std::string &standard_answer_file) { + auto *output_data = reinterpret_cast(output_tensor->MutableData()); size_t output_size = output_tensor->Size(); auto expect_data = reinterpret_cast(mindspore::lite::ReadFile(standard_answer_file.c_str(), &output_size)); constexpr float atol = 0.0002; @@ -62,9 +62,9 @@ void CompareOutPRelu(lite::tensor::Tensor *output_tensor, const std::string &sta } template -void printf_tensor_Prelu(const std::string &log, mindspore::lite::tensor::Tensor *in_data, int size) { +void printf_tensor_Prelu(const std::string &log, mindspore::lite::Tensor *in_data, int size) { MS_LOG(INFO) << log; - auto input_data = reinterpret_cast(in_data->Data()); + auto input_data = reinterpret_cast(in_data->MutableData()); for (int i = 0; i < size; ++i) { printf("%f ", input_data[i]); } @@ -87,34 +87,34 @@ TEST_F(TestPReluOpenCL, PReluFp32_dim4) { ocl_runtime->SetFp16Enable(data_type == kNumberTypeFloat16); schema::Format format = schema::Format_NHWC; schema::Format op_format = schema::Format_NC4HW4; - auto tensor_type = schema::NodeType_ValueNode; - auto input_tensor = new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, format, tensor_type); + auto tensor_type = lite::TensorCategory(schema::NodeType_ValueNode); + auto input_tensor = new (std::nothrow) lite::Tensor(data_type, input_shape, format, tensor_type); if (input_tensor == nullptr) { MS_LOG(ERROR) << "new input_tensor error!"; return; } - auto output_tensor = new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, format, tensor_type); + auto output_tensor = new (std::nothrow) lite::Tensor(data_type, input_shape, format, 
tensor_type); if (output_tensor == nullptr) { MS_LOG(ERROR) << "new output_tensor error"; delete input_tensor; return; } - auto weight_tensor = new (std::nothrow) - lite::tensor::Tensor(data_type, std::vector{input_shape[3]}, schema::Format_NHWC, tensor_type); + auto weight_tensor = + new (std::nothrow) lite::Tensor(data_type, std::vector{input_shape[3]}, schema::Format_NHWC, tensor_type); if (weight_tensor == nullptr) { MS_LOG(ERROR) << "new weight_tensor error"; delete input_tensor; delete output_tensor; return; } - std::vector inputs{input_tensor, weight_tensor}; - std::vector outputs{output_tensor}; + std::vector inputs{input_tensor, weight_tensor}; + std::vector outputs{output_tensor}; inputs[0]->MallocData(allocator); inputs[1]->MallocData(allocator); MS_LOG(INFO) << "initialize input data"; - LoadDataPRelu(input_tensor->Data(), input_tensor->Size(), in_file); - LoadDataPRelu(weight_tensor->Data(), weight_tensor->Size(), weight_file); + LoadDataPRelu(input_tensor->MutableData(), input_tensor->Size(), in_file); + LoadDataPRelu(weight_tensor->MutableData(), weight_tensor->Size(), weight_file); if (ocl_runtime->GetFp16Enable()) { printf_tensor_Prelu("PRELU:FP16--input data", input_tensor, inputs[0]->ElementsNum()); printf_tensor_Prelu("PRELU:FP16--weight data", weight_tensor, weight_tensor->ElementsNum()); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/reshape_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/reshape_tests.cc index 72efebd1a3..0fd237c6c2 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/reshape_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/reshape_tests.cc @@ -46,8 +46,8 @@ void RunTestCaseReshape(const std::vector &shape, void *input_data, void *o int oh = shape[4]; int ow = shape[5]; std::vector input_shape = {n, h, w, c}; - auto tensor_x_ptr = std::make_unique( - TypeId(enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), input_shape, schema::Format_NHWC); + auto tensor_x_ptr = std::make_unique(TypeId(enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), + input_shape, schema::Format_NHWC); auto tensor_x = tensor_x_ptr.get(); if (tensor_x == nullptr) { MS_LOG(ERROR) << "tensor_x create error."; @@ -58,15 +58,15 @@ void RunTestCaseReshape(const std::vector &shape, void *input_data, void *o std::vector out_shape = {n, c}; } auto tensor_out_ptr = - std::make_unique(TypeId(enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), out_shape, - is_output_2d ? schema::Format_NC : schema::Format_NHWC); + std::make_unique(TypeId(enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), out_shape, + is_output_2d ? 
schema::Format_NC : schema::Format_NHWC); auto tensor_out = tensor_out_ptr.get(); if (tensor_out == nullptr) { MS_LOG(ERROR) << "tensor_out create error."; return; } - std::vector inputs{tensor_x}; - std::vector outputs{tensor_out}; + std::vector inputs{tensor_x}; + std::vector outputs{tensor_out}; auto arith_kernel_ptr = std::make_unique(nullptr, inputs, outputs); auto arith_kernel = arith_kernel_ptr.get(); if (arith_kernel == nullptr) { @@ -85,13 +85,14 @@ void RunTestCaseReshape(const std::vector &shape, void *input_data, void *o return; } pGraph->Init(); - memcpy(inputs[0]->Data(), input_data, inputs[0]->ElementsNum() * dtype_size); + memcpy(inputs[0]->MutableData(), input_data, inputs[0]->ElementsNum() * dtype_size); pGraph->Run(); if (enable_fp16) { - CompareOutput(outputs[0]->Data(), output_data, outputs[0]->ElementsNum(), static_cast(1e-3), 2e-2); + CompareOutput(outputs[0]->MutableData(), output_data, outputs[0]->ElementsNum(), static_cast(1e-3), + 2e-2); } else { - CompareOutput(outputs[0]->Data(), output_data, outputs[0]->ElementsNum(), static_cast(1e-5)); + CompareOutput(outputs[0]->MutableData(), output_data, outputs[0]->ElementsNum(), static_cast(1e-5)); } inputs[0]->SetData(nullptr); outputs[0]->SetData(nullptr); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/slice_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/slice_tests.cc index bef522530b..7a3c7beb35 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/slice_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/slice_tests.cc @@ -52,7 +52,7 @@ TEST_F(TestSliceOpenCLfp32, Slicefp32input_dim4) { std::vector begin = {0, 2, 3, 3}; std::vector size = {1, 10, 10, 13}; auto data_type = kNumberTypeFloat32; - auto tensor_type = schema::NodeType_ValueNode; + auto tensor_type = lite::TensorCategory(schema::NodeType_ValueNode); // get the input from .bin size_t input_size, output_size; @@ -61,21 +61,19 @@ TEST_F(TestSliceOpenCLfp32, Slicefp32input_dim4) { auto input_data = reinterpret_cast(mindspore::lite::ReadFile(input_path.c_str(), &input_size)); auto correct_data = reinterpret_cast(mindspore::lite::ReadFile(output_path.c_str(), &output_size)); MS_LOG(INFO) << " construct tensors "; - lite::tensor::Tensor *tensor_data = - new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC, tensor_type); + lite::Tensor *tensor_data = new (std::nothrow) lite::Tensor(data_type, input_shape, schema::Format_NHWC, tensor_type); if (tensor_data == nullptr) { MS_LOG(INFO) << " init tensor failed "; return; } - auto *output_tensor = - new (std::nothrow) lite::tensor::Tensor(data_type, output_shape, schema::Format_NHWC, tensor_type); + auto *output_tensor = new (std::nothrow) lite::Tensor(data_type, output_shape, schema::Format_NHWC, tensor_type); if (output_tensor == nullptr) { delete tensor_data; MS_LOG(INFO) << " init tensor failed "; return; } - std::vector inputs = {tensor_data}; - std::vector outputs = {output_tensor}; + std::vector inputs = {tensor_data}; + std::vector outputs = {output_tensor}; MS_LOG(INFO) << "setting SliceParameter "; auto param = new (std::nothrow) SliceParameter(); @@ -132,12 +130,12 @@ TEST_F(TestSliceOpenCLfp32, Slicefp32input_dim4) { sub_graph->Init(); MS_LOG(INFO) << " init tensors "; - memcpy(inputs[0]->Data(), input_data, input_size); + memcpy(inputs[0]->MutableData(), input_data, input_size); std::cout << "==================output data================" << std::endl; sub_graph->Run(); - auto *output_data_gpu = 
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/slice_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/slice_tests.cc
index bef522530b..7a3c7beb35 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/slice_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/slice_tests.cc
@@ -52,7 +52,7 @@ TEST_F(TestSliceOpenCLfp32, Slicefp32input_dim4) {
   std::vector<int> begin = {0, 2, 3, 3};
   std::vector<int> size = {1, 10, 10, 13};
   auto data_type = kNumberTypeFloat32;
-  auto tensor_type = schema::NodeType_ValueNode;
+  auto tensor_type = lite::TensorCategory(schema::NodeType_ValueNode);

   // get the input from .bin
   size_t input_size, output_size;
@@ -61,21 +61,19 @@ TEST_F(TestSliceOpenCLfp32, Slicefp32input_dim4) {
   auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
   auto correct_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(output_path.c_str(), &output_size));
   MS_LOG(INFO) << " construct tensors ";
-  lite::tensor::Tensor *tensor_data =
-    new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC, tensor_type);
+  lite::Tensor *tensor_data = new (std::nothrow) lite::Tensor(data_type, input_shape, schema::Format_NHWC, tensor_type);
   if (tensor_data == nullptr) {
     MS_LOG(INFO) << " init tensor failed ";
     return;
   }
-  auto *output_tensor =
-    new (std::nothrow) lite::tensor::Tensor(data_type, output_shape, schema::Format_NHWC, tensor_type);
+  auto *output_tensor = new (std::nothrow) lite::Tensor(data_type, output_shape, schema::Format_NHWC, tensor_type);
   if (output_tensor == nullptr) {
     delete tensor_data;
     MS_LOG(INFO) << " init tensor failed ";
     return;
   }
-  std::vector<lite::tensor::Tensor *> inputs = {tensor_data};
-  std::vector<lite::tensor::Tensor *> outputs = {output_tensor};
+  std::vector<lite::Tensor *> inputs = {tensor_data};
+  std::vector<lite::Tensor *> outputs = {output_tensor};

   MS_LOG(INFO) << "setting SliceParameter ";
   auto param = new (std::nothrow) SliceParameter();
@@ -132,12 +130,12 @@ TEST_F(TestSliceOpenCLfp32, Slicefp32input_dim4) {
   sub_graph->Init();
   MS_LOG(INFO) << " init tensors ";
-  memcpy(inputs[0]->Data(), input_data, input_size);
+  memcpy(inputs[0]->MutableData(), input_data, input_size);
   std::cout << "==================output data================" << std::endl;
   sub_graph->Run();
-  auto *output_data_gpu = reinterpret_cast<float *>(output_tensor->Data());
+  auto *output_data_gpu = reinterpret_cast<float *>(output_tensor->MutableData());
   CompareOutputData1(output_data_gpu, correct_data, output_tensor->ElementsNum(), 0.0001);
   for (auto tensor : inputs) {
     delete tensor;
@@ -161,7 +159,7 @@ TEST_F(TestSliceOpenCLfp16, Slicefp16input_dim4) {
   std::vector<int> begin = {0, 1, 1, 7};
   std::vector<int> size = {1, 255, 255, 15};
   auto data_type = kNumberTypeFloat16;
-  auto tensor_type = schema::NodeType_ValueNode;
+  auto tensor_type = lite::TensorCategory(schema::NodeType_ValueNode);

   // get the input from .bin
   size_t input_size, output_size;
@@ -171,21 +169,19 @@ TEST_F(TestSliceOpenCLfp16, Slicefp16input_dim4) {
   auto correct_data = reinterpret_cast<float16_t *>(mindspore::lite::ReadFile(output_path.c_str(), &output_size));
   MS_LOG(INFO) << " construct tensors ";
-  lite::tensor::Tensor *tensor_data =
-    new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC, tensor_type);
+  lite::Tensor *tensor_data = new (std::nothrow) lite::Tensor(data_type, input_shape, schema::Format_NHWC, tensor_type);
   if (tensor_data == nullptr) {
     MS_LOG(INFO) << " init tensor failed ";
     return;
   }
-  auto *output_tensor =
-    new (std::nothrow) lite::tensor::Tensor(data_type, output_shape, schema::Format_NHWC4, tensor_type);
+  auto *output_tensor = new (std::nothrow) lite::Tensor(data_type, output_shape, schema::Format_NHWC4, tensor_type);
   if (output_tensor == nullptr) {
     delete tensor_data;
     MS_LOG(INFO) << " init tensor failed ";
     return;
   }
-  std::vector<lite::tensor::Tensor *> inputs = {tensor_data};
-  std::vector<lite::tensor::Tensor *> outputs = {output_tensor};
+  std::vector<lite::Tensor *> inputs = {tensor_data};
+  std::vector<lite::Tensor *> outputs = {output_tensor};

   MS_LOG(INFO) << " setting SliceParameter ";
   auto param = new (std::nothrow) SliceParameter();
@@ -242,12 +238,12 @@ TEST_F(TestSliceOpenCLfp16, Slicefp16input_dim4) {
   sub_graph->Init();
   MS_LOG(INFO) << " init tensors ";
-  memcpy(inputs[0]->Data(), input_data, input_size);
+  memcpy(inputs[0]->MutableData(), input_data, input_size);
   std::cout << "==================output data================" << std::endl;
   sub_graph->Run();
-  auto *output_data_gpu = reinterpret_cast<float16_t *>(output_tensor->Data());
+  auto *output_data_gpu = reinterpret_cast<float16_t *>(output_tensor->MutableData());
   CompareOutputData1(output_data_gpu, correct_data, output_tensor->ElementsNum(), 0.0001);
   for (auto tensor : inputs) {
     delete tensor;
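Note the tensor_type change running through both slice tests: the constructor no longer takes the raw schema::NodeType enum but a category produced by lite::TensorCategory(). Only that call is visible in this patch, so the sketch below treats the returned category as opaque:

// Sketch: constructing a tensor with the new category argument.
auto tensor_type = lite::TensorCategory(schema::NodeType_ValueNode);  // wraps the schema enum
auto *t = new (std::nothrow)
  lite::Tensor(kNumberTypeFloat16, std::vector<int>{1, 255, 255, 15}, schema::Format_NHWC, tensor_type);
if (t == nullptr) {
  MS_LOG(INFO) << " init tensor failed ";
}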
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/softmax_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/softmax_tests.cc
index fc2ce9dd48..4edd07b730 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/softmax_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/softmax_tests.cc
@@ -57,20 +57,20 @@ void RunTestCaseSoftmax(const std::vector<int> &shape, void *input_data, void *o
   }
   auto input_format = is_2d ? schema::Format_NC : schema::Format_NHWC;
   auto input_dtype = enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32;
-  auto tensor_x_ptr = std::make_unique<lite::tensor::Tensor>(TypeId(input_dtype), input_shape, input_format);
+  auto tensor_x_ptr = std::make_unique<lite::Tensor>(TypeId(input_dtype), input_shape, input_format);
   auto tensor_x = tensor_x_ptr.get();
   if (tensor_x == nullptr) {
     MS_LOG(ERROR) << "tensor_x create error.";
     return;
   }
-  auto tensor_out_ptr = std::make_unique<lite::tensor::Tensor>(TypeId(input_dtype), input_shape, input_format);
+  auto tensor_out_ptr = std::make_unique<lite::Tensor>(TypeId(input_dtype), input_shape, input_format);
   auto tensor_out = tensor_out_ptr.get();
   if (tensor_out == nullptr) {
     MS_LOG(ERROR) << "tensor_out create error.";
     return;
   }
-  std::vector<lite::tensor::Tensor *> inputs{tensor_x};
-  std::vector<lite::tensor::Tensor *> outputs{tensor_out};
+  std::vector<lite::Tensor *> inputs{tensor_x};
+  std::vector<lite::Tensor *> outputs{tensor_out};
   auto arith_kernel_ptr = std::make_unique(nullptr, inputs, outputs);
   auto arith_kernel = arith_kernel_ptr.get();
   if (arith_kernel == nullptr) {
@@ -89,13 +89,14 @@ void RunTestCaseSoftmax(const std::vector<int> &shape, void *input_data, void *o
     return;
   }
   pGraph->Init();
-  memcpy(inputs[0]->Data(), input_data, inputs[0]->ElementsNum() * dtype_size);
+  memcpy(inputs[0]->MutableData(), input_data, inputs[0]->ElementsNum() * dtype_size);
   pGraph->Run();
   if (enable_fp16) {
-    CompareOutput(outputs[0]->Data(), output_data, outputs[0]->ElementsNum(), static_cast<float16_t>(1e-3), 2e-2);
+    CompareOutput(outputs[0]->MutableData(), output_data, outputs[0]->ElementsNum(), static_cast<float16_t>(1e-3),
+                  2e-2);
   } else {
-    CompareOutput(outputs[0]->Data(), output_data, outputs[0]->ElementsNum(), static_cast<float>(1e-5));
+    CompareOutput(outputs[0]->MutableData(), output_data, outputs[0]->ElementsNum(), static_cast<float>(1e-5));
   }
   inputs[0]->SetData(nullptr);
   outputs[0]->SetData(nullptr);
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/to_format_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/to_format_tests.cc
index bbe9d94fa6..220176ee1f 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/to_format_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/to_format_tests.cc
@@ -29,7 +29,7 @@ class TestToFormatOpenCL : public mindspore::CommonTest {
 };

 TEST_F(TestToFormatOpenCL, ToFormatNHWC2NCHW) {
-  auto ocl_runtime = lite::opencl::OpenCLRuntime::GetInstance();
+  auto ocl_runtime = lite::opencl::OpenCLRuntime::GetInstance();
   ocl_runtime->Init();
   auto allocator = ocl_runtime->GetAllocator();
   int h = 64;
@@ -43,22 +43,21 @@ TEST_F(TestToFormatOpenCL, ToFormatNHWC2NCHW) {
     return;
   }
   std::vector<int> input_shape = {1, h, w, c};
-  auto tensor_x_ptr =
-    std::make_unique<lite::tensor::Tensor>(TypeId(kNumberTypeFloat32), input_shape, schema::Format_NHWC4);
+  auto tensor_x_ptr = std::make_unique<lite::Tensor>(TypeId(kNumberTypeFloat32), input_shape, schema::Format_NHWC4);
   auto tensor_x = tensor_x_ptr.get();
   if (tensor_x == nullptr) {
     MS_LOG(ERROR) << "tensor_x create error.";
     return;
   }
   std::vector<int> out_shape = {1, c, h, w};
-  auto tensor_out_ptr = std::make_unique<lite::tensor::Tensor>(TypeId(kNumberTypeFloat32), out_shape);
+  auto tensor_out_ptr = std::make_unique<lite::Tensor>(TypeId(kNumberTypeFloat32), out_shape);
   auto tensor_out = tensor_out_ptr.get();
   if (tensor_out == nullptr) {
     MS_LOG(ERROR) << "tensor_out create error.";
     return;
   }
-  std::vector<lite::tensor::Tensor *> inputs{tensor_x};
-  std::vector<lite::tensor::Tensor *> outputs{tensor_out};
+  std::vector<lite::Tensor *> inputs{tensor_x};
+  std::vector<lite::Tensor *> outputs{tensor_out};
   auto arith_kernel_ptr = std::make_unique(nullptr, inputs, outputs);
   auto arith_kernel = arith_kernel_ptr.get();
   if (arith_kernel == nullptr) {
@@ -77,7 +76,7 @@ TEST_F(TestToFormatOpenCL, ToFormatNHWC2NCHW) {
     return;
   }
   pGraph->Init();
-  memcpy(inputs[0]->Data(), 
input_data, input_size); + memcpy(inputs[0]->MutableData(), input_data, input_size); pGraph->Run(); size_t output_size; @@ -88,7 +87,7 @@ TEST_F(TestToFormatOpenCL, ToFormatNHWC2NCHW) { return; } printf("==================output data=================\n"); - float *output_data = reinterpret_cast(tensor_out->Data()); + float *output_data = reinterpret_cast(tensor_out->MutableData()); std::cout << std::endl; int size_n = h * w * c; size_n = size_n > 100 ? 100 : size_n; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/transpose_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/transpose_tests.cc index dcadb54312..1ebb19f7cf 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/transpose_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/transpose_tests.cc @@ -42,23 +42,23 @@ void RunTestTranspose(const std::vector &shape, void *input_data, void *out int w = shape[1]; int c = shape[2]; std::vector input_shape = {1, h, w, c}; - auto tensor_x_ptr = std::make_unique( - TypeId(enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), input_shape, schema::Format_NHWC); + auto tensor_x_ptr = std::make_unique(TypeId(enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), + input_shape, schema::Format_NHWC); auto tensor_x = tensor_x_ptr.get(); if (tensor_x == nullptr) { MS_LOG(ERROR) << "tensor_x create error."; return; } std::vector out_shape = {1, c, h, w}; - auto tensor_out_ptr = std::make_unique( - TypeId(enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), out_shape, schema::Format_NCHW); + auto tensor_out_ptr = std::make_unique(TypeId(enable_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32), + out_shape, schema::Format_NCHW); auto tensor_out = tensor_out_ptr.get(); if (tensor_out == nullptr) { MS_LOG(ERROR) << "tensor_out create error."; return; } - std::vector inputs{tensor_x}; - std::vector outputs{tensor_out}; + std::vector inputs{tensor_x}; + std::vector outputs{tensor_out}; auto arith_kernel_ptr = std::make_unique(nullptr, inputs, outputs); auto arith_kernel = arith_kernel_ptr.get(); if (arith_kernel == nullptr) { @@ -77,13 +77,13 @@ void RunTestTranspose(const std::vector &shape, void *input_data, void *out return; } pGraph->Init(); - memcpy(inputs[0]->Data(), input_data, h * w * c * dtype_size); + memcpy(inputs[0]->MutableData(), input_data, h * w * c * dtype_size); pGraph->Run(); if (enable_fp16) { - CompareOutput(outputs[0]->Data(), output_data, h * w * c, static_cast(1e-3), 2e-2); + CompareOutput(outputs[0]->MutableData(), output_data, h * w * c, static_cast(1e-3), 2e-2); } else { - CompareOutput(outputs[0]->Data(), output_data, h * w * c, static_cast(1e-5)); + CompareOutput(outputs[0]->MutableData(), output_data, h * w * c, static_cast(1e-5)); } inputs[0]->SetData(nullptr); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/utils_tests.h b/mindspore/lite/test/ut/src/runtime/kernel/opencl/utils_tests.h index 80c141c8ba..86eb77e7e7 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/utils_tests.h +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/utils_tests.h @@ -54,10 +54,10 @@ void CompareOutput(void *output, void *expect, size_t elem_num, T atol, float rt } template -void CompareOutput(lite::tensor::Tensor *output_tensor, const std::string &file_path, T atol, float rtol = 1e-5) { +void CompareOutput(lite::Tensor *output_tensor, const std::string &file_path, T atol, float rtol = 1e-5) { size_t output_size; auto expect_data = mindspore::lite::ReadFile(file_path.c_str(), &output_size); - 
CompareOutput(output_tensor->Data(), expect_data, output_tensor->ElementsNum(), atol, rtol); + CompareOutput(output_tensor->MutableData(), expect_data, output_tensor->ElementsNum(), atol, rtol); } } // namespace mindspore diff --git a/mindspore/lite/test/ut/src/utils_test.cc b/mindspore/lite/test/ut/src/utils_test.cc index a9b80ca94f..ee4ec0f850 100644 --- a/mindspore/lite/test/ut/src/utils_test.cc +++ b/mindspore/lite/test/ut/src/utils_test.cc @@ -36,11 +36,11 @@ TEST_F(UtilsTest, TestSubgraph) { auto kernel1 = std::make_shared(); auto kernel2 = std::make_shared(); - auto tensor0 = std::make_shared(); - auto tensor1 = std::make_shared(); - auto tensor2 = std::make_shared(); - auto tensor3 = std::make_shared(); - auto tensor4 = std::make_shared(); + auto tensor0 = std::make_shared(); + auto tensor1 = std::make_shared(); + auto tensor2 = std::make_shared(); + auto tensor3 = std::make_shared(); + auto tensor4 = std::make_shared(); kernel0->AddOutKernel(kernel1.get()); kernel1->AddInKernel(kernel0.get()); diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_activation_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_activation_parser_test.cc index 7505ff4c9a..dda48616bf 100644 --- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_activation_parser_test.cc +++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_activation_parser_test.cc @@ -110,9 +110,7 @@ TEST_F(TestTfliteParserHardSwish, AttrValue) { class TestTfliteParserPrelu : public TestTfliteParser { public: TestTfliteParserPrelu() = default; - void SetUp() override { - meta_graph = LoadAndConvert("./prelu.tflite"); - } + void SetUp() override { meta_graph = LoadAndConvert("./prelu.tflite"); } }; TEST_F(TestTfliteParserPrelu, OpType) { diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_addn_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_addn_parser_test.cc index 7480d19b6c..5458e56412 100644 --- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_addn_parser_test.cc +++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_addn_parser_test.cc @@ -22,9 +22,7 @@ namespace mindspore { class TestTfliteParserAddN : public TestTfliteParser { public: TestTfliteParserAddN() = default; - void SetUp() override { - meta_graph = LoadAndConvert("./addn.tflite"); - } + void SetUp() override { meta_graph = LoadAndConvert("./addn.tflite"); } }; TEST_F(TestTfliteParserAddN, OpType) { diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_arithmetic_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_arithmetic_parser_test.cc index fdf08db8cf..70c032f71c 100644 --- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_arithmetic_parser_test.cc +++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_arithmetic_parser_test.cc @@ -99,9 +99,7 @@ TEST_F(TestTfliteParserFloorMod, OpType) { class TestTfliteParserRealDiv : public TestTfliteParser { public: TestTfliteParserRealDiv() = default; - void SetUp() override { - meta_graph = LoadAndConvert("./realdiv.tflite"); - } + void SetUp() override { meta_graph = LoadAndConvert("./realdiv.tflite"); } }; TEST_F(TestTfliteParserRealDiv, OpType) { @@ -114,9 +112,7 @@ TEST_F(TestTfliteParserRealDiv, OpType) { class TestTfliteParserSquaredDifference : public TestTfliteParser { public: TestTfliteParserSquaredDifference() = default; - void SetUp() override { - meta_graph = LoadAndConvert("./squared_difference.tflite"); - } + 
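The long run of parser-test hunks that follows is pure clang-format reflow: every fixture's SetUp() collapses to one line. The shared pattern, with a placeholder class and model name, is:

// Each TFLite parser test is a fixture that converts one model in SetUp().
class TestTfliteParserFoo : public TestTfliteParser {  // "Foo" and the file name are placeholders
 public:
  TestTfliteParserFoo() = default;
  void SetUp() override { meta_graph = LoadAndConvert("./foo.tflite"); }
};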
void SetUp() override { meta_graph = LoadAndConvert("./squared_difference.tflite"); } }; TEST_F(TestTfliteParserSquaredDifference, OpType) { @@ -124,7 +120,7 @@ TEST_F(TestTfliteParserSquaredDifference, OpType) { ASSERT_GT(meta_graph->nodes.size(), 0); ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr); ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_SquaredDifference) - << "wrong Op Type"; + << "wrong Op Type"; } class TestTfliteParserPow : public TestTfliteParser { @@ -282,9 +278,7 @@ TEST_F(TestTfliteParserLog, OpType) { class TestTfliteParserRound : public TestTfliteParser { public: TestTfliteParserRound() = default; - void SetUp() override { - meta_graph = LoadAndConvert("./round.tflite"); - } + void SetUp() override { meta_graph = LoadAndConvert("./round.tflite"); } }; TEST_F(TestTfliteParserRound, OpType) { @@ -323,9 +317,7 @@ TEST_F(TestTfliteParserFloor, OpType) { class TestTfliteParserEqual : public TestTfliteParser { public: TestTfliteParserEqual() = default; - void SetUp() override { - meta_graph = LoadAndConvert("./equal.tflite"); - } + void SetUp() override { meta_graph = LoadAndConvert("./equal.tflite"); } }; TEST_F(TestTfliteParserEqual, OpType) { @@ -338,9 +330,7 @@ TEST_F(TestTfliteParserEqual, OpType) { class TestTfliteParserNotEqual : public TestTfliteParser { public: TestTfliteParserNotEqual() = default; - void SetUp() override { - meta_graph = LoadAndConvert("./not_equal.tflite"); - } + void SetUp() override { meta_graph = LoadAndConvert("./not_equal.tflite"); } }; TEST_F(TestTfliteParserNotEqual, OpType) { @@ -353,9 +343,7 @@ TEST_F(TestTfliteParserNotEqual, OpType) { class TestTfliteParserGreater : public TestTfliteParser { public: TestTfliteParserGreater() = default; - void SetUp() override { - meta_graph = LoadAndConvert("./greater.tflite"); - } + void SetUp() override { meta_graph = LoadAndConvert("./greater.tflite"); } }; TEST_F(TestTfliteParserGreater, OpType) { @@ -368,9 +356,7 @@ TEST_F(TestTfliteParserGreater, OpType) { class TestTfliteParserGreaterEqual : public TestTfliteParser { public: TestTfliteParserGreaterEqual() = default; - void SetUp() override { - meta_graph = LoadAndConvert("./greater_equal.tflite"); - } + void SetUp() override { meta_graph = LoadAndConvert("./greater_equal.tflite"); } }; TEST_F(TestTfliteParserGreaterEqual, OpType) { @@ -383,9 +369,7 @@ TEST_F(TestTfliteParserGreaterEqual, OpType) { class TestTfliteParserLess : public TestTfliteParser { public: TestTfliteParserLess() = default; - void SetUp() override { - meta_graph = LoadAndConvert("./less.tflite"); - } + void SetUp() override { meta_graph = LoadAndConvert("./less.tflite"); } }; TEST_F(TestTfliteParserLess, OpType) { @@ -398,9 +382,7 @@ TEST_F(TestTfliteParserLess, OpType) { class TestTfliteParserLessEqual : public TestTfliteParser { public: TestTfliteParserLessEqual() = default; - void SetUp() override { - meta_graph = LoadAndConvert("./less_equal.tflite"); - } + void SetUp() override { meta_graph = LoadAndConvert("./less_equal.tflite"); } }; TEST_F(TestTfliteParserLessEqual, OpType) { diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_cast_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_cast_parser_test.cc index 4dd4c41a75..5fc7a4e31d 100644 --- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_cast_parser_test.cc +++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_cast_parser_test.cc @@ -22,9 +22,7 @@ namespace mindspore { class TestTfliteParserCast : 
public TestTfliteParser { public: TestTfliteParserCast() = default; - void SetUp() override { - meta_graph = LoadAndConvert("./cast.tflite"); - } + void SetUp() override { meta_graph = LoadAndConvert("./cast.tflite"); } }; TEST_F(TestTfliteParserCast, OpType) { diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_depth_to_space_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_depth_to_space_parser_test.cc index 08abf71836..b47b4997b6 100644 --- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_depth_to_space_parser_test.cc +++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_depth_to_space_parser_test.cc @@ -22,9 +22,7 @@ namespace mindspore { class TestTfliteParserDepthToSpace : public TestTfliteParser { public: TestTfliteParserDepthToSpace() = default; - void SetUp() override { - meta_graph = LoadAndConvert("./depth_to_space.tflite"); - } + void SetUp() override { meta_graph = LoadAndConvert("./depth_to_space.tflite"); } }; TEST_F(TestTfliteParserDepthToSpace, OpType) { diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_fill_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_fill_parser_test.cc index 4ce37d77a4..eae2d770d8 100644 --- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_fill_parser_test.cc +++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_fill_parser_test.cc @@ -31,7 +31,7 @@ TEST_F(TestTfliteParserFill, OpType) { ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Fill) << "wrong Op Type"; } -TEST_F(TestTfliteParserFill, AttrValue) {; +TEST_F(TestTfliteParserFill, AttrValue) { ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsFill(), nullptr); auto val = meta_graph->nodes.front()->primitive->value.AsFill(); std::vector dims = {9}; diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_logical_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_logical_parser_test.cc index 5a969bfe41..dd686dfaab 100644 --- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_logical_parser_test.cc +++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_logical_parser_test.cc @@ -57,5 +57,4 @@ TEST_F(TestTfliteParserLogicalOr, OpType) { ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_LogicalOr) << "wrong Op Type"; } - } // namespace mindspore diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_lrn_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_lrn_parser_test.cc index 8db8351d4b..6451cec50f 100644 --- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_lrn_parser_test.cc +++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_lrn_parser_test.cc @@ -28,8 +28,8 @@ TEST_F(TestTfliteParserLRN, OpType) { ASSERT_NE(meta_graph, nullptr); ASSERT_GT(meta_graph->nodes.size(), 0); ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr); - ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, - schema::PrimitiveType_LocalResponseNormalization) << "wrong Op Type"; + ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_LocalResponseNormalization) + << "wrong Op Type"; } TEST_F(TestTfliteParserLRN, AttrValue) { diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_pooling_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_pooling_parser_test.cc index 99d2500d59..3d9465bc25 
100644 --- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_pooling_parser_test.cc +++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_pooling_parser_test.cc @@ -22,9 +22,7 @@ namespace mindspore { class TestTfliteParserMaxPooling : public TestTfliteParser { public: TestTfliteParserMaxPooling() = default; - void SetUp() override { - meta_graph = LoadAndConvert("./max_pooling.tflite"); - } + void SetUp() override { meta_graph = LoadAndConvert("./max_pooling.tflite"); } }; TEST_F(TestTfliteParserMaxPooling, OpType) { @@ -55,9 +53,7 @@ TEST_F(TestTfliteParserMaxPooling, AttrValue) { class TestTfliteParserAvgPooling : public TestTfliteParser { public: TestTfliteParserAvgPooling() = default; - void SetUp() override { - meta_graph = LoadAndConvert("./avg_pooling.tflite"); - } + void SetUp() override { meta_graph = LoadAndConvert("./avg_pooling.tflite"); } }; TEST_F(TestTfliteParserAvgPooling, OpType) { diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_reshape_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_reshape_parser_test.cc index 3d29e9c192..b3bdaad5ae 100644 --- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_reshape_parser_test.cc +++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_reshape_parser_test.cc @@ -22,9 +22,7 @@ namespace mindspore { class TestTfliteParserReshape : public TestTfliteParser { public: TestTfliteParserReshape() = default; - void SetUp() override { - meta_graph = LoadAndConvert("./reshape.tflite"); - } + void SetUp() override { meta_graph = LoadAndConvert("./reshape.tflite"); } }; TEST_F(TestTfliteParserReshape, OpType) { diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_reverse_sequence_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_reverse_sequence_parser_test.cc index 397dd3c93c..fe7d37ae02 100644 --- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_reverse_sequence_parser_test.cc +++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_reverse_sequence_parser_test.cc @@ -22,9 +22,7 @@ namespace mindspore { class TestTfliteParserReverseSequence : public TestTfliteParser { public: TestTfliteParserReverseSequence() = default; - void SetUp() override { - meta_graph = LoadAndConvert("./reverse_sequence.tflite"); - } + void SetUp() override { meta_graph = LoadAndConvert("./reverse_sequence.tflite"); } }; TEST_F(TestTfliteParserReverseSequence, OpType) { diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_softmax_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_softmax_parser_test.cc index 88488f946f..a35ebaf8d9 100644 --- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_softmax_parser_test.cc +++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_softmax_parser_test.cc @@ -22,9 +22,7 @@ namespace mindspore { class TestTfliteParserSoftmax : public TestTfliteParser { public: TestTfliteParserSoftmax() = default; - void SetUp() override { - meta_graph = LoadAndConvert("./softmax.tflite"); - } + void SetUp() override { meta_graph = LoadAndConvert("./softmax.tflite"); } }; TEST_F(TestTfliteParserSoftmax, OpType) { diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser_test.cc index ae80d1481a..cbc0be98ef 100644 --- 
a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser_test.cc +++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser_test.cc @@ -22,9 +22,7 @@ namespace mindspore { class TestTfliteParserSpaceToBatchND : public TestTfliteParser { public: TestTfliteParserSpaceToBatchND() = default; - void SetUp() override { - meta_graph = LoadAndConvert("./space_to_batch_nd.tflite"); - } + void SetUp() override { meta_graph = LoadAndConvert("./space_to_batch_nd.tflite"); } }; TEST_F(TestTfliteParserSpaceToBatchND, OpType) { diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_space_to_depth_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_space_to_depth_parser_test.cc index d1ed72bee2..87a040edfe 100644 --- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_space_to_depth_parser_test.cc +++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_space_to_depth_parser_test.cc @@ -22,9 +22,7 @@ namespace mindspore { class TestTfliteParserSpaceToDepth : public TestTfliteParser { public: TestTfliteParserSpaceToDepth() = default; - void SetUp() override { - meta_graph = LoadAndConvert("./space_to_depth.tflite"); - } + void SetUp() override { meta_graph = LoadAndConvert("./space_to_depth.tflite"); } }; TEST_F(TestTfliteParserSpaceToDepth, OpType) { diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_sparse_to_dense_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_sparse_to_dense_parser_test.cc index 495087a99a..9e3c967212 100644 --- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_sparse_to_dense_parser_test.cc +++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_sparse_to_dense_parser_test.cc @@ -22,9 +22,7 @@ namespace mindspore { class TestTfliteParserSparseToDense : public TestTfliteParser { public: TestTfliteParserSparseToDense() = default; - void SetUp() override { - meta_graph = LoadAndConvert("./sparse_to_dense.tflite"); - } + void SetUp() override { meta_graph = LoadAndConvert("./sparse_to_dense.tflite"); } }; TEST_F(TestTfliteParserSparseToDense, OpType) { diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_strided_slice_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_strided_slice_parser_test.cc index ef6fa94a7a..c7ad4dc069 100644 --- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_strided_slice_parser_test.cc +++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_strided_slice_parser_test.cc @@ -22,9 +22,7 @@ namespace mindspore { class TestTfliteParserStridedSlice : public TestTfliteParser { public: TestTfliteParserStridedSlice() = default; - void SetUp() override { - meta_graph = LoadAndConvert("./strided_slice.tflite"); - } + void SetUp() override { meta_graph = LoadAndConvert("./strided_slice.tflite"); } }; TEST_F(TestTfliteParserStridedSlice, OpType) { diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_tile_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_tile_parser_test.cc index fe4d930acd..1060f2a870 100644 --- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_tile_parser_test.cc +++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_tile_parser_test.cc @@ -22,9 +22,7 @@ namespace mindspore { class TestTfliteParserTile : public TestTfliteParser { public: TestTfliteParserTile() = default; - void SetUp() override { - meta_graph = 
LoadAndConvert("./tile.tflite"); - } + void SetUp() override { meta_graph = LoadAndConvert("./tile.tflite"); } }; TEST_F(TestTfliteParserTile, OpType) { diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_topk_v2_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_topk_v2_parser_test.cc index 569f4112f8..62d42ded26 100644 --- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_topk_v2_parser_test.cc +++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_topk_v2_parser_test.cc @@ -22,9 +22,7 @@ namespace mindspore { class TestTfliteParserTopKV2 : public TestTfliteParser { public: TestTfliteParserTopKV2() = default; - void SetUp() override { - meta_graph = LoadAndConvert("./topk_v2.tflite"); - } + void SetUp() override { meta_graph = LoadAndConvert("./topk_v2.tflite"); } }; TEST_F(TestTfliteParserTopKV2, OpType) { diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_transpose_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_transpose_parser_test.cc index f5891da3bf..d2af32eab7 100644 --- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_transpose_parser_test.cc +++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_transpose_parser_test.cc @@ -26,18 +26,18 @@ class TestTfliteParserTranspose : public TestTfliteParser { }; TEST_F(TestTfliteParserTranspose, OpType) { -ASSERT_NE(meta_graph, nullptr); -ASSERT_GT(meta_graph->nodes.size(), 0); -ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr); -ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Transpose) << "wrong Op Type"; + ASSERT_NE(meta_graph, nullptr); + ASSERT_GT(meta_graph->nodes.size(), 0); + ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr); + ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Transpose) << "wrong Op Type"; } TEST_F(TestTfliteParserTranspose, AttrValue) { -ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsTranspose(), nullptr); -auto val = meta_graph->nodes.front()->primitive->value.AsTranspose(); -ASSERT_EQ(val->conjugate, false); -std::vector perm = {1, 0}; -ASSERT_EQ(val->perm, perm); + ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsTranspose(), nullptr); + auto val = meta_graph->nodes.front()->primitive->value.AsTranspose(); + ASSERT_EQ(val->conjugate, false); + std::vector perm = {1, 0}; + ASSERT_EQ(val->perm, perm); } } // namespace mindspore diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_unique_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_unique_parser_test.cc index 0273adbfe6..8cf8415edc 100644 --- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_unique_parser_test.cc +++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_unique_parser_test.cc @@ -22,9 +22,7 @@ namespace mindspore { class TestTfliteParserUnique : public TestTfliteParser { public: TestTfliteParserUnique() = default; - void SetUp() override { - meta_graph = LoadAndConvert("./unique.tflite"); - } + void SetUp() override { meta_graph = LoadAndConvert("./unique.tflite"); } }; TEST_F(TestTfliteParserUnique, OpType) { diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_unstack_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_unstack_parser_test.cc index 44c020d2c6..9cb73131e4 100644 --- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_unstack_parser_test.cc +++ 
b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_unstack_parser_test.cc @@ -22,9 +22,7 @@ namespace mindspore { class TestTfliteParserUnstack : public TestTfliteParser { public: TestTfliteParserUnstack() = default; - void SetUp() override { - meta_graph = LoadAndConvert("./unstack.tflite"); - } + void SetUp() override { meta_graph = LoadAndConvert("./unstack.tflite"); } }; TEST_F(TestTfliteParserUnstack, OpType) { diff --git a/mindspore/lite/test/ut/tools/optimizer/fusion/constant_folding_fusion_test.cc b/mindspore/lite/test/ut/tools/optimizer/fusion/constant_folding_fusion_test.cc index cabc4a73fa..524d0c4553 100644 --- a/mindspore/lite/test/ut/tools/optimizer/fusion/constant_folding_fusion_test.cc +++ b/mindspore/lite/test/ut/tools/optimizer/fusion/constant_folding_fusion_test.cc @@ -60,7 +60,7 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType op_type, void *op_node) { input0->dataType = TypeId::kNumberTypeFloat32; input0->dims = {1, 2, 2, 3}; input0->offset = -1; - auto input0_data = new(std::nothrow) float[2 * 2 * 3]; + auto input0_data = new (std::nothrow) float[2 * 2 * 3]; for (auto i = 0; i < 2 * 2 * 3; i++) { input0_data[i] = i; } @@ -77,7 +77,7 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType op_type, void *op_node) { input1->dims = {1, 2, 2, 3}; input1->offset = -1; input1->data.resize(sizeof(float) * 2 * 2 * 3); - auto input1_data = new(std::nothrow) float[2 * 2 * 3]; + auto input1_data = new (std::nothrow) float[2 * 2 * 3]; for (auto i = 0; i < 2 * 2 * 3; i++) { input1_data[i] = i; } @@ -119,7 +119,7 @@ MetaGraphTptr BuildGraphForOneInput(schema::PrimitiveType op_type, void *op_node input0->dataType = TypeId::kNumberTypeFloat32; input0->dims = {1, 2, 2, 3}; input0->offset = -1; - auto input0_data = new(std::nothrow) float[2 * 2 * 3]; + auto input0_data = new (std::nothrow) float[2 * 2 * 3]; for (auto i = 0; i < 2 * 2 * 3; i++) { input0_data[i] = i + 1; } @@ -172,7 +172,7 @@ MetaGraphTptr BuildMixGraph() { input0->dataType = TypeId::kNumberTypeFloat32; input0->dims = {1, 2, 2, 3}; input0->offset = -1; - auto input0_data = new(std::nothrow) float[2 * 2 * 3]; + auto input0_data = new (std::nothrow) float[2 * 2 * 3]; for (auto i = 0; i < 2 * 2 * 3; i++) { input0_data[i] = i; } @@ -189,7 +189,7 @@ MetaGraphTptr BuildMixGraph() { input1->dims = {1, 2, 2, 3}; input1->offset = -1; input1->data.resize(sizeof(float) * 2 * 2 * 3); - auto input1_data = new(std::nothrow) float[2 * 2 * 3]; + auto input1_data = new (std::nothrow) float[2 * 2 * 3]; for (auto i = 0; i < 2 * 2 * 3; i++) { input1_data[i] = i; } @@ -205,7 +205,7 @@ MetaGraphTptr BuildMixGraph() { add_output->dims = {1, 2, 2, 3}; add_output->offset = -1; add_output->data.resize(sizeof(float) * 2 * 2 * 3); - auto add_output_data = new(std::nothrow) float[2 * 2 * 3]; + auto add_output_data = new (std::nothrow) float[2 * 2 * 3]; memcpy(add_output->data.data(), add_output_data, 2 * 2 * 3 * sizeof(float)); delete[] add_output_data; meta_graph->allTensors.emplace_back(std::move(add_output)); @@ -218,7 +218,7 @@ MetaGraphTptr BuildMixGraph() { input2->dims = {1, 2, 2, 3}; input2->offset = -1; input2->data.resize(sizeof(float) * 2 * 2 * 3); - auto input2_data = new(std::nothrow) float[2 * 2 * 3]; + auto input2_data = new (std::nothrow) float[2 * 2 * 3]; for (auto i = 0; i < 2 * 2 * 3; i++) { input2_data[i] = 10; } @@ -282,7 +282,7 @@ MetaGraphTptr BuildSplitGraph() { input0->dataType = TypeId::kNumberTypeFloat32; input0->dims = {1, 2, 2, 3}; input0->offset = -1; - auto input0_data = new(std::nothrow) float[2 * 2 * 3]; + auto 
input0_data = new (std::nothrow) float[2 * 2 * 3]; for (auto i = 0; i < 2 * 2 * 3; i++) { input0_data[i] = i; } @@ -299,7 +299,7 @@ MetaGraphTptr BuildSplitGraph() { split_output1->dims = {1, 1, 2, 3}; split_output1->offset = -1; split_output1->data.resize(sizeof(float) * 1 * 2 * 3); - auto split_output_data1 = new(std::nothrow) float[1 * 2 * 3]; + auto split_output_data1 = new (std::nothrow) float[1 * 2 * 3]; memcpy(split_output1->data.data(), split_output_data1, 1 * 2 * 3 * sizeof(float)); delete[] split_output_data1; meta_graph->allTensors.emplace_back(std::move(split_output1)); @@ -312,7 +312,7 @@ MetaGraphTptr BuildSplitGraph() { split_output2->dims = {1, 1, 2, 3}; split_output2->offset = -1; split_output2->data.resize(sizeof(float) * 1 * 2 * 3); - auto split_output_data2 = new(std::nothrow) float[1 * 2 * 3]; + auto split_output_data2 = new (std::nothrow) float[1 * 2 * 3]; memcpy(split_output2->data.data(), split_output_data2, 1 * 2 * 3 * sizeof(float)); delete[] split_output_data2; meta_graph->allTensors.emplace_back(std::move(split_output2)); @@ -325,7 +325,7 @@ MetaGraphTptr BuildSplitGraph() { input1->dims = {1, 1, 2, 3}; input1->offset = -1; input1->data.resize(sizeof(float) * 2 * 3); - auto input1_data = new(std::nothrow) float[2 * 3]; + auto input1_data = new (std::nothrow) float[2 * 3]; for (auto i = 0; i < 2 * 3; i++) { input1_data[i] = i; } @@ -341,7 +341,7 @@ MetaGraphTptr BuildSplitGraph() { input2->dims = {1, 1, 2, 3}; input2->offset = -1; input2->data.resize(sizeof(float) * 2 * 3); - auto input2_data = new(std::nothrow) float[2 * 3]; + auto input2_data = new (std::nothrow) float[2 * 3]; for (auto i = 0; i < 2 * 3; i++) { input2_data[i] = 10; } diff --git a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_activation_fusion_test.cc b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_activation_fusion_test.cc index b73d5b652d..ff03a58e72 100644 --- a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_activation_fusion_test.cc +++ b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_activation_fusion_test.cc @@ -77,8 +77,7 @@ CNodeTptr BuildDepthwiseConv2D() { return convNode; } -MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, - schema::ActivationType activation_type) { +MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, schema::ActivationType activation_type) { auto meta_graph = std::make_shared(); meta_graph->name = "graph"; // conv node diff --git a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_biasadd_fusion_test.cc b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_biasadd_fusion_test.cc index e5b44ea8c8..4bbf967a15 100644 --- a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_biasadd_fusion_test.cc +++ b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_biasadd_fusion_test.cc @@ -77,8 +77,7 @@ CNodeTptr BuildDepthwiseConv2D() { return convNode; } -MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, - schema::PrimitiveType add_type) { +MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, schema::PrimitiveType add_type) { auto meta_graph = std::make_shared(); meta_graph->name = "graph"; // conv node diff --git a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_bn_fusion_test.cc b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_bn_fusion_test.cc index 8dcd9789ba..b2d2892436 100644 --- a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_bn_fusion_test.cc +++ b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_bn_fusion_test.cc @@ -146,7 +146,6 @@ MetaGraphTptr BuildCaffeGraph(schema::PrimitiveType conv_type) { 
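These fusion-test hunks only change the new (std::nothrow) spacing, but they all repeat one setup idiom: a nothrow scratch array is filled and copied into a schema::TensorT's data vector. A condensed sketch (sizes and values are placeholders; unlike the test code, it also checks the allocation):

// Sketch: building a constant tensor for a MetaGraphT, as these tests do.
auto tensor = std::make_unique<schema::TensorT>();
tensor->dataType = TypeId::kNumberTypeFloat32;
tensor->dims = {1, 2, 2, 3};
tensor->data.resize(sizeof(float) * 2 * 2 * 3);
auto *scratch = new (std::nothrow) float[2 * 2 * 3];
if (scratch != nullptr) {
  for (int i = 0; i < 2 * 2 * 3; i++) scratch[i] = static_cast<float>(i);
  memcpy(tensor->data.data(), scratch, 2 * 2 * 3 * sizeof(float));
  delete[] scratch;  // the graph owns the copied bytes, not the scratch array
}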
input3->data.resize(sizeof(float) * 8 * 5 * 5); meta_graph->allTensors.emplace_back(std::move(input3)); - // final bn output auto output = std::make_unique(); output->nodeType = schema::NodeType::NodeType_Parameter; @@ -193,7 +192,6 @@ MetaGraphTptr BuildTFGraph(schema::PrimitiveType conv_type) { input0->offset = -1; meta_graph->allTensors.emplace_back(std::move(input0)); - // input 1: conv_bias auto input11 = std::make_unique(); input11->nodeType = schema::NodeType::NodeType_ValueNode; diff --git a/mindspore/lite/tools/anf_exporter/anf_exporter.cc b/mindspore/lite/tools/anf_exporter/anf_exporter.cc index aef14f865c..a18b1462ab 100644 --- a/mindspore/lite/tools/anf_exporter/anf_exporter.cc +++ b/mindspore/lite/tools/anf_exporter/anf_exporter.cc @@ -24,7 +24,7 @@ #include "src/ops/quant_dtype_cast.h" #include "abstract/abstract_value.h" #include "mindspore/core/ir/primitive.h" -#include "src/ir/tensor.h" +#include "src/tensor.h" #include "src/param_value_lite.h" #include "src/common/utils.h" @@ -69,7 +69,7 @@ int AnfExporter::ConvertQuantParam(const std::unique_ptr &me auto input_quant_params = primitive->GetInputQuantParams(); auto node_type = (schema::PrimitiveType)primitive->Type(); if (input_quant_params.empty()) { - MS_LOG(WARNING) << "node: " << dst_node->name << " input quant params is empty"; + MS_LOG(DEBUG) << "node: " << dst_node->name << " input quant params is empty"; return RET_OK; } for (size_t i = 0; i < input_quant_params.size(); i++) { @@ -298,16 +298,16 @@ int AnfExporter::ConvertInputValueNode(std::shared_ptr input_anode, auto valueNode = input_anode->cast(); auto paramTensor = std::make_unique(); auto value = valueNode->value(); - if (value->isa()) { + if (value->isa()) { auto valueAbstract = valueNode->abstract(); auto abstractTensor = utils::cast(valueAbstract); auto typePtr = abstractTensor->element()->GetTypeTrack(); paramTensor->dataType = typePtr->type_id(); paramTensor->dims = utils::cast(abstractTensor->BuildShape())->shape(); - paramTensor->nodeType = schema::NodeType_ValueNode; - auto data = value->cast(); + paramTensor->nodeType = schema::NodeType::NodeType_ValueNode; + auto data = value->cast(); paramTensor->data.resize(data->Size()); - memcpy(paramTensor->data.data(), data->Data(), data->Size()); + memcpy(paramTensor->data.data(), data->data_c(), data->Size()); node_id_map_[valueNode->fullname_with_scope()] = meta_graphT->allTensors.size(); output_cnode->inputIndex.emplace_back(meta_graphT->allTensors.size()); meta_graphT->allTensors.emplace_back(std::move(paramTensor)); @@ -317,7 +317,7 @@ int AnfExporter::ConvertInputValueNode(std::shared_ptr input_anode, auto typePtr = abstractScalar->GetTypeTrack(); paramTensor->dataType = typePtr->type_id(); paramTensor->dims = {1}; - paramTensor->nodeType = schema::NodeType_ValueNode; + paramTensor->nodeType = schema::NodeType::NodeType_ValueNode; auto data = value->cast(); paramTensor->data.emplace_back(data->value()); node_id_map_[valueNode->fullname_with_scope()] = meta_graphT->allTensors.size(); diff --git a/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc b/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc index 3cf04aceaf..907f219ccb 100644 --- a/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc +++ b/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc @@ -31,7 +31,7 @@ int AnfImporterFromMetaGraphT::ConverterConstTensor() { auto &tensor = meta_graph_->allTensors.at(i); MS_ASSERT(tensor != nullptr); // converter weight and graph input into parameter node - if 
(tensor->nodeType != schema::NodeType_ValueNode) { + if (tensor->nodeType != schema::NodeType::NodeType_ValueNode) { continue; } MS_ASSERT(tensor->dims() != nullptr); @@ -69,7 +69,7 @@ int AnfImporterFromMetaGraphT::ConverterConstTensor() { ValueNodePtr AnfImporterFromMetaGraphT::ConvertPrimitive(const std::unique_ptr &cNode) { MS_ASSERT(nullptr != meta_graph_); MS_ASSERT(nullptr != cNode); - auto primitiveCValue = PrimitiveC::UnPackFromSchemaPrimitiveT(cNode->primitive.release()); + auto primitiveCValue = PrimitiveC::Create(cNode->primitive.release()); cNode->primitive = nullptr; // add quant parameter if (cNode->quantType != schema::QuantType_PostTraining) { diff --git a/mindspore/lite/tools/anf_importer/import_from_protobuf.cc b/mindspore/lite/tools/anf_importer/import_from_protobuf.cc index 8eeb0d83ce..8ea0b017ee 100644 --- a/mindspore/lite/tools/anf_importer/import_from_protobuf.cc +++ b/mindspore/lite/tools/anf_importer/import_from_protobuf.cc @@ -32,7 +32,7 @@ #include "ir/func_graph.h" #include "schema/inner/model_generated.h" #include "securec/include/securec.h" -#include "src/ir/tensor.h" +#include "src/tensor.h" #include "src/param_value_lite.h" #include "tools/converter/parser/onnx/onnx.pb.h" #include "utils/log_adapter.h" @@ -54,16 +54,16 @@ enum ParseForm : int { }; static std::map kParseTypeSwitchMap{ - {"type", FORM_PARSE_TYPE}, {"scalar", FORM_PARSE_SCALAR}, {"tensor", FORM_PARSE_TENSOR}}; + {"type", FORM_PARSE_TYPE}, {"scalar", FORM_PARSE_SCALAR}, {"tensor", FORM_PARSE_TENSOR}}; static std::unordered_map kDefaultValueSwitchMap{ - {onnx::TensorProto_DataType_BOOL, kNumberTypeBool}, {onnx::TensorProto_DataType_INT8, kNumberTypeInt8}, - {onnx::TensorProto_DataType_INT16, kNumberTypeInt16}, {onnx::TensorProto_DataType_INT32, kNumberTypeInt32}, - {onnx::TensorProto_DataType_INT64, kNumberTypeInt64}, {onnx::TensorProto_DataType_UINT8, kNumberTypeUInt8}, - {onnx::TensorProto_DataType_UINT16, kNumberTypeUInt16}, {onnx::TensorProto_DataType_UINT32, kNumberTypeUInt32}, - {onnx::TensorProto_DataType_UINT64, kNumberTypeUInt64}, {onnx::TensorProto_DataType_FLOAT16, kNumberTypeFloat16}, - {onnx::TensorProto_DataType_FLOAT, kNumberTypeFloat32}, {onnx::TensorProto_DataType_DOUBLE, kNumberTypeFloat64}, - {onnx::TensorProto_DataType_STRING, kObjectTypeString}, + {onnx::TensorProto_DataType_BOOL, kNumberTypeBool}, {onnx::TensorProto_DataType_INT8, kNumberTypeInt8}, + {onnx::TensorProto_DataType_INT16, kNumberTypeInt16}, {onnx::TensorProto_DataType_INT32, kNumberTypeInt32}, + {onnx::TensorProto_DataType_INT64, kNumberTypeInt64}, {onnx::TensorProto_DataType_UINT8, kNumberTypeUInt8}, + {onnx::TensorProto_DataType_UINT16, kNumberTypeUInt16}, {onnx::TensorProto_DataType_UINT32, kNumberTypeUInt32}, + {onnx::TensorProto_DataType_UINT64, kNumberTypeUInt64}, {onnx::TensorProto_DataType_FLOAT16, kNumberTypeFloat16}, + {onnx::TensorProto_DataType_FLOAT, kNumberTypeFloat32}, {onnx::TensorProto_DataType_DOUBLE, kNumberTypeFloat64}, + {onnx::TensorProto_DataType_STRING, kObjectTypeString}, }; std::shared_ptr ParserScalarAttrValue(const std::string &attr_name, @@ -124,8 +124,8 @@ std::shared_ptr ParserScalarAttrValue(const std::string &attr_name, return {}; } -std::shared_ptr -ParserAttrShape(const std::string &attr_name, const std::unordered_map &kv) { +std::shared_ptr ParserAttrShape( + const std::string &attr_name, const std::unordered_map &kv) { std::string str = attr_name; auto replace = [&](const string &orgStr, const string &newStr) { std::string::size_type pos(0); @@ -235,12 +235,12 @@ bool 
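kDefaultValueSwitchMap above is the importer's bridge from ONNX element types to MindSpore TypeIds. How it is consulted is not shown in this hunk, so the lookup below is illustrative only (the fallback value is an assumption):

// Sketch: defensive lookup in the ONNX-to-TypeId table shown above.
TypeId ToTypeId(int onnx_dtype) {
  auto it = kDefaultValueSwitchMap.find(onnx_dtype);
  return it == kDefaultValueSwitchMap.end() ? kTypeUnknown : it->second;  // kTypeUnknown as a fallback
}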
AnfImporterFromProtobuf::BuildParameterForFuncGraph(const ParameterPtr &nod node->set_abstract(abstract_tensor); if (default_para_map_.find(value_proto.name()) != default_para_map_.end()) { - tensor::Tensor *tensor_info = new tensor::Tensor(kDefaultValueSwitchMap[tensor_typeproto.elem_type()], shape); + Tensor *tensor_info = new Tensor(kDefaultValueSwitchMap[tensor_typeproto.elem_type()], shape); MS_EXCEPTION_IF_NULL(tensor_info); tensor_info->MallocData(); const onnx::TensorProto initialize_proto = default_para_map_[value_proto.name()]; std::string initial_data = initialize_proto.raw_data(); - auto *tensor_data_buf = reinterpret_cast(tensor_info->Data()); + auto *tensor_data_buf = reinterpret_cast(tensor_info->MutableData()); MS_EXCEPTION_IF_NULL(tensor_data_buf); tensor_info->SetData(nullptr); auto ret = memcpy_s(tensor_data_buf, tensor_info->Size(), initial_data.data(), initial_data.size()); @@ -325,7 +325,8 @@ ValuePtr AnfImporterFromProtobuf::ObtainCNodeAttrInScalarForm(const onnx::Tensor case onnx::TensorProto_DataType_BOOL: { return ParseAttrInScalar_int32_bool(attr_tensor); } - default:MS_LOG(ERROR) << "Obtain attr in scalar-form has not support input type: " << attr_tensor_type; + default: + MS_LOG(ERROR) << "Obtain attr in scalar-form has not support input type: " << attr_tensor_type; return {}; } return {}; @@ -343,8 +344,7 @@ bool AnfImporterFromProtobuf::ObtainCNodeAttrInTensorForm(const PrimitivePtr &pr shape.push_back(attr_tensor.dims(i)); } tensor::TensorPtr tensor_info = std::make_shared(kDefaultValueSwitchMap[attr_tensor_type], shape); - tensor_info->MallocData(); - auto *tensor_data_buf = reinterpret_cast(tensor_info->Data()); + auto *tensor_data_buf = reinterpret_cast(tensor_info->data_c()); ret = memcpy_s(tensor_data_buf, tensor_info->Size(), tensor_buf.data(), tensor_buf.size()); prim->set_attr(attr_name, MakeValue(tensor_info)); } else { @@ -400,7 +400,8 @@ bool AnfImporterFromProtobuf::GetAttrValueForCNode(const PrimitivePtr &prim, con case FORM_PARSE_TENSOR: { return ObtainCNodeAttrInTensorForm(prim, attr_name, attr_tensor); } - default:MS_LOG(ERROR) << "parse attr type don't support input of ref_attr_name"; + default: + MS_LOG(ERROR) << "parse attr type don't support input of ref_attr_name"; return false; } } @@ -424,9 +425,8 @@ bool AnfImporterFromProtobuf::ObtainValueNodeInTensorForm(const std::string &val shape.push_back(attr_tensor.dims(i)); } tensor::TensorPtr tensor_info = std::make_shared(kDefaultValueSwitchMap[attr_tensor_type], shape); - tensor_info->MallocData(); const std::string &tensor_buf = attr_tensor.raw_data(); - auto *tensor_data_buf = reinterpret_cast(tensor_info->Data()); + auto *tensor_data_buf = reinterpret_cast(tensor_info->data_c()); auto ret = memcpy_s(tensor_data_buf, tensor_info->Size(), tensor_buf.data(), tensor_buf.size()); if (EOK != ret) { MS_LOG(ERROR) << "memcpy_s error"; @@ -486,7 +486,8 @@ bool AnfImporterFromProtobuf::GetAttrValueForValueNode(const std::string &value_ case FORM_PARSE_TENSOR: { return ObtainValueNodeInTensorForm(value_node_name, attr_tensor); } - default:MS_LOG(ERROR) << "parse attr type don't support input of ref_attr_name"; + default: + MS_LOG(ERROR) << "parse attr type don't support input of ref_attr_name"; return false; } } @@ -517,8 +518,8 @@ bool AnfImporterFromProtobuf::BuildValueNodeForFuncGraph(const onnx::NodeProto & return GetAttrValueForValueNode(value_node_name, attr_proto); } -std::unordered_map -AnfImporterFromProtobuf::GetAbstractForCNode(const onnx::AttributeProto &attr_proto) { 
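The importer hunks above converge on one pattern: the core tensor's buffer is now reached via data_c(), which the patch uses in place of MallocData() plus Data(), and every copy goes through memcpy_s with the destination size as the bound. A minimal sketch, with the shape type, shape, and payload as placeholder assumptions:

// Sketch: bounded copy into a core tensor, mirroring the hunks above.
std::vector<int64_t> shape = {2, 3};   // placeholder; element type assumed
const std::string raw(24, '\0');       // placeholder for proto raw_data()
tensor::TensorPtr t = std::make_shared<tensor::Tensor>(kNumberTypeFloat32, shape);
auto *dst = reinterpret_cast<uint8_t *>(t->data_c());  // replaces MallocData()+Data()
if (memcpy_s(dst, t->Size(), raw.data(), raw.size()) != EOK) {
  MS_LOG(ERROR) << "memcpy_s error";
}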
+std::unordered_map AnfImporterFromProtobuf::GetAbstractForCNode( + const onnx::AttributeProto &attr_proto) { std::unordered_map kv; for (int i = 0; i < attr_proto.tensors_size(); i++) { std::vector shape_vec; @@ -572,7 +573,7 @@ CNodePtr AnfImporterFromProtobuf::BuildCNodeForFuncGraph(const FuncGraphPtr &out } inputs.push_back(anfnode_build_map_[input_name]); } - auto primitivec_ptr = PrimitiveC::UnPackFromPrimitive(*prim, inputs, quantType); + auto primitivec_ptr = PrimitiveC::Create(*prim, inputs, quantType); if (primitivec_ptr == nullptr) { MS_LOG(ERROR) << "Create PrimitiveC return nullptr, " << prim->name(); return nullptr; @@ -745,7 +746,7 @@ int AnfImporterFromProtobuf::Import(const schema::QuantType &quantType) { onnx::ModelProto *AnfImporterFromProtobuf::ReadOnnxFromBinary(const std::string &model_path) { auto onnx_model = new onnx::ModelProto; - if (ReadProtoFromBinaryFile((const char *) model_path.c_str(), onnx_model) != RET_OK) { + if (ReadProtoFromBinaryFile((const char *)model_path.c_str(), onnx_model) != RET_OK) { MS_LOG(ERROR) << "Read onnx model file failed, model path: " << model_path; return nullptr; } diff --git a/mindspore/lite/tools/anf_importer/import_from_protobuf.h b/mindspore/lite/tools/anf_importer/import_from_protobuf.h index 0a7e0e69d1..1af52451e2 100644 --- a/mindspore/lite/tools/anf_importer/import_from_protobuf.h +++ b/mindspore/lite/tools/anf_importer/import_from_protobuf.h @@ -66,8 +66,8 @@ class AnfImporterFromProtobuf : public AnfImporter { bool ObtainValueNodeInTensorForm(const string &value_node_name, const onnx::TensorProto &attr_tensor); bool GetAttrValueForValueNode(const std::string &value_node_name, const onnx::AttributeProto &attr_proto); bool ObtainValueNodeInTypeForm(const string &value_node_name, const onnx::TensorProto &attr_tensor); - std::unordered_map GetAbstractForCNode(const onnx::AttributeProto &attr_proto); + std::unordered_map GetAbstractForCNode( + const onnx::AttributeProto &attr_proto); private: std::string producer_name_; diff --git a/mindspore/lite/tools/benchmark/benchmark.cc b/mindspore/lite/tools/benchmark/benchmark.cc index 5870c2588e..b9b80cc52d 100644 --- a/mindspore/lite/tools/benchmark/benchmark.cc +++ b/mindspore/lite/tools/benchmark/benchmark.cc @@ -174,7 +174,7 @@ int Benchmark::CompareOutput() { for (const auto &calibTensor : calibData) { std::string nodeOrTensorName = calibTensor.first; auto tensors = session->GetOutputsByNodeName(nodeOrTensorName); - const mindspore::tensor::MSTensor *tensor = nullptr; + mindspore::tensor::MSTensor *tensor = nullptr; if (tensors.empty() || tensors.size() != 1) { MS_LOG(INFO) << "Cannot find output node: " << nodeOrTensorName << " or node has more than one output tensor, switch to GetOutputByTensorName"; @@ -370,11 +370,11 @@ int Benchmark::RunBenchmark(const std::string &deviceType) { return RET_ERROR; } if (_flags->device == "CPU") { - context->device_ctx_.type = lite::DT_CPU; + context->device_type_ = lite::DT_CPU; } else if (_flags->device == "GPU") { - context->device_ctx_.type = lite::DT_GPU; + context->device_type_ = lite::DT_GPU; } else { - context->device_ctx_.type = lite::DT_NPU; + context->device_type_ = lite::DT_NPU; } if (_flags->cpuBindMode == -1) { diff --git a/mindspore/lite/tools/benchmark/main.cc b/mindspore/lite/tools/benchmark/main.cc index 5cec7c73d0..549fb7d915 100644 --- a/mindspore/lite/tools/benchmark/main.cc +++ b/mindspore/lite/tools/benchmark/main.cc @@ -21,4 +21,3 @@ int main(int argc, const char **argv) { MS_LOG(INFO) << mindspore::lite::Version(); 
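On the benchmark side, the Context loses its nested device_ctx_ struct; the device is now a flat device_type_ field. The selection logic as the hunk rewrites it, with the allocation and flag plumbing shown here being illustrative only:

// Sketch: choosing the backend through the flattened Context field.
auto *context = new (std::nothrow) lite::Context();
if (context != nullptr) {
  if (device == "CPU") {
    context->device_type_ = lite::DT_CPU;
  } else if (device == "GPU") {
    context->device_type_ = lite::DT_GPU;
  } else {
    context->device_type_ = lite::DT_NPU;  // same fallback as the benchmark code
  }
}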
return mindspore::lite::RunBenchmark(argc, argv); } - diff --git a/mindspore/lite/tools/common/converter_op_utils.h b/mindspore/lite/tools/common/converter_op_utils.h index 20356b5ade..c51386c734 100644 --- a/mindspore/lite/tools/common/converter_op_utils.h +++ b/mindspore/lite/tools/common/converter_op_utils.h @@ -31,4 +31,3 @@ inline std::string GetCNodeTTypeName(const schema::CNodeT &cNodeT) { } // namespace mindspore #endif // PREDICT_CONVERTER_COMMON_OP_UTILS_H_ - diff --git a/mindspore/lite/tools/common/flag_parser.cc b/mindspore/lite/tools/common/flag_parser.cc old mode 100755 new mode 100644 index f69235a2d1..ebc87a61a8 --- a/mindspore/lite/tools/common/flag_parser.cc +++ b/mindspore/lite/tools/common/flag_parser.cc @@ -135,7 +135,7 @@ Option FlagParser::InnerParseFlags(std::multimap &usgMsg) const { } } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/common/graph_util.h b/mindspore/lite/tools/common/graph_util.h index 2aa53b75ff..2b4dfb9a9f 100644 --- a/mindspore/lite/tools/common/graph_util.h +++ b/mindspore/lite/tools/common/graph_util.h @@ -36,7 +36,7 @@ enum InsertPlace { kBefore, kAfter }; using NodeIter = std::vector>::iterator; -using OpDefCopyer = std::function (schema::CNodeT *)>; +using OpDefCopyer = std::function(schema::CNodeT *)>; OpDefCopyer GetSimpleOpCopyer(); @@ -92,4 +92,3 @@ std::string GetModelName(const std::string &modelFile); } // namespace mindspore #endif // MINDSPORE_PREDICT_GRAPH_UTIL_H - diff --git a/mindspore/lite/tools/common/node_util.cc b/mindspore/lite/tools/common/node_util.cc index 5cf1c9c0ff..f4ec2c225d 100644 --- a/mindspore/lite/tools/common/node_util.cc +++ b/mindspore/lite/tools/common/node_util.cc @@ -26,13 +26,19 @@ namespace mindspore { namespace lite { static const std::vector nhwcOpList = { #ifdef SUPPORT_TRAIN - schema::PrimitiveType_Conv2DGradFilter, schema::PrimitiveType_Conv2DGradInput, - schema::PrimitiveType_PoolingGrad, schema::PrimitiveType_BiasGrad, + schema::PrimitiveType_Conv2DGradFilter, + schema::PrimitiveType_Conv2DGradInput, + schema::PrimitiveType_PoolingGrad, + schema::PrimitiveType_BiasGrad, #endif - schema::PrimitiveType_Conv2D, schema::PrimitiveType_DeConv2D, - schema::PrimitiveType_DepthwiseConv2D, schema::PrimitiveType_DeDepthwiseConv2D, - schema::PrimitiveType_Pooling, schema::PrimitiveType_Resize, - schema::PrimitiveType_BatchNorm, schema::PrimitiveType_FusedBatchNorm, + schema::PrimitiveType_Conv2D, + schema::PrimitiveType_DeConv2D, + schema::PrimitiveType_DepthwiseConv2D, + schema::PrimitiveType_DeDepthwiseConv2D, + schema::PrimitiveType_Pooling, + schema::PrimitiveType_Resize, + schema::PrimitiveType_BatchNorm, + schema::PrimitiveType_FusedBatchNorm, schema::PrimitiveType_PReLU}; static const std::vector fp32FullOpList = { @@ -58,8 +64,8 @@ static const std::vector int8OpList = { schema::PrimitiveType_MatMul}; static const std::vector needInsertOpList = { - schema::PrimitiveType_Eltwise, schema::PrimitiveType_Activation, - schema::PrimitiveType_Concat, schema::PrimitiveType_Power}; + schema::PrimitiveType_Eltwise, schema::PrimitiveType_Activation, schema::PrimitiveType_Concat, + schema::PrimitiveType_Power}; std::vector GetInsertOpList() { return needInsertOpList; } @@ -71,8 +77,8 @@ std::vector GetUint8NhwcOpList() { return int8NeedNhwcOpL std::vector GetUint8OpList() { return int8OpList; } -STATUS NodeUtils::ConvertDims(mindspore::lite::Format src_format, const std::vector &src_dims, - mindspore::lite::Format dst_format, std::vector *dst_dims) { +STATUS 
NodeUtils::ConvertDims(mindspore::schema::Format src_format, const std::vector &src_dims, + mindspore::schema::Format dst_format, std::vector *dst_dims) { if ((src_dims.size() != DIM_DEFAULT_SIZE && src_dims.size() != 3) || src_format == dst_format) { MS_LOG(ERROR) << "Convert format , src size " << src_dims.size() << " <3 or src format is equal to dst format,not need convert"; @@ -82,10 +88,10 @@ STATUS NodeUtils::ConvertDims(mindspore::lite::Format src_format, const std::vec std::vector nchw_dim; switch (src_format) { - case Format_NCHW: + case schema::Format::Format_NCHW: nchw_dim = src_dims; break; - case Format_NHWC: + case schema::Format::Format_NHWC: if (src_dims.size() == DIM_DEFAULT_SIZE) { nchw_dim.push_back(src_dims[NHWC_N]); nchw_dim.push_back(src_dims[NHWC_C]); @@ -98,7 +104,7 @@ STATUS NodeUtils::ConvertDims(mindspore::lite::Format src_format, const std::vec } break; default: - MS_LOG(ERROR) << "Not support src format: " << schema::EnumNameFormat(src_format); + MS_LOG(ERROR) << "Not support src format: " << EnumNameFormat(src_format); return RET_ERROR; } @@ -108,10 +114,10 @@ STATUS NodeUtils::ConvertDims(mindspore::lite::Format src_format, const std::vec } switch (dst_format) { - case Format_NCHW: + case schema::Format::Format_NCHW: *dst_dims = nchw_dim; break; - case Format_NHWC: + case schema::Format::Format_NHWC: if (src_dims.size() == DIM_DEFAULT_SIZE) { dst_dims->push_back(nchw_dim[NCHW_N]); dst_dims->push_back(nchw_dim[NCHW_H]); @@ -126,8 +132,8 @@ STATUS NodeUtils::ConvertDims(mindspore::lite::Format src_format, const std::vec return RET_OK; } -STATUS GetFilterDim(const std::vector &oriDims, kTransFilterType type, int32_t* filterK, int32_t* filterC, - int32_t* filterH, int32_t* filterW) { +STATUS GetFilterDim(const std::vector &oriDims, kTransFilterType type, int32_t *filterK, int32_t *filterC, + int32_t *filterH, int32_t *filterW) { MS_ASSERT(oriDims.size() == 4); if (type == kKCHW2HWCK || type == kKCHW2HWKC || type == kKCHW2KHWC || type == kKCHW2CKHW) { *filterK = oriDims.at(KCHW_K); @@ -171,8 +177,8 @@ STATUS GetFilterDim(const std::vector &oriDims, kTransFilterType type, return RET_OK; } -STATUS SetFilterDim(schema::TensorT *tensor, kTransFilterType type, int32_t filterK, int32_t filterC, - int32_t filterH, int32_t filterW) { +STATUS SetFilterDim(schema::TensorT *tensor, kTransFilterType type, int32_t filterK, int32_t filterC, int32_t filterH, + int32_t filterW) { MS_ASSERT(tensor != nullptr); if (type == kKCHW2HWCK || type == kCKHW2HWCK || type == kNHWC2HWCK || type == kKHWC2HWCK || type == kCHWK2HWCK) { tensor->dims = {filterH, filterW, filterC, filterK}; @@ -206,9 +212,9 @@ STATUS TransFilterFormat(schema::TensorT *tensor, schema::Format dstFormat) { auto dataType = tensor->dataType; STATUS status; switch (dstFormat) { - case schema::Format_KHWC: { + case schema::Format::Format_KHWC: { switch (srcFormat) { - case schema::Format_KCHW: + case schema::Format::Format_KCHW: if (dataType == kNumberTypeFloat32) { status = TransFilterFormat(tensor, kKCHW2KHWC); } else if (dataType == kNumberTypeUInt8) { @@ -220,7 +226,7 @@ STATUS TransFilterFormat(schema::TensorT *tensor, schema::Format dstFormat) { return RET_ERROR; } break; - case schema::Format_CKHW: + case schema::Format::Format_CKHW: if (dataType == kNumberTypeFloat32) { status = TransFilterFormat(tensor, kCKHW2KHWC); } else if (dataType == kNumberTypeUInt8) { @@ -232,7 +238,7 @@ STATUS TransFilterFormat(schema::TensorT *tensor, schema::Format dstFormat) { return RET_ERROR; } break; - case schema::Format_CHWK: + 
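ConvertDims above goes through an intermediate NCHW vector, so for the common 4-D case each leg is a fixed permutation. A sketch of the NHWC-to-NCHW leg (indices written out; the real code uses the NHWC_N, NHWC_C, ... constants):

// Sketch: the 4-D permutation performed inside ConvertDims.
std::vector<int32_t> NhwcToNchw(const std::vector<int32_t> &nhwc) {
  // {N, H, W, C} -> {N, C, H, W}
  return {nhwc[0], nhwc[3], nhwc[1], nhwc[2]};
}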
case schema::Format::Format_CHWK: if (dataType == kNumberTypeFloat32) { status = TransFilterFormat(tensor, kCHWK2KHWC); } else if (dataType == kNumberTypeUInt8) { @@ -244,17 +250,17 @@ STATUS TransFilterFormat(schema::TensorT *tensor, schema::Format dstFormat) { return RET_ERROR; } break; - case schema::Format_KHWC: + case schema::Format::Format_KHWC: return RET_OK; default: - MS_LOG(ERROR) << "Unsupported transform from " << schema::EnumNameFormat(srcFormat) << " to " - << schema::EnumNameFormat(dstFormat); + MS_LOG(ERROR) << "Unsupported transform from " << EnumNameFormat(srcFormat) << " to " + << EnumNameFormat(dstFormat); return RET_ERROR; } } break; - case schema::Format_HWCK: { + case schema::Format::Format_HWCK: { switch (srcFormat) { - case schema::Format_KCHW: + case schema::Format::Format_KCHW: if (dataType == kNumberTypeFloat32) { status = TransFilterFormat(tensor, kKCHW2HWCK); } else if (dataType == kNumberTypeUInt8) { @@ -266,7 +272,7 @@ STATUS TransFilterFormat(schema::TensorT *tensor, schema::Format dstFormat) { return RET_ERROR; } break; - case schema::Format_KHWC: + case schema::Format::Format_KHWC: if (dataType == kNumberTypeFloat32) { status = TransFilterFormat(tensor, kKHWC2HWCK); } else if (dataType == kNumberTypeUInt8) { @@ -278,7 +284,7 @@ STATUS TransFilterFormat(schema::TensorT *tensor, schema::Format dstFormat) { return RET_ERROR; } break; - case schema::Format_CKHW: + case schema::Format::Format_CKHW: if (dataType == kNumberTypeFloat32) { status = TransFilterFormat(tensor, kCKHW2HWCK); } else if (dataType == kNumberTypeUInt8) { @@ -290,7 +296,7 @@ STATUS TransFilterFormat(schema::TensorT *tensor, schema::Format dstFormat) { return RET_ERROR; } break; - case schema::Format_CHWK: + case schema::Format::Format_CHWK: if (dataType == kNumberTypeFloat32) { status = TransFilterFormat(tensor, kCHWK2HWCK); } else if (dataType == kNumberTypeUInt8) { @@ -302,19 +308,19 @@ STATUS TransFilterFormat(schema::TensorT *tensor, schema::Format dstFormat) { return RET_ERROR; } break; - case schema::Format_HWCK: + case schema::Format::Format_HWCK: return RET_OK; default: - MS_LOG(ERROR) << "Unsupported transform from " << schema::EnumNameFormat(srcFormat) << " to " - << schema::EnumNameFormat(dstFormat); + MS_LOG(ERROR) << "Unsupported transform from " << EnumNameFormat(srcFormat) << " to " + << EnumNameFormat(dstFormat); return RET_ERROR; } } break; - case schema::Format_KCHW: { + case schema::Format::Format_KCHW: { switch (srcFormat) { - case schema::Format_KCHW: + case schema::Format::Format_KCHW: return RET_OK; - case schema::Format_HWCK: + case schema::Format::Format_HWCK: if (dataType == kNumberTypeFloat32) { status = TransFilterFormat(tensor, kHWCK2KCHW); } else if (dataType == kNumberTypeUInt8) { @@ -326,7 +332,7 @@ STATUS TransFilterFormat(schema::TensorT *tensor, schema::Format dstFormat) { return RET_ERROR; } break; - case schema::Format_HWKC: + case schema::Format::Format_HWKC: if (dataType == kNumberTypeFloat32) { status = TransFilterFormat(tensor, kHWKC2KCHW); } else if (dataType == kNumberTypeUInt8) { @@ -338,7 +344,7 @@ STATUS TransFilterFormat(schema::TensorT *tensor, schema::Format dstFormat) { return RET_ERROR; } break; - case schema::Format_KHWC: + case schema::Format::Format_KHWC: if (dataType == kNumberTypeFloat32) { status = TransFilterFormat(tensor, kKHWC2KCHW); } else if (dataType == kNumberTypeUInt8) { @@ -350,7 +356,7 @@ STATUS TransFilterFormat(schema::TensorT *tensor, schema::Format dstFormat) { return RET_ERROR; } break; - case schema::Format_CKHW: + 
case schema::Format::Format_CKHW: if (dataType == kNumberTypeFloat32) { status = TransFilterFormat(tensor, kCKHW2KCHW); } else if (dataType == kNumberTypeUInt8) { @@ -362,7 +368,7 @@ STATUS TransFilterFormat(schema::TensorT *tensor, schema::Format dstFormat) { return RET_ERROR; } break; - case schema::Format_CHWK: + case schema::Format::Format_CHWK: if (dataType == kNumberTypeFloat32) { status = TransFilterFormat(tensor, kCHWK2KCHW); } else if (dataType == kNumberTypeUInt8) { @@ -375,14 +381,14 @@ STATUS TransFilterFormat(schema::TensorT *tensor, schema::Format dstFormat) { } break; default: - MS_LOG(ERROR) << "Unsupported transform from " << schema::EnumNameFormat(srcFormat) << " to " - << schema::EnumNameFormat(dstFormat); + MS_LOG(ERROR) << "Unsupported transform from " << EnumNameFormat(srcFormat) << " to " + << EnumNameFormat(dstFormat); return RET_ERROR; } } break; - case schema::Format_CKHW: { + case schema::Format::Format_CKHW: { switch (srcFormat) { - case schema::Format_HWCK: + case schema::Format::Format_HWCK: if (dataType == kNumberTypeFloat32) { status = TransFilterFormat(tensor, kHWCK2CKHW); } else if (dataType == kNumberTypeUInt8) { @@ -394,7 +400,7 @@ STATUS TransFilterFormat(schema::TensorT *tensor, schema::Format dstFormat) { return RET_ERROR; } break; - case schema::Format_HWKC: + case schema::Format::Format_HWKC: if (dataType == kNumberTypeFloat32) { status = TransFilterFormat(tensor, kHWKC2CKHW); } else if (dataType == kNumberTypeUInt8) { @@ -406,7 +412,7 @@ STATUS TransFilterFormat(schema::TensorT *tensor, schema::Format dstFormat) { return RET_ERROR; } break; - case schema::Format_KCHW: + case schema::Format::Format_KCHW: if (dataType == kNumberTypeFloat32) { status = TransFilterFormat(tensor, kKCHW2CKHW); } else if (dataType == kNumberTypeUInt8) { @@ -418,17 +424,17 @@ STATUS TransFilterFormat(schema::TensorT *tensor, schema::Format dstFormat) { return RET_ERROR; } break; - case schema::Format_CKHW: + case schema::Format::Format_CKHW: return RET_OK; default: - MS_LOG(ERROR) << "Unsupported transform from " << schema::EnumNameFormat(srcFormat) << " to " - << schema::EnumNameFormat(dstFormat); + MS_LOG(ERROR) << "Unsupported transform from " << EnumNameFormat(srcFormat) << " to " + << EnumNameFormat(dstFormat); return RET_ERROR; } } break; default: - MS_LOG(ERROR) << "Unsupported transform from " << schema::EnumNameFormat(srcFormat) << " to " - << schema::EnumNameFormat(dstFormat); + MS_LOG(ERROR) << "Unsupported transform from " << EnumNameFormat(srcFormat) << " to " + << EnumNameFormat(dstFormat); return RET_ERROR; } if (status != RET_OK) { diff --git a/mindspore/lite/tools/common/node_util.h b/mindspore/lite/tools/common/node_util.h index 1f9af6fef6..7eed217d4f 100644 --- a/mindspore/lite/tools/common/node_util.h +++ b/mindspore/lite/tools/common/node_util.h @@ -79,10 +79,10 @@ enum kTransFilterType { kKCHW2CKHW // 20 }; -STATUS GetFilterDim(const std::vector &oriDims, kTransFilterType type, int32_t* filterK, int32_t* filterC, - int32_t* filterH, int32_t* filterW); -STATUS SetFilterDim(schema::TensorT *tensor, kTransFilterType type, int32_t filterK, int32_t filterC, - int32_t filterH, int32_t filterW); +STATUS GetFilterDim(const std::vector &oriDims, kTransFilterType type, int32_t *filterK, int32_t *filterC, + int32_t *filterH, int32_t *filterW); +STATUS SetFilterDim(schema::TensorT *tensor, kTransFilterType type, int32_t filterK, int32_t filterC, int32_t filterH, + int32_t filterW); template static STATUS TransFilterData(schema::TensorT *tensor, kTransFilterType 
type, int32_t filterK, int32_t filterC,
diff --git a/mindspore/lite/tools/common/option.h b/mindspore/lite/tools/common/option.h
index 8b323b7336..76222f4ebd 100644
--- a/mindspore/lite/tools/common/option.h
+++ b/mindspore/lite/tools/common/option.h
@@ -117,4 +117,3 @@ class Option {
 }  // namespace mindspore
 
 #endif  // PREDICT_COMMON_OPTION_H_
-
diff --git a/mindspore/lite/tools/common/storage.h b/mindspore/lite/tools/common/storage.h
index c1cdfa27ad..ef0f12bf5e 100644
--- a/mindspore/lite/tools/common/storage.h
+++ b/mindspore/lite/tools/common/storage.h
@@ -35,4 +35,3 @@ class Storage {
 }  // namespace mindspore
 
 #endif  // PREDICT_COMMON_STORAGE_H_
-
diff --git a/mindspore/lite/tools/common/tensor_util.cc b/mindspore/lite/tools/common/tensor_util.cc
index f9e5838a2b..c7e85c9425 100644
--- a/mindspore/lite/tools/common/tensor_util.cc
+++ b/mindspore/lite/tools/common/tensor_util.cc
@@ -75,7 +75,7 @@ size_t GetShapeSize(const TensorT &tensor) {
 std::unique_ptr<TensorT> CopyTensorDefT(const std::unique_ptr<TensorT> &oldTensor) {
   auto newTensor = std::unique_ptr<TensorT>(new (std::nothrow) TensorT);
   if (newTensor == nullptr) {
-    MS_LOG(ERROR) << "new TensorT failed";
+    MS_LOG(ERROR) << "new TensorT failed";
     return nullptr;
   }
   newTensor->dims = oldTensor->dims;
diff --git a/mindspore/lite/tools/common/tensor_util.h b/mindspore/lite/tools/common/tensor_util.h
index 1f13160f78..6d3e5616a5 100644
--- a/mindspore/lite/tools/common/tensor_util.h
+++ b/mindspore/lite/tools/common/tensor_util.h
@@ -29,14 +29,14 @@
 namespace mindspore {
 namespace lite {
-using schema::TensorT;
-using schema::MetaGraphT;
 using schema::CNodeT;
-using schema::QuantParamT;
 using schema::Format;
 using schema::FusedBatchNormT;
-using schema::Format_NCHW;
-using schema::Format_NHWC;
+using schema::MetaGraphT;
+using schema::QuantParamT;
+using schema::TensorT;
+using schema::Format::Format_NCHW;
+using schema::Format::Format_NHWC;
 using STATUS = int;
 
 std::unique_ptr<QuantParamT> GetTensorQuantParam(const std::unique_ptr<TensorT> &tensor);
@@ -56,11 +56,11 @@ size_t GetRefCount(schema::MetaGraphT *graphT, uint32_t tensorIdx);
 std::unique_ptr<QuantParamT> CopyQuantParamT(const std::unique_ptr<QuantParamT> &srcQuantParam);
 
 std::unique_ptr<QuantParamArrayT> CopyQuantParamArrayT(
-  const std::unique_ptr<QuantParamArrayT> &srcQuantParamArray);
+    const std::unique_ptr<QuantParamArrayT> &srcQuantParamArray);
 
 using MSGraphDefTPtr = std::shared_ptr<schema::MetaGraphT>;
 
-enum TensorType { CONST = 0, GRAPH_INPUT = 1, OP_OUTPUT = 2, TF_CONST = 3 };
+enum Category { CONST = 0, GRAPH_INPUT = 1, OP_OUTPUT = 2, TF_CONST = 3 };
 
 class TensorCache {
  public:
@@ -68,9 +68,9 @@ class TensorCache {
 
   ~TensorCache() { tensors.clear(); }
 
-  int AddTensor(const std::string &name, TensorT *tensor, int TensorType) {
+  int AddTensor(const std::string &name, TensorT *tensor, int Category) {
     index++;
-    if (TensorType == CONST || TensorType == TF_CONST || TensorType == GRAPH_INPUT) {
+    if (Category == CONST || Category == TF_CONST || Category == GRAPH_INPUT) {
       tensor->refCount = 1;
       tensor->nodeType = schema::NodeType_ValueNode;
     } else {
@@ -78,11 +78,11 @@ class TensorCache {
     }
     tensors.push_back(tensor);
 
-    if (TensorType == GRAPH_INPUT) {
+    if (Category == GRAPH_INPUT) {
       graphInputs.push_back(index);
     }
 
-    if (TensorType == GRAPH_INPUT || TensorType == OP_OUTPUT || TensorType == TF_CONST) {
+    if (Category == GRAPH_INPUT || Category == OP_OUTPUT || Category == TF_CONST) {
       UpdateTensorIndex(name, index);
     }
     return index;
@@ -121,4 +121,3 @@ class TensorCache {
 }  // namespace mindspore
 
 #endif  // MINDSPORE_PREDICT_TENSOR_UTIL_H
-
diff --git a/mindspore/lite/tools/converter/CMakeLists.txt 
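
For reference, the ConvertDims change above only touches the Format type qualification; the reordering itself, for the 4-D case, boils down to the index mapping below (a standalone sketch; the helper names are illustrative and not part of node_util, and the 3-D path that ConvertDims also accepts is omitted):

#include <cstdint>
#include <vector>

// NHWC = {N, H, W, C}  ->  NCHW = {N, C, H, W}
std::vector<int32_t> NhwcToNchw(const std::vector<int32_t> &nhwc) {
  return {nhwc[0], nhwc[3], nhwc[1], nhwc[2]};
}

// NCHW = {N, C, H, W}  ->  NHWC = {N, H, W, C}
std::vector<int32_t> NchwToNhwc(const std::vector<int32_t> &nchw) {
  return {nchw[0], nchw[2], nchw[3], nchw[1]};
}
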
b/mindspore/lite/tools/converter/CMakeLists.txt index 0eb17b0518..f239fe95c5 100644 --- a/mindspore/lite/tools/converter/CMakeLists.txt +++ b/mindspore/lite/tools/converter/CMakeLists.txt @@ -1,70 +1,24 @@ add_definitions(-DPRIMITIVE_WRITEABLE) -set(CORE_SRC - ${CORE_SRC} - #core / abstract - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/abstract/abstract_function.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/abstract/analysis_context.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/abstract/param_validator.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/abstract/abstract_value.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/abstract/dshape.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/abstract/utils.cc - #core / base - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/base/base_ref.cc - #core / ir - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/anf.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/anf_extends.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/meta_func_graph.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/func_graph.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/graph_utils.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/func_graph_cloner.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/func_graph_extends.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/manager.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/primitive.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/tensor.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/visitor.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/meta_tensor_extends.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/dtype.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/dtype_extends.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/named.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/scope.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/value.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/value_extends.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/dtype/ref.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/dtype/tensor_type.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/dtype/container.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/dtype/empty.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/dtype/number.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/dtype/ref.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/dtype/type.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/dtype/type_extends.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/utils/any.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/utils/symbolic.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/utils/misc.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/utils/flags.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/utils/trace_base.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/utils/trace_info.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/utils/label.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/utils/info.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/utils/profile.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/utils/ms_context.cc +string(REPLACE " -Werror " " " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + +file (GLOB_RECURSE CORE_SRC + ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/*.cc ) +list(REMOVE_ITEM CORE_SRC ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/utils/log_adapter.cc) +list(REMOVE_ITEM CORE_SRC ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/ir/graph_utils_extends.cc) + set(CCSRC_SRC - ## ccsrc - ${CCSRC_DIR}/debug/draw.cc - ${CCSRC_DIR}/pybind_api/export_flags.cc - ${CCSRC_DIR}/utils/context/context_extends.cc - ${CCSRC_DIR}/frontend/parallel/costmodel_context.cc 
${CCSRC_DIR}/backend/optimizer/common/pattern_engine.cc ${CCSRC_DIR}/backend/optimizer/common/visit.cc ${CCSRC_DIR}/backend/optimizer/common/optimizer.cc ) + if (WIN32) set(LITE_SRC #src - ${CMAKE_CURRENT_SOURCE_DIR}/../../src/ir/tensor.cc + ${CMAKE_CURRENT_SOURCE_DIR}/../../src/tensor.cc ${CMAKE_CURRENT_SOURCE_DIR}/../../src/model.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../src/context.cc ${CMAKE_CURRENT_SOURCE_DIR}/../../src/lite_session.cc ${CMAKE_CURRENT_SOURCE_DIR}/../../src/kernel_registry.cc ${CMAKE_CURRENT_SOURCE_DIR}/../../src/common/graph_util.cc @@ -75,7 +29,6 @@ if (WIN32) ${CMAKE_CURRENT_SOURCE_DIR}/../../src/executor.cc ${CMAKE_CURRENT_SOURCE_DIR}/../../src/scheduler.cc ${CMAKE_CURRENT_SOURCE_DIR}/../../src/lite_kernel.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../src/common/ms_tensor_utils.cc ${CMAKE_CURRENT_SOURCE_DIR}../../nnacl/pack.c ${CMAKE_CURRENT_SOURCE_DIR}/../../src/populate_parameter.cc ) @@ -124,13 +77,11 @@ set(LITE_SRC ${SRC_DIR}/common/graph_utils_extends.cc ${SRC_DIR}/common/log_adapter.cc ${SRC_DIR}/common/graph_util.cc - ${SRC_DIR}/common/ms_tensor_utils.cc ${SRC_DIR}/runtime/allocator.cc ${SRC_DIR}/runtime/runtime_api.cc ${SRC_DIR}/runtime/thread_pool.c ${SRC_DIR}/runtime/workspace_pool.cc - ${SRC_DIR}/ir/tensor.cc - ${SRC_DIR}/context.cc + ${SRC_DIR}/tensor.cc ${SRC_DIR}/kernel_registry.cc ${SRC_DIR}/lite_kernel.cc ${SRC_DIR}/populate_parameter.cc diff --git a/mindspore/lite/tools/converter/anf_transform.cc b/mindspore/lite/tools/converter/anf_transform.cc index 9f09cf4507..1d73154303 100644 --- a/mindspore/lite/tools/converter/anf_transform.cc +++ b/mindspore/lite/tools/converter/anf_transform.cc @@ -47,12 +47,10 @@ FuncGraphPtr AnfTransform::Transform(const FuncGraphPtr &old_graph, const conver schema::ActivationType_RELU)); pm->AddPass(std::make_shared(true, "conv_relu6", schema::PrimitiveType_Activation, schema::ActivationType_RELU6)); - pm->AddPass(std::make_shared(true, "conv_tuple_relu", - schema::PrimitiveType_Activation, - schema::ActivationType_RELU)); - pm->AddPass(std::make_shared(true, "conv_tuple_relu6", - schema::PrimitiveType_Activation, - schema::ActivationType_RELU6)); + pm->AddPass(std::make_shared( + true, "conv_tuple_relu", schema::PrimitiveType_Activation, schema::ActivationType_RELU)); + pm->AddPass(std::make_shared( + true, "conv_tuple_relu6", schema::PrimitiveType_Activation, schema::ActivationType_RELU6)); pm->AddPass(std::make_shared()); optimizer->AddPassManager(pm); FuncGraphPtr new_graph = optimizer->Optimize(old_graph); @@ -66,8 +64,8 @@ FuncGraphPtr AnfTransform::Transform(const FuncGraphPtr &old_graph, const conver return nullptr; } } else if (config->quantType == schema::QuantType_WeightQuant) { - this->mQuantizer = std::make_unique(new_graph, config->quantSize, - config->convWeightQuantChannelThreshold, config->bitNum); + this->mQuantizer = std::make_unique( + new_graph, config->quantSize, config->convWeightQuantChannelThreshold, config->bitNum); if (mQuantizer == nullptr) { MS_LOG(ERROR) << "New PostTrainingQuantizer failed"; return nullptr; diff --git a/mindspore/lite/tools/converter/converter.h b/mindspore/lite/tools/converter/converter.h index 7deb65c9b7..0952521d4b 100644 --- a/mindspore/lite/tools/converter/converter.h +++ b/mindspore/lite/tools/converter/converter.h @@ -46,4 +46,3 @@ int RunConverter(int argc, const char **argv); } // namespace mindspore #endif - diff --git a/mindspore/lite/tools/converter/converter_flags.h b/mindspore/lite/tools/converter/converter_flags.h index 1067a8cb09..0dc118c077 100644 --- 
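
The anf_transform.cc hunk above is pure reformatting of the AddPass calls, but the pipeline shape it wraps is easier to see in isolation. A self-contained schematic of the pass-manager wiring, assuming stand-in Pass/PassManager types rather than the converter's real optimizer classes:

#include <memory>
#include <string>
#include <vector>

// Stand-ins for the converter's optimizer types; only the call shape matters.
struct Pass {
  explicit Pass(std::string name) : name_(std::move(name)) {}
  std::string name_;
};

struct PassManager {
  // One fused pattern is registered per AddPass(), as in the hunk above.
  void AddPass(std::shared_ptr<Pass> pass) { passes_.push_back(std::move(pass)); }
  std::vector<std::shared_ptr<Pass>> passes_;
};

int main() {
  auto pm = std::make_shared<PassManager>();
  pm->AddPass(std::make_shared<Pass>("conv_relu"));
  pm->AddPass(std::make_shared<Pass>("conv_tuple_relu6"));
  return pm->passes_.size() == 2 ? 0 : 1;
}
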
a/mindspore/lite/tools/converter/converter_flags.h +++ b/mindspore/lite/tools/converter/converter_flags.h @@ -25,20 +25,12 @@ namespace mindspore { namespace lite { using mindspore::schema::QuantType; +using mindspore::schema::QuantType_AwareTraining; using mindspore::schema::QuantType_PostTraining; using mindspore::schema::QuantType_QUANT_NONE; -using mindspore::schema::QuantType_AwareTraining; using mindspore::schema::QuantType_WeightQuant; -using mindspore::schema::QuantType_PostTraining; -using mindspore::schema::QuantType_PostTraining; namespace converter { -enum FmkType { - FmkType_TF = 0, - FmkType_CAFFE = 1, - FmkType_ONNX = 2, - FmkType_MS = 3, - FmkType_TFLITE = 4 -}; +enum FmkType { FmkType_TF = 0, FmkType_CAFFE = 1, FmkType_ONNX = 2, FmkType_MS = 3, FmkType_TFLITE = 4 }; class Flags : public virtual mindspore::lite::FlagParser { public: @@ -82,4 +74,3 @@ class Flags : public virtual mindspore::lite::FlagParser { } // namespace mindspore #endif - diff --git a/mindspore/lite/tools/converter/graphdef_transform.h b/mindspore/lite/tools/converter/graphdef_transform.h index 0251b7d15a..b7496682b3 100644 --- a/mindspore/lite/tools/converter/graphdef_transform.h +++ b/mindspore/lite/tools/converter/graphdef_transform.h @@ -50,4 +50,3 @@ class GraphDefTransform { } // namespace mindspore #endif - diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/batchnorm_fold_fusion_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/fusion/batchnorm_fold_fusion_pass.cc index 238b3bc46f..e6a2388406 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/batchnorm_fold_fusion_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/batchnorm_fold_fusion_pass.cc @@ -301,7 +301,7 @@ STATUS BatchNormFoldFusionPass::CheckPath(MetaGraphT *graph, STATUS BatchNormFoldFusionPass::GenNewWeightTensor() { MS_ASSERT(oldWeightTensor != nullptr); MS_ASSERT(oldWeightTensor->dataType == DataType_DT_FLOAT); - MS_ASSERT(oldWeightTensor->refCount == schema::NodeType_ValueNode); + MS_ASSERT(oldWeightTensor->refCount == schema::NodeType::NodeType_ValueNode); auto weightShape = oldWeightTensor->dims; if (weightShape.size() != 4) { MS_LOG(ERROR) << "shape of weight should be 4 dims, got " << weightShape.size() << " dims"; @@ -319,7 +319,7 @@ STATUS BatchNormFoldFusionPass::GenNewWeightTensor() { } newWeightTensor->dataType = oldWeightTensor->dataType; newWeightTensor->format = oldWeightTensor->format; - newWeightTensor->refCount = schema::NodeType_ValueNode; + newWeightTensor->refCount = schema::NodeType::NodeType_ValueNode; newWeightTensor->dims = weightShape; newWeightTensor->data.resize(weightShapeSize * sizeof(float)); void *oldWeightData = oldWeightTensor->data.data(); @@ -349,8 +349,8 @@ STATUS BatchNormFoldFusionPass::GenNewBiasTensor() { // bias has no quant return RET_ERROR; } newBiasTensor->dataType = 0; - newBiasTensor->format = Format_NUM_OF_FORMAT; - newBiasTensor->refCount = schema::NodeType_ValueNode; + newBiasTensor->format = schema::Format::Format_NUM_OF_FORMAT; + newBiasTensor->refCount = schema::NodeType::NodeType_ValueNode; newBiasTensor->dims = biasShape; newBiasTensor->data.resize(channelOut * sizeof(float)); void *newBiasData = newBiasTensor->data.data(); @@ -496,4 +496,3 @@ BatchNormFoldFusionPass::~BatchNormFoldFusionPass() { } } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_fusion_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_fusion_pass.cc 
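
The GenNewWeightTensor/GenNewBiasTensor hunks above size a TensorT's raw byte buffer from the product of its dims before copying float data into it. A minimal sketch of that pattern, using a stand-in struct rather than the generated schema type:

#include <cstdint>
#include <cstring>
#include <vector>

// Stand-in for schema::TensorT: dims plus an untyped byte buffer.
struct TensorT {
  std::vector<int32_t> dims;
  std::vector<uint8_t> data;
};

bool FillWithFloats(TensorT *t, const std::vector<float> &values) {
  size_t count = 1;
  for (auto d : t->dims) count *= static_cast<size_t>(d);  // GetShapeSize(*t)
  if (values.size() != count) return false;
  t->data.resize(count * sizeof(float));  // bytes, not elements
  std::memcpy(t->data.data(), values.data(), t->data.size());
  return true;
}
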
index 7bd256ea21..9fda406d84 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_fusion_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_fusion_pass.cc
@@ -130,7 +130,7 @@ STATUS FormatTransFusionPass::DoFusion(schema::MetaGraphT *graph, const std::str
                                        std::unordered_map<std::string, std::shared_ptr<Path>> &matchedPath) {
   MS_ASSERT(graph != nullptr);
   if (matchedPath.size() != kFormatTransMatchPathLen2 && matchedPath.size() != kFormatTransMatchPathLen3) {
-    MS_LOG(ERROR) << "Format-Transform-Fusion should have " << kFormatTransMatchPathLen2 << " or "
+    MS_LOG(ERROR) << "schema::Format-Transform-Fusion should have " << kFormatTransMatchPathLen2 << " or "
                   << kFormatTransMatchPathLen3 << " NodeIndex in matchedPair";
     return RET_PARAM_INVALID;
   }
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_fusion_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_fusion_pass.h
index 1b77c74d6b..60541edaad 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_fusion_pass.h
+++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_fusion_pass.h
@@ -49,4 +49,3 @@ class FormatTransFusionPass : public FusionPass {
 }  // namespace mindspore
 
 #endif  // MINDSPORE_PREDICT_FORMAT_TRANS_FUSION_PASS_H
-
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_transpose_fusion_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_transpose_fusion_pass.cc
index a40b03e38b..44e9c00b5d 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_transpose_fusion_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_transpose_fusion_pass.cc
@@ -81,7 +81,7 @@ STATUS FormatTransPermuteFusionPass::DoFusion(schema::MetaGraphT *graph, const s
                                               std::unordered_map<std::string, std::shared_ptr<Path>> &matchedPath) {
   MS_ASSERT(graph != nullptr);
   if (matchedPath.size() != kFormatTransTransposeMatchPathLen) {
-    MS_LOG(ERROR) << "Format-Transform-Transpose-Fusion should have " << kFormatTransTransposeMatchPathLen
+    MS_LOG(ERROR) << "schema::Format-Transform-Transpose-Fusion should have " << kFormatTransTransposeMatchPathLen
                   << " NodeIndex in matchedPair";
     return RET_PARAM_INVALID;
   }
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pass.cc
index 73e103d63b..1ffd26daa2 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pass.cc
@@ -174,7 +174,7 @@ STATUS FusionPass::MatchOnePattern(schema::MetaGraphT *graph, FusionPattern *pat
   return RET_OK;
 }
 
-bool FusionPass::CheckMatch(schema::MetaGraphT *graph, const std::shared_ptr<PatternOp>& patternOp) {
+bool FusionPass::CheckMatch(schema::MetaGraphT *graph, const std::shared_ptr<PatternOp> &patternOp) {
   MS_ASSERT(graph != nullptr);
   MS_ASSERT(patternOp != nullptr);
   // find included nodes
@@ -204,7 +204,7 @@ bool FusionPass::CheckMatch(schema::MetaGraphT *graph, const std::shared_ptr<PatternOp
 isPlaceHold) {
       continue;
     }
@@ -319,4 +319,3 @@ void FusionPass::MergeNodeAttrFromPost(std::unique_ptr<CNodeT> &dstOp, s
                                        size_t dstOpOutIdx) {}
 }  // namespace lite
 }  // namespace mindspore
-
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pass.h
index 058b933a65..191d2d7c31 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pass.h
+++ 
b/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pass.h @@ -67,7 +67,7 @@ class FusionPass : public GraphPass { bool MatchTree(schema::MetaGraphT *graph, size_t nodeIdx, const std::shared_ptr &target, std::vector &sinkIdes, std::vector &pathSinkIdes); - static bool CheckMatch(schema::MetaGraphT *graph, const std::shared_ptr& patternOp); + static bool CheckMatch(schema::MetaGraphT *graph, const std::shared_ptr &patternOp); void MergeNodeAttrFromPost(std::unique_ptr &dstOp, std::unique_ptr &postOp, size_t dstOpOutIdx = 0); @@ -84,4 +84,3 @@ class FusionPass : public GraphPass { } // namespace mindspore #endif // MINDSPORE_PREDICT_FUSION_PASS_H - diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pattern.h b/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pattern.h index 38331f45d0..89737b2177 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pattern.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pattern.h @@ -37,7 +37,7 @@ struct Path { // Op description in pattern struct PatternOp { - std::string id; // id of op in pattern + std::string id; // id of op in pattern std::vector types; // type of matchable op // only support node with no more than two preNode now // avoid loop reference @@ -64,7 +64,7 @@ struct PatternOp { this->path->nodeIdx = -1; this->pathSetted = false; } - static std::shared_ptr Copy(const std::shared_ptr& src) { + static std::shared_ptr Copy(const std::shared_ptr &src) { if (src == nullptr) { return nullptr; } @@ -98,7 +98,7 @@ class FusionPattern { FusionPattern &AddPatternOp(const std::string &id, const std::vector &types); - FusionPattern &AddPatternOp(const std::shared_ptr& patternOp); + FusionPattern &AddPatternOp(const std::shared_ptr &patternOp); FusionPattern &RemovePatternOp(const std::string &id); @@ -137,4 +137,3 @@ class FusionPattern { } // namespace mindspore #endif // MINDSPORE_PREDICT_FUSION_PATTERN_H - diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/matmul_biasadd_fusion_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/fusion/matmul_biasadd_fusion_pass.cc index 2b45d210ee..63691ede97 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/matmul_biasadd_fusion_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/matmul_biasadd_fusion_pass.cc @@ -82,7 +82,7 @@ STATUS MatMulBiasAddFusionPass::DoFusion(MetaGraphT *graph, const std::string &p MS_ASSERT(graph->allTensors.size() > baNodeInputIndex.at(BIASADD_OP_BIAS_INDEX)); const auto &baNodeBiasTensor = graph->allTensors.at(baNodeInputIndex.at(BIASADD_OP_BIAS_INDEX)); MS_ASSERT(baNodeBiasTensor != nullptr); - if (baNodeBiasTensor->refCount != schema::NodeType_ValueNode) { + if (baNodeBiasTensor->refCount != schema::NodeType::NodeType_ValueNode) { // dont fusion, return return RET_OK; } diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/matmul_biasadd_fusion_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/fusion/matmul_biasadd_fusion_pass.h index ff23fcd47f..7350058ddc 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/matmul_biasadd_fusion_pass.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/matmul_biasadd_fusion_pass.h @@ -43,17 +43,16 @@ class MatMulBiasAddFusionPass : public FusionPass { STATUS Run(MetaGraphT *graph) override; protected: - static STATUS AddFullConnectionBiasTensor(const std::shared_ptr& matMulPath, - const std::shared_ptr& dstPath, - MetaGraphT *subGraph); - STATUS 
InsertTransposeNode(MetaGraphT *subGraph, const std::shared_ptr& matMulPath); + static STATUS AddFullConnectionBiasTensor(const std::shared_ptr &matMulPath, + const std::shared_ptr &dstPath, MetaGraphT *subGraph); + STATUS InsertTransposeNode(MetaGraphT *subGraph, const std::shared_ptr &matMulPath); protected: bool transA = false; bool transB = false; size_t id = 0; - OpDefCopyer TransposeOpCopyer = [](CNodeT *inOpDef) -> std::unique_ptr { + OpDefCopyer TransposeOpCopyer = [](CNodeT *inOpDef) -> std::unique_ptr { std::unique_ptr newOpDef(new (std::nothrow) CNodeT); if (newOpDef == nullptr) { MS_LOG(ERROR) << "new OpDefT failed"; @@ -81,4 +80,3 @@ class MatMulBiasAddFusionPass : public FusionPass { } // namespace mindspore #endif // MINDSPORE_PREDICT_MATMUL_BIASADD_FUSION_PASS_H - diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.cc index e243050fe5..8c3bfd495b 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.cc @@ -79,7 +79,7 @@ STATUS MulAddFusionPass::DoFusion(MetaGraphT *graph, const std::string &patternN MS_ASSERT(graph->allTensors.size() > mulNodeInputIndex.at(MUL_OP_BIAS_INDEX)); const auto &mulNodeBiasTensor = graph->allTensors.at(mulNodeInputIndex.at(MUL_OP_BIAS_INDEX)); MS_ASSERT(mulNodeBiasTensor != nullptr); - if (mulNodeBiasTensor->refCount != schema::NodeType_ValueNode) { + if (mulNodeBiasTensor->refCount != schema::NodeType::NodeType_ValueNode) { // dont fusion, return return RET_OK; } @@ -92,7 +92,7 @@ STATUS MulAddFusionPass::DoFusion(MetaGraphT *graph, const std::string &patternN MS_ASSERT(graph->allTensors.size() > addNodeInputIndex.at(ADD_OP_BIAS_INDEX)); const auto &addNodeBiasTensor = graph->allTensors.at(addNodeInputIndex.at(ADD_OP_BIAS_INDEX)); MS_ASSERT(addNodeBiasTensor != nullptr); - if (addNodeBiasTensor->refCount != schema::NodeType_ValueNode) { + if (addNodeBiasTensor->refCount != schema::NodeType::NodeType_ValueNode) { // dont fusion, return return RET_OK; } diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.h index 2006b434a3..d988dc70f8 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.h @@ -44,8 +44,8 @@ class MulAddFusionPass : public FusionPass { STATUS Run(MetaGraphT *graph) override; protected: - static STATUS AddNewScaleNode(MetaGraphT *graph, const std::unique_ptr &mulNode, - CNodeT* addNode, uint32_t addBiasIndex); + static STATUS AddNewScaleNode(MetaGraphT *graph, const std::unique_ptr &mulNode, CNodeT *addNode, + uint32_t addBiasIndex); }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/quant_cast_fusion_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/fusion/quant_cast_fusion_pass.cc index 15bc674c06..61c639b810 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/quant_cast_fusion_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/quant_cast_fusion_pass.cc @@ -35,8 +35,8 @@ STATUS QuantCastFusionPass::DoFusion(MetaGraphT *graph, const std::string &patte std::unordered_map> &matchedPath) { MS_ASSERT(graph != nullptr); if (matchedPath.size() != kQuantCastMatchPathLen2 && 
matchedPath.size() != kQuantCastMatchPathLen3) { - MS_LOG(ERROR) << "QuantDtypeCastFusion should have " << kQuantCastMatchPathLen2 << " or " << - kQuantCastMatchPathLen3 << " NodeIndex in matchedPair"; + MS_LOG(ERROR) << "QuantDtypeCastFusion should have " << kQuantCastMatchPathLen2 << " or " << kQuantCastMatchPathLen3 + << " NodeIndex in matchedPair"; return RET_PARAM_INVALID; } @@ -134,5 +134,3 @@ STATUS QuantCastFusionPass::DefinePattern() { } } // namespace lite } // namespace mindspore - - diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/quant_cast_fusion_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/fusion/quant_cast_fusion_pass.h index dad09cfd02..9765bff938 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/quant_cast_fusion_pass.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/quant_cast_fusion_pass.h @@ -48,4 +48,3 @@ class QuantCastFusionPass : public FusionPass { } // namespace mindspore #endif // MINDSPORE_PREDICT_QUANT_CAST_FUSION_PASS_H - diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/batchnorm_convert_scale_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/batchnorm_convert_scale_pass.cc index ace35e9eaf..cf8b34ca80 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/batchnorm_convert_scale_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/batchnorm_convert_scale_pass.cc @@ -43,7 +43,7 @@ constexpr const float EPS = 1e-8; constexpr const float EPS_DEFAULT_FLOAT = 1e-8; constexpr const float POW_NUM = 0.5; constexpr const int32_t NCHW_DIM_C = 1; -} +} // namespace STATUS BatchNormConvertScalePass::Run(MetaGraphT *graph) { MS_ASSERT(graph != nullptr); @@ -94,14 +94,14 @@ STATUS BatchNormConvertScalePass::GenNewScaleTensor(MetaGraphT *graph, const std MS_ASSERT(graph != nullptr); MS_ASSERT(bnNode != nullptr); GetTransParam(graph, bnNode); - newScaleWeightTensor = std::unique_ptr(new(std::nothrow) TensorT); + newScaleWeightTensor = std::unique_ptr(new (std::nothrow) TensorT); if (newScaleWeightTensor == nullptr) { MS_LOG(ERROR) << "new weightTensor failed"; return RET_ERROR; } newScaleWeightTensor->dataType = bnMeanTensor->dataType; newScaleWeightTensor->format = bnMeanTensor->format; - newScaleWeightTensor->refCount = schema::NodeType_ValueNode; + newScaleWeightTensor->refCount = schema::NodeType::NodeType_ValueNode; newScaleWeightTensor->dims = bnMeanTensor->dims; auto weightShapeSize = GetShapeSize(*bnMeanTensor); newScaleWeightTensor->data.resize(weightShapeSize * sizeof(float)); @@ -116,7 +116,7 @@ STATUS BatchNormConvertScalePass::GenNewScaleTensor(MetaGraphT *graph, const std return RET_ERROR; } - newScaleBiasTensor = std::unique_ptr(new(std::nothrow) TensorT); + newScaleBiasTensor = std::unique_ptr(new (std::nothrow) TensorT); if (newScaleBiasTensor == nullptr) { MS_LOG(ERROR) << "new weightTensor failed"; return RET_ERROR; @@ -124,7 +124,7 @@ STATUS BatchNormConvertScalePass::GenNewScaleTensor(MetaGraphT *graph, const std newScaleBiasTensor->dataType = bnMeanTensor->dataType; newScaleBiasTensor->format = bnMeanTensor->format; - newScaleBiasTensor->refCount = schema::NodeType_ValueNode; + newScaleBiasTensor->refCount = schema::NodeType::NodeType_ValueNode; newScaleBiasTensor->dims = bnMeanTensor->dims; weightShapeSize = GetShapeSize(*bnMeanTensor); newScaleBiasTensor->data.resize(weightShapeSize * sizeof(float)); @@ -168,8 +168,8 @@ STATUS BatchNormConvertScalePass::GetTransParam(MetaGraphT *graph, const std::un MS_LOG(ERROR) << "GetBnEpsilon 
failed";
     return status;
   }
-  this->transScale = new(std::nothrow) float[bnChannel];
-  this->transBias = new(std::nothrow) float[bnChannel];
+  this->transScale = new (std::nothrow) float[bnChannel];
+  this->transBias = new (std::nothrow) float[bnChannel];
   // cal transScale, tf : scale/sqrt(variance + eps); caffe : 1/sqrt(variance + eps)
   if (memcpy_s(transScale, bnChannel * sizeof(float), varianceData, bnChannel * sizeof(float)) != 0) {
     MS_LOG(ERROR) << "memcpy_s transScale error";
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/format_trans_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/format_trans_pass.cc
index 77571609cd..51f266ca13 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/graph/format_trans_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/format_trans_pass.cc
@@ -81,7 +81,7 @@ STATUS FormatTransPass::DoModelInputFormatTrans(schema::MetaGraphT *graph) {
     MS_ASSERT(transNode->inputIndex.size() == 1);
     MS_ASSERT(subGraph->allTensors.size() > transNode->inputIndex.front());
     auto &graphInTensor = graph->allTensors.at(transNode->inputIndex.front());
-    graphInTensor->format = schema::Format_NHWC;
+    graphInTensor->format = schema::Format::Format_NHWC;
     // assume parser not reformat shape
     auto oldDims = graphInTensor->dims;
     graphInTensor->dims = {oldDims[NCHW_N], oldDims[NCHW_H], oldDims[NCHW_W], oldDims[NCHW_C]};
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.cc
index a2f8f2f855..ca857fc4a0 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.cc
@@ -18,44 +18,43 @@
 #include <vector>
 #include "utils/log_adapter.h"
 #include "include/errorcode.h"
-#include "src/ir/tensor.h"
+#include "src/tensor.h"
 #include "src/ops/primitive_c.h"
 
 using mindspore::lite::PrimitiveC;
-using mindspore::lite::tensor::Tensor;
+using mindspore::lite::Tensor;
 
 namespace mindspore {
 namespace lite {
 namespace {
-std::vector<tensor::Tensor *> ConvertTensorToLiteTensor(MetaGraphT *graph, const std::vector<uint32_t> &tensor_indexs,
-                                                        const schema::PrimitiveType node_type) {
-  std::vector<tensor::Tensor *> lite_tensors;
+std::vector<Tensor *> ConvertTensorToLiteTensor(MetaGraphT *graph, const std::vector<uint32_t> &tensor_indexs,
+                                                const schema::PrimitiveType node_type) {
+  std::vector<Tensor *> lite_tensors;
   for (size_t i = 0; i < tensor_indexs.size(); i++) {
     auto &tensorT = graph->allTensors.at(tensor_indexs[i]);
     auto tensor_shape = tensorT->dims;
-    auto lite_tensor =
-        std::make_unique<tensor::Tensor>(TypeId(tensorT->dataType), tensor_shape, tensorT->format, tensorT->nodeType);
+    auto lite_tensor = std::make_unique<Tensor>(TypeId(tensorT->dataType), tensor_shape, tensorT->format,
+                                                TensorCategory(tensorT->nodeType));
     if (lite_tensor == nullptr) {
       MS_LOG(ERROR) << "lite tensor is nullptr";
-      return std::vector<tensor::Tensor *>();
+      return std::vector<Tensor *>();
     }
     // reshape op must get tensor data to infershape
     if (node_type == schema::PrimitiveType_Reshape && i == 1 && tensorT->nodeType == NodeType_ValueNode) {
       auto lite_tensor_size = tensorT->data.size() * sizeof(uint8_t);
       // when tensorT as param input
       if (lite_tensor_size == 0) {
-        return std::vector<tensor::Tensor *>();
+        return std::vector<Tensor *>();
       }
-      auto tensor_data = std::unique_ptr<char[]>(new (std::nothrow) char[lite_tensor_size / sizeof(char)]);
-      if (tensor_data == nullptr) {
-        MS_LOG(ERROR) << "tensor_data is nullptr";
-        return std::vector<tensor::Tensor *>();
+      auto ret = lite_tensor->MallocData();
+      if (ret != 0) {
+        MS_LOG(ERROR) << "Malloc tensor data 
failed"; + return std::vector(); } - auto ret = memcpy_s(tensor_data.get(), lite_tensor_size, tensorT->data.data(), lite_tensor_size); + ret = memcpy_s(lite_tensor->MutableData(), lite_tensor->Size(), tensorT->data.data(), lite_tensor_size); if (ret != EOK) { MS_LOG(ERROR) << "memcpy error: " << ret; - return std::vector(); + return std::vector(); } - lite_tensor->SetData(tensor_data.release()); lite_tensors.emplace_back(lite_tensor.release()); continue; } @@ -83,7 +82,7 @@ STATUS InferShapePass::Run(MetaGraphT *graph) { MS_LOG(ERROR) << "copy primitiveT error"; return RET_ERROR; } - auto primitiveC = std::shared_ptr(PrimitiveC::UnPackFromSchemaPrimitiveT(primitiveT.release())); + auto primitiveC = std::shared_ptr(PrimitiveC::Create(primitiveT.release())); if (primitiveC == nullptr) { MS_LOG(ERROR) << "unpack primitiveT error"; return RET_ERROR; diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/isolated_node_remove_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/isolated_node_remove_pass.cc index b6f0113bfe..848699f7ca 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/isolated_node_remove_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/isolated_node_remove_pass.cc @@ -43,4 +43,3 @@ STATUS IsolatedNodeRemovePass::Run(schema::MetaGraphT *graph) { } } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/isolated_node_remove_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/graph/isolated_node_remove_pass.h index 293ccd8920..efe6957340 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/isolated_node_remove_pass.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/isolated_node_remove_pass.h @@ -34,4 +34,3 @@ class IsolatedNodeRemovePass : public GraphPass { } // namespace mindspore #endif // MINDSPORE_PREDICT_ISOLATED_NODE_REMOVE_PASS_H - diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/model_input_format_preprocess_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/model_input_format_preprocess_pass.cc index b6979b7dbc..1a4002ae2d 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/model_input_format_preprocess_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/model_input_format_preprocess_pass.cc @@ -28,16 +28,16 @@ STATUS ModelInputFormatPreProcessPass::Run(schema::MetaGraphT *graph) { for (auto inputIndex : graph->inputIndex) { if (graph->allTensors[inputIndex]->dims.size() == 4) { std::vector tmpDims(graph->allTensors[inputIndex]->dims); - auto status = - NodeUtils::ConvertDims(schema::Format_NCHW, tmpDims, schema::Format_NHWC, &graph->allTensors[inputIndex]->dims); + auto status = NodeUtils::ConvertDims(schema::Format::Format_NCHW, tmpDims, schema::Format::Format_NHWC, + &graph->allTensors[inputIndex]->dims); if (status == RET_OK) { - graph->allTensors[inputIndex]->format = schema::Format_NHWC; + graph->allTensors[inputIndex]->format = schema::Format::Format_NHWC; } else { MS_LOG(ERROR) << "ConvertDims from NHWC to NCHW error: " << status; return RET_ERROR; } } else { - graph->allTensors[inputIndex]->format = schema::Format_NHWC; + graph->allTensors[inputIndex]->format = schema::Format::Format_NHWC; } } return RET_OK; diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/model_input_format_preprocess_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/graph/model_input_format_preprocess_pass.h index 187c93079e..82248960bf 100644 --- 
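
The infershape hunk above replaces a hand-rolled new char[] buffer with the tensor's own MallocData() followed by memcpy_s. A cut-down illustration of the new flow, assuming a stand-in Tensor type (the real class in src/tensor.h derives its allocation size from the shape, and memcpy_s comes from the securec library):

#include <cstdint>
#include <cstring>
#include <vector>

// Stand-in with an explicit size parameter; the real MallocData() takes none.
struct Tensor {
  std::vector<uint8_t> buf;
  int MallocData(size_t n) { buf.resize(n); return 0; }
  void *MutableData() { return buf.data(); }
  size_t Size() const { return buf.size(); }
};

int CopyIntoTensor(Tensor *dst, const std::vector<uint8_t> &src) {
  if (dst->MallocData(src.size()) != 0) {
    return -1;  // the pass logs via MS_LOG(ERROR) and returns an empty vector
  }
  // The converter calls memcpy_s(dst, dstSize, src, srcSize) here instead.
  std::memcpy(dst->MutableData(), src.data(), src.size());
  return 0;
}
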
a/mindspore/lite/tools/converter/legacy_optimizer/graph/model_input_format_preprocess_pass.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/model_input_format_preprocess_pass.h @@ -35,4 +35,3 @@ class ModelInputFormatPreProcessPass : public GraphPass { } // namespace mindspore #endif // MINDSPORE_PREDICT_MODEL_FORMAT_PREPROCESS_PASS_H - diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/topological_sort_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/topological_sort_pass.cc index 0478584f22..3082cb6ad5 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/topological_sort_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/topological_sort_pass.cc @@ -31,7 +31,7 @@ STATUS TopologicalSortPass::Run(schema::MetaGraphT *graph) { std::vector sinkedTensorIdxes; // put all const tensor index into sinkedTensorIdxes for (size_t i = 0; i < graph->allTensors.size(); i++) { - if (graph->allTensors.at(i)->nodeType == schema::NodeType_ValueNode) { + if (graph->allTensors.at(i)->nodeType == schema::NodeType::NodeType_ValueNode) { sinkedTensorIdxes.insert(sinkedTensorIdxes.end(), i); } } @@ -79,4 +79,3 @@ bool TopologicalSortPass::IsNodeNonDepend(const std::unique_ptr } } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/topological_sort_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/graph/topological_sort_pass.h index 994648ab57..ecb37e63b0 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/topological_sort_pass.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/topological_sort_pass.h @@ -39,4 +39,3 @@ class TopologicalSortPass : public GraphPass { } // namespace mindspore #endif // MINDSPORE_PREDICT_TOPOLOGICAL_SORT_PASS_H - diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_remove_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_remove_pass.cc index 61c28f5a0a..a8d7bf8f51 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_remove_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_remove_pass.cc @@ -19,11 +19,11 @@ #include "utils/log_adapter.h" #include "include/errorcode.h" #include "tools/common/graph_util.h" -#include "src/ir/tensor.h" +#include "src/tensor.h" #include "src/ops/primitive_c.h" -using mindspore::lite::tensor::Tensor; using mindspore::lite::PrimitiveC; +using mindspore::lite::Tensor; namespace mindspore { namespace lite { STATUS TransOpRemovePass::Run(MetaGraphT *graph) { diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/unused_node_remove_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/unused_node_remove_pass.cc index 01968311a6..ea9496bafe 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/unused_node_remove_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/unused_node_remove_pass.cc @@ -47,4 +47,3 @@ STATUS UnusedNodeRemovePass::Run(schema::MetaGraphT *graph) { } } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/unused_node_remove_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/graph/unused_node_remove_pass.h index 7716592a24..3539568b3a 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/unused_node_remove_pass.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/unused_node_remove_pass.h @@ -34,4 +34,3 @@ class UnusedNodeRemovePass : 
public GraphPass {
 }  // namespace mindspore
 
 #endif  // MINDSPORE_PREDICT_UNUSED_NODE_REMOVE_PASS_H
-
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/weight_format_hardcode_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/weight_format_hardcode_pass.cc
index c23e2681d0..7c396c2d3e 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/graph/weight_format_hardcode_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/weight_format_hardcode_pass.cc
@@ -71,7 +71,7 @@ STATUS WeightFormatHardCodePass::Run(MetaGraphT *graph) {
       return RET_ERROR;
     }
     if (status != RET_OK) {
-      MS_LOG(ERROR) << "Format hardCode faild: " << status << ", node: " << node->name;
+      MS_LOG(ERROR) << "schema::Format hardCode failed: " << status << ", node: " << node->name;
       return RET_ERROR;
     }
   }
@@ -89,7 +89,7 @@ STATUS WeightFormatHardCodePass::HardCodeCAFFE(const std::unique_ptr<CNodeT> &node,
     case QuantType_QUANT_NONE: {
       if (opType == schema::PrimitiveType_Conv2D || opType == schema::PrimitiveType_DepthwiseConv2D ||
           opType == schema::PrimitiveType_DeConv2D || opType == schema::PrimitiveType_DeDepthwiseConv2D) {
-        weightTensor->format = Format_KCHW;
+        weightTensor->format = schema::Format::Format_KCHW;
       } else {
         MS_LOG(ERROR) << "Unsupported opType: " << EnumNamePrimitiveType(opType) << ", node: " << node->name;
       }
@@ -113,11 +113,11 @@ STATUS WeightFormatHardCodePass::HardCodeONNX(const std::unique_ptr<CNodeT> &node,
     case QuantType_AwareTraining: {
       // sum up from current onnx quant models
       if (opType == PrimitiveType_Conv2D) {
-        weightTensor->format = Format_KHWC;
+        weightTensor->format = schema::Format::Format_KHWC;
       } else if (opType == PrimitiveType_DepthwiseConv2D) {
-        weightTensor->format = Format_CHWK;
+        weightTensor->format = schema::Format::Format_CHWK;
       } else if (opType == PrimitiveType_DeConv2D) {
-        weightTensor->format = Format_CKHW;
+        weightTensor->format = schema::Format::Format_CKHW;
       } else {
         MS_LOG(ERROR) << "Unsupported opType: " << EnumNamePrimitiveType(opType) << ", node: " << node->name;
         return RET_ERROR;
@@ -129,9 +129,9 @@ STATUS WeightFormatHardCodePass::HardCodeONNX(const std::unique_ptr<CNodeT> &node,
       // deconv (C x K/group x kH x kW) group = 1
       // dedepth (C x K/group x kH x kW) group = channelIn ==> (C, multiplier, H, W)
       if (opType == PrimitiveType_Conv2D || opType == PrimitiveType_DepthwiseConv2D) {
-        weightTensor->format = Format_KCHW;
+        weightTensor->format = schema::Format::Format_KCHW;
       } else if (opType == PrimitiveType_DeConv2D) {
-        weightTensor->format = Format_CKHW;
+        weightTensor->format = schema::Format::Format_CKHW;
       } else {
         MS_LOG(ERROR) << "Unsupported opType: " << EnumNamePrimitiveType(opType) << ", node: " << node->name;
         return RET_ERROR;
@@ -155,19 +155,19 @@ STATUS WeightFormatHardCodePass::HardCodeMS(const std::unique_ptr<CNodeT> &node,
   switch (this->quantType) {
     case QuantType_AwareTraining: {
       if (opType == schema::PrimitiveType_Conv2D) {
-        weightTensor->format = schema::Format_KCHW;
+        weightTensor->format = schema::Format::Format_KCHW;
      } else if (opType == PrimitiveType_DepthwiseConv2D) {
-        weightTensor->format = Format_CKHW;
+        weightTensor->format = schema::Format::Format_CKHW;
      } else {
-        weightTensor->format = schema::Format_KCHW;
+        weightTensor->format = schema::Format::Format_KCHW;
      }
    } break;
    case QuantType_QUANT_NONE: {
      // sum up from current ms quant models
      if (opType == PrimitiveType_Conv2D) {
-        weightTensor->format = Format_KCHW;
+        weightTensor->format = schema::Format::Format_KCHW;
      } else if (opType == PrimitiveType_DepthwiseConv2D) {
-        weightTensor->format = 
Format_CKHW; + weightTensor->format = schema::Format::Format_CKHW; } else { MS_LOG(ERROR) << "Unsupported opType: " << EnumNamePrimitiveType(opType) << ", node: " << node->name; return RET_ERROR; @@ -194,11 +194,11 @@ STATUS WeightFormatHardCodePass::HardCodeTFLITE(const std::unique_ptr &n case QuantType_WeightQuant: case QuantType_QUANT_NONE: { if (opType == schema::PrimitiveType_Conv2D) { - weightTensor->format = schema::Format_KHWC; + weightTensor->format = schema::Format::Format_KHWC; } else if (opType == schema::PrimitiveType_DepthwiseConv2D) { - weightTensor->format = schema::Format_CHWK; + weightTensor->format = schema::Format::Format_CHWK; } else if (opType == schema::PrimitiveType_DeConv2D) { - weightTensor->format = schema::Format_CHWK; + weightTensor->format = schema::Format::Format_CHWK; } else { MS_LOG(ERROR) << "Unsupported opType: " << EnumNamePrimitiveType(opType) << ", node: " << node->name; return RET_ERROR; diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/weight_format_transform_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/weight_format_transform_pass.cc index 0c9509eeb8..ee26865341 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/weight_format_transform_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/weight_format_transform_pass.cc @@ -27,7 +27,7 @@ void WeightFormatTransformPass::SetQuantType(QuantType quantType) { this->quantT void WeightFormatTransformPass::SetFmkType(converter::FmkType fmkType) { this->fmkType = fmkType; } -void WeightFormatTransformPass::SetDstFormat(Format format) { this->dstFormat = format; } +void WeightFormatTransformPass::SetDstFormat(schema::Format format) { this->dstFormat = format; } STATUS WeightFormatTransformPass::Run(MetaGraphT *graph) { MS_ASSERT(graph != nullptr); @@ -73,8 +73,8 @@ STATUS WeightFormatTransformPass::QuantDataFormatTrans(MetaGraphT *graph) { if (status == RET_OK) { weightTensor->format = curDstFormat; } else { - MS_LOG(ERROR) << "TransFilter " << EnumNameFormat(weightTensor->format) << "To" - << EnumNameFormat(curDstFormat) << " failed, node : " << node->name; + MS_LOG(ERROR) << "TransFilter " << EnumNameFormat(weightTensor->format) << "To" << EnumNameFormat(curDstFormat) + << " failed, node : " << node->name; return ERROR; } } @@ -100,35 +100,35 @@ STATUS WeightFormatTransformPass::NonQuantDataFormatTrans(MetaGraphT *graph) { STATUS status; if (opType == PrimitiveType_Conv2D || opType == PrimitiveType_DepthwiseConv2D || opType == schema::PrimitiveType_DeConv2D) { - Format curDstFormat; - if (this->dstFormat == Format_NUM_OF_FORMAT) { - curDstFormat = Format_KHWC; + schema::Format curDstFormat; + if (this->dstFormat == schema::Format::Format_NUM_OF_FORMAT) { + curDstFormat = schema::Format::Format_KHWC; } else { curDstFormat = this->dstFormat; } status = TransFilterFormat(weightTensor.get(), curDstFormat); if (status == RET_OK) { - // node->attr.AsConv2D()->format = Format_NCHW; + // node->attr.AsConv2D()->format = schema::Format::Format_NCHW; weightTensor->format = curDstFormat; } else { - MS_LOG(ERROR) << "TransFilter " << EnumNameFormat(weightTensor->format) << "To" - << EnumNameFormat(curDstFormat) << " failed, node : " << node->name; + MS_LOG(ERROR) << "TransFilter " << EnumNameFormat(weightTensor->format) << "To" << EnumNameFormat(curDstFormat) + << " failed, node : " << node->name; return ERROR; } } else { // weight should be CKHW - Format curDstFormat; - if (this->dstFormat == Format_NUM_OF_FORMAT) { - curDstFormat = Format_KHWC; + 
schema::Format curDstFormat; + if (this->dstFormat == schema::Format::Format_NUM_OF_FORMAT) { + curDstFormat = schema::Format::Format_KHWC; } else { curDstFormat = this->dstFormat; } status = TransFilterFormat(weightTensor.get(), curDstFormat); if (status == RET_OK) { - // node->attr.AsDepthwiseConv2D()->format = Format_NCHW; + // node->attr.AsDepthwiseConv2D()->format = schema::Format::Format_NCHW; weightTensor->format = curDstFormat; } else { - MS_LOG(ERROR) << "TransFilter " << EnumNameFormat(weightTensor->format) << "To" - << EnumNameFormat(curDstFormat) << " failed, node : " << node->name; + MS_LOG(ERROR) << "TransFilter " << EnumNameFormat(weightTensor->format) << "To" << EnumNameFormat(curDstFormat) + << " failed, node : " << node->name; return ERROR; } } diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/weight_format_transform_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/graph/weight_format_transform_pass.h index 110b1df58a..5d582ca784 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/weight_format_transform_pass.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/weight_format_transform_pass.h @@ -33,7 +33,7 @@ class WeightFormatTransformPass : public GraphPass { void SetFmkType(converter::FmkType fmkType); - void SetDstFormat(Format format); + void SetDstFormat(schema::Format format); STATUS Run(MetaGraphT *graph) override; @@ -45,7 +45,7 @@ class WeightFormatTransformPass : public GraphPass { private: QuantType quantType = QuantType_QUANT_NONE; converter::FmkType fmkType = converter::FmkType_TF; - Format dstFormat = Format_NUM_OF_FORMAT; + schema::Format dstFormat = schema::Format::Format_NUM_OF_FORMAT; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/main.cc b/mindspore/lite/tools/converter/main.cc index 6923ed75c1..d54a1dcc81 100644 --- a/mindspore/lite/tools/converter/main.cc +++ b/mindspore/lite/tools/converter/main.cc @@ -17,4 +17,3 @@ #include "tools/converter/converter.h" int main(int argc, const char **argv) { return mindspore::lite::RunConverter(argc, argv); } - diff --git a/mindspore/lite/tools/converter/model_parser.h b/mindspore/lite/tools/converter/model_parser.h index f216699acb..2e8b4e67d9 100644 --- a/mindspore/lite/tools/converter/model_parser.h +++ b/mindspore/lite/tools/converter/model_parser.h @@ -33,15 +33,15 @@ class ModelParser { virtual ~ModelParser() {} FuncGraphPtr Parse(const std::string &modelFile, const std::string &weightFile, - const QuantType &quantType = QuantType_QUANT_NONE) { + const QuantType &quantType = QuantType_QUANT_NONE) { auto *meta_graph = ParseToFb(modelFile, weightFile, quantType); auto func_graph = this->Fb2Anf(meta_graph); - delete(meta_graph); + delete (meta_graph); return func_graph; } virtual schema::MetaGraphT *ParseToFb(const std::string &modelFile, const std::string &weightFile, - const QuantType &quantType = QuantType_QUANT_NONE) = 0; + const QuantType &quantType = QuantType_QUANT_NONE) = 0; public: static FuncGraphPtr Fb2Anf(schema::MetaGraphT *meta_graph) { @@ -59,5 +59,3 @@ class ModelParser { } // namespace mindspore::lite #endif - - diff --git a/mindspore/lite/tools/converter/optimizer.cc b/mindspore/lite/tools/converter/optimizer.cc index 4fd6bc239f..6189d56c1f 100644 --- a/mindspore/lite/tools/converter/optimizer.cc +++ b/mindspore/lite/tools/converter/optimizer.cc @@ -78,4 +78,3 @@ STATUS Optimizer::Run(schema::MetaGraphT *graphDefT) { } } // namespace lite } // namespace mindspore - diff --git 
a/mindspore/lite/tools/converter/optimizer.h b/mindspore/lite/tools/converter/optimizer.h index 346e8c6016..dbafca4542 100644 --- a/mindspore/lite/tools/converter/optimizer.h +++ b/mindspore/lite/tools/converter/optimizer.h @@ -82,5 +82,3 @@ class Optimizer { } // namespace mindspore #endif - - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_argmax_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_argmax_parser.cc index 8aa275c037..d0deccaa73 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_argmax_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_argmax_parser.cc @@ -19,10 +19,8 @@ namespace mindspore { namespace lite { -STATUS CaffeArgMaxParser::Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, - std::vector *weightVec) { +STATUS CaffeArgMaxParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, + schema::CNodeT *op, std::vector *weightVec) { MS_LOG(DEBUG) << "parse CaffeArgMaxParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -68,4 +66,3 @@ STATUS CaffeArgMaxParser::Parse(const caffe::LayerParameter &proto, CaffeNodeRegistrar g_caffeArgMaxParser("ArgMax", new CaffeArgMaxParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_argmax_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_argmax_parser.h index 5b1b9fe943..1c24801118 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_argmax_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_argmax_parser.h @@ -27,13 +27,10 @@ class CaffeArgMaxParser : public CaffeNodeParser { public: CaffeArgMaxParser() : CaffeNodeParser("argmax") {} - STATUS Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, + STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_ARGMAX_PARSER_H_ - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.cc index b859c8464e..24a24128ba 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.cc @@ -29,10 +29,8 @@ namespace mindspore { namespace lite { using STATUS = int; -STATUS CaffeBatchNormParser::Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, - std::vector *weightVec) { +STATUS CaffeBatchNormParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, + schema::CNodeT *op, std::vector *weightVec) { MS_LOG(DEBUG) << "parse CaffeBatchNormParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -53,21 +51,20 @@ STATUS CaffeBatchNormParser::Parse(const caffe::LayerParameter &proto, const caffe::BatchNormParameter batchNormParam = proto.batch_norm_param(); // check bottom size if (proto.bottom_size() != CAFFE_BATCHNORMAL_BOTTOM_SIZE) { - MS_LOG(ERROR) << "Layer " << proto.name().c_str() << "bottom numbers is error, it must be " \ + MS_LOG(ERROR) << "Layer " << proto.name().c_str() << "bottom numbers is error, it must be " << CAFFE_BATCHNORMAL_BOTTOM_SIZE << "but is " << proto.bottom_size(); return RET_ERROR; } // check top size if (proto.top_size() != 
CAFFE_BATCHNORMAL_TOP_SIZE) { - MS_LOG(ERROR) << "Layer " << proto.name().c_str() << "top numbers is error, it must be " \ + MS_LOG(ERROR) << "Layer " << proto.name().c_str() << "top numbers is error, it must be " << CAFFE_BATCHNORMAL_TOP_SIZE << "but is " << proto.top_size(); return RET_ERROR; } if (batchNormParam.has_eps()) { - if (fabs(CAFFE_BATCH_NORM_ESP_DEFAULT_FLOAT - batchNormParam.eps()) - < CAFFE_BATCH_NORM_ESP_DEFAULT_DIFF_FLOAT) { + if (fabs(CAFFE_BATCH_NORM_ESP_DEFAULT_FLOAT - batchNormParam.eps()) < CAFFE_BATCH_NORM_ESP_DEFAULT_DIFF_FLOAT) { attr->epsilon = CAFFE_BATCH_NORM_ESP_DEFAULT_FLOAT; } else { auto tmpAuto = batchNormParam.eps(); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.h index 83142963f2..fc5e0bb529 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.h @@ -27,13 +27,10 @@ class CaffeBatchNormParser : public CaffeNodeParser { public: CaffeBatchNormParser() : CaffeNodeParser("batchnorm") {} - STATUS Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, + STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_BATCHNORM_PARSER_H_ - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.cc index 009c46a7c2..682106ee47 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.cc @@ -21,10 +21,8 @@ const int32_t CONCAT_DEFAULT_AXIS = 1; namespace mindspore { namespace lite { -STATUS CaffeConcatParser::Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, - std::vector *weightVec) { +STATUS CaffeConcatParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, + schema::CNodeT *op, std::vector *weightVec) { MS_LOG(DEBUG) << "parse CaffeConcatParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -75,4 +73,3 @@ STATUS CaffeConcatParser::Parse(const caffe::LayerParameter &proto, CaffeNodeRegistrar g_caffeConcatParser("Concat", new CaffeConcatParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.h index f0cb018dcb..8d3f005674 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.h @@ -27,13 +27,10 @@ class CaffeConcatParser : public CaffeNodeParser { public: CaffeConcatParser() : CaffeNodeParser("concat") {} - STATUS Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, + STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONCAT_PARSER_H_ - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_conv_base_parser.cc 
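The eps handling joined into one line above implements a snap-to-default rule: an eps value within a tiny tolerance of Caffe's default is replaced by the exact default rather than compared with ==, which is the usual way to avoid spurious float inequality after a prototxt round-trip. Distilled into a sketch (the constant values are assumptions; the diff only shows the CAFFE_BATCH_NORM_ESP_* names):

    #include <cmath>

    constexpr float kCaffeBatchNormDefaultEps = 1e-5f;    // CAFFE_BATCH_NORM_ESP_DEFAULT_FLOAT
    constexpr float kCaffeBatchNormEpsTolerance = 1e-9f;  // CAFFE_BATCH_NORM_ESP_DEFAULT_DIFF_FLOAT

    float NormalizeEps(float eps) {
      // Values indistinguishable from the default collapse onto it exactly.
      return std::fabs(kCaffeBatchNormDefaultEps - eps) < kCaffeBatchNormEpsTolerance
                 ? kCaffeBatchNormDefaultEps
                 : eps;
    }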
b/mindspore/lite/tools/converter/parser/caffe/caffe_conv_base_parser.cc index efdfa3aa1d..dd767689d9 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_conv_base_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_conv_base_parser.cc @@ -26,8 +26,7 @@ static const int CAFFE_CONV_BIAS_DIM_NUM = 1; namespace mindspore { namespace lite { -STATUS CaffeConvBaseParser::ParsePads(const caffe::ConvolutionParameter &convParam, - std::vector<int64_t> *pad) { +STATUS CaffeConvBaseParser::ParsePads(const caffe::ConvolutionParameter &convParam, std::vector<int64_t> *pad) { /** * padUp = padH; * padDown = padH; @@ -74,8 +73,7 @@ STATUS CaffeConvBaseParser::ParsePads(const caffe::ConvolutionParameter &convPar return RET_OK; } -STATUS CaffeConvBaseParser::ParseStrides(const caffe::ConvolutionParameter &convParam, - std::vector<int64_t> *stride) { +STATUS CaffeConvBaseParser::ParseStrides(const caffe::ConvolutionParameter &convParam, std::vector<int64_t> *stride) { if (convParam.has_stride_h() || convParam.has_stride_w()) { if (convParam.stride_size() != 0) { MS_LOG(ERROR) << "Either stride or stride_h/w should be specified; not both"; @@ -119,8 +117,7 @@ STATUS CaffeConvBaseParser::ParseDilations(const caffe::ConvolutionParameter &co return RET_OK; } -STATUS CaffeConvBaseParser::ParseKernels(const caffe::ConvolutionParameter &convParam, - std::vector<int64_t> *kernel) { +STATUS CaffeConvBaseParser::ParseKernels(const caffe::ConvolutionParameter &convParam, std::vector<int64_t> *kernel) { if (convParam.has_kernel_h() || convParam.has_kernel_w()) { if (convParam.kernel_size_size() != 0) { MS_LOG(ERROR) << "Either kernel_size or kernel_h/w should be specified; not both."; @@ -149,8 +146,7 @@ STATUS CaffeConvBaseParser::ParseKernels(const caffe::ConvolutionParameter &conv return RET_OK; } -int CaffeConvBaseParser::ParseGroup(const caffe::ConvolutionParameter &convParam, - const std::string &layerType) { +int CaffeConvBaseParser::ParseGroup(const caffe::ConvolutionParameter &convParam, const std::string &layerType) { // group default 1 int group = 0; if (convParam.has_group()) { @@ -162,7 +158,7 @@ int CaffeConvBaseParser::ParseGroup(const caffe::ConvolutionParameter &convParam } int CaffeConvBaseParser::ParseChannelOut(const caffe::ConvolutionParameter &convParam, int32_t *channelOut) { - MS_ASSERT(channelOut != nullptr); + MS_ASSERT(channelOut != nullptr); if (!convParam.has_num_output()) { MS_LOG(ERROR) << "Parse num_output for failed."; return RET_ERROR; @@ -175,7 +171,7 @@ STATUS CaffeConvBaseParser::ParseWeight(const caffe::LayerParameter &weight, std::vector<schema::TensorT *> *weightVec) { // Layer must have Filter if (weight.blobs_size() == 0) { - MS_LOG(ERROR) << "No filter data in layer " << weight.name().c_str(); + MS_LOG(ERROR) << "No filter data in layer " << weight.name().c_str(); return RET_ERROR; } @@ -197,7 +193,7 @@ STATUS CaffeConvBaseParser::ParseWeight(const caffe::LayerParameter &weight, std::vector<int32_t> shape = bias->dims; if (shape.size() != CAFFE_CONV_BIAS_DIM_NUM) { - MS_LOG(ERROR) << "Bias dim-num of layer "<< weight.name().c_str() << " is not supported"; + MS_LOG(ERROR) << "Bias dim-num of layer " << weight.name().c_str() << " is not supported"; return RET_ERROR; } weightVec->push_back(bias); @@ -206,4 +202,3 @@ STATUS CaffeConvBaseParser::ParseWeight(const caffe::LayerParameter &weight, std::vector<schema::TensorT *> *weightVec) { } } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_conv_base_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_conv_base_parser.h index 7cdfe13d46..6313ef90c4 100644 ---
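ParsePads, ParseStrides, and ParseKernels above all enforce the same Caffe convention: the scalar repeated field (e.g. stride) and the h/w pair (stride_h/stride_w) are mutually exclusive. The shape of that check, distilled into a sketch (not the converter's actual helper):

    // Returns true when exactly one spelling of a conv parameter is used.
    bool HwFieldsConsistent(bool has_h, bool has_w, int scalar_count) {
      if (has_h || has_w) {
        // h/w form: both components required, the scalar list forbidden.
        return has_h && has_w && scalar_count == 0;
      }
      return true;  // scalar form (or library defaults) stands on its own
    }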
a/mindspore/lite/tools/converter/parser/caffe/caffe_conv_base_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_conv_base_parser.h @@ -30,28 +30,21 @@ class CaffeConvBaseParser { virtual ~CaffeConvBaseParser() {} - STATUS ParsePads(const caffe::ConvolutionParameter &conv_param, - std::vector<int64_t> *pad); + STATUS ParsePads(const caffe::ConvolutionParameter &conv_param, std::vector<int64_t> *pad); - STATUS ParseStrides(const caffe::ConvolutionParameter &conv_param, - std::vector<int64_t> *stride); + STATUS ParseStrides(const caffe::ConvolutionParameter &conv_param, std::vector<int64_t> *stride); - STATUS ParseDilations(const caffe::ConvolutionParameter &conv_param, - std::vector<int64_t> *dilation); + STATUS ParseDilations(const caffe::ConvolutionParameter &conv_param, std::vector<int64_t> *dilation); - STATUS ParseKernels(const caffe::ConvolutionParameter &conv_param, - std::vector<int64_t> *kernel); + STATUS ParseKernels(const caffe::ConvolutionParameter &conv_param, std::vector<int64_t> *kernel); - int ParseGroup(const caffe::ConvolutionParameter &convParam, - const std::string &layerType); + int ParseGroup(const caffe::ConvolutionParameter &convParam, const std::string &layerType); int ParseChannelOut(const caffe::ConvolutionParameter &convParam, int32_t *channelOut); - STATUS ParseWeight(const caffe::LayerParameter &weight, - std::vector<schema::TensorT *> *weightVec); + STATUS ParseWeight(const caffe::LayerParameter &weight, std::vector<schema::TensorT *> *weightVec); }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONV_BASE_PARSER_H_ - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_converter.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_converter.cc index 37e51bce7b..cd4dea78ed 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_converter.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_converter.cc @@ -18,9 +18,6 @@ namespace mindspore { namespace lite { -CaffeConverter::CaffeConverter() { - modelParser = new CaffeModelParser(); -} +CaffeConverter::CaffeConverter() { modelParser = new CaffeModelParser(); } } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_converter.h b/mindspore/lite/tools/converter/parser/caffe/caffe_converter.h index 2f6ea10d75..93a6f7ffbf 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_converter.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_converter.h @@ -33,4 +33,3 @@ class CaffeConverter : public Converter { } // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONVERTER_H_ - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.cc index ec94c2f432..d1e054ef0c 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.cc @@ -19,13 +19,11 @@ namespace mindspore { namespace lite { -STATUS CaffeConvolutionParser::ParseGroupConvolution(schema::CNodeT *op, - schema::Conv2DT *attr) { +STATUS CaffeConvolutionParser::ParseGroupConvolution(schema::CNodeT *op, schema::Conv2DT *attr) { if (attr->group == 1) { return RET_OK; } - std::unique_ptr<schema::DepthwiseConv2DT> depthwiseConv2DParam - = std::make_unique<schema::DepthwiseConv2DT>(); + std::unique_ptr<schema::DepthwiseConv2DT> depthwiseConv2DParam = std::make_unique<schema::DepthwiseConv2DT>(); if (depthwiseConv2DParam == nullptr) { MS_LOG(ERROR) << "new op failed"; return RET_ERROR; @@ -53,10 +51,8 @@ STATUS CaffeConvolutionParser::ParseGroupConvolution(schema::CNodeT *op, return
RET_OK; } -STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, - std::vector *weightVec) { +STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, + schema::CNodeT *op, std::vector *weightVec) { MS_LOG(DEBUG) << "parse CaffeConvolutionParser"; if (op == nullptr) { @@ -83,7 +79,7 @@ STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto, std::vector pad(4, 0); auto status = convParser.ParsePads(convParam, &pad); if (status != RET_OK) { - MS_LOG(ERROR) << "ParsePads for " << proto.name().c_str() <<" failed"; + MS_LOG(ERROR) << "ParsePads for " << proto.name().c_str() << " failed"; return RET_ERROR; } attr->padUp = pad[0]; @@ -159,4 +155,3 @@ STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto, CaffeNodeRegistrar g_caffeConvolutionParser("Convolution", new CaffeConvolutionParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.h index 7baf194c97..7b7f0f9bc6 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.h @@ -28,17 +28,13 @@ class CaffeConvolutionParser : public CaffeNodeParser { public: CaffeConvolutionParser() : CaffeNodeParser("convolution") {} - STATUS Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, + STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; private: - STATUS ParseGroupConvolution(schema::CNodeT *op, - schema::Conv2DT *attr); + STATUS ParseGroupConvolution(schema::CNodeT *op, schema::Conv2DT *attr); }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONVOLUTION_PARSER_H_ - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.cc index e5a0cbefdb..5d47c37e49 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.cc @@ -21,10 +21,8 @@ const int32_t CROP_AXIS = 2; namespace mindspore { namespace lite { -STATUS CaffeCropParser::Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, - std::vector *weightVec) { +STATUS CaffeCropParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, + schema::CNodeT *op, std::vector *weightVec) { MS_LOG(DEBUG) << "parse CaffeCropParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -75,4 +73,3 @@ STATUS CaffeCropParser::Parse(const caffe::LayerParameter &proto, CaffeNodeRegistrar g_caffeCropParser("Crop", new CaffeCropParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.h index d64f8534f6..fef8977b69 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.h @@ -27,13 +27,10 @@ class CaffeCropParser : public CaffeNodeParser { public: CaffeCropParser() : CaffeNodeParser("crop") {} - STATUS Parse(const caffe::LayerParameter 
&proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, + STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CROP_PARSER_H_ - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.cc index d46253b3f1..c64d9d476d 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.cc @@ -19,14 +19,12 @@ namespace mindspore { namespace lite { -STATUS CaffeDeconvolutionParser::ParseGroupDeconvolution(schema::CNodeT *op, - schema::DeConv2DT *attr) { +STATUS CaffeDeconvolutionParser::ParseGroupDeconvolution(schema::CNodeT *op, schema::DeConv2DT *attr) { if (attr->group == 1) { return RET_OK; } - std::unique_ptr<schema::DeDepthwiseConv2DT> deDepthwiseConv2DParam - = std::make_unique<schema::DeDepthwiseConv2DT>(); + std::unique_ptr<schema::DeDepthwiseConv2DT> deDepthwiseConv2DParam = std::make_unique<schema::DeDepthwiseConv2DT>(); if (deDepthwiseConv2DParam == nullptr) { MS_LOG(ERROR) << "new op failed"; return RET_ERROR; @@ -53,10 +51,8 @@ STATUS CaffeDeconvolutionParser::ParseGroupDeconvolution(schema::CNodeT *op, return RET_OK; } -STATUS CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, - std::vector<schema::TensorT *> *weightVec) { +STATUS CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, + schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) { MS_LOG(DEBUG) << "parse CaffeDeconvolutionParser"; if (op == nullptr) { @@ -71,7 +67,7 @@ STATUS CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &proto, std::unique_ptr<schema::DeConv2DT> attr(new (std::nothrow) schema::DeConv2DT()); - attr->format = schema::Format_NCHW; + attr->format = schema::Format::Format_NCHW; const caffe::ConvolutionParameter convParam = proto.convolution_param(); CaffeConvBaseParser convParser; @@ -158,4 +154,3 @@ STATUS CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &proto, CaffeNodeRegistrar g_caffeDeconvolutionParser("Deconvolution", new CaffeDeconvolutionParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.h index fea07878b9..dfd0563144 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.h @@ -28,17 +28,13 @@ class CaffeDeconvolutionParser : public CaffeNodeParser { public: CaffeDeconvolutionParser() : CaffeNodeParser("deconvolution") {} - STATUS Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, + STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) override; private: - STATUS ParseGroupDeconvolution(schema::CNodeT *op, - schema::DeConv2DT *attr); + STATUS ParseGroupDeconvolution(schema::CNodeT *op, schema::DeConv2DT *attr); }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_DECONVOLUTION_PARSER_H_ - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.cc index 
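ParseGroupConvolution and ParseGroupDeconvolution (reflowed above) share one idea: when group != 1, the node is rewritten to the depthwise op and the Conv2DT/DeConv2DT attributes are copied field by field into the depthwise attribute struct. An abridged sketch of that hand-off (field list shortened; names follow the schema types in the diff):

    #include <memory>

    std::unique_ptr<schema::DepthwiseConv2DT> ToDepthwiseSketch(const schema::Conv2DT &conv) {
      auto dw = std::make_unique<schema::DepthwiseConv2DT>();
      dw->format = conv.format;  // layout carried over unchanged
      dw->kernelW = conv.kernelW;
      dw->kernelH = conv.kernelH;
      dw->strideW = conv.strideW;
      dw->strideH = conv.strideH;
      dw->padUp = conv.padUp;
      // ... remaining pad/dilation/activation fields are copied the same way,
      // after which the op's primitive value switches to DepthwiseConv2D.
      return dw;
    }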
37689206bc..99fb5d148e 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.cc @@ -23,10 +23,8 @@ const float ELTWISE_SUM_COEFF_EPSILON = 1e-5; namespace mindspore { namespace lite { -STATUS CaffeEltwiseParser::Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, - std::vector *weightVec) { +STATUS CaffeEltwiseParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, + schema::CNodeT *op, std::vector *weightVec) { MS_LOG(DEBUG) << "parse CaffeEltwiseParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.h index 8d0d2aed7c..9e35535579 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.h @@ -27,13 +27,10 @@ class CaffeEltwiseParser : public CaffeNodeParser { public: CaffeEltwiseParser() : CaffeNodeParser("eltwise") {} - STATUS Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, + STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_ELTWISE_PARSER_H_ - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_exp_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_exp_parser.cc index e39d400f4b..b6bf0425a9 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_exp_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_exp_parser.cc @@ -20,10 +20,8 @@ namespace mindspore { namespace lite { -STATUS CaffeExpParser::Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, - std::vector *weightVec) { +STATUS CaffeExpParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, + schema::CNodeT *op, std::vector *weightVec) { MS_LOG(DEBUG) << "parse ExpParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -66,4 +64,3 @@ STATUS CaffeExpParser::Parse(const caffe::LayerParameter &proto, CaffeNodeRegistrar g_caffeExpParser("Exp", new CaffeExpParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_exp_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_exp_parser.h index ac6a12cfee..36c7d94799 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_exp_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_exp_parser.h @@ -27,9 +27,7 @@ class CaffeExpParser : public CaffeNodeParser { public: CaffeExpParser() : CaffeNodeParser("exp") {} - STATUS Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, + STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; }; } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_flatten_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_flatten_parser.cc index 4eb0cb78bd..517588120b 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_flatten_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_flatten_parser.cc @@ 
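Every parser file touched here ends with a one-line registration such as CaffeNodeRegistrar g_caffeExpParser("Exp", new CaffeExpParser()) above. The mechanism is static registration: a global registrar object inserts the parser into a process-wide table during static initialization, and the model parser later looks parsers up by layer type string. A compact sketch (the map type and accessor are assumptions; the real table lives in CaffeNodeParserRegistry):

    #include <map>
    #include <string>

    class CaffeNodeParser;  // as declared in caffe_node_parser.h

    std::map<std::string, CaffeNodeParser *> &ParserTable() {
      static std::map<std::string, CaffeNodeParser *> table;  // constructed on first use
      return table;
    }

    struct RegistrarSketch {
      RegistrarSketch(const std::string &layer_type, CaffeNodeParser *parser) {
        ParserTable()[layer_type] = parser;  // keyed by the Caffe layer type
      }
    };

Routing the table through a function-local static sidesteps the static initialization order problem: the map is guaranteed to exist before the first registrar runs, regardless of translation-unit link order.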
-19,10 +19,8 @@ namespace mindspore { namespace lite { -STATUS CaffeFlattenParser::Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, - std::vector *weightVec) { +STATUS CaffeFlattenParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, + schema::CNodeT *op, std::vector *weightVec) { MS_LOG(DEBUG) << "parse CaffeFlattenParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_flatten_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_flatten_parser.h index 333caaf6f3..60fbbd6449 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_flatten_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_flatten_parser.h @@ -27,9 +27,7 @@ class CaffeFlattenParser : public CaffeNodeParser { public: CaffeFlattenParser() : CaffeNodeParser("flatten") {} - STATUS Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, + STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; }; } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.cc index 2ba0588e31..cfdf0831fc 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.cc @@ -19,10 +19,8 @@ namespace mindspore { namespace lite { -STATUS CaffeInnerProductParser::Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, - std::vector *weightVec) { +STATUS CaffeInnerProductParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, + schema::CNodeT *op, std::vector *weightVec) { MS_LOG(DEBUG) << "parse CaffeInnerProductParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -92,4 +90,3 @@ STATUS CaffeInnerProductParser::Parse(const caffe::LayerParameter &proto, CaffeNodeRegistrar g_caffeInnerProductParser("InnerProduct", new CaffeInnerProductParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.h index c4ed3e7fee..9d58b13d11 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.h @@ -27,13 +27,10 @@ class CaffeInnerProductParser : public CaffeNodeParser { public: CaffeInnerProductParser() : CaffeNodeParser("innerproduct") {} - STATUS Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, + STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_INNERPRODUCT_PARSER_H_ - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_inspector.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_inspector.cc index 77652fdfbf..7168e5714c 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_inspector.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_inspector.cc @@ -80,4 +80,3 @@ STATUS CaffeInspector::SetTopsAndBottoms() { } } // 
namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_inspector.h b/mindspore/lite/tools/converter/parser/caffe/caffe_inspector.h index a6083b80b4..0c2ee1dbdf 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_inspector.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_inspector.h @@ -53,4 +53,3 @@ using CaffeInspectorPtr = std::shared_ptr<CaffeInspector>; } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_INSPECTOR_H_ - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.cc index e7bb9aaeed..87bdcc1ba3 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.cc @@ -19,10 +19,8 @@ namespace mindspore { namespace lite { -STATUS CaffeInterpParser::Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, - std::vector<schema::TensorT *> *weightVec) { +STATUS CaffeInterpParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, + schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) { MS_LOG(DEBUG) << "parse CaffeInterpParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -70,4 +68,3 @@ STATUS CaffeInterpParser::Parse(const caffe::LayerParameter &proto, CaffeNodeRegistrar g_caffeInterpParser("Interp", new CaffeInterpParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.h index 18ab4e4571..df00743ef6 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.h @@ -27,13 +27,10 @@ class CaffeInterpParser : public CaffeNodeParser { public: CaffeInterpParser() : CaffeNodeParser("Interp") {} - STATUS Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, + STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_INTERP_PARSER_H_ - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_model_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_model_parser.cc index 67a1d9cf84..549167cf7a 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_model_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_model_parser.cc @@ -177,7 +177,7 @@ STATUS CaffeModelParser::ParseLayer(const caffe::NetParameter &proto, const caff for (int j = 0; j < layer.input_param().shape(0).dim_size(); j++) { msTensor->dims.push_back(layer.input_param().shape(0).dim(j)); } - msTensor->nodeType = schema::NodeType_ValueNode; + msTensor->nodeType = schema::NodeType::NodeType_ValueNode; msTensor->refCount = 1; msTensor->dataType = kNumberTypeFloat32; tensorCache->AddTensor(layer.top(0), msTensor.release(), GRAPH_INPUT); @@ -245,7 +245,7 @@ STATUS CaffeModelParser::GetModelInput(const caffe::NetParameter &proto, TensorC for (int j = 0; j < proto.input_dim_size(); j++) { msTensor->dims.push_back(proto.input_dim(j)); } - msTensor->refCount = schema::NodeType_ValueNode; + msTensor->refCount = schema::NodeType::NodeType_ValueNode; msTensor->dataType = kNumberTypeFloat32; 
tensorCache->AddTensor(proto.input(i), msTensor.release(), GRAPH_INPUT); } @@ -256,7 +256,7 @@ STATUS CaffeModelParser::GetModelInput(const caffe::NetParameter &proto, TensorC for (int j = 0; j < shape.dim_size(); j++) { msTensor->dims.push_back(shape.dim(j)); } - msTensor->refCount = schema::NodeType_ValueNode; + msTensor->refCount = schema::NodeType::NodeType_ValueNode; msTensor->dataType = kNumberTypeFloat32; tensorCache->AddTensor(proto.input(i), msTensor.release(), GRAPH_INPUT); } diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_model_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_model_parser.h index 84b318b9c5..4b69a363bb 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_model_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_model_parser.h @@ -35,7 +35,7 @@ class CaffeModelParser : public ModelParser { virtual ~CaffeModelParser(); schema::MetaGraphT *ParseToFb(const std::string &modelFile, const std::string &weightFile, - const QuantType &quantType = QuantType_QUANT_NONE) override; + const QuantType &quantType = QuantType_QUANT_NONE) override; private: STATUS SetOpInputIdx(const caffe::LayerParameter &layer, schema::CNodeT *op, TensorCache *tensorCache); @@ -46,8 +46,7 @@ class CaffeModelParser : public ModelParser { STATUS SetAllTensors(const TensorCache &tensorCache, schema::MetaGraphT *subGraphDef); - STATUS SetGraphTensorIndex(const caffe::NetParameter &proto, - TensorCache *tensorCache, + STATUS SetGraphTensorIndex(const caffe::NetParameter &proto, TensorCache *tensorCache, schema::MetaGraphT *subGraphDef); STATUS ParseLayer(const caffe::NetParameter &proto, const caffe::NetParameter &weight, TensorCache *tensorCache, @@ -63,4 +62,3 @@ class CaffeModelParser { } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_MODEL_PARSER_H_ - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.cc index 9cc92cb959..c9c5d15882 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.cc @@ -23,12 +23,12 @@ namespace mindspore { namespace lite { schema::TensorT *ConvertWeight(const caffe::BlobProto &proto) { std::unique_ptr<schema::TensorT> weight = std::make_unique<schema::TensorT>(); - weight->format = schema::Format_NCHW; + weight->format = schema::Format::Format_NCHW; std::vector<int32_t> shapeVec; ConvertShape(proto, &shapeVec); weight->dims = shapeVec; weight->dataType = kNumberTypeFloat32; - weight->nodeType = schema::NodeType_ValueNode; + weight->nodeType = schema::NodeType::NodeType_ValueNode; // cal Weight num int count = 1; @@ -62,8 +62,7 @@ schema::TensorT *ConvertWeight(const caffe::BlobProto &proto) { buf[i] = proto.double_data(i); } weight->data.resize(count * sizeof(float)); - ::memcpy_s(weight->data.data(), count * sizeof(float), - reinterpret_cast<uint8_t *>(buf.get()), + ::memcpy_s(weight->data.data(), count * sizeof(float), reinterpret_cast<uint8_t *>(buf.get()), count * sizeof(float)); } else { // datatype float @@ -81,8 +80,7 @@ schema::TensorT *ConvertWeight(const caffe::BlobProto &proto) { return weight.release(); } -STATUS ConvertShape(const caffe::BlobProto &proto, - std::vector<int32_t> *shape) { +STATUS ConvertShape(const caffe::BlobProto &proto, std::vector<int32_t> *shape) { shape->clear(); if (proto.has_num() || proto.has_channels() || proto.has_height() || proto.has_width()) { @@ -99,4 +97,3 @@ STATUS ConvertShape(const caffe::BlobProto &proto, } } // namespace
lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h index f3320f77f0..7a8391ae69 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h @@ -34,9 +34,7 @@ class CaffeNodeParser { virtual ~CaffeNodeParser() {} - virtual int Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, + virtual int Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) = 0; protected: @@ -45,10 +43,8 @@ class CaffeNodeParser { schema::TensorT *ConvertWeight(const caffe::BlobProto &proto); -STATUS ConvertShape(const caffe::BlobProto &proto, - std::vector<int32_t> *shape); +STATUS ConvertShape(const caffe::BlobProto &proto, std::vector<int32_t> *shape); } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_NODE_PARSER_H_ - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser_registry.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser_registry.cc index 550ab3c917..57c978c7ba 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser_registry.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser_registry.cc @@ -43,4 +43,3 @@ CaffeNodeParser *CaffeNodeParserRegistry::GetNodeParser(const std::string &name) } } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser_registry.h b/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser_registry.h index 000ddff4ce..9c69821e64 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser_registry.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser_registry.h @@ -45,4 +45,3 @@ class CaffeNodeRegistrar { } // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_NODE_PARSER_REGISTRY_H_ - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.cc index 2e5ec46e91..cc74fe6942 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.cc @@ -19,10 +19,8 @@ namespace mindspore { namespace lite { -STATUS CaffePermuteParser::Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, - std::vector<schema::TensorT *> *weightVec) { +STATUS CaffePermuteParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, + schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) { MS_LOG(DEBUG) << "parse CaffePermuteParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -57,4 +55,3 @@ STATUS CaffePermuteParser::Parse(const caffe::LayerParameter &proto, CaffeNodeRegistrar g_caffePermuteParser("Permute", new CaffePermuteParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.h index 28680d31f2..027caad310 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.h @@ -27,13 +27,10 @@ class CaffePermuteParser : public CaffeNodeParser { public: CaffePermuteParser() : 
CaffeNodeParser("Permute") {} - STATUS Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, + STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_PERMUTE_PARSER_H_ - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.cc index 638931ca65..f570bbb6d0 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.cc @@ -22,10 +22,8 @@ const uint32_t INNERPRODUCT_PAD_DEFAULT_VALUE = 0; namespace mindspore { namespace lite { -STATUS CaffePoolingParser::Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, - std::vector *weightVec) { +STATUS CaffePoolingParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, + schema::CNodeT *op, std::vector *weightVec) { MS_LOG(DEBUG) << "parse CaffePoolingParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -43,7 +41,7 @@ STATUS CaffePoolingParser::Parse(const caffe::LayerParameter &proto, return RET_NULL_PTR; } - attr->format = schema::Format_NCHW; + attr->format = schema::Format::Format_NCHW; const caffe::PoolingParameter poolingParam = proto.pooling_param(); auto status = ParsePads(poolingParam, attr.get()); @@ -89,8 +87,7 @@ STATUS CaffePoolingParser::Parse(const caffe::LayerParameter &proto, return RET_OK; } -STATUS CaffePoolingParser::ParsePads(const caffe::PoolingParameter &poolingParam, - schema::PoolingT *attr) { +STATUS CaffePoolingParser::ParsePads(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr) { if (poolingParam.has_pad_h() && poolingParam.has_pad_w()) { if (poolingParam.has_pad()) { MS_LOG(ERROR) << "Either pad or pad_h/w should be specified; not both"; @@ -109,8 +106,7 @@ STATUS CaffePoolingParser::ParsePads(const caffe::PoolingParameter &poolingParam return RET_OK; } -STATUS CaffePoolingParser::ParseStrides(const caffe::PoolingParameter &poolingParam, - schema::PoolingT *attr) { +STATUS CaffePoolingParser::ParseStrides(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr) { if (poolingParam.has_stride_h() && poolingParam.has_stride_w()) { if (poolingParam.has_stride()) { MS_LOG(ERROR) << "Either stride or stride_h/w should be specified; not both"; @@ -125,8 +121,7 @@ STATUS CaffePoolingParser::ParseStrides(const caffe::PoolingParameter &poolingPa return RET_OK; } -STATUS CaffePoolingParser::ParseWindows(const caffe::PoolingParameter &poolingParam, - schema::PoolingT *attr) { +STATUS CaffePoolingParser::ParseWindows(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr) { if (poolingParam.has_global_pooling() && poolingParam.global_pooling()) { if (poolingParam.has_kernel_size() || poolingParam.has_kernel_h() || poolingParam.has_kernel_w()) { MS_LOG(ERROR) << "With Global_pooling: true Filter size cannot specified"; @@ -156,8 +151,7 @@ STATUS CaffePoolingParser::ParseWindows(const caffe::PoolingParameter &poolingPa return RET_OK; } -STATUS CaffePoolingParser::ParsePoolingMode(const caffe::PoolingParameter &poolingParam, - schema::PoolingT *attr) { +STATUS CaffePoolingParser::ParsePoolingMode(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr) { if 
(poolingParam.pool() == caffe::PoolingParameter::MAX) { attr->poolingMode = schema::PoolMode_MAX_POOLING; } else if (poolingParam.pool() == caffe::PoolingParameter::AVE) { @@ -172,4 +166,3 @@ STATUS CaffePoolingParser::ParsePoolingMode(const caffe::PoolingParameter &pooli CaffeNodeRegistrar g_caffePoolingParser("Pooling", new CaffePoolingParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.h index c1b778d38e..8e88230ef3 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.h @@ -27,25 +27,18 @@ class CaffePoolingParser : public CaffeNodeParser { public: CaffePoolingParser() : CaffeNodeParser("pooling") {} - STATUS Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, + STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; - STATUS ParsePads(const caffe::PoolingParameter &poolingParam, - schema::PoolingT *attr); + STATUS ParsePads(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr); - STATUS ParseStrides(const caffe::PoolingParameter &poolingParam, - schema::PoolingT *attr); + STATUS ParseStrides(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr); - STATUS ParseWindows(const caffe::PoolingParameter &poolingParam, - schema::PoolingT *attr); + STATUS ParseWindows(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr); - STATUS ParsePoolingMode(const caffe::PoolingParameter &poolingParam, - schema::PoolingT *attr); + STATUS ParsePoolingMode(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr); }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_POOLING_PARSER_H_ - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.cc index da4ce1127b..6efc5cecb8 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.cc @@ -24,10 +24,8 @@ static const float CAFFE_POWER_DEFAULT_SHIFT = 0.0; namespace mindspore { namespace lite { -STATUS CaffePowerParser::Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, - std::vector *weightVec) { +STATUS CaffePowerParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, + schema::CNodeT *op, std::vector *weightVec) { MS_LOG(DEBUG) << "parse CaffePowerParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -45,7 +43,6 @@ STATUS CaffePowerParser::Parse(const caffe::LayerParameter &proto, return RET_NULL_PTR; } - const caffe::PowerParameter powerParam = proto.power_param(); if (proto.has_power_param()) { attr->power = powerParam.has_power() ? 
powerParam.power() : CAFFE_POWER_DEFAULT_POWER; @@ -66,4 +63,3 @@ STATUS CaffePowerParser::Parse(const caffe::LayerParameter &proto, CaffeNodeRegistrar g_caffePowerParser("Power", new CaffePowerParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.h index e5c8a6ed8d..52335c2fac 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.h @@ -25,15 +25,12 @@ namespace mindspore { namespace lite { class CaffePowerParser : public CaffeNodeParser { public: - CaffePowerParser() : CaffeNodeParser("power") {} + CaffePowerParser() : CaffeNodeParser("power") {} - STATUS Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, - std::vector *weightVec) override; + STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, + std::vector *weightVec) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_POWER_PARSER_H_ - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.cc index 71788fb5ab..16757783c6 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.cc @@ -19,10 +19,8 @@ namespace mindspore { namespace lite { -STATUS CaffePReluParser::Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, - std::vector *weightVec) { +STATUS CaffePReluParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, + schema::CNodeT *op, std::vector *weightVec) { MS_LOG(DEBUG) << "parse CaffePReluParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -68,4 +66,3 @@ STATUS CaffePReluParser::Parse(const caffe::LayerParameter &proto, CaffeNodeRegistrar g_caffePReluParser("PReLU", new CaffePReluParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.h index 0275559497..b5ff499b44 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.h @@ -25,15 +25,12 @@ namespace mindspore { namespace lite { class CaffePReluParser : public CaffeNodeParser { public: - CaffePReluParser() : CaffeNodeParser("pRelu") {} + CaffePReluParser() : CaffeNodeParser("pRelu") {} - STATUS Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, - std::vector *weightVec) override; + STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, + std::vector *weightVec) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_PRELU_PARSER_H_ - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_relu6_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_relu6_parser.cc index cbb4555ee6..94b51335f7 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_relu6_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_relu6_parser.cc @@ -19,10 +19,8 @@ namespace mindspore { namespace lite { 
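The Power parser above (note the dropped blank line in its hunk) reads each field through the has_x() ? x() : default pattern; Caffe's documented defaults are power = 1.0, scale = 1.0, shift = 0.0. Distilled into a sketch (the struct is illustrative; the real code writes into schema::PowerT):

    struct PowerAttrSketch {
      float power = 1.0f;  // CAFFE_POWER_DEFAULT_POWER
      float scale = 1.0f;  // CAFFE_POWER_DEFAULT_SCALE
      float shift = 0.0f;  // CAFFE_POWER_DEFAULT_SHIFT
    };

    PowerAttrSketch ReadPowerParam(const caffe::PowerParameter &p) {
      PowerAttrSketch a;  // starts at the defaults
      if (p.has_power()) a.power = p.power();
      if (p.has_scale()) a.scale = p.scale();
      if (p.has_shift()) a.shift = p.shift();
      return a;
    }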
-STATUS CaffeRelu6Parser::Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, - std::vector *weightVec) { +STATUS CaffeRelu6Parser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, + schema::CNodeT *op, std::vector *weightVec) { MS_LOG(DEBUG) << "parse CaffeRelu6Parser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_relu6_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_relu6_parser.h index a385f6e5a4..69dfaf3281 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_relu6_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_relu6_parser.h @@ -26,9 +26,7 @@ class CaffeRelu6Parser : public CaffeNodeParser { public: CaffeRelu6Parser() : CaffeNodeParser("relu6") {} - STATUS Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, + STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; }; } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_relu_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_relu_parser.cc index 4cc16c507c..f94308402f 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_relu_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_relu_parser.cc @@ -19,10 +19,8 @@ namespace mindspore { namespace lite { -STATUS CaffeReluParser::Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, - std::vector *weightVec) { +STATUS CaffeReluParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, + schema::CNodeT *op, std::vector *weightVec) { MS_LOG(DEBUG) << "parse CaffeReluParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -58,4 +56,3 @@ STATUS CaffeReluParser::Parse(const caffe::LayerParameter &proto, CaffeNodeRegistrar g_caffeReluParser("ReLU", new CaffeReluParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_relu_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_relu_parser.h index 06ef95372c..8651aa6b86 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_relu_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_relu_parser.h @@ -27,13 +27,10 @@ class CaffeReluParser : public CaffeNodeParser { public: CaffeReluParser() : CaffeNodeParser("relu") {} - STATUS Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, + STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_RELU_PARSER_H_ - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.cc index 38261ca9c3..44c6eb0247 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.cc @@ -19,10 +19,8 @@ namespace mindspore { namespace lite { -STATUS CaffeReshapeParser::Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, - std::vector *weightVec) { +STATUS CaffeReshapeParser::Parse(const caffe::LayerParameter 
&proto, const caffe::LayerParameter &weight, + schema::CNodeT *op, std::vector *weightVec) { MS_LOG(DEBUG) << "parse CaffeReshapeParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -40,7 +38,7 @@ STATUS CaffeReshapeParser::Parse(const caffe::LayerParameter &proto, return RET_NULL_PTR; } - attr->format = schema::Format_NCHW; + attr->format = schema::Format::Format_NCHW; const caffe::ReshapeParameter reshapeParam = proto.reshape_param(); if (!reshapeParam.has_shape()) { @@ -62,4 +60,3 @@ STATUS CaffeReshapeParser::Parse(const caffe::LayerParameter &proto, CaffeNodeRegistrar g_caffeReshapeParser("Reshape", new CaffeReshapeParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.h index 05fd515756..2f51e0bef0 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.h @@ -27,13 +27,10 @@ class CaffeReshapeParser : public CaffeNodeParser { public: CaffeReshapeParser() : CaffeNodeParser("reshape") {} - STATUS Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, + STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_RESHAPE_PARSER_H_ - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.cc index b6b09c78fc..9c58531c9f 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.cc @@ -22,10 +22,8 @@ const int32_t DIM_DEFAULT_SIZE = 4; namespace mindspore { namespace lite { -STATUS CaffeScaleParser::Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, - std::vector *weightVec) { +STATUS CaffeScaleParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, + schema::CNodeT *op, std::vector *weightVec) { MS_LOG(DEBUG) << "parse CaffeScaleParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.h index 667ad4136a..93b51171ef 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.h @@ -27,16 +27,12 @@ class CaffeScaleParser : public CaffeNodeParser { public: CaffeScaleParser() : CaffeNodeParser("scale") {} - STATUS Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, + STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; - STATUS GetAxisIndex(const int32_t &axis, - uint32_t *axis_index); + STATUS GetAxisIndex(const int32_t &axis, uint32_t *axis_index); }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_SCALE_PARSER_H_ - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_sigmoid_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_sigmoid_parser.cc index 3c72db93dc..da70b3a796 100644 --- 
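A pattern running through the whole patch: unqualified enumerators (schema::Format_NCHW, schema::NodeType_ValueNode) become enum-qualified (schema::Format::Format_NCHW). The qualified spelling is mandatory if the generated enums became scoped (enum class), and C++11 also permits it for plain enums, so the new call sites compile either way. A standalone mock illustrating the difference (not the generated schema header):

    namespace schema_mock {
    enum class Format { Format_NCHW, Format_NHWC, Format_KHWC, Format_NUM_OF_FORMAT };
    }  // namespace schema_mock

    // schema_mock::Format bad = schema_mock::Format_NCHW;      // ill-formed: scoped enumerators
    //                                                          // are not visible unqualified
    schema_mock::Format ok = schema_mock::Format::Format_NCHW;  // the qualified form this patch adopts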
a/mindspore/lite/tools/converter/parser/caffe/caffe_sigmoid_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_sigmoid_parser.cc @@ -19,10 +19,8 @@ namespace mindspore { namespace lite { -STATUS CaffeSigmoidParser::Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, - std::vector *weightVec) { +STATUS CaffeSigmoidParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, + schema::CNodeT *op, std::vector *weightVec) { MS_LOG(DEBUG) << "parse CaffeSigmoidParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -51,4 +49,3 @@ STATUS CaffeSigmoidParser::Parse(const caffe::LayerParameter &proto, CaffeNodeRegistrar g_caffeSigmoidParser("Sigmoid", new CaffeSigmoidParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_sigmoid_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_sigmoid_parser.h index 976d154fdc..ee8a280ff4 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_sigmoid_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_sigmoid_parser.h @@ -27,13 +27,10 @@ class CaffeSigmoidParser : public CaffeNodeParser { public: CaffeSigmoidParser() : CaffeNodeParser("sigmoid") {} - STATUS Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, + STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_SIGMOID_PARSER_H_ - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.cc index 9839b25349..0e10625179 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.cc @@ -21,10 +21,8 @@ static const int32_t CAFFE_SOFTMAX_DEFAULT_AXIS = 1; namespace mindspore { namespace lite { -STATUS CaffeSoftmaxParser::Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, - std::vector *weightVec) { +STATUS CaffeSoftmaxParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, + schema::CNodeT *op, std::vector *weightVec) { MS_LOG(DEBUG) << "parse CaffeSoftmaxParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -60,4 +58,3 @@ STATUS CaffeSoftmaxParser::Parse(const caffe::LayerParameter &proto, CaffeNodeRegistrar g_caffeSoftmaxParser("Softmax", new CaffeSoftmaxParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.h index ad59d5291d..d440acc96a 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.h @@ -27,13 +27,10 @@ class CaffeSoftmaxParser : public CaffeNodeParser { public: CaffeSoftmaxParser() : CaffeNodeParser("softmax") {} - STATUS Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, + STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; }; } // namespace lite } // namespace mindspore #endif // 
MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_SOFTMAX_PARSER_H_ - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_tanh_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_tanh_parser.cc index 2dc46593ff..e88121cac4 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_tanh_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_tanh_parser.cc @@ -20,10 +20,8 @@ namespace mindspore { namespace lite { -STATUS CaffeTanhParser::Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, - std::vector *weightVec) { +STATUS CaffeTanhParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, + schema::CNodeT *op, std::vector *weightVec) { MS_LOG(DEBUG) << "parse CaffeTanhParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -51,4 +49,3 @@ STATUS CaffeTanhParser::Parse(const caffe::LayerParameter &proto, CaffeNodeRegistrar g_caffeTanhParser("TanH", new CaffeTanhParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_tanh_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_tanh_parser.h index bcd8366f89..ac24d3e92c 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_tanh_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_tanh_parser.h @@ -27,9 +27,7 @@ class CaffeTanhParser : public CaffeNodeParser { public: CaffeTanhParser() : CaffeNodeParser("tanh") {} - STATUS Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, + STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; }; } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.cc index 9ac361118b..7efe74e9fb 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.cc @@ -20,10 +20,8 @@ namespace mindspore { namespace lite { -STATUS CaffeTileParser::Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, - std::vector *weightVec) { +STATUS CaffeTileParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, + schema::CNodeT *op, std::vector *weightVec) { MS_LOG(DEBUG) << "parse CaffeTileParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -68,4 +66,3 @@ STATUS CaffeTileParser::Parse(const caffe::LayerParameter &proto, CaffeNodeRegistrar g_caffeTileParser("Tile", new CaffeTileParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.h index bc89816524..6126865aab 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.h @@ -27,9 +27,7 @@ class CaffeTileParser : public CaffeNodeParser { public: CaffeTileParser() : CaffeNodeParser("tile") {} - STATUS Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, + STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; }; } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_argmax_parser.cc 
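The ONNX parsers that follow all take the (onnx_graph, onnx_node, op) triple and pull operator attributes out of the NodeProto by name. A minimal sketch of that lookup using the standard ONNX protobuf accessors (the fallback argument is an illustrative addition, not part of the parsers' API):

    #include <cstdint>
    #include <string>

    int64_t GetIntAttr(const onnx::NodeProto &onnx_node, const std::string &name, int64_t fallback) {
      for (const auto &attr : onnx_node.attribute()) {
        if (attr.name() == name) {
          return attr.i();  // INT-typed attribute, e.g. ArgMax's "axis"
        }
      }
      return fallback;  // attribute absent: fall back to the operator's default
    }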
b/mindspore/lite/tools/converter/parser/onnx/onnx_argmax_parser.cc index 95c84cc941..65a3ce45d1 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_argmax_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_argmax_parser.cc @@ -19,10 +19,9 @@ namespace mindspore { namespace lite { -STATUS OnnxArgMaxParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, +STATUS OnnxArgMaxParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { - MS_LOG(DEBUG) << "onnx ArgMaxParser"; + MS_LOG(DEBUG) << "onnx ArgMaxParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -56,4 +55,3 @@ STATUS OnnxArgMaxParser::Parse(const onnx::GraphProto &onnx_graph, OnnxNodeRegistrar g_onnxArgMaxParser("ArgMax", new OnnxArgMaxParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_argmax_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_argmax_parser.h index 77e616593d..95c658d4c2 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_argmax_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_argmax_parser.h @@ -26,11 +26,8 @@ class OnnxArgMaxParser : public OnnxNodeParser { public: OnnxArgMaxParser() : OnnxNodeParser("ArgMax") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MMINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_ARGMAX_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_arithmetic_operation_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_arithmetic_operation_parser.h index 28f9a5d8f0..d2761083b5 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_arithmetic_operation_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_arithmetic_operation_parser.h @@ -168,4 +168,3 @@ class OnnxTanhParser : public OnnxNodeParser { } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_ARITHMETIC_OPREATION_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_batchnorm_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_batchnorm_parser.cc index 73650e1582..ea301e7f8b 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_batchnorm_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_batchnorm_parser.cc @@ -19,8 +19,7 @@ namespace mindspore { namespace lite { -STATUS OnnxBatchNormParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, +STATUS OnnxBatchNormParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx BatchNormParser"; if (op == nullptr) { diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_batchnorm_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_batchnorm_parser.h index 0328fb7627..983a5df7fb 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_batchnorm_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_batchnorm_parser.h @@ -26,11 +26,8 @@ class OnnxBatchNormParser : public OnnxNodeParser { public: OnnxBatchNormParser() : OnnxNodeParser("BatchNormalization") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS 
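The ONNX parsers get the same treatment: the three-argument Parse override is collapsed onto one or two lines throughout. A sketch of the resulting boilerplate; OnnxAbsParser is a hypothetical example, and only the signature, the null checks, and the registrar line mirror the patch:

class OnnxAbsParser : public OnnxNodeParser {
 public:
  OnnxAbsParser() : OnnxNodeParser("Abs") {}

  STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override {
    MS_LOG(DEBUG) << "onnx AbsParser";
    if (op == nullptr) {
      MS_LOG(ERROR) << "op is null";
      return RET_NULL_PTR;
    }
    if (op->primitive == nullptr) {
      MS_LOG(ERROR) << "op->primitive is null";
      return RET_NULL_PTR;
    }
    // Populate the op attribute from onnx_node here.
    return RET_OK;
  }
};

OnnxNodeRegistrar g_onnxAbsParser("Abs", new OnnxAbsParser());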
Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_BATCHNORM_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_biasadd_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_biasadd_parser.cc index c92e02151b..69ee22f8b7 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_biasadd_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_biasadd_parser.cc @@ -19,8 +19,7 @@ namespace mindspore { namespace lite { -STATUS OnnxBiasAddParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, +STATUS OnnxBiasAddParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx BiasAddParser"; if (op == nullptr) { @@ -50,4 +49,3 @@ STATUS OnnxBiasAddParser::Parse(const onnx::GraphProto &onnx_graph, OnnxNodeRegistrar g_onnxBiasAddParser("BiasAdd", new OnnxBiasAddParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_biasadd_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_biasadd_parser.h index 892802acdc..456120f549 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_biasadd_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_biasadd_parser.h @@ -26,11 +26,8 @@ class OnnxBiasAddParser : public OnnxNodeParser { public: OnnxBiasAddParser() : OnnxNodeParser("BiasAdd") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_BIASADD_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_cast_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_cast_parser.cc index 66423500f1..3d458e66e5 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_cast_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_cast_parser.cc @@ -19,9 +19,7 @@ namespace mindspore { namespace lite { -STATUS OnnxCastParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) { +STATUS OnnxCastParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx CastParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -54,4 +52,3 @@ STATUS OnnxCastParser::Parse(const onnx::GraphProto &onnx_graph, OnnxNodeRegistrar g_onnxCastParser("Cast", new OnnxCastParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_cast_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_cast_parser.h index 027fd0bdbe..035b3810e4 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_cast_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_cast_parser.h @@ -26,11 +26,8 @@ class OnnxCastParser : public OnnxNodeParser { public: OnnxCastParser() : OnnxNodeParser("Cast") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // 
MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_CAST_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_clip_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_clip_parser.cc index 1ac60d4330..b6016a8bcc 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_clip_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_clip_parser.cc @@ -19,9 +19,7 @@ namespace mindspore { namespace lite { -STATUS OnnxClipParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) { +STATUS OnnxClipParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx ClipParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_clip_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_clip_parser.h index 9f6e2eba6f..7238fe75b5 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_clip_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_clip_parser.h @@ -26,11 +26,8 @@ class OnnxClipParser : public OnnxNodeParser { public: OnnxClipParser() : OnnxNodeParser("Clip") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_CLIP_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_concat_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_concat_parser.cc index 539da26e62..f8148924ab 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_concat_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_concat_parser.cc @@ -19,8 +19,7 @@ namespace mindspore { namespace lite { -STATUS OnnxConcatParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, +STATUS OnnxConcatParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx ConcatParser"; if (op == nullptr) { @@ -54,4 +53,3 @@ STATUS OnnxConcatParser::Parse(const onnx::GraphProto &onnx_graph, OnnxNodeRegistrar g_onnxConcatParser("Concat", new OnnxConcatParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_concat_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_concat_parser.h index ca5a407cf1..10319154a1 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_concat_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_concat_parser.h @@ -26,11 +26,8 @@ class OnnxConcatParser : public OnnxNodeParser { public: OnnxConcatParser() : OnnxNodeParser("Concat") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_CONCAT_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_constant_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_constant_parser.cc index 06afd29f61..83348f3fe9 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_constant_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_constant_parser.cc @@ 
-19,8 +19,7 @@ namespace mindspore { namespace lite { -STATUS OnnxConstantParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, +STATUS OnnxConstantParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx ConstantParser"; if (op == nullptr) { @@ -47,4 +46,3 @@ STATUS OnnxConstantParser::Parse(const onnx::GraphProto &onnx_graph, OnnxNodeRegistrar g_onnxConstantParser("Constant", new OnnxConstantParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_constant_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_constant_parser.h index 43d84a7a6c..fb07013edb 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_constant_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_constant_parser.h @@ -26,11 +26,8 @@ class OnnxConstantParser : public OnnxNodeParser { public: OnnxConstantParser() : OnnxNodeParser("Constant") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_CONSTANT_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.cc index 4c1e4b22d7..fcb258fb2e 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.cc @@ -21,8 +21,7 @@ namespace mindspore { namespace lite { -bool OnnxConvParser::ParseGroupConvolution(const std::unique_ptr &attr, - schema::CNodeT *op) { +bool OnnxConvParser::ParseGroupConvolution(const std::unique_ptr &attr, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx DepthwiseConvParser"; if (attr == nullptr || attr->group != attr->channelIn) { return false; @@ -54,9 +53,7 @@ bool OnnxConvParser::ParseGroupConvolution(const std::unique_ptrstrideW = static_cast(onnx_node_attr.ints(1)); } else if (onnx_node_attr.name() == "order") { if (onnx_node_attr.s() == "NHWC") { - attr->format = schema::Format_NHWC; + attr->format = schema::Format::Format_NHWC; } else { MS_LOG(ERROR) << "Unsupported format: " << onnx_node_attr.s(); return RET_ERROR; @@ -160,7 +157,7 @@ STATUS OnnxConvParser::Parse(const onnx::GraphProto &onnx_graph, attr->channelOut = dims[0]; attr->channelIn = dims[3] * attr->group; } - attr->format = schema::Format_NCHW; + attr->format = schema::Format::Format_NCHW; attr->hasBias = onnx_node.input().size() == 3; if (onnx_node.op_type() == "ConvRelu" || onnx_node.op_type() == "Int8ConvRelu") { attr->activationType = schema::ActivationType_RELU; diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.h index 6fceb2dc3b..290df792b5 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.h @@ -27,15 +27,11 @@ class OnnxConvParser : public OnnxNodeParser { public: OnnxConvParser() : OnnxNodeParser("Conv") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; private: - bool 
ParseGroupConvolution(const std::unique_ptr &attr, - schema::CNodeT *op); + bool ParseGroupConvolution(const std::unique_ptr &attr, schema::CNodeT *op); }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_CONV_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_converter.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_converter.cc old mode 100755 new mode 100644 index f61e34630a..c25c778964 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_converter.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_converter.cc @@ -19,10 +19,7 @@ namespace mindspore { namespace lite { -OnnxConverter::OnnxConverter() { - modelParser = new OnnxModelParser(); -} +OnnxConverter::OnnxConverter() { modelParser = new OnnxModelParser(); } } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_converter.h b/mindspore/lite/tools/converter/parser/onnx/onnx_converter.h old mode 100755 new mode 100644 index ad8da5a2ab..a04a3e456c --- a/mindspore/lite/tools/converter/parser/onnx/onnx_converter.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_converter.h @@ -34,4 +34,3 @@ class OnnxConverter : public Converter { } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_CONVERTER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.cc index 44541d8dfd..940f7423c3 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.cc @@ -21,8 +21,7 @@ namespace mindspore { namespace lite { -bool OnnxDeConvParser::ParseGroupDeConvolution(const std::unique_ptr &attr, - schema::CNodeT *op) { +bool OnnxDeConvParser::ParseGroupDeConvolution(const std::unique_ptr &attr, schema::CNodeT *op) { if (attr == nullptr || attr->group != attr->channelOut) { return false; } @@ -53,8 +52,7 @@ bool OnnxDeConvParser::ParseGroupDeConvolution(const std::unique_ptrstrideH = static_cast(onnx_node_attr.ints(1)); } else if (onnx_node_attr.name() == "order") { if (onnx_node_attr.s() == "NHWC") { - attr->format = schema::Format_NHWC; + attr->format = schema::Format::Format_NHWC; } else { MS_LOG(ERROR) << "Unsupported format: " << onnx_node_attr.s().c_str(); return RET_ERROR; @@ -143,7 +141,7 @@ STATUS OnnxDeConvParser::Parse(const onnx::GraphProto &onnx_graph, attr->channelIn = weight_shape[0]; attr->channelOut = weight_shape[1] * attr->group; - attr->format = schema::Format_NCHW; + attr->format = schema::Format::Format_NCHW; attr->hasBias = onnx_node.input().size() == 3; if (attr->group != 1) { diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.h index 0c0730ab11..8525d803ff 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.h @@ -27,15 +27,11 @@ class OnnxDeConvParser : public OnnxNodeParser { public: OnnxDeConvParser() : OnnxNodeParser("DeConv") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; private: - bool ParseGroupDeConvolution(const std::unique_ptr &attr, - schema::CNodeT *op); + bool ParseGroupDeConvolution(const std::unique_ptr &attr, 
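Besides the reflow, the conv and deconv hunks qualify flatbuffers enum values with their enum type. A before/after sketch; the motivation (staying valid if the schema is regenerated with scoped enums) is an inference, not stated in the patch:

// Before: the unscoped spelling.
attr->format = schema::Format_NHWC;
// After: the value is qualified with its enum type, which also compiles when
// flatbuffers generates the enum as an enum class.
attr->format = schema::Format::Format_NHWC;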
schema::CNodeT *op); }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_DECONV_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_depth_to_space_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_depth_to_space_parser.cc index 0738060ffc..13b2d91726 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_depth_to_space_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_depth_to_space_parser.cc @@ -19,8 +19,7 @@ namespace mindspore { namespace lite { -STATUS OnnxDepthToSpaceParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, +STATUS OnnxDepthToSpaceParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx DepthToSpaceParser"; if (op == nullptr) { @@ -40,7 +39,7 @@ STATUS OnnxDepthToSpaceParser::Parse(const onnx::GraphProto &onnx_graph, } for (const auto &onnx_node_attr : onnx_node.attribute()) { - const auto& attribute_name = onnx_node_attr.name(); + const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "blocksize") { attr->blockSize = static_cast(onnx_node_attr.i()); } @@ -54,4 +53,3 @@ STATUS OnnxDepthToSpaceParser::Parse(const onnx::GraphProto &onnx_graph, OnnxNodeRegistrar g_onnxDepthToSpaceParser("DepthToSpace", new OnnxDepthToSpaceParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_depth_to_space_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_depth_to_space_parser.h index b176a123a1..ea850e0a72 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_depth_to_space_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_depth_to_space_parser.h @@ -26,11 +26,8 @@ class OnnxDepthToSpaceParser : public OnnxNodeParser { public: OnnxDepthToSpaceParser() : OnnxNodeParser("DepthToSpace") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_DEPTH_TO_SPACE_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_dropout_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_dropout_parser.cc index 820d957e18..7c0f593191 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_dropout_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_dropout_parser.cc @@ -19,8 +19,7 @@ namespace mindspore { namespace lite { -STATUS OnnxDropoutParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, +STATUS OnnxDropoutParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx DropoutParser"; if (op == nullptr) { @@ -54,4 +53,3 @@ STATUS OnnxDropoutParser::Parse(const onnx::GraphProto &onnx_graph, OnnxNodeRegistrar g_onnxDropoutParser("Dropout", new OnnxDropoutParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_dropout_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_dropout_parser.h index 454a8805e0..43e4f74a39 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_dropout_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_dropout_parser.h @@ -26,11 +26,8 @@ class OnnxDropoutParser : public 
OnnxNodeParser { public: OnnxDropoutParser() : OnnxNodeParser("Dropout") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_DROPOUT_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_elu_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_elu_parser.cc index 8a3723b1cd..b567a2950f 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_elu_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_elu_parser.cc @@ -19,9 +19,7 @@ namespace mindspore { namespace lite { -STATUS OnnxEluParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) { +STATUS OnnxEluParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx EluParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -40,7 +38,7 @@ STATUS OnnxEluParser::Parse(const onnx::GraphProto &onnx_graph, } for (const auto &onnx_node_attr : onnx_node.attribute()) { - const auto& attribute_name = onnx_node_attr.name(); + const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "alpha") { attr->alpha = onnx_node_attr.f(); } @@ -54,4 +52,3 @@ STATUS OnnxEluParser::Parse(const onnx::GraphProto &onnx_graph, OnnxNodeRegistrar g_onnxEluParser("Elu", new OnnxEluParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_elu_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_elu_parser.h index 76201660c0..5ccbf0b6d5 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_elu_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_elu_parser.h @@ -26,11 +26,8 @@ class OnnxEluParser : public OnnxNodeParser { public: OnnxEluParser() : OnnxNodeParser("Elu") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_ELU_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_expand_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_expand_parser.h index b1bb3fe777..19df2fe984 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_expand_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_expand_parser.h @@ -26,11 +26,8 @@ class OnnxExpandParser : public OnnxNodeParser { public: OnnxExpandParser() : OnnxNodeParser("Expand") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_EXPAND_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_flatten_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_flatten_parser.cc index cff8b530fe..c01c48ce5e 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_flatten_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_flatten_parser.cc @@ -19,8 +19,7 
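The Elu and DepthToSpace hunks show the attribute-dispatch loop most ONNX parsers use: iterate over onnx_node.attribute() and match on the attribute name. A combined sketch; folding the two parsers' loops into one is illustrative only:

for (const auto &onnx_node_attr : onnx_node.attribute()) {
  const auto &attribute_name = onnx_node_attr.name();
  if (attribute_name == "alpha") {
    attr->alpha = onnx_node_attr.f();  // float-valued ONNX attribute
  } else if (attribute_name == "blocksize") {
    attr->blockSize = static_cast<int32_t>(onnx_node_attr.i());  // int-valued ONNX attribute
  }
}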
@@ namespace mindspore { namespace lite { -STATUS OnnxFlattenParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, +STATUS OnnxFlattenParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx FlattenParser"; if (op == nullptr) { @@ -59,4 +58,3 @@ STATUS OnnxFlattenParser::Parse(const onnx::GraphProto &onnx_graph, OnnxNodeRegistrar g_onnxFlattenParser("Flatten", new OnnxFlattenParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_flatten_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_flatten_parser.h index 6f28794aa8..25d3d83fa6 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_flatten_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_flatten_parser.h @@ -26,11 +26,8 @@ class OnnxFlattenParser : public OnnxNodeParser { public: OnnxFlattenParser() : OnnxNodeParser("Fatten") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_FLATTEN_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_gather_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_gather_parser.cc index 98f66ed82b..4642b95085 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_gather_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_gather_parser.cc @@ -19,8 +19,7 @@ namespace mindspore { namespace lite { -STATUS OnnxGatherParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, +STATUS OnnxGatherParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx GatherParser"; if (op == nullptr) { @@ -40,7 +39,7 @@ STATUS OnnxGatherParser::Parse(const onnx::GraphProto &onnx_graph, } for (const auto &onnx_node_attr : onnx_node.attribute()) { - const auto& attribute_name = onnx_node_attr.name(); + const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "axis") { attr->axis = static_cast(onnx_node_attr.i()); } @@ -54,4 +53,3 @@ STATUS OnnxGatherParser::Parse(const onnx::GraphProto &onnx_graph, OnnxNodeRegistrar g_onnxGatherParser("Gather", new OnnxGatherParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_gather_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_gather_parser.h index 778c0e8538..85da3f19cb 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_gather_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_gather_parser.h @@ -26,11 +26,8 @@ class OnnxGatherParser : public OnnxNodeParser { public: OnnxGatherParser() : OnnxNodeParser("Gather") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_GATHER_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_lrn_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_lrn_parser.cc index 228881edcd..de1f234603 100644 --- 
a/mindspore/lite/tools/converter/parser/onnx/onnx_lrn_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_lrn_parser.cc @@ -19,9 +19,7 @@ namespace mindspore { namespace lite { -STATUS OnnxLrnParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) { +STATUS OnnxLrnParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx LrnParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -33,8 +31,7 @@ STATUS OnnxLrnParser::Parse(const onnx::GraphProto &onnx_graph, return RET_NULL_PTR; } - std::unique_ptr attr - = std::make_unique(); + std::unique_ptr attr = std::make_unique(); if (attr == nullptr) { MS_LOG(ERROR) << "new op failed"; return RET_NULL_PTR; @@ -83,4 +80,3 @@ STATUS OnnxLrnParser::Parse(const onnx::GraphProto &onnx_graph, OnnxNodeRegistrar g_onnxLrnxParser("Lrn", new OnnxLrnParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_lrn_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_lrn_parser.h index d7c6cf88d0..fec32c0799 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_lrn_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_lrn_parser.h @@ -26,11 +26,8 @@ class OnnxLrnParser : public OnnxNodeParser { public: OnnxLrnParser() : OnnxNodeParser("Lrn") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_LRN_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_matmul_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_matmul_parser.cc index 53fb2a4a87..4c4d8c0e4c 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_matmul_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_matmul_parser.cc @@ -19,8 +19,7 @@ namespace mindspore { namespace lite { -STATUS OnnxMatmulParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, +STATUS OnnxMatmulParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx MatMulParser"; if (op == nullptr) { diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_matmul_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_matmul_parser.h index 2b099d1992..22e4f7af66 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_matmul_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_matmul_parser.h @@ -26,11 +26,8 @@ class OnnxMatmulParser : public OnnxNodeParser { public: OnnxMatmulParser() : OnnxNodeParser("MatMul") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_MATMUL_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.cc index 57b3724b44..dc4eb404ea 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.cc @@ -91,7 
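The Lrn hunk joins a split std::make_unique initialization onto one line. Spelled out with the template arguments the quoted hunk elides (schema::LrnT is assumed from the file being patched):

std::unique_ptr<schema::LrnT> attr = std::make_unique<schema::LrnT>();
if (attr == nullptr) {
  // Defensive only: std::make_unique reports allocation failure by throwing
  // std::bad_alloc, so this branch should be unreachable.
  MS_LOG(ERROR) << "new op failed";
  return RET_NULL_PTR;
}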
+91,7 @@ STATUS OnnxModelParser::SetGraphConstTensor(const onnx::GraphProto &onnx_graph, return RET_OK; } -STATUS OnnxModelParser::AddValueInfo(const onnx::ValueInfoProto &proto, const std::string &name, const TensorType &type, +STATUS OnnxModelParser::AddValueInfo(const onnx::ValueInfoProto &proto, const std::string &name, const Category &type, TensorCache *tensor_cache, int *index) { auto data_type = GetDataTypeFromOnnx(static_cast(proto.type().tensor_type().elem_type())); if (data_type == kTypeUnknown) { @@ -106,13 +106,13 @@ STATUS OnnxModelParser::AddValueInfo(const onnx::ValueInfoProto &proto, const st } tensor->dataType = data_type; tensor->dims = GetDimsFromOnnxValue(proto); - tensor->format = schema::Format_NCHW; - tensor->nodeType = schema::NodeType_ValueNode; + tensor->format = schema::Format::Format_NCHW; + tensor->nodeType = schema::NodeType::NodeType_ValueNode; *index = tensor_cache->AddTensor(name, tensor.release(), type); return RET_OK; } -STATUS OnnxModelParser::AddTensorProto(const onnx::TensorProto &proto, const std::string &name, const TensorType &type, +STATUS OnnxModelParser::AddTensorProto(const onnx::TensorProto &proto, const std::string &name, const Category &type, TensorCache *tensor_cache, int *index) { auto data_type = GetDataTypeFromOnnx(static_cast(proto.data_type())); if (data_type == kTypeUnknown) { @@ -127,8 +127,8 @@ STATUS OnnxModelParser::AddTensorProto(const onnx::TensorProto &proto, const std } tensor->dataType = data_type; std::copy(proto.dims().begin(), proto.dims().end(), std::back_inserter(tensor->dims)); - tensor->format = schema::Format_NCHW; - tensor->nodeType = schema::NodeType_ValueNode; + tensor->format = schema::Format::Format_NCHW; + tensor->nodeType = schema::NodeType::NodeType_ValueNode; if (CopyOnnxTensorData(proto, tensor.get())) { MS_LOG(ERROR) << "copy onnx data failed"; return RET_ERROR; @@ -206,8 +206,8 @@ STATUS OnnxModelParser::ParseOnnxGivenFillNode(const onnx::NodeProto &onnx_node, std::for_each(shape.begin(), shape.end(), [](int sh) { MS_LOG(DEBUG) << "shape: " << sh; }); } tensor->dims = shape; - tensor->format = schema::Format_NUM_OF_FORMAT; - tensor->nodeType = schema::NodeType_ValueNode; + tensor->format = schema::Format::Format_NUM_OF_FORMAT; + tensor->nodeType = schema::NodeType::NodeType_ValueNode; iter = std::find_if(onnx_node.attribute().begin(), onnx_node.attribute().end(), [](const onnx::AttributeProto &attr) { return attr.name() == "values"; }); // copy GivenIntTensorFill node value to tensor diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.h index e227dec4fc..a53229e949 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.h @@ -54,10 +54,10 @@ class OnnxModelParser : public ModelParser { STATUS SetGraphOutputTensor(const onnx::GraphProto &onnx_graph, schema::MetaGraphT *graph, TensorCache *tensor_cache); - STATUS AddValueInfo(const onnx::ValueInfoProto &proto, const std::string &name, const TensorType &type, + STATUS AddValueInfo(const onnx::ValueInfoProto &proto, const std::string &name, const Category &type, TensorCache *tensor_cache, int *index); - STATUS AddTensorProto(const onnx::TensorProto &proto, const std::string &name, const TensorType &type, + STATUS AddTensorProto(const onnx::TensorProto &proto, const std::string &name, const Category &type, TensorCache *tensor_cache, int *index); STATUS ParseOnnxNodeToDstOp(const 
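The model-parser hunks rename the cache-category parameter from TensorType to Category and scope the Format and NodeType enums. A condensed sketch of the tensor setup AddValueInfo performs; GetDimsFromOnnxValue and TensorCache come from the file being patched, and the elided template argument is assumed to be schema::TensorT:

std::unique_ptr<schema::TensorT> tensor = std::make_unique<schema::TensorT>();
tensor->dataType = data_type;                  // mapped from the ONNX elem_type
tensor->dims = GetDimsFromOnnxValue(proto);    // shape taken from the ValueInfoProto
tensor->format = schema::Format::Format_NCHW;  // ONNX graphs are assumed NCHW
tensor->nodeType = schema::NodeType::NodeType_ValueNode;
*index = tensor_cache->AddTensor(name, tensor.release(), type);  // type is now a Category
return RET_OK;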
onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.cc index ea23f51811..8baedfb972 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.cc @@ -32,9 +32,7 @@ schema::PadMode OnnxNodeParser::GetOnnxPadMode(const onnx::AttributeProto &onnx_ } } -void OnnxNodeParser::Split(const std::string &src_str, - std::vector *dst_str, - const std::string &chr) { +void OnnxNodeParser::Split(const std::string &src_str, std::vector *dst_str, const std::string &chr) { std::string ::size_type p1 = 0, p2 = src_str.find(chr); while (std::string::npos != p2) { dst_str->push_back(src_str.substr(p1, p2 - p1)); @@ -47,4 +45,3 @@ void OnnxNodeParser::Split(const std::string &src_str, } } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.h index 901abc920f..fdbe936ce8 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.h @@ -33,20 +33,15 @@ class OnnxNodeParser { virtual ~OnnxNodeParser() = default; - virtual STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) = 0; + virtual STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) = 0; protected: schema::PadMode GetOnnxPadMode(const onnx::AttributeProto &onnx_node_attr); - void Split(const std::string &src_str, - std::vector *dst_str, - const std::string &chr); + void Split(const std::string &src_str, std::vector *dst_str, const std::string &chr); const std::string &name; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_NODE_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser_registry.h b/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser_registry.h index 7027abab1f..9b459a12d6 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser_registry.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser_registry.h @@ -38,8 +38,7 @@ class OnnxNodeParserRegistry { class OnnxNodeRegistrar { public: - OnnxNodeRegistrar(const std::string &name, - OnnxNodeParser *parser) { + OnnxNodeRegistrar(const std::string &name, OnnxNodeParser *parser) { OnnxNodeParserRegistry::GetInstance()->parsers[name] = parser; } }; @@ -47,4 +46,3 @@ class OnnxNodeRegistrar { } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_NODE_REGISTRY_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_pad_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_pad_parser.cc index 14e4ef5662..7a26ab8f54 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_pad_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_pad_parser.cc @@ -19,9 +19,7 @@ namespace mindspore { namespace lite { -STATUS OnnxPadParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) { +STATUS OnnxPadParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx PadParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -49,8 +47,8 @@ STATUS OnnxPadParser::Parse(const onnx::GraphProto 
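OnnxNodeParser::Split, reflowed above, is a plain delimiter split. A self-contained equivalent with a small harness; the loop matches the hunk, while the trailing-segment push after the loop is assumed, since the function's tail is cut off in the quoted context:

#include <iostream>
#include <string>
#include <vector>

void Split(const std::string &src_str, std::vector<std::string> *dst_str, const std::string &chr) {
  std::string::size_type p1 = 0, p2 = src_str.find(chr);
  while (std::string::npos != p2) {
    dst_str->push_back(src_str.substr(p1, p2 - p1));  // segment before the delimiter
    p1 = p2 + chr.size();
    p2 = src_str.find(chr, p1);
  }
  if (p1 != src_str.length()) {
    dst_str->push_back(src_str.substr(p1));  // assumed: keep the final segment
  }
}

int main() {
  std::vector<std::string> parts;
  Split("1,2,3", &parts, ",");
  for (const auto &p : parts) std::cout << p << "\n";  // prints 1, 2, 3 on separate lines
  return 0;
}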
&onnx_graph, attr->paddings[i * 2 + 1] = static_cast(onnx_node_attr.ints(i + size / 2)); } } else if (attribute_name == "mode") { - const auto &mode = onnx_node_attr.s(); - if (mode == "constant") { + const auto &mode = onnx_node_attr.s(); + if (mode == "constant") { attr->paddingMode = schema::PaddingMode_CONSTANT; } else if (mode == "reflect") { attr->paddingMode = schema::PaddingMode_REFLECT; @@ -68,4 +66,3 @@ STATUS OnnxPadParser::Parse(const onnx::GraphProto &onnx_graph, OnnxNodeRegistrar g_onnxPadParser("Pad", new OnnxPadParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_pad_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_pad_parser.h index e8f60ae30c..f0d5f08787 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_pad_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_pad_parser.h @@ -26,11 +26,8 @@ class OnnxPadParser : public OnnxNodeParser { public: OnnxPadParser() : OnnxNodeParser("Pad") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_PAD_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_pool_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_pool_parser.cc index 98c9565c0b..46ad0aac12 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_pool_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_pool_parser.cc @@ -19,9 +19,7 @@ namespace mindspore { namespace lite { -STATUS OnnxPoolParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) { +STATUS OnnxPoolParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx PoolParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -39,7 +37,7 @@ STATUS OnnxPoolParser::Parse(const onnx::GraphProto &onnx_graph, return RET_NULL_PTR; } - attr->format = schema::Format_NCHW; + attr->format = schema::Format::Format_NCHW; const auto &pool_type = onnx_node.op_type(); if (pool_type == "MaxPool") { attr->poolingMode = schema::PoolMode_MAX_POOLING; diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_pool_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_pool_parser.h index 39b8e4d241..9fb50590b7 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_pool_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_pool_parser.h @@ -26,11 +26,8 @@ class OnnxPoolParser : public OnnxNodeParser { public: OnnxPoolParser() : OnnxNodeParser("Pool") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_POOL_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_reduce_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_reduce_parser.cc index fdf3b3ebe0..fa2293ae06 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_reduce_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_reduce_parser.cc @@ -19,8 +19,7 @@ namespace mindspore { namespace lite { 
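The Pad hunk's index arithmetic repacks ONNX's pads attribute, which lists every begin value and then every end value, into per-dimension (begin, end) pairs. A self-contained check of that mapping; only the i * 2 + 1 assignment is visible in the hunk, and the matching i * 2 line is assumed:

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  // ONNX layout: [x1_begin, x2_begin, ..., x1_end, x2_end, ...]
  std::vector<int64_t> onnx_pads = {1, 2, 3, 4};  // two dims: begins {1, 2}, ends {3, 4}
  const size_t size = onnx_pads.size();
  std::vector<int32_t> paddings(size);
  for (size_t i = 0; i < size / 2; ++i) {
    paddings[i * 2] = static_cast<int32_t>(onnx_pads[i]);                 // begin of dim i
    paddings[i * 2 + 1] = static_cast<int32_t>(onnx_pads[i + size / 2]);  // end of dim i
  }
  for (int32_t p : paddings) std::cout << p << ' ';  // prints: 1 3 2 4
  return 0;
}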
-STATUS OnnxReduceParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, +STATUS OnnxReduceParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx ReduceParser"; if (op == nullptr) { @@ -77,4 +76,3 @@ OnnxNodeRegistrar g_onnxReduceSumParser("ReduceSum", new OnnxReduceParser()); OnnxNodeRegistrar g_onnxReduceSumSquareParser("ReduceSumSquare", new OnnxReduceParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_reduce_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_reduce_parser.h index edb1c7b094..400bab9901 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_reduce_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_reduce_parser.h @@ -26,11 +26,8 @@ class OnnxReduceParser : public OnnxNodeParser { public: OnnxReduceParser() : OnnxNodeParser("Reduce") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_REDUCE_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.cc index 06f5d460f5..3fff5ff08a 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.cc @@ -21,9 +21,7 @@ namespace mindspore { namespace lite { -STATUS OnnxReluParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) { +STATUS OnnxReluParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx ReluParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.h index 049c39b3aa..c59d520276 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.h @@ -26,9 +26,7 @@ class OnnxReluParser : public OnnxNodeParser { public: OnnxReluParser() : OnnxNodeParser("Relu") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; class OnnxLeakeyReluParser : public OnnxReluParser { @@ -40,11 +38,8 @@ class OnnxPReluParser : public OnnxNodeParser { public: OnnxPReluParser() : OnnxNodeParser("Prelu") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_RELU_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_reshape_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_reshape_parser.cc index 83b3d3ecf2..56b06a2f50 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_reshape_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_reshape_parser.cc @@ -20,8 
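The Reduce hunk also shows how one parser class covers a whole operator family: each ONNX op type gets its own registrar entry pointing at a fresh OnnxReduceParser, and Parse presumably branches on onnx_node.op_type() to pick the reduce mode. The two registrations visible above:

OnnxNodeRegistrar g_onnxReduceSumParser("ReduceSum", new OnnxReduceParser());
OnnxNodeRegistrar g_onnxReduceSumSquareParser("ReduceSumSquare", new OnnxReduceParser());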
+20,7 @@ namespace mindspore { namespace lite { -STATUS OnnxReshapeParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, +STATUS OnnxReshapeParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx ReshapeParser"; if (op == nullptr) { @@ -40,7 +39,7 @@ STATUS OnnxReshapeParser::Parse(const onnx::GraphProto &onnx_graph, return RET_NULL_PTR; } - attr->format = schema::Format_NCHW; + attr->format = schema::Format::Format_NCHW; std::vector params; for (int i = 0; i < onnx_node.input_size(); ++i) { const auto &input_name = onnx_node.input(i); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_reshape_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_reshape_parser.h index 6bd227426b..5514503bdd 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_reshape_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_reshape_parser.h @@ -26,11 +26,8 @@ class OnnxReshapeParser : public OnnxNodeParser { public: OnnxReshapeParser() : OnnxNodeParser("Reshape") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_RESHAPE_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_shape_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_shape_parser.cc index fbfc87a568..83cf58ba41 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_shape_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_shape_parser.cc @@ -19,8 +19,7 @@ namespace mindspore { namespace lite { -STATUS OnnxShapeParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, +STATUS OnnxShapeParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx ShapeParser"; if (op == nullptr) { @@ -47,4 +46,3 @@ STATUS OnnxShapeParser::Parse(const onnx::GraphProto &onnx_graph, OnnxNodeRegistrar g_onnxShapeParser("Shape", new OnnxShapeParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_shape_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_shape_parser.h index d504f5d69f..2e7b534e60 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_shape_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_shape_parser.h @@ -26,11 +26,8 @@ class OnnxShapeParser : public OnnxNodeParser { public: OnnxShapeParser() : OnnxNodeParser("Shape") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_SHAPE_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_sigmoid_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_sigmoid_parser.cc index 67cd08c836..e8a344bcfb 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_sigmoid_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_sigmoid_parser.cc @@ -19,8 +19,7 @@ namespace mindspore { namespace lite { -STATUS OnnxSigmoidParser::Parse(const onnx::GraphProto &onnx_graph, 
- const onnx::NodeProto &onnx_node, +STATUS OnnxSigmoidParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx SigmoidParser"; if (op == nullptr) { @@ -49,4 +48,3 @@ STATUS OnnxSigmoidParser::Parse(const onnx::GraphProto &onnx_graph, OnnxNodeRegistrar g_onnxSigmoodParser("Sigmoid", new OnnxSigmoidParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_sigmoid_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_sigmoid_parser.h index e721f72931..29e98fecf0 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_sigmoid_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_sigmoid_parser.h @@ -26,11 +26,8 @@ class OnnxSigmoidParser : public OnnxNodeParser { public: OnnxSigmoidParser() : OnnxNodeParser("Sigmoid") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_SIGMOID_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_slice_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_slice_parser.cc index f3b7ed0966..c9b3eb0076 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_slice_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_slice_parser.cc @@ -66,7 +66,7 @@ STATUS OnnxSliceParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::No } std::vector sizes(starts.size(), -1); for (size_t i = 0; i < starts.size(); ++i) { - sizes[i] = (ends[i] < 0 ? ends[i] : ends[i] - starts[i]); + sizes[i] = (ends[i] < 0 ? 
ends[i] : ends[i] - starts[i]); } attr->axes = axes; attr->begin = starts; diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_slice_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_slice_parser.h index bda83c1866..29a52be983 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_slice_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_slice_parser.h @@ -26,11 +26,8 @@ class OnnxSliceParser : public OnnxNodeParser { public: OnnxSliceParser() : OnnxNodeParser("Slice") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_SLICE_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_softmax_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_softmax_parser.cc index 53dfe860e3..8e35db742d 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_softmax_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_softmax_parser.cc @@ -19,8 +19,7 @@ namespace mindspore { namespace lite { -STATUS OnnxSoftMaxParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, +STATUS OnnxSoftMaxParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx SoftMaxParser"; if (op == nullptr) { @@ -40,7 +39,7 @@ STATUS OnnxSoftMaxParser::Parse(const onnx::GraphProto &onnx_graph, } for (const auto &onnx_node_attr : onnx_node.attribute()) { - const auto& attribute_name = onnx_node_attr.name(); + const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "axis") { attr->axis = static_cast(onnx_node_attr.i()); } @@ -54,4 +53,3 @@ STATUS OnnxSoftMaxParser::Parse(const onnx::GraphProto &onnx_graph, OnnxNodeRegistrar g_onnxSoftMaxParser("Softmax", new OnnxSoftMaxParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_softmax_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_softmax_parser.h index 668ab25ea9..47b3c6b2ff 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_softmax_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_softmax_parser.h @@ -26,11 +26,8 @@ class OnnxSoftMaxParser : public OnnxNodeParser { public: OnnxSoftMaxParser() : OnnxNodeParser("Softmax") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_SOFTMAX_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_space_to_depth_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_space_to_depth_parser.cc index 4928c8a05c..acfd7ec72a 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_space_to_depth_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_space_to_depth_parser.cc @@ -19,8 +19,7 @@ namespace mindspore { namespace lite { -STATUS OnnxSpaceToDepthParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, +STATUS OnnxSpaceToDepthParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << 
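The Slice hunk converts ONNX (starts, ends) pairs into the begin/size form the converter schema expects, passing negative ends straight through. A self-contained check of that arithmetic; reading the negative values as end-of-axis sentinels is an inference:

#include <iostream>
#include <vector>

int main() {
  std::vector<int> starts = {1, 0};
  std::vector<int> ends = {5, -1};  // -1 presumably means "to the end of the axis"
  std::vector<int> sizes(starts.size(), -1);
  for (size_t i = 0; i < starts.size(); ++i) {
    sizes[i] = (ends[i] < 0 ? ends[i] : ends[i] - starts[i]);  // mirrors the hunk
  }
  for (int s : sizes) std::cout << s << ' ';  // prints: 4 -1
  return 0;
}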
"onnx SpaceToDepthParser"; if (op == nullptr) { diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_space_to_depth_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_space_to_depth_parser.h index 62340ba381..374868e951 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_space_to_depth_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_space_to_depth_parser.h @@ -26,11 +26,8 @@ class OnnxSpaceToDepthParser : public OnnxNodeParser { public: OnnxSpaceToDepthParser() : OnnxNodeParser("SpaceToDepth") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_SPACE_TO_DEPTH_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_squeeze_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_squeeze_parser.cc index 4f4c9ff868..91012d71b0 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_squeeze_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_squeeze_parser.cc @@ -19,8 +19,7 @@ namespace mindspore { namespace lite { -STATUS OnnxSqueezeParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, +STATUS OnnxSqueezeParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx SqueezeParser"; if (op == nullptr) { @@ -56,4 +55,3 @@ STATUS OnnxSqueezeParser::Parse(const onnx::GraphProto &onnx_graph, OnnxNodeRegistrar g_onnxSqueezeParser("Squeeze", new OnnxSqueezeParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_squeeze_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_squeeze_parser.h index 741c9754a4..1c3a01f042 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_squeeze_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_squeeze_parser.h @@ -26,11 +26,8 @@ class OnnxSqueezeParser : public OnnxNodeParser { public: OnnxSqueezeParser() : OnnxNodeParser("Squeeze") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_SQUEEZE_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_tile_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_tile_parser.cc index 20ade29485..09785373bd 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_tile_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_tile_parser.cc @@ -19,9 +19,7 @@ namespace mindspore { namespace lite { -STATUS OnnxTileParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) { +STATUS OnnxTileParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx TileParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_tile_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_tile_parser.h index a921911e1c..81330a3cd5 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_tile_parser.h +++ 
b/mindspore/lite/tools/converter/parser/onnx/onnx_tile_parser.h @@ -26,11 +26,8 @@ class OnnxTileParser : public OnnxNodeParser { public: OnnxTileParser() : OnnxNodeParser("Tile") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_TILE_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_transpose_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_transpose_parser.cc index 7846a02358..55c3600c22 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_transpose_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_transpose_parser.cc @@ -19,8 +19,7 @@ namespace mindspore { namespace lite { -STATUS OnnxTransposeParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, +STATUS OnnxTransposeParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx TransposeParser"; if (op == nullptr) { @@ -64,4 +63,3 @@ STATUS OnnxTransposeParser::Parse(const onnx::GraphProto &onnx_graph, OnnxNodeRegistrar g_onnxTransposeParser("Transpose", new OnnxTransposeParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_transpose_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_transpose_parser.h index e9e84d025d..5180ce40be 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_transpose_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_transpose_parser.h @@ -26,11 +26,8 @@ class OnnxTransposeParser : public OnnxNodeParser { public: OnnxTransposeParser() : OnnxNodeParser("Transpose") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_TRANSPOSE_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_unsample_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_unsample_parser.cc index 576fb2d449..ad1be93b3b 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_unsample_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_unsample_parser.cc @@ -19,8 +19,7 @@ namespace mindspore { namespace lite { -STATUS OnnxUpsampleParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, +STATUS OnnxUpsampleParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx UpsampleParser"; if (op == nullptr) { @@ -58,4 +57,3 @@ STATUS OnnxUpsampleParser::Parse(const onnx::GraphProto &onnx_graph, OnnxNodeRegistrar g_onnxUpsampleParser("Upsample", new OnnxUpsampleParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_unsample_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_unsample_parser.h index 426d5d8b5a..b475671231 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_unsample_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_unsample_parser.h @@ -26,11 +26,8 @@ class OnnxUpsampleParser : public OnnxNodeParser { 
public: OnnxUpsampleParser() : OnnxNodeParser("Upsample") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_UPSAMPLE_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_unsqueeze_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_unsqueeze_parser.h index 10abcae3f7..bec7505ce0 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_unsqueeze_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_unsqueeze_parser.h @@ -26,11 +26,8 @@ class OnnxUnSqueezeParser : public OnnxNodeParser { public: OnnxUnSqueezeParser() : OnnxNodeParser("Unsqueeze") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_UNSQUEEZE_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_unuseful_node_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_unuseful_node_parser.cc index 869cbd8c02..9fb09ce987 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_unuseful_node_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_unuseful_node_parser.cc @@ -19,8 +19,7 @@ namespace mindspore { namespace lite { -STATUS OnnxUnusefulNodeParser::Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, +STATUS OnnxUnusefulNodeParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx UnusefulNodeParser"; if (op == nullptr) { @@ -60,4 +59,3 @@ OnnxNodeRegistrar g_onnxInt8QuantizeParser("Int8Quantize", new OnnxUnusefulNodeP OnnxNodeRegistrar g_onnxInt8DequantizeParser("Int8Dequantize", new OnnxUnusefulNodeParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_unuseful_node_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_unuseful_node_parser.h index 94cb3db72e..bbb3e07487 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_unuseful_node_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_unuseful_node_parser.h @@ -26,11 +26,8 @@ class OnnxUnusefulNodeParser : public OnnxNodeParser { public: OnnxUnusefulNodeParser() : OnnxNodeParser("UnusefulNode") {} - STATUS Parse(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node, - schema::CNodeT *op) override; + STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX__UNUSEFUL_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.cc index 1b3a28b721..c8671ceb14 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.cc @@ -25,10 +25,8 @@ namespace lite { STATUS TfliteActivationParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors, const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, - schema::CNodeT *op, - std::vector<int32_t> *tensors_id, - std::vector<schema::Format> *tensors_format, - std::map<int, int> *tensors_id_map) { + schema::CNodeT *op, std::vector<int32_t> *tensors_id, + std::vector<schema::Format> *tensors_format, std::map<int, int> *tensors_id_map) { if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -48,22 +46,22 @@ STATUS TfliteActivationParser::Parse(const std::unique_ptr<tflite::OperatorT> &t std::vector<std::string> node_name_str; Split(op->name, &node_name_str, "-"); const char *node_name = node_name_str.data()->c_str(); - if (std::strcmp(node_name, "Relu") == 0) { + if (std::strcmp(node_name, "Relu") == 0) { MS_LOG(DEBUG) << "parse TfliteReluParser"; attr->type = schema::ActivationType_RELU; - } else if (std::strcmp(node_name, "Relu6") == 0) { + } else if (std::strcmp(node_name, "Relu6") == 0) { MS_LOG(DEBUG) << "parse TfliteRelu6Parser"; attr->type = schema::ActivationType_RELU6; - } else if (std::strcmp(node_name, "Tanh") == 0) { + } else if (std::strcmp(node_name, "Tanh") == 0) { MS_LOG(DEBUG) << "parse TfliteTanhParser"; attr->type = schema::ActivationType_TANH; - } else if (std::strcmp(node_name, "Logistic") == 0) { + } else if (std::strcmp(node_name, "Logistic") == 0) { MS_LOG(DEBUG) << "parse TfliteLogisticParser"; attr->type = schema::ActivationType_SIGMOID; - } else if (std::strcmp(node_name, "HardSwish") == 0) { + } else if (std::strcmp(node_name, "HardSwish") == 0) { MS_LOG(DEBUG) << "parse TfliteHardSwishParser"; attr->type = schema::ActivationType_HSWISH; - } else if (std::strcmp(node_name, "LeakyRelu") == 0) { + } else if (std::strcmp(node_name, "LeakyRelu") == 0) { const auto &tflite_attr = tflite_op->builtin_options.AsLeakyReluOptions(); if (tflite_attr == nullptr) { MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed"; @@ -76,10 +74,10 @@ STATUS TfliteActivationParser::Parse(const std::unique_ptr<tflite::OperatorT> &t op->primitive->value.type = schema::PrimitiveType_Activation; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.h index b1509599d9..c3b9db23b9 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.h @@ -31,11 +31,9 @@ class TfliteActivationParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors, - const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, - schema::CNodeT *op, - std::vector<int32_t> *tensors_id, - std::vector<schema::Format> *tensors_format, - std::map<int, int> *tensors_id_map) override; + const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, schema::CNodeT *op, + std::vector<int32_t> *tensors_id, std::vector<schema::Format> *tensors_format, + std::map<int, int> *tensors_id_map) override; }; class TfliteReluParser : public TfliteActivationParser { @@ -43,12 +41,12 @@ class TfliteReluParser : public TfliteActivationParser
{ TfliteReluParser() : TfliteActivationParser() {} }; -class TfliteRelu6Parser : public TfliteActivationParser{ +class TfliteRelu6Parser : public TfliteActivationParser { public: TfliteRelu6Parser() : TfliteActivationParser() {} }; -class TfliteTanhParser : public TfliteActivationParser{ +class TfliteTanhParser : public TfliteActivationParser { public: TfliteTanhParser() : TfliteActivationParser() {} }; @@ -72,4 +70,3 @@ class TfliteLeakyReluParser : public TfliteActivationParser { } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_ACTIVATION_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_addn_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_addn_parser.cc index e8a8cb80a7..72bccef988 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_addn_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_addn_parser.cc @@ -1,19 +1,19 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* distributed under the License is distributed on an AS -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * distributed under the License is distributed on an AS + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
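Every TFLite parser in these hunks reflows the same six-parameter Parse override. For reference, here is a declaration-only sketch of that hook with the template arguments written out (the element types are the flatbuffers object-API classes generated from the TFLite schema; TfliteFooParser is a made-up name, not a class in the patch):

// Sketch only: the shared hook each TFLite node parser overrides.
class TfliteFooParser : public TfliteNodeParser {
 public:
  TfliteFooParser() : TfliteNodeParser("Foo") {}

  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer,
               schema::CNodeT *op, std::vector<int32_t> *tensors_id,
               std::vector<schema::Format> *tensors_format,
               std::map<int, int> *tensors_id_map) override;
};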
+ */ #include "tools/converter/parser/tflite/tflite_addn_parser.h" #include @@ -25,10 +25,8 @@ namespace lite { STATUS TfliteAddNParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteAddNParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -51,11 +49,11 @@ STATUS TfliteAddNParser::Parse(const std::unique_ptr &tflite_ op->primitive->value.value = attr.release(); for (size_t i = 0; i < tflite_op->inputs.size(); i++) { - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[i], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[i], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); } - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_addn_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_addn_parser.h index 8bd1ef03ac..fdc2fe0553 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_addn_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_addn_parser.h @@ -1,18 +1,18 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ #ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_ADDN_PARSER_H #define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_ADDN_PARSER_H @@ -31,11 +31,9 @@ class TfliteAddNParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_argmax_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_argmax_parser.cc index 5d766ee33a..56d3efea6c 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_argmax_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_argmax_parser.cc @@ -24,10 +24,8 @@ namespace lite { STATUS TfliteArgmaxParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteArgmaxParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -52,7 +50,7 @@ STATUS TfliteArgmaxParser::Parse(const std::unique_ptr &tflit // get axis attr auto axis_idx = tflite_op->inputs[1]; - std::for_each(tflite_tensors[axis_idx]->shape.begin(), tflite_tensors[axis_idx]->shape.end(), [&](int32_t sha){}); + std::for_each(tflite_tensors[axis_idx]->shape.begin(), tflite_tensors[axis_idx]->shape.end(), [&](int32_t sha) {}); auto &buf_data = tflite_model_buffer[tflite_tensors[axis_idx]->buffer]; if (buf_data == nullptr) { MS_LOG(ERROR) << "the buf data is null"; @@ -68,10 +66,10 @@ STATUS TfliteArgmaxParser::Parse(const std::unique_ptr &tflit op->primitive->value.type = schema::PrimitiveType_ArgMax; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_argmax_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_argmax_parser.h index f7dc10cfaf..61d1ac385d 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_argmax_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_argmax_parser.h @@ -31,11 +31,9 @@ class TfliteArgmaxParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector 
*tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_argmin_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_argmin_parser.cc index 4cb3b19b3c..e1b97dac8b 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_argmin_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_argmin_parser.cc @@ -24,10 +24,8 @@ namespace lite { STATUS TfliteArgminParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteArgminParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -52,8 +50,7 @@ STATUS TfliteArgminParser::Parse(const std::unique_ptr &tflit // get axis attr auto axis_idx = tflite_op->inputs[1]; - std::for_each(tflite_tensors[axis_idx]->shape.begin(), - tflite_tensors[axis_idx]->shape.end(), [&](int32_t sha){}); + std::for_each(tflite_tensors[axis_idx]->shape.begin(), tflite_tensors[axis_idx]->shape.end(), [&](int32_t sha) {}); auto &buf_data = tflite_model_buffer[tflite_tensors[axis_idx]->buffer]; if (buf_data == nullptr) { MS_LOG(ERROR) << "the buf data is null"; @@ -69,10 +66,10 @@ STATUS TfliteArgminParser::Parse(const std::unique_ptr &tflit op->primitive->value.type = schema::PrimitiveType_ArgMin; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_argmin_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_argmin_parser.h index 4213fc3211..58e90a6775 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_argmin_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_argmin_parser.h @@ -31,11 +31,9 @@ class TfliteArgminParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.cc index 68d9eaf168..4cb191f7b4 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.cc @@ -25,10 +25,9 @@ namespace lite { STATUS 
TfliteDoubleInputOpParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, + schema::CNodeT *op, std::vector *tensors_id, std::vector *tensors_format, - std::map *tensors_id_map) { + std::map *tensors_id_map) { if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -172,21 +171,20 @@ STATUS TfliteDoubleInputOpParser::Parse(const std::unique_ptr // set input for (size_t i = 0; i < tflite_op->inputs.size(); i++) { - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[i], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[i], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); } - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } STATUS TfliteSingleInputOpParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, + schema::CNodeT *op, std::vector *tensors_id, std::vector *tensors_format, - std::map *tensors_id_map) { + std::map *tensors_id_map) { if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -304,20 +302,18 @@ STATUS TfliteSingleInputOpParser::Parse(const std::unique_ptr op->primitive->value.value = attr.release(); } - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } STATUS TfliteCompareOpParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -388,11 +384,11 @@ STATUS TfliteCompareOpParser::Parse(const std::unique_ptr &tf } for (size_t i = 0; i < tflite_op->inputs.size(); i++) { - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[i], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[i], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); } - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), 
schema::Format::Format_NHWC); return RET_OK; } @@ -428,5 +424,3 @@ TfliteNodeRegister g_tfliteLessParser("Less", new TfliteLessParser()); TfliteNodeRegister g_tfliteLessEqualParser("LessEqual", new TfliteLessEqualParser()); } // namespace lite } // namespace mindspore - - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.h index d61b742b19..b25e57b1a1 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.h @@ -31,11 +31,9 @@ class TfliteDoubleInputOpParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; class TfliteAddParser : public TfliteDoubleInputOpParser { @@ -93,18 +91,15 @@ class TfliteMinimumParser : public TfliteDoubleInputOpParser { TfliteMinimumParser() : TfliteDoubleInputOpParser() {} }; - class TfliteSingleInputOpParser : public TfliteNodeParser { public: TfliteSingleInputOpParser() : TfliteNodeParser("node_name") {} STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; class TfliteAbsParser : public TfliteSingleInputOpParser { @@ -162,18 +157,15 @@ class TfliteFloorParser : public TfliteSingleInputOpParser { TfliteFloorParser() : TfliteSingleInputOpParser() {} }; - class TfliteCompareOpParser : public TfliteNodeParser { public: TfliteCompareOpParser() : TfliteNodeParser("node_name") {} STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; class TfliteEqualParser : public TfliteCompareOpParser { @@ -209,4 +201,3 @@ class TfliteLessEqualParser : public TfliteCompareOpParser { } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_ARITHMETIC_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.cc index 2c897f20f2..2ae3bcbe29 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.cc @@ -1,19 +1,19 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. 
-* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* distributed under the License is distributed on an AS -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * distributed under the License is distributed on an AS + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ #include "tools/converter/parser/tflite/tflite_batch_to_space_parser.h" #include @@ -26,10 +26,9 @@ namespace lite { STATUS TfliteBatchToSpaceParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, + schema::CNodeT *op, std::vector *tensors_id, std::vector *tensors_format, - std::map *tensors_id_map) { + std::map *tensors_id_map) { if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -67,10 +66,10 @@ STATUS TfliteBatchToSpaceParser::Parse(const std::unique_ptr op->primitive->value.type = schema::PrimitiveType_BatchToSpace; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.h index 2e3723b04f..398707bcd1 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.h @@ -1,18 +1,18 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. 
-*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ #ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_BATCH_TO_SPACE_PARSER_H #define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_BATCH_TO_SPACE_PARSER_H @@ -31,11 +31,9 @@ class TfliteBatchToSpaceParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; class TfliteBatchToSpaceNDParser : public TfliteBatchToSpaceParser { diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_broadcast_to_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_broadcast_to_parser.cc index 34e958c494..afbf73c9ed 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_broadcast_to_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_broadcast_to_parser.cc @@ -1,19 +1,19 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* distributed under the License is distributed on an AS -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * distributed under the License is distributed on an AS + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
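Parsers such as ArgMax and ArgMin in the hunks above fetch constant operands (e.g. the axis) by chasing operator input, to tensor, to buffer through the flatbuffers tables. A self-contained sketch of that lookup (ReadScalarInt32 and the stand-in structs are illustrative, not part of the patch; the real parsers wrap each step in null checks and error logging):

#include <cstdint>
#include <cstring>
#include <memory>
#include <vector>

// Stand-ins for the flatbuffers object-API types (illustration only).
struct TensorT { int buffer = 0; };
struct BufferT { std::vector<uint8_t> data; };
struct OperatorT { std::vector<int> inputs; };

int32_t ReadScalarInt32(const OperatorT &op, const std::vector<std::unique_ptr<TensorT>> &tensors,
                        const std::vector<std::unique_ptr<BufferT>> &buffers, size_t input_index) {
  const int tensor_idx = op.inputs[input_index];           // operand -> tensor id
  const auto &buf = buffers[tensors[tensor_idx]->buffer];  // tensor -> constant buffer
  int32_t value = 0;
  std::memcpy(&value, buf->data.data(), sizeof(value));    // copy out; avoids unaligned reads
  return value;
}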
+ */ #include "tools/converter/parser/tflite/tflite_broadcast_to_parser.h" #include @@ -25,10 +25,8 @@ namespace lite { STATUS TfliteBroadcastToParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteBroadcastToParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -54,10 +52,10 @@ STATUS TfliteBroadcastToParser::Parse(const std::unique_ptr & op->primitive->value.type = schema::PrimitiveType_BroadcastTo; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_broadcast_to_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_broadcast_to_parser.h index 25478346fc..50363b19ae 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_broadcast_to_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_broadcast_to_parser.h @@ -1,18 +1,18 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ #ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_BROADCAST_TO_PARSER_H #define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_BROADCAST_TO_PARSER_H @@ -31,11 +31,9 @@ class TfliteBroadcastToParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_cast_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_cast_parser.cc index a0e6ebd359..9d10a00d28 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_cast_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_cast_parser.cc @@ -1,19 +1,19 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* distributed under the License is distributed on an AS -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * distributed under the License is distributed on an AS + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
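A pattern every parser here ends with: the attribute table is built behind a unique_ptr so that early error returns cannot leak it, and only once nothing can fail is the raw pointer release()d into the primitive's value union, whose generated code frees it later. A self-contained sketch with stand-in types (the real CastT and union type are flatbuffers-generated):

#include <memory>

struct CastT { int srcT = 0; int dstT = 0; };            // stand-in for schema::CastT
struct PrimitiveValueT { int type = 0; void *value = nullptr; };

int main() {
  auto attr = std::unique_ptr<CastT>(new CastT());
  attr->srcT = 1;                             // fields filled from the op options in the real parser
  PrimitiveValueT value;
  value.type = 1;                             // stands in for schema::PrimitiveType_Cast
  value.value = attr.release();               // parser hands ownership to the primitive table
  delete static_cast<CastT *>(value.value);   // done by the generated table's destructor in real code
  return 0;
}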
+ */ #include "tools/converter/parser/tflite/tflite_cast_parser.h" #include #include @@ -24,10 +24,8 @@ namespace lite { STATUS TfliteCastParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteCastParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -61,13 +59,13 @@ STATUS TfliteCastParser::Parse(const std::unique_ptr &tflite_ op->primitive->value.type = schema::PrimitiveType_Cast; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } - TfliteNodeRegister g_tfliteCastParser("Cast", new TfliteCastParser()); +TfliteNodeRegister g_tfliteCastParser("Cast", new TfliteCastParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_cast_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_cast_parser.h index 151808dbd5..2570f43e94 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_cast_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_cast_parser.h @@ -1,18 +1,18 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ #ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_CAST_PARSER_H #define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_CAST_PARSER_H @@ -31,11 +31,9 @@ class TfliteCastParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_concat_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_concat_parser.cc index 6ccf7883c7..d749dfc58f 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_concat_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_concat_parser.cc @@ -24,10 +24,8 @@ namespace lite { STATUS TfliteConcatParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteConcatParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -57,16 +55,14 @@ STATUS TfliteConcatParser::Parse(const std::unique_ptr &tflit op->primitive->value.value = attr.release(); for (size_t i = 0; i < tflite_op->inputs.size(); i++) { - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[i], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[i], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); } - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } TfliteNodeRegister g_tfliteConcatParser("Concat", new TfliteConcatParser()); } // namespace lite } // namespace mindspore - - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_concat_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_concat_parser.h index eac2caf581..b50b6eb03e 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_concat_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_concat_parser.h @@ -31,14 +31,11 @@ class TfliteConcatParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_CONCAT_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_conv_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_conv_parser.cc index 8832320ade..9c4a90e6df 100644 --- 
a/mindspore/lite/tools/converter/parser/tflite/tflite_conv_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_conv_parser.cc @@ -24,10 +24,8 @@ namespace lite { STATUS TfliteConvParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors, const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, - schema::CNodeT *op, - std::vector<int32_t> *tensors_id, - std::vector<schema::Format> *tensors_format, - std::map<int, int> *tensors_id_map) { + schema::CNodeT *op, std::vector<int32_t> *tensors_id, + std::vector<schema::Format> *tensors_format, std::map<int, int> *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteConvParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -56,7 +54,7 @@ STATUS TfliteConvParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_ attr->dilateH = tflite_attr->dilation_h_factor; attr->dilateW = tflite_attr->dilation_w_factor; attr->padMode = GetPadMode(tflite_attr->padding); - attr->format = schema::Format_NHWC; + attr->format = schema::Format::Format_NHWC; attr->activationType = GetActivationFunctionType(tflite_attr->fused_activation_function); attr->hasBias = true; @@ -77,8 +75,8 @@ STATUS TfliteConvParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_ auto data_index = tflite_op->inputs[0]; const auto &data_tensor = tflite_tensors[data_index]; std::vector<int64_t> params; - if (getPaddingParam(data_tensor, attr->padMode, attr->strideH, - attr->strideW, attr->kernelH, attr->kernelW, &params) != RET_OK) { + if (getPaddingParam(data_tensor, attr->padMode, attr->strideH, attr->strideW, attr->kernelH, attr->kernelW, + &params) != RET_OK) { MS_LOG(ERROR) << "get padding params failed"; return RET_ERROR; } else { @@ -91,19 +89,17 @@ STATUS TfliteConvParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_ op->primitive->value.type = schema::PrimitiveType_Conv2D; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[1], tensors_id->size(), tflite_tensors.size(), schema::Format_KHWC); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[2], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[1], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_KHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[2], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } TfliteNodeRegister g_tfliteConv2DParser("Conv2D", new TfliteConvParser()); } // namespace lite } // namespace mindspore - - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_conv_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_conv_parser.h index abb5d889f7..6a21bce5c6 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_conv_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_conv_parser.h @@ -31,14 +31,11 @@ class TfliteConvParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors, - const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, - schema::CNodeT *op, - std::vector<int32_t> *tensors_id, - std::vector<schema::Format> *tensors_format, - std::map<int, int> *tensors_id_map) override; + const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, schema::CNodeT *op, + std::vector<int32_t> *tensors_id, std::vector<schema::Format> *tensors_format, + std::map<int, int> *tensors_id_map) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_CONV_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_converter.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_converter.cc index 825deec6f9..24b6ec0783 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_converter.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_converter.cc @@ -18,9 +18,6 @@ namespace mindspore { namespace lite { -TfliteConverter::TfliteConverter() { - modelParser = new TfliteModelParser(); -} +TfliteConverter::TfliteConverter() { modelParser = new TfliteModelParser(); } } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_converter.h b/mindspore/lite/tools/converter/parser/tflite/tflite_converter.h index d510d74a16..9bc53fb955 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_converter.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_converter.h @@ -36,4 +36,3 @@ class TfliteConverter : public Converter { } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_CONVERTER_H_ - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_custom_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_custom_parser.cc index e9d647633d..0c2ec85d94 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_custom_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_custom_parser.cc @@ -47,7 +47,7 @@ STATUS TfliteCustomParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit const auto &custom_attr = tflite_op->custom_options; auto attr_map = flexbuffers::GetRoot(custom_attr).AsMap(); - attr->format = schema::Format_NHWC; + attr->format = schema::Format::Format_NHWC; attr->inputSize = tflite_op->inputs.size(); attr->hScale = attr_map["h_scale"].AsFloat(); attr->wScale = attr_map["w_scale"].AsFloat(); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_custom_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_custom_parser.h index 150137b9e4..88e7bdd3a1 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_custom_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_custom_parser.h @@ -31,11 +31,9 @@ class TfliteCustomParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors, - const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, - schema::CNodeT *op, - std::vector<int32_t> *tensors_id, - std::vector<schema::Format> *tensors_format, - std::map<int, int> *tensors_id_map) override; + const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, schema::CNodeT *op, + std::vector<int32_t> *tensors_id, std::vector<schema::Format> *tensors_format, + std::map<int, int> *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_deconv_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_deconv_parser.cc index 5d994cd8ac..2ea3c3be69 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_deconv_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_deconv_parser.cc @@ -24,10 +24,8 @@ namespace
lite { STATUS TfliteDeConvParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors, const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, - schema::CNodeT *op, - std::vector<int32_t> *tensors_id, - std::vector<schema::Format> *tensors_format, - std::map<int, int> *tensors_id_map) { + schema::CNodeT *op, std::vector<int32_t> *tensors_id, + std::vector<schema::Format> *tensors_format, std::map<int, int> *tensors_id_map) { MS_LOG(DEBUG) << "parse tflite Transpose_Conv parser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -57,7 +55,7 @@ STATUS TfliteDeConvParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit attr->dilateH = 1; attr->dilateW = 1; attr->padMode = GetPadMode(tflite_attr->padding); - attr->format = schema::Format_NHWC; + attr->format = schema::Format::Format_NHWC; attr->activationType = schema::ActivationType_NO_ACTIVATION; attr->hasBias = true; @@ -78,8 +76,8 @@ STATUS TfliteDeConvParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit auto data_index = tflite_op->inputs[2]; const auto &data_tensor = tflite_tensors[data_index]; std::vector<int64_t> params; - if (getPaddingParam(data_tensor, attr->padMode, attr->strideH, - attr->strideW, attr->kernelH, attr->kernelW, &params) != RET_OK) { + if (getPaddingParam(data_tensor, attr->padMode, attr->strideH, attr->strideW, attr->kernelH, attr->kernelW, + &params) != RET_OK) { MS_LOG(ERROR) << "get padding params failed"; return RET_ERROR; } else { @@ -92,16 +90,15 @@ STATUS TfliteDeConvParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit op->primitive->value.type = schema::PrimitiveType_DeConv2D; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[2], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[1], tensors_id->size(), tflite_tensors.size(), schema::Format_KHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[2], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[1], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_KHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } TfliteNodeRegister g_tfliteDeConv2DParser("DeConv2D", new TfliteDeConvParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_deconv_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_deconv_parser.h index 0a26ceb68f..5d754318b9 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_deconv_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_deconv_parser.h @@ -31,11 +31,9 @@ class TfliteDeConvParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors, - const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, - schema::CNodeT *op, - std::vector<int32_t> *tensors_id, - std::vector<schema::Format> *tensors_format, - std::map<int, int> *tensors_id_map) override; + const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, schema::CNodeT *op, + std::vector<int32_t> *tensors_id, std::vector<schema::Format> *tensors_format, + std::map<int, int> *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_depth_to_space_parser.cc
b/mindspore/lite/tools/converter/parser/tflite/tflite_depth_to_space_parser.cc index 67b63735ca..8d781f3787 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_depth_to_space_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_depth_to_space_parser.cc @@ -1,19 +1,19 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* distributed under the License is distributed on an AS -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * distributed under the License is distributed on an AS + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ #include "tools/converter/parser/tflite/tflite_depth_to_space_parser.h" #include @@ -25,10 +25,9 @@ namespace lite { STATUS TfliteDepthToSpaceParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, + schema::CNodeT *op, std::vector *tensors_id, std::vector *tensors_format, - std::map *tensors_id_map) { + std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteDepthToSpaceParser"; if (op == nullptr) { @@ -53,15 +52,15 @@ STATUS TfliteDepthToSpaceParser::Parse(const std::unique_ptr return RET_NULL_PTR; } attr->blockSize = tflite_attr->block_size; - attr->format = schema::Format_NHWC; + attr->format = schema::Format::Format_NHWC; op->primitive->value.type = schema::PrimitiveType_DepthToSpace; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_depth_to_space_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_depth_to_space_parser.h index 6fac3d3cd1..72c212f310 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_depth_to_space_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_depth_to_space_parser.h @@ -1,18 +1,18 @@ /** -* Copyright 2020 
Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ #ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_DEPTH_TO_SPACE_PARSER_H #define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_DEPTH_TO_SPACE_PARSER_H @@ -31,11 +31,9 @@ class TfliteDepthToSpaceParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_depthwise_conv_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_depthwise_conv_parser.cc index a4956b82be..6904acff0e 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_depthwise_conv_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_depthwise_conv_parser.cc @@ -24,10 +24,9 @@ namespace lite { STATUS TfliteDepthwiseConv2DParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, + schema::CNodeT *op, std::vector *tensors_id, std::vector *tensors_format, - std::map *tensors_id_map) { + std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteDepthwiseConv2DParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -55,7 +54,7 @@ STATUS TfliteDepthwiseConv2DParser::Parse(const std::unique_ptrdilateH = tflite_attr->dilation_h_factor; attr->dilateW = tflite_attr->dilation_w_factor; attr->padMode = GetPadMode(tflite_attr->padding); - attr->format = schema::Format_NHWC; + attr->format = schema::Format::Format_NHWC; attr->activationType = GetActivationFunctionType(tflite_attr->fused_activation_function); attr->hasBias = true; attr->channelMultiplier = tflite_attr->depth_multiplier; @@ -83,8 +82,8 @@ STATUS TfliteDepthwiseConv2DParser::Parse(const std::unique_ptr params; - if (getPaddingParam(data_tensor, attr->padMode, attr->strideH, attr->strideW, - attr->kernelH, attr->kernelW, ¶ms) != RET_OK) { + if (getPaddingParam(data_tensor, attr->padMode, attr->strideH, attr->strideW, attr->kernelH, attr->kernelW, + ¶ms) != 
RET_OK) { MS_LOG(ERROR) << "get padding params failed"; return RET_ERROR; } else { @@ -97,19 +96,17 @@ STATUS TfliteDepthwiseConv2DParser::Parse(const std::unique_ptrprimitive->value.type = schema::PrimitiveType_DepthwiseConv2D; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[1], tensors_id->size(), tflite_tensors.size(), schema::Format_KHWC); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[2], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[1], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_KHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[2], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } TfliteNodeRegister g_tfliteDepthwiseConv2DParser("DepthwiseConv2D", new TfliteDepthwiseConv2DParser()); } // namespace lite } // namespace mindspore - - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_depthwise_conv_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_depthwise_conv_parser.h index 6e4022f4fb..20451e7319 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_depthwise_conv_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_depthwise_conv_parser.h @@ -31,14 +31,11 @@ class TfliteDepthwiseConv2DParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_CONV_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_dequantize_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_dequantize_parser.cc index 743d97594a..2865c086eb 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_dequantize_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_dequantize_parser.cc @@ -23,10 +23,8 @@ namespace lite { STATUS TfliteDequantizeParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteDequantizeNParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -70,10 +68,10 @@ STATUS 
TfliteDequantizeParser::Parse(const std::unique_ptr &t op->primitive->value.type = schema::PrimitiveType_Cast; } - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_dequantize_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_dequantize_parser.h index 276bd3e748..dc1a3c1545 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_dequantize_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_dequantize_parser.h @@ -30,11 +30,9 @@ class TfliteDequantizeParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_expand_dims_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_expand_dims_parser.cc index ab8c634725..654f66c9b9 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_expand_dims_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_expand_dims_parser.cc @@ -24,10 +24,8 @@ namespace lite { STATUS TfliteExpandDimsParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteExpandDimsParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -60,5 +58,3 @@ STATUS TfliteExpandDimsParser::Parse(const std::unique_ptr &t TfliteNodeRegister g_tfliteExpandDimsParser("ExpandDims", new TfliteExpandDimsParser()); } // namespace lite } // namespace mindspore - - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_expand_dims_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_expand_dims_parser.h index cdbda4b5b8..09029fe591 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_expand_dims_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_expand_dims_parser.h @@ -31,14 +31,11 @@ class TfliteExpandDimsParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace 
lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_EXPAND_DIMS_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_fill_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_fill_parser.cc index 88fa4a639f..b264bc809d 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_fill_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_fill_parser.cc @@ -24,10 +24,8 @@ namespace lite { STATUS TfliteFillParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteFillParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -55,10 +53,10 @@ STATUS TfliteFillParser::Parse(const std::unique_ptr &tflite_ op->primitive->value.type = schema::PrimitiveType_Fill; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_fill_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_fill_parser.h index 7bfcd5df99..1e454e9fb4 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_fill_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_fill_parser.h @@ -31,14 +31,11 @@ class TfliteFillParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_FILL_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.cc index d0202ce16e..d82c6d57ae 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.cc @@ -24,10 +24,9 @@ namespace lite { STATUS TfliteFullyConnectedParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, + schema::CNodeT *op, std::vector *tensors_id, std::vector *tensors_format, - std::map *tensors_id_map) { + std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteFullyConnectedParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -61,21 +60,20 @@ STATUS 
TfliteFullyConnectedParser::Parse(const std::unique_ptrprimitive->value.type = schema::PrimitiveType_FullConnection; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[1], tensors_id->size(), tflite_tensors.size(), schema::Format_KHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[1], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_KHWC); if (hasBias) { - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[2], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[2], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); } - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } TfliteNodeRegister g_tfliteFullyConnectedParser("FullyConnected", new TfliteFullyConnectedParser()); -TfliteNodeRegister g_tfliteFakeQuantParser("FakeQuant", new TfliteFakeQuantParser());; +TfliteNodeRegister g_tfliteFakeQuantParser("FakeQuant", new TfliteFakeQuantParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.h index 21fd8186ad..8178d2b09a 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.h @@ -31,11 +31,9 @@ class TfliteFullyConnectedParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; class TfliteFakeQuantParser : public TfliteFullyConnectedParser { @@ -46,4 +44,3 @@ class TfliteFakeQuantParser : public TfliteFullyConnectedParser { } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_FULLY_CONNECTED_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_gather_nd_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_gather_nd_parser.cc index 706a2d9f90..5681b5a12a 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_gather_nd_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_gather_nd_parser.cc @@ -24,10 +24,8 @@ namespace lite { STATUS TfliteGatherNdParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + 
std::vector *tensors_format, std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteGatherNdParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -51,16 +49,14 @@ STATUS TfliteGatherNdParser::Parse(const std::unique_ptr &tfl op->primitive->value.value = attr.release(); for (size_t i = 0; i < tflite_op->inputs.size(); i++) { - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[i], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[i], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); } - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } TfliteNodeRegister g_tfliteGatherNdParser("GatherND", new TfliteGatherNdParser()); } // namespace lite } // namespace mindspore - - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_gather_nd_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_gather_nd_parser.h index 4d9c3e525c..f8d11b6ccc 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_gather_nd_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_gather_nd_parser.h @@ -31,14 +31,11 @@ class TfliteGatherNdParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_GATHER_ND_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_gather_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_gather_parser.cc index 09994bbdc6..bb215aa0e9 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_gather_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_gather_parser.cc @@ -24,10 +24,8 @@ namespace lite { STATUS TfliteGatherParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteGatherParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -57,16 +55,14 @@ STATUS TfliteGatherParser::Parse(const std::unique_ptr &tflit op->primitive->value.value = attr.release(); for (size_t i = 0; i < tflite_op->inputs.size(); i++) { - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[i], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[i], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); } - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpOutput(op, 
tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } TfliteNodeRegister g_tfliteGatherParser("Gather", new TfliteGatherParser()); } // namespace lite } // namespace mindspore - - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_gather_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_gather_parser.h index 08ea38976c..4558e02307 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_gather_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_gather_parser.h @@ -31,14 +31,11 @@ class TfliteGatherParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_GATHER_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_l2norm_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_l2norm_parser.cc index 177097482e..39179be052 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_l2norm_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_l2norm_parser.cc @@ -1,19 +1,19 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* distributed under the License is distributed on an AS -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * distributed under the License is distributed on an AS + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ #include "tools/converter/parser/tflite/tflite_l2norm_parser.h" #include @@ -23,12 +23,10 @@ namespace mindspore { namespace lite { STATUS TfliteL2NormParser::Parse(const std::unique_ptr &tflite_op, - const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + const std::vector> &tflite_tensors, + const std::vector> &tflite_model_buffer, + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteL2NormParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -61,8 +59,8 @@ STATUS TfliteL2NormParser::Parse(const std::unique_ptr &tflit std::vector axis; axis.reserve(ndim); for (size_t i = 0; i < ndim; i++) { - axis.emplace_back(i); - } + axis.emplace_back(i); + } attr->axis = axis; attr->epsilon = 0.0f; @@ -70,10 +68,10 @@ STATUS TfliteL2NormParser::Parse(const std::unique_ptr &tflit op->primitive->value.value = attr.release(); // set input - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_l2norm_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_l2norm_parser.h index ea9b902be9..7da98c9cfc 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_l2norm_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_l2norm_parser.h @@ -1,18 +1,18 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ #ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_L2NORM_PARSER_H #define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_L2NORM_PARSER_H @@ -31,11 +31,9 @@ class TfliteL2NormParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.cc index e12b7ead9a..c8b249f0b9 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.cc @@ -25,10 +25,8 @@ namespace lite { STATUS TfliteLogicalParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -72,11 +70,11 @@ STATUS TfliteLogicalParser::Parse(const std::unique_ptr &tfli } for (size_t i = 0; i < tflite_op->inputs.size(); i++) { - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[i], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[i], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); } - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.h index 377b871790..d95145f658 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.h @@ -31,11 +31,9 @@ class TfliteLogicalParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; class TfliteLogicalAndParser : public TfliteLogicalParser { diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_lrn_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_lrn_parser.cc index 7839938400..8e1371c95e 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_lrn_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_lrn_parser.cc @@ -24,10 +24,8 @@ namespace lite { STATUS TfliteLRNParser::Parse(const std::unique_ptr 
&tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteLRNParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -58,15 +56,13 @@ STATUS TfliteLRNParser::Parse(const std::unique_ptr &tflite_o op->primitive->value.type = schema::PrimitiveType_LocalResponseNormalization; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } TfliteNodeRegister g_tfliteLRNParser("LocalResponseNorm", new TfliteLRNParser()); } // namespace lite } // namespace mindspore - - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_lrn_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_lrn_parser.h index a64179c64b..1566650c8c 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_lrn_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_lrn_parser.h @@ -31,14 +31,11 @@ class TfliteLRNParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_LRN_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.cc index a991502ecd..795fabd6e7 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.cc @@ -53,8 +53,7 @@ STATUS TfliteModelParser::CopyConstTensorData(const std::vectorbuffer; if (!tflite_model_buffer[buffer_idx]->data.empty()) { tensor->data.resize(data_size); - if (memcpy_s(tensor->data.data(), tensor->data.size(), - tflite_model_buffer[buffer_idx]->data.data(), + if (memcpy_s(tensor->data.data(), tensor->data.size(), tflite_model_buffer[buffer_idx]->data.data(), tflite_model_buffer[buffer_idx]->data.size())) { MS_LOG(ERROR) << "memcpy tensor data failed"; return RET_ERROR; @@ -167,7 +166,7 @@ STATUS TfliteModelParser::ConvertTensor(const std::unique_ptr // set tensor attr if (isInput || isConst) { - tensor->nodeType = schema::NodeType_ValueNode; + tensor->nodeType = schema::NodeType::NodeType_ValueNode; } else { tensor->nodeType = schema::NodeType_Parameter; } @@ -280,7 +279,7 @@ STATUS TfliteModelParser::ConvertGroupDepthwiseOp(schema::MetaGraphT *sub_graph) if (weight_tensor->dataType == 
TypeId::kNumberTypeUInt8) { auto status = TransFilterFormat(weight_tensor.get(), kKHWC2CHWK); if (status != RET_OK) { MS_LOG(ERROR) << "Trans depthwiseConv Filter Format failed."; return RET_ERROR; } } else if (weight_tensor->dataType == kNumberTypeFloat32 || weight_tensor->dataType == kNumberTypeFloat) { @@ -293,7 +292,7 @@ STATUS TfliteModelParser::ConvertGroupDepthwiseOp(schema::MetaGraphT *sub_graph) MS_LOG(ERROR) << "The dataType of weight tensor is unsupported."; return RET_ERROR; } - weight_tensor->format = schema::Format_CHWK; + weight_tensor->format = schema::Format::Format_CHWK; } } } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.h index 38dbe95592..3cae315af1 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.h @@ -41,38 +41,33 @@ class TfliteModelParser : public ModelParser { ~TfliteModelParser() override; - schema::MetaGraphT *ParseToFb(const std::string &model_file, - const std::string &weight_file, - const QuantType &quantType = QuantType_QUANT_NONE) override; + schema::MetaGraphT *ParseToFb(const std::string &model_file, const std::string &weight_file, + const QuantType &quantType = QuantType_QUANT_NONE) override; private: std::unique_ptr ReadTfliteModel(const char *model_path); STATUS CopyConstTensorData(const std::vector> &tflite_model_buffer, - const tflite::TensorT *tflite_tensor, - schema::TensorT *tensor); + const tflite::TensorT *tflite_tensor, schema::TensorT *tensor); - void SetTensorQuantParam(const std::unique_ptr &tflite_tensor, - schema::TensorT *tensor); + void SetTensorQuantParam(const std::unique_ptr &tflite_tensor, schema::TensorT *tensor); STATUS ConvertOp(const std::unique_ptr &tflite_model, - const std::unique_ptr &tflite_subgraph, - const QuantType &quant_type, + const std::unique_ptr &tflite_subgraph, const QuantType &quant_type, schema::MetaGraphT *sub_graph); STATUS ConvertTensor(const std::unique_ptr &tflite_subgraph, const std::vector> &tflite_model_buffer, schema::MetaGraphT *sub_graph); - STATUS GetGraphInfo(const std::unique_ptr &tflite_subgraph, - schema::MetaGraphT *sub_graph); + STATUS GetGraphInfo(const std::unique_ptr &tflite_subgraph, schema::MetaGraphT *sub_graph); - STATUS ConvertGroupDepthwiseOp(schema::MetaGraphT* sub_graph); + STATUS ConvertGroupDepthwiseOp(schema::MetaGraphT *sub_graph); private: std::vector tensorsId; std::vector tensorsFormat; - std::map tensorsIdMap; + std::map tensorsIdMap; std::vector tensors; std::map opMap; @@ -83,4 +78,3 @@ class TfliteModelParser : public ModelParser { } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_MODEL_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h index c2422e8d90..f3df22b20e 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h @@ -40,17 +40,12 @@ class TfliteNodeParser { virtual STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) = 0; + const std::vector> &tflite_model_buffer,
schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) = 0; - void AddOpInput(schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map, - int idx, int new_idx, int total, schema::Format format) { + void AddOpInput(schema::CNodeT *op, std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map, int idx, int new_idx, int total, schema::Format format) { auto iter = tensors_id_map->find(idx); if (iter != tensors_id_map->end()) { op->inputIndex.emplace_back(iter->second); @@ -65,11 +60,8 @@ class TfliteNodeParser { } } - void AddOpOutput(schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map, - int idx, int new_idx, int total, schema::Format format) { + void AddOpOutput(schema::CNodeT *op, std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map, int idx, int new_idx, int total, schema::Format format) { auto iter = tensors_id_map->find(idx); if (iter != tensors_id_map->end()) { op->outputIndex.emplace_back(iter->second); @@ -85,8 +77,7 @@ class TfliteNodeParser { } template - STATUS GetTfliteData(const int32_t tensor_index, - const std::vector> &tflite_tensors, + STATUS GetTfliteData(const int32_t tensor_index, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, std::vector &attr_data) { int32_t count = 1; @@ -158,14 +149,10 @@ class TfliteNodeParser { protected: const std::string &name; std::map dtype_map = { - {tflite::TensorType_FLOAT64, TypeId::kNumberTypeFloat64}, - {tflite::TensorType_FLOAT32, TypeId::kNumberTypeFloat32}, - {tflite::TensorType_FLOAT16, TypeId::kNumberTypeFloat16}, - {tflite::TensorType_INT64, TypeId::kNumberTypeInt64}, - {tflite::TensorType_INT32, TypeId::kNumberTypeInt32}, - {tflite::TensorType_INT16, TypeId::kNumberTypeInt16}, - {tflite::TensorType_INT8, TypeId::kNumberTypeInt8}, - {tflite::TensorType_UINT8, TypeId::kNumberTypeUInt8}, + {tflite::TensorType_FLOAT64, TypeId::kNumberTypeFloat64}, {tflite::TensorType_FLOAT32, TypeId::kNumberTypeFloat32}, + {tflite::TensorType_FLOAT16, TypeId::kNumberTypeFloat16}, {tflite::TensorType_INT64, TypeId::kNumberTypeInt64}, + {tflite::TensorType_INT32, TypeId::kNumberTypeInt32}, {tflite::TensorType_INT16, TypeId::kNumberTypeInt16}, + {tflite::TensorType_INT8, TypeId::kNumberTypeInt8}, {tflite::TensorType_UINT8, TypeId::kNumberTypeUInt8}, {tflite::TensorType_BOOL, TypeId::kNumberTypeBool}, }; }; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.cc index f17ae7ca6d..1087536c8d 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.cc @@ -43,4 +43,3 @@ TfliteNodeParser *TfliteNodeParserRegistry::GetNodeParser(const std::string &nam } } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h b/mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h index 2bfe898d34..214a9460ed 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h @@ -46,4 +46,3 @@ class TfliteNodeRegister { } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_NODE_PARSER_REGISTRY_H - diff --git 
a/mindspore/lite/tools/converter/parser/tflite/tflite_one_hot_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_one_hot_parser.cc index 088b6a7bb4..dcb7eb3dcc 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_one_hot_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_one_hot_parser.cc @@ -24,10 +24,8 @@ namespace lite { STATUS TfliteOneHotParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteOneHotParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -62,11 +60,11 @@ STATUS TfliteOneHotParser::Parse(const std::unique_ptr &tflit op->primitive->value.value = attr.release(); for (size_t i = 0; i < tflite_op->inputs.size(); i++) { - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[i], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[i], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); } - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_one_hot_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_one_hot_parser.h index 8a23110957..bac4ce944e 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_one_hot_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_one_hot_parser.h @@ -31,11 +31,9 @@ class TfliteOneHotParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.cc index 112733dd0a..0502afc269 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.cc @@ -24,10 +24,8 @@ namespace lite { STATUS TflitePadParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TflitePadParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -61,10 +59,10 @@ STATUS TflitePadParser::Parse(const std::unique_ptr &tflite_o op->primitive->value.type = schema::PrimitiveType_Pad; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, 
tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.h index 44f657ad4e..86e91913fc 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.h @@ -31,11 +31,9 @@ class TflitePadParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_pooling_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_pooling_parser.cc index 96fb0681c8..51e096e7f1 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_pooling_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_pooling_parser.cc @@ -25,10 +25,8 @@ namespace lite { STATUS TflitePoolingParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -66,7 +64,7 @@ STATUS TflitePoolingParser::Parse(const std::unique_ptr &tfli attr->strideW = tflite_attr->stride_w; attr->strideH = tflite_attr->stride_h; attr->padMode = GetPadMode(tflite_attr->padding); - attr->format = schema::Format_NHWC; + attr->format = schema::Format::Format_NHWC; attr->global = false; attr->roundMode = schema::RoundMode_FLOOR; @@ -76,8 +74,8 @@ STATUS TflitePoolingParser::Parse(const std::unique_ptr &tfli auto data_index = tflite_op->inputs[0]; const auto &data_tensor = tflite_tensors[data_index]; std::vector params; - if (getPaddingParam(data_tensor, attr->padMode, attr->strideH, - attr->strideW, attr->windowH, attr->windowW, ¶ms) != RET_OK) { + if (getPaddingParam(data_tensor, attr->padMode, attr->strideH, attr->strideW, attr->windowH, attr->windowW, + ¶ms) != RET_OK) { MS_LOG(ERROR) << "get padding params failed"; return RET_ERROR; } else { @@ -90,10 +88,10 @@ STATUS TflitePoolingParser::Parse(const std::unique_ptr &tfli op->primitive->value.type = schema::PrimitiveType_Pooling; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], 
tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } @@ -101,5 +99,3 @@ TfliteNodeRegister g_tfliteMeanPoolingParser("MeanPooling", new TfliteMeanPoolin TfliteNodeRegister g_tfliteMaxPoolingParser("MaxPooling", new TfliteMaxPoolingParser()); } // namespace lite } // namespace mindspore - - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_pooling_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_pooling_parser.h index fe8d5fa804..914c8db8e1 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_pooling_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_pooling_parser.h @@ -31,11 +31,9 @@ class TflitePoolingParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; class TfliteMeanPoolingParser : public TflitePoolingParser { @@ -51,4 +49,3 @@ class TfliteMaxPoolingParser : public TflitePoolingParser { } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_CONV_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_quantize_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_quantize_parser.cc index c9a49d78f3..b0aff03236 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_quantize_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_quantize_parser.cc @@ -21,12 +21,10 @@ namespace mindspore { namespace lite { STATUS TfliteQuantizeParser::Parse(const std::unique_ptr &tflite_op, - const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + const std::vector> &tflite_tensors, + const std::vector> &tflite_model_buffer, + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteQuantizeNParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -60,10 +58,10 @@ STATUS TfliteQuantizeParser::Parse(const std::unique_ptr &tfl op->primitive->value.type = schema::PrimitiveType_QuantDTypeCast; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_quantize_parser.h 
b/mindspore/lite/tools/converter/parser/tflite/tflite_quantize_parser.h index 834d6f1861..0ee44fc567 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_quantize_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_quantize_parser.h @@ -30,11 +30,9 @@ class TfliteQuantizeParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_range_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_range_parser.cc index 36bf0775d9..8e9d1a1309 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_range_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_range_parser.cc @@ -24,10 +24,8 @@ namespace lite { STATUS TfliteRangeParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteRangeParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -46,22 +44,20 @@ STATUS TfliteRangeParser::Parse(const std::unique_ptr &tflite } attr->dType = 0; -// attr->start -// attr->limit -// attr->delta + // attr->start + // attr->limit + // attr->delta op->primitive->value.type = schema::PrimitiveType_Range; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } TfliteNodeRegister g_tfliteRangeParser("Range", new TfliteRangeParser()); } // namespace lite } // namespace mindspore - - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_range_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_range_parser.h index 204b5c8e73..eedace5def 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_range_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_range_parser.h @@ -31,14 +31,11 @@ class TfliteRangeParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore #endif // 
MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_RANGE_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_rank_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_rank_parser.cc index 28d06bf756..cd01d674bf 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_rank_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_rank_parser.cc @@ -24,10 +24,8 @@ namespace lite { STATUS TfliteRankParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteRankParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -48,15 +46,13 @@ STATUS TfliteRankParser::Parse(const std::unique_ptr &tflite_ op->primitive->value.type = schema::PrimitiveType_Rank; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } TfliteNodeRegister g_tfliteRankParser("Rank", new TfliteRankParser()); } // namespace lite } // namespace mindspore - - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_rank_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_rank_parser.h index 9afd1fcc22..9a910c5774 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_rank_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_rank_parser.h @@ -31,14 +31,11 @@ class TfliteRankParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_RANK_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.cc index 239d8e0492..bb36668fd4 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.cc @@ -25,10 +25,8 @@ namespace lite { STATUS TfliteReduceParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ 
-85,10 +83,10 @@ STATUS TfliteReduceParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
   op->primitive->value.type = schema::PrimitiveType_Reduce;
   op->primitive->value.value = attr.release();
 
-  AddOpInput(op, tensors_id, tensors_format, tensors_id_map,
-             tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
-  AddOpOutput(op, tensors_id, tensors_format, tensors_id_map,
-              tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
+  AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(),
+             tflite_tensors.size(), schema::Format::Format_NHWC);
+  AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(),
+              tflite_tensors.size(), schema::Format::Format_NHWC);
   return RET_OK;
 }
 
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.h
index 35c49fb25a..eb2d422f8d 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.h
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.h
@@ -31,11 +31,9 @@ class TfliteReduceParser : public TfliteNodeParser {
   STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
                const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors,
-               const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer,
-               schema::CNodeT *op,
-               std::vector<int32_t> *tensors_id,
-               std::vector<schema::Format> *tensors_format,
-               std::map<int, int> *tensors_id_map) override;
+               const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, schema::CNodeT *op,
+               std::vector<int32_t> *tensors_id, std::vector<schema::Format> *tensors_format,
+               std::map<int, int> *tensors_id_map) override;
 };
 
 class TfliteReduceMaxParser : public TfliteReduceParser {
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.cc
index b05d5ea854..3aea529d4d 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.cc
@@ -24,10 +24,8 @@ namespace lite {
 STATUS TfliteReshapeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
                                   const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors,
                                   const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer,
-                                  schema::CNodeT *op,
-                                  std::vector<int32_t> *tensors_id,
-                                  std::vector<schema::Format> *tensors_format,
-                                  std::map<int, int> *tensors_id_map) {
+                                  schema::CNodeT *op, std::vector<int32_t> *tensors_id,
+                                  std::vector<schema::Format> *tensors_format, std::map<int, int> *tensors_id_map) {
   MS_LOG(DEBUG) << "parse TfliteReshapeParser";
   if (op == nullptr) {
     MS_LOG(ERROR) << "op is null";
@@ -69,7 +67,7 @@ STATUS TfliteReshapeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfli
       }
     }
   } else {
-    attr->format = schema::Format_NHWC;
+    attr->format = schema::Format::Format_NHWC;
     attr->shape.resize(tfliteAttr->new_shape.size());
     for (size_t i = 0; i < tfliteAttr->new_shape.size(); ++i) {
       attr->shape[i] = tfliteAttr->new_shape[i];
@@ -80,11 +78,11 @@ STATUS TfliteReshapeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfli
   op->primitive->value.value = attr.release();
 
   for (size_t i = 0; i < tflite_op->inputs.size(); i++) {
-    AddOpInput(op, tensors_id, tensors_format, tensors_id_map,
-               tflite_op->inputs[i], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
+    AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[i], tensors_id->size(),
+               tflite_tensors.size(), schema::Format::Format_NHWC);
   }
-  AddOpOutput(op, tensors_id, tensors_format, tensors_id_map,
-              tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
+  AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(),
+              tflite_tensors.size(), schema::Format::Format_NHWC);
   return RET_OK;
 }
 
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.h
index 6ab5fa1db6..6f1d6fcd28 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.h
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.h
@@ -31,14 +31,11 @@ class TfliteReshapeParser : public TfliteNodeParser {
   STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
                const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors,
-               const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer,
-               schema::CNodeT *op,
-               std::vector<int32_t> *tensors_id,
-               std::vector<schema::Format> *tensors_format,
-               std::map<int, int> *tensors_id_map) override;
+               const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, schema::CNodeT *op,
+               std::vector<int32_t> *tensors_id, std::vector<schema::Format> *tensors_format,
+               std::map<int, int> *tensors_id_map) override;
 };
 }  // namespace lite
 }  // namespace mindspore
 
 #endif  // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_RESHAPE_PARSER_H
-
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.cc
index 9f5313c7e1..cbbb3d0c63 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.cc
@@ -25,10 +25,8 @@ namespace lite {
 STATUS TfliteResizeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
                                  const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors,
                                  const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer,
-                                 schema::CNodeT *op,
-                                 std::vector<int32_t> *tensors_id,
-                                 std::vector<schema::Format> *tensors_format,
-                                 std::map<int, int> *tensors_id_map) {
+                                 schema::CNodeT *op, std::vector<int32_t> *tensors_id,
+                                 std::vector<schema::Format> *tensors_format, std::map<int, int> *tensors_id_map) {
   if (op == nullptr) {
     MS_LOG(ERROR) << "op is null";
     return RET_NULL_PTR;
@@ -57,7 +55,7 @@ STATUS TfliteResizeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
     }
     attr->alignCorners = tfliteAttr->align_corners;
     attr->method = schema::ResizeMethod_BILINEAR;
-  } else if (std::strcmp(node_name, "NearestNeighbor") == 0)  {
+  } else if (std::strcmp(node_name, "NearestNeighbor") == 0) {
     MS_LOG(DEBUG) << "parse TfliteResizeNearestNeighborParser";
     const auto &tfliteAttr = tflite_op->builtin_options.AsResizeNearestNeighborOptions();
     if (tfliteAttr == nullptr) {
@@ -71,7 +69,7 @@ STATUS TfliteResizeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
     return RET_ERROR;
   }
 
-  attr->format = schema::Format_NHWC;
+  attr->format = schema::Format::Format_NHWC;
   attr->preserveAspectRatio = false;
 
   auto tfliteResizeTensorIndex = tflite_op->inputs[1];
@@ -95,17 +93,14 @@ STATUS TfliteResizeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
   op->primitive->value.type = schema::PrimitiveType_Resize;
   op->primitive->value.value = attr.release();
 
-  AddOpInput(op, tensors_id, tensors_format, tensors_id_map,
-             tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
-  AddOpOutput(op, tensors_id, tensors_format, tensors_id_map,
-              tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
+  AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(),
+             tflite_tensors.size(), schema::Format::Format_NHWC);
+  AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(),
+              tflite_tensors.size(), schema::Format::Format_NHWC);
   return RET_OK;
 }
 
 TfliteNodeRegister g_tfliteResizeBilinearParser("ResizeBilinear", new TfliteResizeBilinearParser());
-TfliteNodeRegister g_tfliteResizeNearestNeighborParser("NearestNeighbor", - new TfliteResizeNearestNeighborParser()); +TfliteNodeRegister g_tfliteResizeNearestNeighborParser("NearestNeighbor", new TfliteResizeNearestNeighborParser()); } // namespace lite } // namespace mindspore - - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.h index 1b9b5d91ae..526864e6f7 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.h @@ -31,11 +31,9 @@ class TfliteResizeParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; class TfliteResizeBilinearParser : public TfliteResizeParser { @@ -51,4 +49,3 @@ class TfliteResizeNearestNeighborParser : public TfliteResizeParser { } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_RESIZE_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_parser.cc index 268af7065e..2662f33e68 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_parser.cc @@ -24,10 +24,8 @@ namespace lite { STATUS TfliteReverseParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteReverseParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -53,15 +51,13 @@ STATUS TfliteReverseParser::Parse(const std::unique_ptr &tfli op->primitive->value.type = schema::PrimitiveType_Reverse; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } TfliteNodeRegister g_tfliteReverseParser("reverse", new TfliteReverseParser()); } // namespace lite } // namespace mindspore - - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_parser.h index d9fa0ce2df..0e3771eb1e 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_parser.h @@ -31,14 +31,11 @@ class TfliteReverseParser : public TfliteNodeParser { STATUS 
Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_REVERSE_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_sequence_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_sequence_parser.cc index 9263e4c5a7..0bb7e1770a 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_sequence_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_sequence_parser.cc @@ -1,19 +1,19 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* distributed under the License is distributed on an AS -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * distributed under the License is distributed on an AS + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ #include "tools/converter/parser/tflite/tflite_reverse_sequence_parser.h" #include @@ -25,10 +25,9 @@ namespace lite { STATUS TfliteReverseSequenceParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, + schema::CNodeT *op, std::vector *tensors_id, std::vector *tensors_format, - std::map *tensors_id_map) { + std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteReverseSequenceParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -57,12 +56,12 @@ STATUS TfliteReverseSequenceParser::Parse(const std::unique_ptrprimitive->value.type = schema::PrimitiveType_ReverseSequence; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[1], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[1], tensors_id->size(), + tflite_tensors.size(), schema::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format_NHWC); return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_sequence_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_sequence_parser.h index 927247fe86..538f859bf5 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_sequence_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_sequence_parser.h @@ -1,18 +1,18 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ #ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_REVERSE_SEQUENCE_PARSER_H #define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_REVERSE_SEQUENCE_PARSER_H @@ -31,11 +31,9 @@ class TfliteReverseSequenceParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_scatter_nd_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_scatter_nd_parser.cc index 90818e7db0..ccd5e1cbe6 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_scatter_nd_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_scatter_nd_parser.cc @@ -25,10 +25,8 @@ namespace lite { STATUS TfliteScatterNdParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteScatterNdParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -56,14 +54,14 @@ STATUS TfliteScatterNdParser::Parse(const std::unique_ptr &tf // in tflite, kIndices = 0, kUpdates = 1, kShape = 2 // in mslite, kScatterShapeIndex = 0, kScatterIndicesIndex = 1, kScatterUpdateIndex = 2; - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[2], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[1], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[2], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[1], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_scatter_nd_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_scatter_nd_parser.h index 6baeed21be..788c6c522e 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_scatter_nd_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_scatter_nd_parser.h @@ -31,11 +31,9 @@ class TfliteScatterNdParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> 
&tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_shape_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_shape_parser.cc index 71f11b0316..ef98d0888c 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_shape_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_shape_parser.cc @@ -24,10 +24,8 @@ namespace lite { STATUS TfliteShapeParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteShapeParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -48,10 +46,10 @@ STATUS TfliteShapeParser::Parse(const std::unique_ptr &tflite op->primitive->value.type = schema::PrimitiveType_Shape; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_shape_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_shape_parser.h index ab34d3c901..c413750764 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_shape_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_shape_parser.h @@ -31,11 +31,9 @@ class TfliteShapeParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_slice_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_slice_parser.cc index 571f6ad4d1..9cca9d521c 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_slice_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_slice_parser.cc @@ -24,10 +24,8 @@ namespace lite { STATUS TfliteSliceParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map 
*tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteSliceParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -45,7 +43,7 @@ STATUS TfliteSliceParser::Parse(const std::unique_ptr &tflite return RET_NULL_PTR; } - attr->format = schema::Format_NHWC; + attr->format = schema::Format::Format_NHWC; if (GetTfliteData(tflite_op->inputs[1], tflite_tensors, tflite_model_buffer, attr->begin)) { MS_LOG(ERROR) << "get slice -> begin failed"; @@ -64,14 +62,13 @@ STATUS TfliteSliceParser::Parse(const std::unique_ptr &tflite op->primitive->value.type = schema::PrimitiveType_Slice; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } TfliteNodeRegister g_tfliteSliceParser("Slice", new TfliteSliceParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_slice_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_slice_parser.h index 0a84cda642..7a4850dca8 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_slice_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_slice_parser.h @@ -31,14 +31,11 @@ class TfliteSliceParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_SLICE_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_softmax_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_softmax_parser.cc index a022de6b96..814c5000f9 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_softmax_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_softmax_parser.cc @@ -24,10 +24,8 @@ namespace lite { STATUS TfliteSoftmaxParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteSoftmaxParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -50,10 +48,10 @@ STATUS TfliteSoftmaxParser::Parse(const std::unique_ptr &tfli op->primitive->value.type = schema::PrimitiveType_SoftMax; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - 
tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
+  AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(),
+             tflite_tensors.size(), schema::Format::Format_NHWC);
+  AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(),
+              tflite_tensors.size(), schema::Format::Format_NHWC);
   return RET_OK;
 }
 
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_softmax_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_softmax_parser.h
index 73576c110c..5a7387ac58 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_softmax_parser.h
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_softmax_parser.h
@@ -31,14 +31,11 @@ class TfliteSoftmaxParser : public TfliteNodeParser {
   STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
                const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors,
-               const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer,
-               schema::CNodeT *op,
-               std::vector<int32_t> *tensors_id,
-               std::vector<schema::Format> *tensors_format,
-               std::map<int, int> *tensors_id_map) override;
+               const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, schema::CNodeT *op,
+               std::vector<int32_t> *tensors_id, std::vector<schema::Format> *tensors_format,
+               std::map<int, int> *tensors_id_map) override;
 };
 }  // namespace lite
 }  // namespace mindspore
 
 #endif  // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_SOFTMAX_PARSER_H
-
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser.cc
index b2bf6d3b07..ed5467a878 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser.cc
@@ -1,19 +1,19 @@
 /**
-* Copyright 2020 Huawei Technologies Co., Ltd
-*
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* distributed under the License is distributed on an AS
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * distributed under the License is distributed on an AS
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ #include "tools/converter/parser/tflite/tflite_space_to_batch_nd_parser.h" #include @@ -25,10 +25,9 @@ namespace lite { STATUS TfliteSpaceToBatchNDParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, + schema::CNodeT *op, std::vector *tensors_id, std::vector *tensors_format, - std::map *tensors_id_map) { + std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteSpaceToBatchNDParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -58,10 +57,10 @@ STATUS TfliteSpaceToBatchNDParser::Parse(const std::unique_ptrprimitive->value.type = schema::PrimitiveType_SpaceToBatchND; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser.h index e8b5b69a11..284396bcbb 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser.h @@ -1,18 +1,18 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ #ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_SPACE_TO_BATCH_ND_PARSER_H #define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_SPACE_TO_BATCH_ND_PARSER_H @@ -31,11 +31,9 @@ class TfliteSpaceToBatchNDParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_depth_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_depth_parser.cc index b9a788c7e0..08207e02cd 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_depth_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_depth_parser.cc @@ -1,19 +1,19 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* distributed under the License is distributed on an AS -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * distributed under the License is distributed on an AS + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ #include "tools/converter/parser/tflite/tflite_space_to_depth_parser.h" #include @@ -25,10 +25,9 @@ namespace lite { STATUS TfliteSpaceToDepthParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, + schema::CNodeT *op, std::vector *tensors_id, std::vector *tensors_format, - std::map *tensors_id_map) { + std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteSpaceToDepthParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -52,15 +51,15 @@ STATUS TfliteSpaceToDepthParser::Parse(const std::unique_ptr return RET_NULL_PTR; } attr->blockSize = tflite_attr->block_size; - attr->format = schema::Format_NHWC; + attr->format = schema::Format::Format_NHWC; op->primitive->value.type = schema::PrimitiveType_SpaceToDepth; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_depth_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_depth_parser.h index be2cc7a16c..8ab6e77dbd 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_depth_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_depth_parser.h @@ -1,18 +1,18 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ #ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_SPACE_TO_DEPTH_PARSER_H #define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_SPACE_TO_DEPTH_PARSER_H @@ -31,11 +31,9 @@ class TfliteSpaceToDepthParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_sparse_to_dense_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_sparse_to_dense_parser.cc index 02b98c07f3..8e76dbb3c8 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_sparse_to_dense_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_sparse_to_dense_parser.cc @@ -1,19 +1,19 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* distributed under the License is distributed on an AS -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * distributed under the License is distributed on an AS + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ #include "tools/converter/parser/tflite/tflite_sparse_to_dense_parser.h" #include @@ -25,10 +25,9 @@ namespace lite { STATUS TfliteSparseToDenseParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, + schema::CNodeT *op, std::vector *tensors_id, std::vector *tensors_format, - std::map *tensors_id_map) { + std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteSparseToDenseParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -50,20 +49,19 @@ STATUS TfliteSparseToDenseParser::Parse(const std::unique_ptr op->primitive->value.type = schema::PrimitiveType_SparseToDense; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[1], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[2], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[3], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[1], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[2], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[3], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } TfliteNodeRegister g_tfliteSparseToDenseParser("SparseToDense", new TfliteSparseToDenseParser()); } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_sparse_to_dense_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_sparse_to_dense_parser.h index f4c496f5bf..6e7f6923dc 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_sparse_to_dense_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_sparse_to_dense_parser.h @@ -1,18 +1,18 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. 
-*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ #ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_SPARSE_TO_DENSE_PARSER_H #define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_SPARSE_TO_DENSE_PARSER_H @@ -31,11 +31,9 @@ class TfliteSparseToDenseParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_split_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_split_parser.cc index ee9e0ae336..a0f989a15c 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_split_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_split_parser.cc @@ -24,10 +24,8 @@ namespace lite { STATUS TfliteSplitParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteSplitParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -85,11 +83,11 @@ STATUS TfliteSplitParser::Parse(const std::unique_ptr &tflite op->primitive->value.type = schema::PrimitiveType_Split; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[1], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[1], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); for (size_t i = 0; i < tflite_op->outputs.size(); i++) { - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[i], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[i], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); } return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_split_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_split_parser.h index 997fbb01e2..e182f323ed 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_split_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_split_parser.h @@ -31,11 +31,9 @@ class TfliteSplitParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - 
schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_split_v_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_split_v_parser.cc index 6166dee069..8631e35449 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_split_v_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_split_v_parser.cc @@ -24,10 +24,8 @@ namespace lite { STATUS TfliteSplitVParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteSplitVParser"; if (op == nullptr) { @@ -82,11 +80,11 @@ STATUS TfliteSplitVParser::Parse(const std::unique_ptr &tflit op->primitive->value.type = schema::PrimitiveType_Split; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); for (size_t i = 0; i < tflite_op->outputs.size(); i++) { - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[i], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[i], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); } return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_split_v_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_split_v_parser.h index 125ddbc30d..8b3f37fa81 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_split_v_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_split_v_parser.h @@ -31,11 +31,9 @@ class TfliteSplitVParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_squeeze_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_squeeze_parser.cc index 9300787712..aa2b8deaea 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_squeeze_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_squeeze_parser.cc @@ -24,10 +24,8 @@ namespace lite { STATUS TfliteSqueezeParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + 
std::vector *tensors_format, std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteSqueezeParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -55,10 +53,10 @@ STATUS TfliteSqueezeParser::Parse(const std::unique_ptr &tfli op->primitive->value.type = schema::PrimitiveType_Squeeze; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_squeeze_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_squeeze_parser.h index e1b9c9436e..685866d409 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_squeeze_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_squeeze_parser.h @@ -31,11 +31,9 @@ class TfliteSqueezeParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, - const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) override; + const std::vector> &tflite_model_buffer, schema::CNodeT *op, + std::vector *tensors_id, std::vector *tensors_format, + std::map *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_stack_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_stack_parser.cc index c0304362a9..fb6917950c 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_stack_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_stack_parser.cc @@ -24,10 +24,8 @@ namespace lite { STATUS TfliteStackParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, - schema::CNodeT *op, - std::vector *tensors_id, - std::vector *tensors_format, - std::map *tensors_id_map) { + schema::CNodeT *op, std::vector *tensors_id, + std::vector *tensors_format, std::map *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteStackParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -59,11 +57,11 @@ STATUS TfliteStackParser::Parse(const std::unique_ptr &tflite op->primitive->value.value = attr.release(); for (size_t i = 0; i < tflite_op->inputs.size(); i++) { - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[i], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[i], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); } - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } diff --git 
a/mindspore/lite/tools/converter/parser/tflite/tflite_stack_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_stack_parser.h index 3e6774239a..d4801ec1ed 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_stack_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_stack_parser.h @@ -31,14 +31,11 @@ class TfliteStackParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors, - const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, - schema::CNodeT *op, - std::vector<int32_t> *tensors_id, - std::vector<schema::Format> *tensors_format, - std::map<int, int> *tensors_id_map) override; + const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, schema::CNodeT *op, + std::vector<int32_t> *tensors_id, std::vector<schema::Format> *tensors_format, + std::map<int, int> *tensors_id_map) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_STACK_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_strided_slice_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_strided_slice_parser.cc index bf7c379366..fdfe493027 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_strided_slice_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_strided_slice_parser.cc @@ -24,10 +24,9 @@ namespace lite { STATUS TfliteStridedSliceParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors, const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, - schema::CNodeT *op, - std::vector<int32_t> *tensors_id, + schema::CNodeT *op, std::vector<int32_t> *tensors_id, std::vector<schema::Format> *tensors_format, - std::map<int, int> *tensors_id_map) { + std::map<int, int> *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteStridedSliceParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -74,10 +73,10 @@ STATUS TfliteStridedSliceParser::Parse(const std::unique_ptr op->primitive->value.type = schema::PrimitiveType_StridedSlice; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_strided_slice_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_strided_slice_parser.h index 9e4db2461f..1a55022eb1 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_strided_slice_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_strided_slice_parser.h @@ -14,7 +14,6 @@ * limitations under the License.
*/ - #ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_STRIDED_SLICE_PARSER_H #define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_STRIDED_SLICE_PARSER_H @@ -32,14 +31,11 @@ class TfliteStridedSliceParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors, - const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, - schema::CNodeT *op, - std::vector<int32_t> *tensors_id, - std::vector<schema::Format> *tensors_format, - std::map<int, int> *tensors_id_map) override; + const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, schema::CNodeT *op, + std::vector<int32_t> *tensors_id, std::vector<schema::Format> *tensors_format, + std::map<int, int> *tensors_id_map) override; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_STRIDED_SLICE_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_tile_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_tile_parser.cc index 178c39bebe..c523ca833d 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_tile_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_tile_parser.cc @@ -1,19 +1,18 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* distributed under the License is distributed on an AS -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ #include "tools/converter/parser/tflite/tflite_tile_parser.h" #include @@ -25,10 +25,8 @@ namespace lite { STATUS TfliteTileParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors, const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, - schema::CNodeT *op, - std::vector<int32_t> *tensors_id, - std::vector<schema::Format> *tensors_format, - std::map<int, int> *tensors_id_map) { + schema::CNodeT *op, std::vector<int32_t> *tensors_id, + std::vector<schema::Format> *tensors_format, std::map<int, int> *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteTileParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -58,10 +56,10 @@ STATUS TfliteTileParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_ op->primitive->value.type = schema::PrimitiveType_Tile; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_tile_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_tile_parser.h index 3534901911..602c577648 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_tile_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_tile_parser.h @@ -1,18 +1,18 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ #ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_TILE_PARSER_H #define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_TILE_PARSER_H @@ -31,11 +31,9 @@ class TfliteTileParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors, - const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, - schema::CNodeT *op, - std::vector<int32_t> *tensors_id, - std::vector<schema::Format> *tensors_format, - std::map<int, int> *tensors_id_map) override; + const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, schema::CNodeT *op, + std::vector<int32_t> *tensors_id, std::vector<schema::Format> *tensors_format, + std::map<int, int> *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_topk_v2_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_topk_v2_parser.cc index b9c5e4b7cc..5e864f2d52 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_topk_v2_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_topk_v2_parser.cc @@ -1,19 +1,18 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* distributed under the License is distributed on an AS -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ #include "tools/converter/parser/tflite/tflite_topk_v2_parser.h" #include @@ -25,10 +25,8 @@ namespace lite { STATUS TfliteTopKV2Parser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors, const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, - schema::CNodeT *op, - std::vector<int32_t> *tensors_id, - std::vector<schema::Format> *tensors_format, - std::map<int, int> *tensors_id_map) { + schema::CNodeT *op, std::vector<int32_t> *tensors_id, + std::vector<schema::Format> *tensors_format, std::map<int, int> *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteTopKV2Parser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -57,11 +55,11 @@ STATUS TfliteTopKV2Parser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit op->primitive->value.type = schema::PrimitiveType_TopK; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); for (size_t i = 0; i < tflite_op->outputs.size(); i++) { - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[i], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[i], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); } return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_topk_v2_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_topk_v2_parser.h index 6ed92506c1..15f1a4812f 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_topk_v2_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_topk_v2_parser.h @@ -1,18 +1,18 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ #ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_TOPK_V2_PARSER_H #define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_TOPK_V2_PARSER_H @@ -31,11 +31,9 @@ class TfliteTopKV2Parser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors, - const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, - schema::CNodeT *op, - std::vector<int32_t> *tensors_id, - std::vector<schema::Format> *tensors_format, - std::map<int, int> *tensors_id_map) override; + const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, schema::CNodeT *op, + std::vector<int32_t> *tensors_id, std::vector<schema::Format> *tensors_format, + std::map<int, int> *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_transpose_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_transpose_parser.cc index a6711b0bb7..5cb6ee3dfa 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_transpose_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_transpose_parser.cc @@ -23,10 +23,8 @@ namespace lite { STATUS TfliteTransposeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors, const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, - schema::CNodeT *op, - std::vector<int32_t> *tensors_id, - std::vector<schema::Format> *tensors_format, - std::map<int, int> *tensors_id_map) { + schema::CNodeT *op, std::vector<int32_t> *tensors_id, + std::vector<schema::Format> *tensors_format, std::map<int, int> *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteTransposeParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -53,12 +51,12 @@ STATUS TfliteTransposeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tf op->primitive->value.type = schema::PrimitiveType_Transpose; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[1], tensors_id->size(), tflite_tensors.size(), schema::Format_KHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[1], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_KHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_transpose_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_transpose_parser.h index 4fc4062713..2f023946cc 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_transpose_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_transpose_parser.h @@ -31,14 +31,11 @@ class TfliteTransposeParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors, - const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, - schema::CNodeT *op, - std::vector<int32_t> *tensors_id, - std::vector<schema::Format> *tensors_format, - std::map<int, int> *tensors_id_map) override; + const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, schema::CNodeT *op, + std::vector<int32_t> *tensors_id, std::vector<schema::Format> *tensors_format, + std::map<int, int> *tensors_id_map) override; }; } // namespace lite } // namespace mindspore #endif //
MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_TRANSPOSE_PARSER_H - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_unique_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_unique_parser.cc index 8cd3044f0f..d725d0b025 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_unique_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_unique_parser.cc @@ -1,19 +1,18 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* distributed under the License is distributed on an AS -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ #include "tools/converter/parser/tflite/tflite_unique_parser.h" #include @@ -25,10 +25,8 @@ namespace lite { STATUS TfliteUniqueParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors, const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, - schema::CNodeT *op, - std::vector<int32_t> *tensors_id, - std::vector<schema::Format> *tensors_format, - std::map<int, int> *tensors_id_map) { + schema::CNodeT *op, std::vector<int32_t> *tensors_id, + std::vector<schema::Format> *tensors_format, std::map<int, int> *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteUniqueParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -56,11 +54,11 @@ STATUS TfliteUniqueParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit op->primitive->value.type = schema::PrimitiveType_Unique; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); for (size_t i = 0; i < tflite_op->outputs.size(); i++) { - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[i], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[i], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); } return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_unique_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_unique_parser.h index 2fadd9aa2f..6bf2af8973 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_unique_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_unique_parser.h @@
-1,18 +1,18 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ #ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_UNIQUE_PARSER_H #define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_UNIQUE_PARSER_H @@ -31,11 +31,9 @@ class TfliteUniqueParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors, - const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, - schema::CNodeT *op, - std::vector<int32_t> *tensors_id, - std::vector<schema::Format> *tensors_format, - std::map<int, int> *tensors_id_map) override; + const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, schema::CNodeT *op, + std::vector<int32_t> *tensors_id, std::vector<schema::Format> *tensors_format, + std::map<int, int> *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_unstack_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_unstack_parser.cc index d0d1ccc113..48dba85394 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_unstack_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_unstack_parser.cc @@ -1,19 +1,18 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* distributed under the License is distributed on an AS -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ #include "tools/converter/parser/tflite/tflite_unstack_parser.h" #include @@ -25,10 +25,8 @@ namespace lite { STATUS TfliteUnstackParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors, const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, - schema::CNodeT *op, - std::vector<int32_t> *tensors_id, - std::vector<schema::Format> *tensors_format, - std::map<int, int> *tensors_id_map) { + schema::CNodeT *op, std::vector<int32_t> *tensors_id, + std::vector<schema::Format> *tensors_format, std::map<int, int> *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteUnstackParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -57,11 +55,11 @@ STATUS TfliteUnstackParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfli op->primitive->value.type = schema::PrimitiveType_Unstack; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); for (size_t i = 0; i < tflite_op->outputs.size(); i++) { - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[i], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[i], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); } return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_unstack_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_unstack_parser.h index 28fed8b714..5ffc3bb17c 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_unstack_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_unstack_parser.h @@ -1,18 +1,18 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ #ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_UNSTACK_PARSER_H #define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_UNSTACK_PARSER_H @@ -31,11 +31,9 @@ class TfliteUnstackParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors, - const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, - schema::CNodeT *op, - std::vector<int32_t> *tensors_id, - std::vector<schema::Format> *tensors_format, - std::map<int, int> *tensors_id_map) override; + const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, schema::CNodeT *op, + std::vector<int32_t> *tensors_id, std::vector<schema::Format> *tensors_format, + std::map<int, int> *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_util.h b/mindspore/lite/tools/converter/parser/tflite/tflite_util.h index 9dc0bba97d..eb588a611a 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_util.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_util.h @@ -40,17 +40,11 @@ std::string GetMSOpType(tflite::BuiltinOperator tfliteOpType); TypeId GetTfliteDataType(const tflite::TensorType &tflite_data_type); -STATUS getPaddingParam(const std::unique_ptr<tflite::TensorT> &tensor, - schema::PadMode pad_mode, - int strideH, int strideW, - int windowH, int windowW, - std::vector<int> *params); - -void Split(const std::string &src_str, - std::vector<std::string> *dst_str, - const std::string &chr); +STATUS getPaddingParam(const std::unique_ptr<tflite::TensorT> &tensor, schema::PadMode pad_mode, int strideH, + int strideW, int windowH, int windowW, std::vector<int> *params); + +void Split(const std::string &src_str, std::vector<std::string> *dst_str, const std::string &chr); } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_UTIL_H - diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_where_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_where_parser.cc index cdb36c92cf..0e0bbd0f9e 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_where_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_where_parser.cc @@ -1,19 +1,18 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* distributed under the License is distributed on an AS -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ #include "tools/converter/parser/tflite/tflite_where_parser.h" #include @@ -25,10 +25,8 @@ namespace lite { STATUS TfliteWhereParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors, const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, - schema::CNodeT *op, - std::vector<int32_t> *tensors_id, - std::vector<schema::Format> *tensors_format, - std::map<int, int> *tensors_id_map) { + schema::CNodeT *op, std::vector<int32_t> *tensors_id, + std::vector<schema::Format> *tensors_format, std::map<int, int> *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteWhereParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -55,11 +53,11 @@ STATUS TfliteWhereParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite op->primitive->value.value = attr.release(); for (size_t i = 0; i < tflite_op->inputs.size(); i++) { - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[i], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[i], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); } - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_where_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_where_parser.h index 583b8dffe6..a98f3401ae 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_where_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_where_parser.h @@ -1,18 +1,18 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ #ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_WHERE_PARSER_H #define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_WHERE_PARSER_H @@ -31,11 +31,9 @@ class TfliteWhereParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors, - const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, - schema::CNodeT *op, - std::vector<int32_t> *tensors_id, - std::vector<schema::Format> *tensors_format, - std::map<int, int> *tensors_id_map) override; + const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, schema::CNodeT *op, + std::vector<int32_t> *tensors_id, std::vector<schema::Format> *tensors_format, + std::map<int, int> *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_zeros_like_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_zeros_like_parser.cc index 75d34b023c..5f17f68cc7 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_zeros_like_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_zeros_like_parser.cc @@ -1,19 +1,18 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* distributed under the License is distributed on an AS -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ #include "tools/converter/parser/tflite/tflite_zeros_like_parser.h" #include @@ -25,10 +25,8 @@ namespace lite { STATUS TfliteZerosLikeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors, const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, - schema::CNodeT *op, - std::vector<int32_t> *tensors_id, - std::vector<schema::Format> *tensors_format, - std::map<int, int> *tensors_id_map) { + schema::CNodeT *op, std::vector<int32_t> *tensors_id, + std::vector<schema::Format> *tensors_format, std::map<int, int> *tensors_id_map) { MS_LOG(DEBUG) << "parse TfliteZerosLikeParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -49,10 +47,10 @@ STATUS TfliteZerosLikeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tf op->primitive->value.type = schema::PrimitiveType_ZerosLike; op->primitive->value.value = attr.release(); - AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); - AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); + AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), + tflite_tensors.size(), schema::Format::Format_NHWC); return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_zeros_like_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_zeros_like_parser.h index a8cec073fd..74ec9b920a 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_zeros_like_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_zeros_like_parser.h @@ -1,18 +1,18 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ #ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_ZEROS_LIKE_PARSER_H #define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_ZEROS_LIKE_PARSER_H @@ -31,11 +31,9 @@ class TfliteZerosLikeParser : public TfliteNodeParser { STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors, - const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, - schema::CNodeT *op, - std::vector<int32_t> *tensors_id, - std::vector<schema::Format> *tensors_format, - std::map<int, int> *tensors_id_map) override; + const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, schema::CNodeT *op, + std::vector<int32_t> *tensors_id, std::vector<schema::Format> *tensors_format, + std::map<int, int> *tensors_id_map) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/quantizer/aware_quantizer.cc b/mindspore/lite/tools/converter/quantizer/aware_quantizer.cc index 8743bc1ebe..a19d8b61eb 100644 --- a/mindspore/lite/tools/converter/quantizer/aware_quantizer.cc +++ b/mindspore/lite/tools/converter/quantizer/aware_quantizer.cc @@ -248,7 +248,8 @@ STATUS AwareQuantizer::QuantDetectionPostProcessConstTensor(const schema::MetaGr MS_ASSERT(constTensor != nullptr); const auto *constData = reinterpret_cast<const float *>(constTensor->data.data()); - if (constTensor->nodeType == schema::NodeType_ValueNode && constTensor->dataType == TypeId::kNumberTypeFloat) { + if (constTensor->nodeType == schema::NodeType::NodeType_ValueNode && + constTensor->dataType == TypeId::kNumberTypeFloat) { size_t constTensorShapeSize = GetShapeSize(*constTensor); std::unique_ptr<schema::QuantParamT> quantParam = GetTensorQuantParam(constTensor); if (quantParam == nullptr) { diff --git a/mindspore/lite/tools/converter/quantizer/aware_quantizer.h b/mindspore/lite/tools/converter/quantizer/aware_quantizer.h index a9f046a47f..40b92f6ead 100644 --- a/mindspore/lite/tools/converter/quantizer/aware_quantizer.h +++ b/mindspore/lite/tools/converter/quantizer/aware_quantizer.h @@ -34,8 +34,7 @@ struct InputArray { int numBits = 8; TypeId dataType = TypeId::kTypeUnknown; - InputArray(float mean, float stdDev, - TypeId dataType = TypeId::kNumberTypeFloat) { + InputArray(float mean, float stdDev, TypeId dataType = TypeId::kNumberTypeFloat) { this->dataType = dataType; constexpr float qmin = -128; constexpr float qmax = 127; diff --git a/mindspore/lite/tools/converter/quantizer/calc_quant_param.cc b/mindspore/lite/tools/converter/quantizer/calc_quant_param.cc index 90983a5f4d..aea5e6f98e 100644 --- a/mindspore/lite/tools/converter/quantizer/calc_quant_param.cc +++ b/mindspore/lite/tools/converter/quantizer/calc_quant_param.cc @@ -77,7 +77,8 @@ int QuantParamCalcer::Calc(MetaGraphT *graph, const CNodeT &node) { MS_ASSERT(graph->allTensors.size() > node.inputIndex.at(i)); MS_ASSERT(tensor != nullptr); - if (tensor->refCount == schema::NodeType_ValueNode && !IsContain(graph->inputIndex, node.inputIndex.at(i))) { + if (tensor->refCount == schema::NodeType::NodeType_ValueNode && + !IsContain(graph->inputIndex, node.inputIndex.at(i))) { auto status = ComputeConstQuantParam((*tensor), quantParam.get()); if (status != RET_OK) { MS_LOG(WARNING) << "ComputeConstQuantParam failed: " << status; diff --git a/mindspore/lite/tools/converter/quantizer/general_bitpacking.cc b/mindspore/lite/tools/converter/quantizer/general_bitpacking.cc index 4e8897c659..448924129f 100644 --- a/mindspore/lite/tools/converter/quantizer/general_bitpacking.cc +++ b/mindspore/lite/tools/converter/quantizer/general_bitpacking.cc @@ -18,68 +18,67 @@ namespace mindspore { namespace lite { -BitPack::BitPack(const uint8_t& bitnum) {this->bitnum = bitnum;} -void
BitPack::UnPackFromUint8ToOrigin(uint8_t& n, std::queue<bool>& unpackBitData) { - int bitCount = 0; - while (bitCount < 8) { - bool a = n % 2; - n = n >> 1; - bitCount++; - unpackBitData.push(a); - } +BitPack::BitPack(const uint8_t &bitnum) { this->bitnum = bitnum; } +void BitPack::UnPackFromUint8ToOrigin(uint8_t &n, std::queue<bool> &unpackBitData) { + int bitCount = 0; + while (bitCount < 8) { + bool a = n % 2; + n = n >> 1; + bitCount++; + unpackBitData.push(a); + } } -void BitPack::UnPack(uint8_t bitnum, uint8_t& packedData, - std::vector<uint8_t> &originData, std::queue<bool>& unpackBitData) { - UnPackFromUint8ToOrigin(packedData, unpackBitData); - // std::queue<bool> unpackBitTmpData; +void BitPack::UnPack(uint8_t bitnum, uint8_t &packedData, std::vector<uint8_t> &originData, + std::queue<bool> &unpackBitData) { + UnPackFromUint8ToOrigin(packedData, unpackBitData); + // std::queue<bool> unpackBitTmpData; - while (unpackBitData.size() > bitnum) { - uint32_t result = 0; - for (int k = 0; k < bitnum; k++) { - bool bitTmp = unpackBitData.front(); - result = (result << 1) + static_cast<uint32_t>(bitTmp); - unpackBitData.pop(); - } - originData.push_back(result); - } -} -void BitPack::PackFromOriginToUint8(std::stack<bool>& ans, std::vector<uint8_t>& packedDataVec) { + while (unpackBitData.size() > bitnum) { uint32_t result = 0; - for (size_t i = 0; i < 8; i++) { - bool bit_tmp = ans.top(); - result = (result << 1) + static_cast<uint32_t>(bit_tmp); - ans.pop(); + for (int k = 0; k < bitnum; k++) { + bool bitTmp = unpackBitData.front(); + result = (result << 1) + static_cast<uint32_t>(bitTmp); + unpackBitData.pop(); } - packedDataVec.push_back(result); + originData.push_back(result); + } } -void BitPack::DoBinary(uint8_t& n, std::stack<bool>& ans, std::vector<uint8_t>& packedDataVec) { - int bitCount = 0; - while (bitCount < bitnum) { - bool a = n / (1 << (unsigned int)(bitnum - bitCount - 1)); - n = n - a * (1 << (unsigned int)(bitnum - bitCount - 1)); - bitCount++; - ans.push(a); - if (ans.size() == 8) { - PackFromOriginToUint8(ans, packedDataVec); - } +void BitPack::PackFromOriginToUint8(std::stack<bool> &ans, std::vector<uint8_t> &packedDataVec) { + uint32_t result = 0; + for (size_t i = 0; i < 8; i++) { + bool bit_tmp = ans.top(); + result = (result << 1) + static_cast<uint32_t>(bit_tmp); + ans.pop(); + } + packedDataVec.push_back(result); +} +void BitPack::DoBinary(uint8_t &n, std::stack<bool> &ans, std::vector<uint8_t> &packedDataVec) { + int bitCount = 0; + while (bitCount < bitnum) { + bool a = n / (1 << (unsigned int)(bitnum - bitCount - 1)); + n = n - a * (1 << (unsigned int)(bitnum - bitCount - 1)); + bitCount++; + ans.push(a); + if (ans.size() == 8) { + PackFromOriginToUint8(ans, packedDataVec); } + } } -void BitPack::BitPacking(const std::vector<uint8_t>& originDataVec, std::vector<uint8_t>& packedDataVec) { - std::stack<bool> bitDataVec; - for (size_t i = 0; i < originDataVec.size(); i++) { - uint8_t tmp = originDataVec[i]; - DoBinary(tmp, bitDataVec, packedDataVec); - } +void BitPack::BitPacking(const std::vector<uint8_t> &originDataVec, std::vector<uint8_t> &packedDataVec) { + std::stack<bool> bitDataVec; + for (size_t i = 0; i < originDataVec.size(); i++) { + uint8_t tmp = originDataVec[i]; + DoBinary(tmp, bitDataVec, packedDataVec); + } - size_t remainBitData = bitDataVec.size(); - if (8 > remainBitData && remainBitData > 0) { - for (size_t i = 0; i < 8 - remainBitData; i++) { - bitDataVec.push(0); - } - PackFromOriginToUint8(bitDataVec, packedDataVec); + size_t remainBitData = bitDataVec.size(); + if (8 > remainBitData && remainBitData > 0) { + for (size_t i = 0; i < 8 - remainBitData; i++) { + bitDataVec.push(0); } + PackFromOriginToUint8(bitDataVec, packedDataVec); + } } } // namespace lite } // namespace mindspore -
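The BitPack rewrite above is whitespace-only, which makes it easy to sanity-check with a small driver. A minimal usage sketch, assuming only what this hunk shows (a constructor taking the significant-bit width, and BitPacking appending packed bytes to the output vector):

#include <cstdint>
#include <vector>
#include "tools/converter/quantizer/general_bitpacking.h"

void PackFourBitValues() {
  mindspore::lite::BitPack packer(4);          // keep the low 4 bits of each value
  std::vector<uint8_t> origin = {0x0A, 0x06};  // two 4-bit payloads
  std::vector<uint8_t> packed;
  packer.BitPacking(origin, packed);           // DoBinary streams bits onto a stack;
                                               // every 8 accumulated bits flush to one byte
}

Note that PackFromOriginToUint8 pops the stack top first, so bits leave a full byte in the reverse of the order DoBinary pushed them, and the tail branch in BitPacking zero-pads the final byte when the input does not end on a byte boundary.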
diff --git a/mindspore/lite/tools/converter/quantizer/post_training_quantizer.cc b/mindspore/lite/tools/converter/quantizer/post_training_quantizer.cc index 40d6cc6f6b..fc00bf64ff 100644 --- a/mindspore/lite/tools/converter/quantizer/post_training_quantizer.cc +++ b/mindspore/lite/tools/converter/quantizer/post_training_quantizer.cc @@ -27,7 +27,7 @@ #include #include #include "schema/inner/model_generated.h" -#include "src/ir/tensor.h" +#include "src/tensor.h" #include "tools/anf_exporter/anf_exporter.h" #include "tools/converter/quantizer/quantize_util.h" #include "utils/log_adapter.h" @@ -349,6 +349,10 @@ STATUS Calibrator::GenerateInputData(int index, mindspore::tensor::MSTensor *ten size_t size; char *bin_buf = ReadFile(path.c_str(), &size); auto data = tensor->MutableData(); + if (data == nullptr) { + MS_LOG(ERROR) << "Get tensor MutableData return nullptr"; + return RET_ERROR; + } if (size != tensor->Size()) { MS_LOG(ERROR) << "the input data is not consistent with model input, file_size: " << size << " input tensor size: " << tensor->Size(); @@ -359,6 +363,7 @@ STATUS Calibrator::GenerateInputData(int index, mindspore::tensor::MSTensor *ten MS_LOG(ERROR) << "memcpy_s error: " << ret; return RET_ERROR; } + delete[] bin_buf; return RET_OK; } @@ -528,9 +533,8 @@ STATUS PostTrainingQuantizer::DoWeightQuant(AnfNodePtr weight, std::shared_ptr

fullname_with_scope() << " can not get value"; return RET_ERROR; } - auto status = - QuantFilter<int8_t>(paramValue, primitive_c, QuantType_PostTraining, quant_max, - quant_min, bit_num, perchanel, depthwise); + auto status = QuantFilter<int8_t>(paramValue, primitive_c, QuantType_PostTraining, quant_max, quant_min, bit_num, + perchanel, depthwise); if (status != RET_OK) { MS_LOG(ERROR) << "QuantFilter failed: " << status; return status; @@ -955,7 +959,7 @@ STATUS PostTrainingQuantizer::DoQuantize(FuncGraphPtr funcGraph) { auto model = lite::Model::Import(content, size); Context ctx; - ctx.device_ctx_.type = DT_CPU; + ctx.device_type_ = DT_CPU; ctx.thread_num_ = calibrator_->GetThreadNum(); ctx.cpu_bind_mode_ = MID_CPU; diff --git a/mindspore/lite/tools/converter/quantizer/quant_cast.cc b/mindspore/lite/tools/converter/quantizer/quant_cast.cc index c205452c26..ea90526069 100644 --- a/mindspore/lite/tools/converter/quantizer/quant_cast.cc +++ b/mindspore/lite/tools/converter/quantizer/quant_cast.cc @@ -14,7 +14,6 @@ * limitations under the License. */ - #include "mindspore/lite/tools/converter/quantizer/quant_cast.h" #include #include @@ -54,7 +53,7 @@ STATUS QuantCast::Run(FuncGraphPtr graph) { if (first) { if (curnode_quant_type == schema::QuantType_PostTraining && inputDataDType == kNumberTypeFloat32) { auto value_node = - NewQuantCastValueNode(kNumberTypeFloat32, kNumberTypeInt8, primitive_c->GetInputQuantParams().front()); + NewQuantCastValueNode(kNumberTypeFloat32, kNumberTypeInt8, primitive_c->GetInputQuantParams().front()); std::vector<AnfNodePtr> op_inputs = {value_node, cnode->input(1)}; auto quant_cast_cnode = graph->NewCNode(op_inputs); quant_cast_cnode->set_fullname_with_scope(cnode->fullname_with_scope() + "_quant_cast"); @@ -84,10 +83,10 @@ STATUS QuantCast::Run(FuncGraphPtr graph) { ValueNodePtr value_node = nullptr; if (curnode_quant_type == schema::QuantType_PostTraining && input_cnode_quant_type == schema::QuantType_QUANT_NONE) { - value_node = NewQuantCastValueNode(kNumberTypeFloat32, kNumberTypeInt8, - primitive_c->GetInputQuantParams().front()); + value_node = + NewQuantCastValueNode(kNumberTypeFloat32, kNumberTypeInt8, primitive_c->GetInputQuantParams().front()); } else if (curnode_quant_type == schema::QuantType_QUANT_NONE && - input_cnode_quant_type == schema::QuantType_PostTraining) { + input_cnode_quant_type == schema::QuantType_PostTraining) { value_node = NewQuantCastValueNode(kNumberTypeInt8, kNumberTypeFloat32, input_cnode_primitive_c->GetInputQuantParams().front()); } diff --git a/mindspore/lite/tools/converter/quantizer/quantizer.h b/mindspore/lite/tools/converter/quantizer/quantizer.h index 3fe37379b3..0fff3bfa4d 100644 --- a/mindspore/lite/tools/converter/quantizer/quantizer.h +++ b/mindspore/lite/tools/converter/quantizer/quantizer.h @@ -53,7 +53,8 @@ class Quantizer { virtual STATUS DoQuantize(FuncGraphPtr func_graph) = 0; - mindspore::lite::converter::Flags flags; + mindspore::lite::converter::Flags flags; + protected: FuncGraphPtr funcGraph = nullptr; }; diff --git a/mindspore/lite/tools/converter/quantizer/weight_quantizer.cc b/mindspore/lite/tools/converter/quantizer/weight_quantizer.cc index e5895008fd..1fa02f84b7 100644 --- a/mindspore/lite/tools/converter/quantizer/weight_quantizer.cc +++ b/mindspore/lite/tools/converter/quantizer/weight_quantizer.cc @@ -65,8 +65,8 @@ STATUS WeightQuantizer::DoConvQuantize(const std::list<CNodePtr> &nodes) { bool depthwise = op_type == schema::PrimitiveType_DepthwiseConv2D ?
true : false; ParamValueLitePtr param_value = std::static_pointer_cast<ParamValueLite>(param_node->default_param()); - auto status = QuantFilter<uint8_t>(param_value, primitive_c, QuantType_WeightQuant, 255, 0, - bitNum, true, depthwise); + auto status = + QuantFilter<uint8_t>(param_value, primitive_c, QuantType_WeightQuant, 255, 0, bitNum, true, depthwise); if (status != RET_OK) { MS_LOG(ERROR) << "QuantFilter failed : " << status; return status; @@ -104,9 +104,9 @@ STATUS WeightQuantizer::DoMulQuantize(const std::list<CNodePtr> &nodes) { param_node = inputNode->cast<ParameterPtr>(); if ((param_node != nullptr) && (param_node->has_default() == true)) { param_value = std::static_pointer_cast<ParamValueLite>(param_node->default_param()); - if ((param_value == nullptr) || (param_value->tensor_size() == 0) - || (param_value->tensor_addr() == nullptr) - || (param_value->tensor_type() != mindspore::kNumberTypeFloat32)) { + if ((param_value == nullptr) || (param_value->tensor_size() == 0) || + (param_value->tensor_addr() == nullptr) || + (param_value->tensor_type() != mindspore::kNumberTypeFloat32)) { param_value = nullptr; continue; } else { @@ -117,7 +117,7 @@ STATUS WeightQuantizer::DoMulQuantize(const std::list<CNodePtr> &nodes) { } if (param_value == nullptr) { MS_LOG(ERROR) << "No valid input param node !"; - return RET_ERROR;; + return RET_ERROR; } auto primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(node->input(0)); @@ -170,4 +170,3 @@ STATUS WeightQuantizer::DoQuantize(FuncGraphPtr funcGraph) { } // namespace quant } // namespace lite } // namespace mindspore - diff --git a/mindspore/lite/tools/converter/quantizer/weight_quantizer.h b/mindspore/lite/tools/converter/quantizer/weight_quantizer.h index 0726dd3df1..d91c6b7b88 100644 --- a/mindspore/lite/tools/converter/quantizer/weight_quantizer.h +++ b/mindspore/lite/tools/converter/quantizer/weight_quantizer.h @@ -33,8 +33,8 @@ namespace lite { namespace quant { class WeightQuantizer : public Quantizer { public: - WeightQuantizer(FuncGraphPtr graph, const std::string& weightSize, - const std::string& covWeightChannelThreshold, const std::string& bitNum); + WeightQuantizer(FuncGraphPtr graph, const std::string &weightSize, const std::string &covWeightChannelThreshold, + const std::string &bitNum); ~WeightQuantizer() = default; @@ -50,4 +50,3 @@ class WeightQuantizer : public Quantizer { } // namespace lite } // namespace mindspore #endif - diff --git a/mindspore/lite/tools/optimizer/common/node_pass_extends.cc b/mindspore/lite/tools/optimizer/common/node_pass_extends.cc index d5e73c409d..4aacd49276 100644 --- a/mindspore/lite/tools/optimizer/common/node_pass_extends.cc +++ b/mindspore/lite/tools/optimizer/common/node_pass_extends.cc @@ -69,4 +69,3 @@ bool NodePass::Run(const FuncGraphPtr &func_graph) { } } // namespace opt } // namespace mindspore - diff --git a/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc b/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc index f6532a09c6..3f37eb7da3 100644 --- a/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc +++ b/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc @@ -24,10 +24,11 @@ #include "include/context.h" #include "src/populate_parameter.h" #include "src/ops/primitive_c.h" +#include "src/tensor.h" using mindspore::lite::KernelRegistry; using mindspore::lite::PrimitiveC; -using mindspore::lite::tensor::Tensor; +using mindspore::lite::Tensor; namespace mindspore::opt { namespace { std::vector<Tensor *> GetCNodeInputTensors(const CNodePtr &CNode) { @@ -40,8 +41,8 @@ std::vector<Tensor *> GetCNodeInputTensors(const CNodePtr &CNode) { for (auto input_index
: tmp_fb_node->inputIndex) { auto tensorT = tmp_meta_graph->allTensors.at(input_index).get(); auto tensor_shape = tensorT->dims; - auto lite_tensor = - new (std::nothrow) Tensor(TypeId(tensorT->dataType), tensor_shape, tensorT->format, tensorT->nodeType); + auto lite_tensor = new (std::nothrow) + Tensor(TypeId(tensorT->dataType), tensor_shape, tensorT->format, lite::TensorCategory(tensorT->nodeType)); if (lite_tensor == nullptr) { MS_LOG(ERROR) << "lite tensor is nullptr"; return input_tensors; @@ -83,14 +84,14 @@ ParameterPtr CreateNewParamter(const FuncGraphPtr &func_graph, Tensor *tensor) { param_value->set_tensor_shape(shape); param_value->set_tensor_type(type_id); param_value->set_format(tensor->GetFormat()); - if (tensor->Data() != nullptr) { + if (tensor->MutableData() != nullptr) { auto size = tensor->ElementsNum(); auto tensor_data = new (std::nothrow) float[size]; if (tensor_data == nullptr) { MS_LOG(ERROR) << "tensor_data is nullptr"; return nullptr; } - auto ret = memcpy_s(tensor_data, size * sizeof(float), tensor->Data(), size * sizeof(float)); + auto ret = memcpy_s(tensor_data, size * sizeof(float), tensor->MutableData(), size * sizeof(float)); if (ret != EOK) { delete[] tensor_data; MS_LOG(ERROR) << "memcpy error: " << ret; @@ -106,7 +107,7 @@ kernel::LiteKernel *GetLiteKernel(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs, OpParameter *parameter, PrimitiveC *primitive) { auto data_type = inputs.front()->data_type(); - kernel::KernelKey desc{kernel::KERNEL_ARCH::kCPU, data_type, (schema::PrimitiveType) primitive->Type()}; + kernel::KernelKey desc{kernel::KERNEL_ARCH::kCPU, data_type, (schema::PrimitiveType)primitive->Type()}; lite::Context context; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); if (creator != nullptr) { @@ -203,7 +204,7 @@ const AnfNodePtr ConstFoldPass::Process(const FuncGraphPtr &func_graph, const An auto inputQuantParams = lite_primitive->GetInputQuantParams(); for (size_t m = 0; m < inputQuantParams.size(); m++) { for (auto inputQuantParam : inputQuantParams[m]) { - lite::tensor::QuantArg quant_arg{}; + lite::QuantArg quant_arg{}; quant_arg.scale = inputQuantParam.scale; quant_arg.zeroPoint = inputQuantParam.zeroPoint; input_tensors[m]->AddQuantParam(quant_arg); @@ -212,7 +213,7 @@ const AnfNodePtr ConstFoldPass::Process(const FuncGraphPtr &func_graph, const An auto outputQuantParams = lite_primitive->GetOutputQuantParams(); for (size_t m = 0; m < outputQuantParams.size(); m++) { for (auto outputQuantParam : outputQuantParams[m]) { - lite::tensor::QuantArg quant_arg{}; + lite::QuantArg quant_arg{}; quant_arg.scale = outputQuantParam.scale; quant_arg.zeroPoint = outputQuantParam.zeroPoint; output_tensors[m]->AddQuantParam(quant_arg); @@ -222,7 +223,7 @@ const AnfNodePtr ConstFoldPass::Process(const FuncGraphPtr &func_graph, const An // but for the time being, we only transpose the tensor with 0/1/2/3D. // Others should be added in future.
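// Folding executes the node's CPU kernel at convert time, so every candidate input is stamped NHWC first;
// rank-4 tensors are assumed to already hold NHWC-ordered data (the INFO log below records that assumption).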
@@ -222,7 +223,7 @@ const AnfNodePtr ConstFoldPass::Process(const FuncGraphPtr &func_graph, const An
   // but for the time being, we only transpose the tensor with 0/1/2/3D.
   // Others should be added in future.
   for (size_t j = 0; j < input_tensors.size(); ++j) {
-    input_tensors[j]->SetFormat(schema::Format_NHWC);
+    input_tensors[j]->SetFormat(schema::Format::Format_NHWC);
     if (input_tensors[j]->shape().size() == 4) {
       MS_LOG(INFO) << "init input_tensor format to nhwc";
     }
@@ -231,7 +232,7 @@ const AnfNodePtr ConstFoldPass::Process(const FuncGraphPtr &func_graph, const An
   auto parameter = kernel::PopulateParameter(lite_primitive.get());
   if (parameter == nullptr) {
     MS_LOG(ERROR) << "PopulateParameter return nullptr, type: "
-                  << schema::EnumNamePrimitiveType((schema::PrimitiveType) (lite_primitive->Type()));
+                  << schema::EnumNamePrimitiveType((schema::PrimitiveType)(lite_primitive->Type()));
     return nullptr;
   }
   auto lite_kernel = GetLiteKernel(input_tensors, output_tensors, parameter, lite_primitive.get());
diff --git a/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.h b/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.h
index 29fde221bf..c7d08d5159 100644
--- a/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.h
+++ b/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.h
@@ -18,7 +18,7 @@
 #define MINDSPORE_LITE_SRC_PASS_FUSION_CONSTANT_FOLDING_FUSION_H_
 
 #include "schema/inner/model_generated.h"
-#include "src/ir/tensor.h"
+#include "src/tensor.h"
 #include "src/lite_kernel.h"
 #include "nnacl/op_base.h"
 #include "backend/optimizer/common/optimizer.h"
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_biasadd_fusion.h b/mindspore/lite/tools/optimizer/fusion/conv_biasadd_fusion.h
index ee731f7460..47f78118a7 100644
--- a/mindspore/lite/tools/optimizer/fusion/conv_biasadd_fusion.h
+++ b/mindspore/lite/tools/optimizer/fusion/conv_biasadd_fusion.h
@@ -31,4 +31,3 @@ class ConvBiasaddFusion : public PatternProcessPass {
 }  // namespace opt
 }  // namespace mindspore
 #endif  // MINDSPORE_LITE_SRC_PASS_FUSION_CONV_BIASADD_FUSION_H_
-
diff --git a/mindspore/lite/tools/optimizer/fusion/pooling_activation_fusion.h b/mindspore/lite/tools/optimizer/fusion/pooling_activation_fusion.h
index b01206ea82..ae93f0ca3f 100644
--- a/mindspore/lite/tools/optimizer/fusion/pooling_activation_fusion.h
+++ b/mindspore/lite/tools/optimizer/fusion/pooling_activation_fusion.h
@@ -28,7 +28,7 @@ class PoolingActivationFusion : public PatternProcessPass {
   explicit PoolingActivationFusion(bool multigraph = true, const std::string &name = "pooling_activation_fusion",
                                    schema::PrimitiveType primitive = schema::PrimitiveType_LeakyReLU,
                                    schema::ActivationType activation = schema::ActivationType_LEAKY_RELU)
-    : PatternProcessPass(name, multigraph), primitive_type(primitive), activation_type(activation) {}
+      : PatternProcessPass(name, multigraph), primitive_type(primitive), activation_type(activation) {}
   ~PoolingActivationFusion() override = default;
   const BaseRef DefinePattern() const override;
   const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override;
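[Editor's note] GetLiteKernel above resolves a kernel creator via KernelRegistry::GetInstance()->GetCreator(desc), where the KernelKey couples architecture, tensor data type, and primitive type. The toy sketch below shows only the shape of that lookup; the ids, key type, and creator signature are entirely hypothetical:

    #include <cstdio>
    #include <map>
    #include <tuple>

    // Hypothetical miniature of a kernel registry: a creator is selected by
    // (architecture, data type, op type), mirroring kernel::KernelKey.
    using KernelCreator = int (*)();  // stand-in; real creators build kernel objects
    using ToyKernelKey = std::tuple<int /*arch*/, int /*data_type*/, int /*op_type*/>;

    std::map<ToyKernelKey, KernelCreator> &Registry() {
      static std::map<ToyKernelKey, KernelCreator> registry;
      return registry;
    }

    int CreateCpuFp32Add() { return 42; }  // dummy creator

    int main() {
      constexpr int kCPU = 0, kFloat32 = 43, kAdd = 5;  // arbitrary illustrative ids
      Registry()[{kCPU, kFloat32, kAdd}] = CreateCpuFp32Add;
      ToyKernelKey desc{kCPU, kFloat32, kAdd};
      auto it = Registry().find(desc);
      if (it != Registry().end()) {
        std::printf("creator returned %d\n", it->second());  // found: instantiate kernel
      } else {
        std::printf("no kernel registered for this key\n");  // GetCreator == nullptr case
      }
      return 0;
    }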
diff --git a/mindspore/lite/tools/time_profile/time_profile.cc b/mindspore/lite/tools/time_profile/time_profile.cc
index 5ba53e26f0..f9791bf792 100644
--- a/mindspore/lite/tools/time_profile/time_profile.cc
+++ b/mindspore/lite/tools/time_profile/time_profile.cc
@@ -114,7 +114,7 @@ int TimeProfile::InitSession() {
   auto ctx = new lite::Context;
   ctx->cpu_bind_mode_ = static_cast<CpuBindMode>(_flags->cpu_bind_mode_);
-  ctx->device_ctx_.type = lite::DT_CPU;
+  ctx->device_type_ = lite::DT_CPU;
   ctx->thread_num_ = _flags->num_threads_;
   ctx->float16_priority = _flags->fp16_priority;
   session_ = session::LiteSession::CreateSession(ctx);
@@ -359,7 +359,7 @@ int TimeProfile::RunTimeProfile() {
     delete model;
     return RET_ERROR;
   }
-  auto outputs = session_->GetOutputMapByNode();
+  auto outputs = session_->GetOutputs();
 
   uint64_t run_end = GetTimeUs();
   uint64_t time = run_end - run_begin;
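[Editor's note] Taken together, the two hunks above show the interface change this patch makes to session setup: Context exposes device_type_ directly (previously device_ctx_.type), and the per-node output map gives way to GetOutputs(). A minimal usage sketch against the public headers, assuming the usual CompileGraph/RunGraph entry points and eliding error handling:

    #include "include/context.h"
    #include "include/lite_session.h"
    #include "include/model.h"

    // Sketch of the updated calling convention; only device_type_, thread_num_,
    // CreateSession, and GetOutputs are taken from the hunks above, the rest is
    // the standard LiteSession flow.
    int RunOnce(mindspore::lite::Model *model) {
      auto *ctx = new mindspore::lite::Context;
      ctx->device_type_ = mindspore::lite::DT_CPU;  // was ctx->device_ctx_.type
      ctx->thread_num_ = 2;
      auto *session = mindspore::session::LiteSession::CreateSession(ctx);
      if (session == nullptr) {
        delete ctx;
        return -1;
      }
      session->CompileGraph(model);
      session->RunGraph();
      auto outputs = session->GetOutputs();  // replaces GetOutputMapByNode()
      (void)outputs;                         // inspect output tensors here
      delete session;
      delete ctx;
      return 0;
    }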