From 7556389aef53ca9d53a9dab5426815f2cc245699 Mon Sep 17 00:00:00 2001
From: hangq
Date: Tue, 11 Aug 2020 15:14:30 +0800
Subject: [PATCH] fix for code naming specification

---
 mindspore/lite/include/context.h | 16 +-
 mindspore/lite/include/errorcode.h | 5 +-
 mindspore/lite/include/lite_session.h | 30 +-
 mindspore/lite/include/model.h | 22 +-
 mindspore/lite/include/ms_tensor.h | 12 +-
 mindspore/lite/src/executor.cc | 22 +-
 mindspore/lite/src/executor.h | 5 +-
 mindspore/lite/src/kernel_factory.cc | 6 +-
 mindspore/lite/src/kernel_factory.h | 4 +-
 mindspore/lite/src/lite_kernel.cc | 26 +-
 mindspore/lite/src/lite_kernel.h | 105 ++++---
 mindspore/lite/src/lite_session.cc | 74 ++---
 mindspore/lite/src/lite_session.h | 14 +-
 mindspore/lite/src/model.cc | 6 +-
 mindspore/lite/src/model_impl.cc | 182 +++++------
 mindspore/lite/src/model_impl.h | 11 +-
 .../kernel/arm/base/arg_min_max_base.cc | 22 +-
 .../kernel/arm/base/batch_to_space_base.cc | 4 +-
 .../runtime/kernel/arm/base/concat_base.cc | 2 +-
 .../src/runtime/kernel/arm/base/concat_base.h | 2 +-
 .../kernel/arm/base/convolution_base.cc | 10 +-
 .../kernel/arm/base/convolution_base.h | 4 +-
 .../kernel/arm/base/depth_to_space_base.cc | 10 +-
 .../kernel/arm/base/fullconnection_base.h | 2 +-
 .../kernel/arm/base/layout_transform.cc | 1 -
 .../kernel/arm/base/layout_transform.h | 1 -
 .../src/runtime/kernel/arm/base/matmul_base.h | 2 +-
 .../src/runtime/kernel/arm/base/matrix.cc | 11 +-
 .../lite/src/runtime/kernel/arm/base/matrix.h | 1 -
 .../runtime/kernel/arm/base/pooling_base.cc | 16 +-
 .../runtime/kernel/arm/base/pooling_base.h | 2 +-
 .../src/runtime/kernel/arm/base/prelu_base.cc | 11 +-
 .../src/runtime/kernel/arm/base/prelu_base.h | 4 +-
 .../src/runtime/kernel/arm/base/prior_box.cc | 18 +-
 .../src/runtime/kernel/arm/base/prior_box.h | 2 +-
 .../kernel/arm/base/quant_dtype_cast.cc | 26 +-
 .../runtime/kernel/arm/base/reshape_base.h | 2 +-
 .../runtime/kernel/arm/base/resize_base.cc | 16 +-
 .../runtime/kernel/arm/base/softmax_base.cc | 4 +-
 .../runtime/kernel/arm/base/softmax_base.h | 2 +-
 .../src/runtime/kernel/arm/base/split_base.cc | 20 +-
 .../src/runtime/kernel/arm/base/split_base.h | 8 +-
 .../runtime/kernel/arm/base/strided_slice.cc | 10 +-
 .../kernel/arm/fp16/convolution_3x3_fp16.cc | 19 +-
 .../kernel/arm/fp16/convolution_3x3_fp16.h | 2 +-
 .../arm/fp16/convolution_depthwise_fp16.cc | 12 +-
 .../kernel/arm/fp16/convolution_fp16.cc | 18 +-
 .../arm/fp16/deconvolution_depthwise_fp16.cc | 30 +-
 .../kernel/arm/fp16/layout_transform_fp16.cc | 30 +-
 .../src/runtime/kernel/arm/fp32/activation.cc | 6 +-
 .../lite/src/runtime/kernel/arm/fp32/addn.cc | 24 +-
 .../src/runtime/kernel/arm/fp32/argminmax.cc | 2 +-
 .../src/runtime/kernel/arm/fp32/arithmetic.cc | 14 +-
 .../src/runtime/kernel/arm/fp32/arithmetic.h | 6 +-
 .../kernel/arm/fp32/arithmetic_self.cc | 6 +-
 .../runtime/kernel/arm/fp32/batch_to_space.cc | 10 +-
 .../src/runtime/kernel/arm/fp32/batchnorm.cc | 14 +-
 .../lite/src/runtime/kernel/arm/fp32/bias.cc | 10 +-
 .../runtime/kernel/arm/fp32/broadcast_to.cc | 8 +-
 .../lite/src/runtime/kernel/arm/fp32/cast.cc | 12 +-
 .../src/runtime/kernel/arm/fp32/concat.cc | 10 +-
 .../runtime/kernel/arm/fp32/convolution.cc | 18 +-
 .../kernel/arm/fp32/convolution_1x1.cc | 14 +-
 .../kernel/arm/fp32/convolution_3x3.cc | 18 +-
 .../kernel/arm/fp32/convolution_depthwise.cc | 12 +-
 .../arm/fp32/convolution_depthwise_3x3.cc | 12 +-
 .../arm/fp32/convolution_slidewindow.cc | 18 +-
 .../kernel/arm/fp32/convolution_winograd.cc | 18 +-
 .../lite/src/runtime/kernel/arm/fp32/crop.cc | 12 +-
 .../runtime/kernel/arm/fp32/deconvolution.cc | 14 +-
 .../arm/fp32/deconvolution_depthwise.cc | 30 +-
 .../runtime/kernel/arm/fp32/depth_to_space.cc | 8 +-
 .../lite/src/runtime/kernel/arm/fp32/elu.cc | 8 +-
 .../kernel/arm/fp32/embedding_lookup.cc | 22 +-
 .../src/runtime/kernel/arm/fp32/expandDims.cc | 8 +-
 .../lite/src/runtime/kernel/arm/fp32/fill.cc | 8 +-
 .../src/runtime/kernel/arm/fp32/flatten.cc | 8 +-
 .../src/runtime/kernel/arm/fp32/flatten.h | 2 +-
 .../runtime/kernel/arm/fp32/fullconnection.cc | 18 +-
 .../kernel/arm/fp32/fused_batchnorm.cc | 18 +-
 .../src/runtime/kernel/arm/fp32/gather.cc | 10 +-
 .../src/runtime/kernel/arm/fp32/gatherNd.cc | 12 +-
 .../kernel/arm/fp32/local_response_norm.cc | 6 +-
 .../lite/src/runtime/kernel/arm/fp32/lstm.cc | 24 +-
 .../lite/src/runtime/kernel/arm/fp32/lstm.h | 2 +-
 .../src/runtime/kernel/arm/fp32/matmul.cc | 12 +-
 .../src/runtime/kernel/arm/fp32/nchw2nhwc.cc | 4 +-
 .../src/runtime/kernel/arm/fp32/nhwc2nchw.cc | 4 +-
 .../src/runtime/kernel/arm/fp32/one_hot.cc | 26 +-
 .../lite/src/runtime/kernel/arm/fp32/pad.cc | 18 +-
 .../src/runtime/kernel/arm/fp32/pooling.cc | 6 +-
 .../lite/src/runtime/kernel/arm/fp32/power.cc | 13 +-
 .../lite/src/runtime/kernel/arm/fp32/power.h | 6 +-
 .../lite/src/runtime/kernel/arm/fp32/prelu.cc | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/prelu.h | 2 +-
 .../lite/src/runtime/kernel/arm/fp32/range.cc | 8 +-
 .../lite/src/runtime/kernel/arm/fp32/rank.cc | 4 +-
 .../src/runtime/kernel/arm/fp32/reduce.cc | 24 +-
 .../src/runtime/kernel/arm/fp32/reshape.cc | 6 +-
 .../src/runtime/kernel/arm/fp32/resize.cc | 8 +-
 .../src/runtime/kernel/arm/fp32/reverse.cc | 16 +-
 .../kernel/arm/fp32/reverse_sequence.cc | 18 +-
 .../runtime/kernel/arm/fp32/roi_pooling.cc | 12 +-
 .../lite/src/runtime/kernel/arm/fp32/scale.cc | 24 +-
 .../lite/src/runtime/kernel/arm/fp32/scale.h | 2 +-
 .../src/runtime/kernel/arm/fp32/scatter_nd.cc | 10 +-
 .../lite/src/runtime/kernel/arm/fp32/shape.cc | 4 +-
 .../lite/src/runtime/kernel/arm/fp32/slice.cc | 16 +-
 .../src/runtime/kernel/arm/fp32/softmax.cc | 6 +-
 .../runtime/kernel/arm/fp32/space_to_batch.cc | 12 +-
 .../runtime/kernel/arm/fp32/space_to_depth.cc | 24 +-
 .../kernel/arm/fp32/sparse_to_dense.cc | 12 +-
 .../runtime/kernel/arm/fp32/sparse_to_dense.h | 2 +-
 .../lite/src/runtime/kernel/arm/fp32/split.cc | 8 +-
 .../src/runtime/kernel/arm/fp32/squeeze.cc | 6 +-
 .../lite/src/runtime/kernel/arm/fp32/stack.cc | 12 +-
 .../lite/src/runtime/kernel/arm/fp32/tile.cc | 14 +-
 .../lite/src/runtime/kernel/arm/fp32/topk.cc | 14 +-
 .../lite/src/runtime/kernel/arm/fp32/topk.h | 2 +-
 .../src/runtime/kernel/arm/fp32/transpose.cc | 17 +-
 .../src/runtime/kernel/arm/fp32/unique.cc | 12 +-
 .../src/runtime/kernel/arm/fp32/unsqueeze.cc | 8 +-
 .../src/runtime/kernel/arm/fp32/unstack.cc | 16 +-
 .../lite/src/runtime/kernel/arm/fp32/where.cc | 8 +-
 .../lite/src/runtime/kernel/arm/fp32/where.h | 2 +-
 .../src/runtime/kernel/arm/fp32/zeroslike.cc | 4 +-
 .../src/runtime/kernel/arm/int8/add_int8.cc | 34 +--
 .../runtime/kernel/arm/int8/argminmax_int8.cc | 42 ++-
 .../runtime/kernel/arm/int8/argminmax_int8.h | 3 +-
 .../kernel/arm/int8/arithmetic_int8.cc | 40 +--
 .../kernel/arm/int8/arithmetic_self_int8.cc | 12 +-
 .../kernel/arm/int8/arithmetic_self_int8.h | 12 +-
 .../kernel/arm/int8/batch_to_space_int8.cc | 14 +-
 .../kernel/arm/int8/batch_to_space_int8.h | 1 +
 .../runtime/kernel/arm/int8/bias_add_int8.cc | 17 +-
 .../runtime/kernel/arm/int8/concat_int8.cc | 18 +-
 .../kernel/arm/int8/convolution_3x3_int8.cc | 16 +-
 .../arm/int8/convolution_depthwise_int8.cc | 12 +-
 .../kernel/arm/int8/convolution_int8.cc | 26 +-
 .../src/runtime/kernel/arm/int8/crop_int8.cc | 12 +-
 .../src/runtime/kernel/arm/int8/crop_int8.h | 4 +-
 .../arm/int8/deconvolution_depthwise_int8.cc | 28 +-
 .../kernel/arm/int8/deconvolution_int8.cc | 14 +-
 .../kernel/arm/int8/depth_to_space_int8.cc | 16 +-
 .../kernel/arm/int8/depth_to_space_int8.h | 1 +
 .../kernel/arm/int8/fullconnection_int8.cc | 24 +-
 .../runtime/kernel/arm/int8/hswish_int8.cc | 10 +-
 .../runtime/kernel/arm/int8/matmul_int8.cc | 18 +-
 .../src/runtime/kernel/arm/int8/mul_int8.cc | 32 +-
 .../src/runtime/kernel/arm/int8/pad_int8.cc | 16 +-
 .../src/runtime/kernel/arm/int8/pad_int8.h | 4 +-
 .../runtime/kernel/arm/int8/pooling_int8.cc | 6 +-
 .../src/runtime/kernel/arm/int8/prelu_int8.cc | 12 +-
 .../src/runtime/kernel/arm/int8/prelu_int8.h | 2 +-
 .../src/runtime/kernel/arm/int8/relux_int8.cc | 14 +-
 .../runtime/kernel/arm/int8/reshape_int8.cc | 18 +-
 .../runtime/kernel/arm/int8/resize_int8.cc | 19 +-
 .../runtime/kernel/arm/int8/sigmoid_int8.cc | 14 +-
 .../runtime/kernel/arm/int8/softmax_int8.cc | 14 +-
 .../src/runtime/kernel/arm/int8/split_int8.cc | 10 +-
 .../runtime/kernel/arm/int8/squeeze_int8.cc | 32 +-
 .../src/runtime/kernel/arm/int8/topk_int8.cc | 14 +-
 .../src/runtime/kernel/arm/int8/topk_int8.h | 2 +-
 .../runtime/kernel/arm/int8/unsqueeze_int8.cc | 16 +-
 .../runtime/kernel/arm/int8/unsqueeze_int8.h | 4 +-
 .../kernel/opencl/kernel/arithmetic.cc | 55 ++--
 .../runtime/kernel/opencl/kernel/concat.cc | 61 ++--
 .../kernel/opencl/kernel/conv2d_transpose.cc | 29 +-
 .../kernel/opencl/kernel/convolution.cc | 42 +--
 .../kernel/opencl/kernel/depthwise_conv2d.cc | 62 ++--
 .../runtime/kernel/opencl/kernel/matmul.cc | 21 +-
 .../runtime/kernel/opencl/kernel/pooling2d.cc | 26 +-
 .../runtime/kernel/opencl/kernel/softmax.cc | 15 +-
 .../runtime/kernel/opencl/kernel/transpose.cc | 18 +-
 .../runtime/kernel/opencl/kernel/transpose.h | 2 +-
 .../src/runtime/opencl/opencl_executor.cc | 18 +-
 mindspore/lite/src/scheduler.cc | 30 +-
 mindspore/lite/src/scheduler.h | 13 +-
 mindspore/lite/test/common/common_test.cc | 8 +-
 mindspore/lite/test/common/common_test.h | 2 +-
 mindspore/lite/test/st/benchmark_test.cc | 2 +-
 mindspore/lite/test/st/converter_test.cc | 2 +-
 .../test/ut/src/dataset/de_tensor_test.cc | 2 +-
 .../lite/test/ut/src/dataset/eager_test.cc | 2 +-
 mindspore/lite/test/ut/src/graph_test.cc | 2 +-
 mindspore/lite/test/ut/src/infer_test.cc | 2 +-
 .../runtime/kernel/arm/common/pack_tests.cc | 2 +-
 .../kernel/arm/common/strided_slice_tests.cc | 2 +-
 .../kernel/arm/fp16/convolution_fp16_tests.cc | 2 +-
 .../kernel/arm/fp32/activation_fp32_test.cc | 2 +-
 .../kernel/arm/fp32/argminmax_fp32_test.cc | 2 +-
 .../arm/fp32/batch_to_space_fp32_test.cc | 2 +-
 .../kernel/arm/fp32/batchnorm_fp32_tests.cc | 2 +-
 .../kernel/arm/fp32/conv1x1_fp32_tests.cc | 2 +-
 .../fp32/convolution_depthwise_fp32_tests.cc | 2 +-
 .../runtime/kernel/arm/fp32/crop_fp32_test.cc | 2 +-
 .../arm/fp32/deconvolution_fp32_tests.cc | 2 +-
 .../arm/fp32/depth_to_space_fp32_test.cc | 2 +-
 .../runtime/kernel/arm/fp32/elu_fp32_test.cc | 2 +-
 .../arm/fp32/embedding_lookup_fp32_test.cc | 2 +-
 .../arm/fp32/fullconnection_fp32_tests.cc | 2 +-
 .../kernel/arm/fp32/lstm_fp32_tests.cc | 4 +-
 .../kernel/arm/fp32/matmul_fp32_tests.cc | 2 +-
 .../kernel/arm/fp32/power_fp32_tests.cc | 2 +-
 .../kernel/arm/fp32/reduce_fp32_tests.cc | 2 +-
 .../arm/fp32/resize_bilinear_fp32_tests.cc | 2 +-
 .../resize_nearest_neighbor_fp32_tests.cc | 2 +-
 .../kernel/arm/fp32/roi_pooling_fp32_tests.cc | 2 +-
 .../arm/fp32/space_to_batch_fp32_tests.cc | 2 +-
 .../arm/fp32/space_to_depth_fp32_tests.cc | 2 +-
 .../kernel/arm/fp32/strassen_fp32_tests.cc | 2 +-
 .../kernel/arm/fp32/topk_fp32_tests.cc | 2 +-
 .../fp32_grad/activation_grad_fp32_tests.cc | 2 +-
 .../fp32_grad/arithmetic_grad_fp32_tests.cc | 2 +-
 .../arm/fp32_grad/bias_grad_fp32_tests.cc | 2 +-
 .../fp32_grad/convolution_grad_fp32_tests.cc | 2 +-
 .../arm/fp32_grad/pooling_grad_fp32_tests.cc | 2 +-
 .../softmax_crossentropy_fp32_tests.cc | 2 +-
 .../runtime/kernel/arm/int8/add_int8_tests.cc | 2 +-
 .../arm/int8/arithmetic_self_int8_tests.cc | 2 +-
 .../kernel/arm/int8/concat_int8_tests.cc | 2 +-
 .../kernel/arm/int8/crop_int8_tests.cc | 2 +-
 .../kernel/arm/int8/deconv_int8_tests.cc | 2 +-
 .../arm/int8/fullconnection_int8_tests.cc | 2 +-
 .../kernel/arm/int8/hswish_int8_tests.cc | 2 +-
 .../kernel/arm/int8/matmul_int8_tests.cc | 2 +-
 .../runtime/kernel/arm/int8/mul_int8_tests.cc | 2 +-
 .../runtime/kernel/arm/int8/pad_int8_tests.cc | 2 +-
 .../kernel/arm/int8/prelu_int8_tests.cc | 2 +-
 .../kernel/arm/int8/quant_dtype_cast_tests.cc | 2 +-
 .../kernel/arm/int8/relux_int8_tests.cc | 2 +-
 .../kernel/arm/int8/reshape_int8_tests.cc | 2 +-
 .../arm/int8/resize_bilinear_int8_tests.cc | 2 +-
 .../resize_nearest_neighbor_int8_tests.cc | 2 +-
 .../kernel/arm/int8/sigmoid_int8_tests.cc | 2 +-
 .../kernel/arm/int8/softmax_int8_tests.cc | 2 +-
 .../kernel/arm/int8/split_int8_tests.cc | 2 +-
 .../kernel/arm/int8/squeeze_int8_tests.cc | 2 +-
 .../kernel/arm/int8/topk_int8_tests.cc | 2 +-
 .../kernel/arm/int8/unsqueeze_int8_tests.cc | 2 +-
 .../src/runtime/kernel/common_utils_test.cc | 2 +-
 .../runtime/kernel/opencl/arithmetic_tests.cc | 2 +-
 .../kernel/opencl/avg_pooling_tests.cc | 2 +-
 .../src/runtime/kernel/opencl/concat_tests.cc | 2 +-
 .../kernel/opencl/conv2d_transpose_tests.cc | 2 +-
 .../kernel/opencl/convolution_tests.cc | 2 +-
 .../kernel/opencl/depthwise_conv2d_tests.cc | 8 +-
 .../src/runtime/kernel/opencl/matmul_tests.cc | 2 +-
 .../kernel/opencl/max_pooling_tests.cc | 2 +-
 .../kernel/opencl/opencl_kernel_tests.h | 2 +-
 .../runtime/kernel/opencl/softmax_tests.cc | 2 +-
 .../runtime/kernel/opencl/transpose_tests.cc | 2 +-
 mindspore/lite/test/ut/src/train_test.cc | 287 ------------------
 .../parser/tflite/tflite_parsers_test_utils.h | 2 +-
 .../fusion/conv_activation_fusion_test.cc | 2 +-
 .../fusion/conv_biasadd_fusion_test.cc | 2 +-
 .../optimizer/fusion/conv_bn_fusion_test.cc | 2 +-
 .../fusion/conv_scale_fusion_test.cc | 2 +-
 .../lite/tools/converter/converter_flags.cc | 73 +---
 .../lite/tools/converter/converter_flags.h | 4 -
 260 files changed, 1447 insertions(+), 1816 deletions(-)
 delete mode 100644 mindspore/lite/test/ut/src/train_test.cc

diff --git a/mindspore/lite/include/context.h b/mindspore/lite/include/context.h
index 2e5dd1944a..97fc8835ff 100644
--- a/mindspore/lite/include/context.h
+++ b/mindspore/lite/include/context.h
@@ -22,41 +22,41 @@
 #include "include/ms_tensor.h"
 
 namespace mindspore::lite {
-/// \brief Allocator defined by MindSpore Lite.
+/// \brief Allocator defines a memory pool that allocates and frees memory dynamically.
 ///
 /// \note List public class and interface for reference.
 class Allocator;
 
-/// \brief CpuBindMode defined by MindSpore Lite.
+/// \brief CpuBindMode defines the argument that holds the CPU core binding strategy.
 enum CpuBindMode {
-  MID_CPU = -1,   /**< bind mid cpu first */
+  MID_CPU = -1,   /**< bind middle cpu first */
   HIGHER_CPU = 1, /**< bind higher cpu first */
   NO_BIND = 0     /**< no bind */
 };
 
-/// \brief DeviceType defined by MindSpore Lite.
+/// \brief DeviceType defines the user's preferred backend device.
 typedef enum {
   DT_CPU, /**< CPU device type */
   DT_GPU, /**< GPU device type */
   DT_NPU  /**< NPU device type */
 } DeviceType;
 
-/// \brief DeviceContext defined by MindSpore Lite.
+/// \brief DeviceContext defines the structure that holds a DeviceType.
 typedef struct {
   DeviceType type; /**< device type */
 } DeviceContext;
 
-/// \brief Context defined by MindSpore Lite
+/// \brief Context defines the environment in which the model runs.
 class MS_API Context {
  public:
-  /// \brief Constructor of MindSpore Lite context using default value for parameters.
+  /// \brief Constructor of MindSpore Lite Context using default values for parameters.
   ///
   /// \return Instance of MindSpore Lite Context.
   Context();
 
   /// \brief Constructor of MindSpore Lite Context using input value for parameters.
   ///
-  /// \param[in] thread_num Define the threadNum during the runtime.
+  /// \param[in] thread_num Define the number of worker threads during the runtime.
   /// \param[in] allocator Define the allocator for malloc.
   /// \param[in] device_ctx Define device information during the runtime.
   Context(int thread_num, std::shared_ptr<Allocator> allocator, DeviceContext device_ctx);
diff --git a/mindspore/lite/include/errorcode.h b/mindspore/lite/include/errorcode.h
index c7c8224e07..ffcfba51f2 100644
--- a/mindspore/lite/include/errorcode.h
+++ b/mindspore/lite/include/errorcode.h
@@ -19,6 +19,7 @@
 
 namespace mindspore {
 namespace lite {
+/// \brief STATUS defines the error code type in MindSpore Lite.
 using STATUS = int;
 
 /* Success */
@@ -33,8 +34,8 @@ constexpr int RET_SUCCESS_EXIT = -5;   /**< No error but exit. */
 constexpr int RET_MEMORY_FAILED = -6;  /**< Create memory failed. */
 
 /* Executor error code, range: [-101,-200] */
-constexpr int RET_OUT_OF_TENSOR_RANGE = -101; /**< Failed to checking range. */
-constexpr int RET_INPUT_TENSOR_ERROR = -102;  /**< Failed to checking input tensor. */
+constexpr int RET_OUT_OF_TENSOR_RANGE = -101; /**< Failed to check range. */
+constexpr int RET_INPUT_TENSOR_ERROR = -102;  /**< Failed to check input tensor. */
 constexpr int RET_REENTRANT_ERROR = -103;     /**< Exist executor running. */
 
 /* Graph error code, range: [-201,-300] */
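
Note: the renamed Context parameters above are easiest to read at a call site. A minimal sketch, illustrative only and not part of the patch; it assumes the public members `thread_num_` and `cpu_bind_mode_` that the session code later in this patch reads from Context:

    #include "include/context.h"

    // Two worker threads, default allocator, CPU backend.
    mindspore::lite::DeviceContext device_ctx = {mindspore::lite::DT_CPU};
    mindspore::lite::Context ctx(2, nullptr, device_ctx);
    // Prefer the higher-frequency cores when binding threads.
    ctx.cpu_bind_mode_ = mindspore::lite::HIGHER_CPU;
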
diff --git a/mindspore/lite/include/lite_session.h b/mindspore/lite/include/lite_session.h
index 80fec03cf5..f18e84c8f8 100644
--- a/mindspore/lite/include/lite_session.h
+++ b/mindspore/lite/include/lite_session.h
@@ -27,15 +27,17 @@
 
 namespace mindspore {
 namespace session {
+/// \brief CallBackParam defines the input arguments of the callback function.
 struct CallBackParam {
-  std::string name_callback_param;
-  std::string type_callback_param;
+  std::string name_callback_param; /**< node name argument */
+  std::string type_callback_param; /**< node type argument */
 };
 
+/// \brief KernelCallBack defines the function pointer type for callbacks.
 using KernelCallBack = std::function<bool(std::vector<tensor::MSTensor *> inputs,
                                           std::vector<tensor::MSTensor *> outputs, const CallBackParam &opInfo)>;
 
-/// \brief LiteSession defined by MindSpore Lite.
+/// \brief LiteSession defines the session in MindSpore Lite that compiles and runs a Model.
 class MS_API LiteSession {
  public:
   /// \brief Static method to create a LiteSession pointer.
@@ -48,52 +50,52 @@ class MS_API LiteSession {
   /// \brief Destructor of MindSpore Lite LiteSession.
   virtual ~LiteSession() = default;
 
-  /// \brief Try to bind or unbind threads in the thread pool to specified cpu core.
+  /// \brief Try to bind or unbind threads in the thread pool to the specified cpu core.
   ///
-  /// \param[in] if_bind Define weather to bind or unbind threads.
+  /// \param[in] if_bind Define whether to bind or unbind threads.
   virtual void BindThread(bool if_bind) = 0;
 
-  /// \brief Compile MindSpore lite model.
+  /// \brief Compile MindSpore Lite model.
   ///
   /// \note CompileGraph should be called before RunGraph.
   ///
   /// \param[in] model Define the model to be compiled.
   ///
-  /// \return ErrorCode of compile graph.
+  /// \return STATUS as the error code of compiling the graph; STATUS is defined in errorcode.h.
   virtual int CompileGraph(lite::Model *model) = 0;
 
   /// \brief Get input MindSpore Lite MSTensors of model.
   ///
-  /// \return A vector of MindSpore Lite MSTensor.
+  /// \return The vector of MindSpore Lite MSTensor.
   virtual std::vector<tensor::MSTensor *> GetInputs() const = 0;
 
   /// \brief Get input MindSpore Lite MSTensors of model by node name.
   ///
   /// \param[in] node_name Define node name.
   ///
-  /// \return A vector of MindSpore Lite MSTensor.
+  /// \return The vector of MindSpore Lite MSTensor.
   virtual std::vector<tensor::MSTensor *> GetInputsByName(const std::string &node_name) const = 0;
 
   /// \brief Run session with callback.
   ///
-  /// \param[in] before Define a call_back_function called before running each node
-  /// \param[in] after Define a call_back_function called after running each node
+  /// \param[in] before Define a call_back_function to be called before running each node.
+  /// \param[in] after Define a call_back_function to be called after running each node.
   ///
   /// \note RunGraph should be called after CompileGraph.
   ///
-  /// \return ErrorCode of run graph.
+  /// \return STATUS as the error code of running the graph; STATUS is defined in errorcode.h.
   virtual int RunGraph(const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr) = 0;
 
   /// \brief Get output MindSpore Lite MSTensors of model.
   ///
-  /// \return A map of output node name and MindSpore Lite MSTensor.
+  /// \return The map of output node name and MindSpore Lite MSTensor.
   virtual std::unordered_map<std::string, std::vector<mindspore::tensor::MSTensor *>> GetOutputs() const = 0;
 
   /// \brief Get output MindSpore Lite MSTensors of model by node name.
   ///
   /// \param[in] node_name Define node name.
   ///
-  /// \return A vector of MindSpore Lite MSTensor.
+  /// \return The vector of MindSpore Lite MSTensor.
   virtual std::vector<tensor::MSTensor *> GetOutputsByName(const std::string &node_name) const = 0;
 };
 }  // namespace session
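
Note: the session interface above is used roughly as follows. A sketch under stated assumptions (CreateSession takes a `lite::Context *` as declared earlier in this header; error handling trimmed; `model` comes from Model::Import as documented in model.h below):

    auto *session = mindspore::session::LiteSession::CreateSession(&ctx);
    if (session->CompileGraph(model) != mindspore::lite::RET_OK) {
      // handle the STATUS error code from errorcode.h
    }
    auto inputs = session->GetInputs();    // fill inputs[i]->MutableData()
    session->RunGraph();
    auto outputs = session->GetOutputs();  // node name -> output tensors
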
diff --git a/mindspore/lite/include/model.h b/mindspore/lite/include/model.h
index 5813caeca8..74b5b7d442 100644
--- a/mindspore/lite/include/model.h
+++ b/mindspore/lite/include/model.h
@@ -25,24 +25,24 @@
 namespace mindspore {
 #define MS_API __attribute__((visibility("default")))
 
-/// \brief ModelImpl defined by MindSpore Lite.
+/// \brief ModelImpl defines the implementation class of Model in MindSpore Lite.
 ///
 /// \note List public class and interface for reference.
 class ModelImpl;
 
 namespace lite {
-/// \brief Primitive defined by MindSpore Lite.
+/// \brief Primitive defines the prototype of an operator.
 ///
 /// \note List public class and interface for reference.
 class Primitive;
 
-/// \brief Model defined by MindSpore Lite.
+/// \brief Model defines the model in MindSpore Lite that manages the graph.
 class MS_API Model {
  public:
   /// \brief Static method to create a Model pointer.
   ///
   /// \param[in] model_buf Define the buffer read from a model file.
-  /// \param[in] size Define bytes numbers of model buffer.
+  /// \param[in] size Define the number of bytes in the model buffer.
   ///
   /// \return Pointer of MindSpore Lite Model.
   static Model *Import(const char *model_buf, size_t size);
@@ -59,17 +59,17 @@ class MS_API Model {
   ///
   /// \param[in] name Define name of primitive to be returned.
   ///
-  /// \return A pointer of MindSpore Lite Primitive.
+  /// \return The pointer of MindSpore Lite Primitive.
   lite::Primitive *GetOp(const std::string &name) const;
 
-  /// \brief Get MindSpore Lite MetaGraph.
+  /// \brief Get the graph defined in flatbuffers.
   ///
-  /// \return A pointer of MindSpore Lite MetaGraph.
+  /// \return The pointer of the graph defined in flatbuffers.
   const schema::MetaGraph *GetMetaGraph() const;
 
   /// \brief Get MindSpore Lite ModelImpl.
   ///
-  /// \return A pointer of MindSpore Lite ModelImpl.
+  /// \return The pointer of MindSpore Lite ModelImpl.
   ModelImpl *model_impl();
 
   /// \brief Free MetaGraph in MindSpore Lite Model.
@@ -84,7 +84,7 @@ class MS_API ModelBuilder {
  public:
   /// \brief OutEdge defined by MindSpore Lite.
   struct OutEdge {
-    std::string nodeId; /**< Id of a node linked by this edge */
+    std::string nodeId;  /**< ID of a node linked by this edge */
     size_t outEdgeIndex; /**< Index of this edge */
   };
 
@@ -101,12 +101,12 @@ class MS_API ModelBuilder {
   /// \param[in] op Define the primitive to be added.
   /// \param[in] inputs Define input edge of primitive to be added.
   ///
-  /// \return Id of the primitive added.
+  /// \return ID of the primitive added.
   virtual std::string AddOp(const lite::Primitive &op, const std::vector<OutEdge> &inputs) = 0;
 
   /// \brief Finish constructing the model.
   ///
-  /// \return A pointer of MindSpore Lite Model.
+  /// \return The pointer of MindSpore Lite Model.
   virtual Model *Construct();
 };
 }  // namespace lite
diff --git a/mindspore/lite/include/ms_tensor.h b/mindspore/lite/include/ms_tensor.h
index 911d4faa1a..5951ab83a4 100644
--- a/mindspore/lite/include/ms_tensor.h
+++ b/mindspore/lite/include/ms_tensor.h
@@ -25,7 +25,7 @@
 namespace mindspore {
 #define MS_API __attribute__((visibility("default")))
 namespace tensor {
-/// \brief MSTensor defined by MindSpore Lite.
+/// \brief MSTensor defines the tensor in MindSpore Lite.
 class MS_API MSTensor {
  public:
   /// \brief Constructor of MindSpore Lite MSTensor.
@@ -41,7 +41,7 @@ class MS_API MSTensor {
   /// \note TypeId is defined in mindspore/mindspore/core/ir/dtype/type_id.h. Only number types in the TypeId enum are
   /// suitable for MSTensor.
   ///
-  /// \return A pointer of MSTensor.
+  /// \return The pointer of MSTensor.
   static MSTensor *CreateTensor(TypeId data_type, const std::vector<int> &shape);
 
   /// \brief Destructor of MindSpore Lite Model.
@@ -69,7 +69,7 @@ class MS_API MSTensor {
 
   /// \brief Set shape for the MindSpore Lite MSTensor.
   ///
-  /// \param[in] shape Define A vector of int as shape to be set into the MindSpore Lite MSTensor.
+  /// \param[in] shape Define a vector of int as the shape to be set into the MindSpore Lite MSTensor.
   ///
   /// \return size of shape of the MindSpore Lite MSTensor after set.
   virtual size_t set_shape(const std::vector<int> &shape) = 0;
@@ -96,15 +96,13 @@ class MS_API MSTensor {
   /// \return Byte size of data in MSTensor.
   virtual size_t Size() const = 0;
 
-  /// \brief Get pointer of data in MSTensor.
+  /// \brief Get the pointer of data in MSTensor.
   ///
   /// \note The data pointer can be used to both write and read data in MSTensor.
   ///
-  /// \return A pointer points to data in MSTensor.
+  /// \return The pointer that points to data in MSTensor.
   virtual void *MutableData() const = 0;
 };
-
-using MultiTensor = std::vector<std::vector<std::shared_ptr<tensor::MSTensor>>>;
 }  // namespace tensor
 }  // namespace mindspore
 #endif  // MINDSPORE_INCLUDE_MS_TENSOR_H_
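
Note: a short sketch of the MSTensor surface documented above (illustrative only; `kNumberTypeFloat32` is one of the number types in the TypeId enum referenced in the \note):

    auto *tensor = mindspore::tensor::MSTensor::CreateTensor(kNumberTypeFloat32, {1, 224, 224, 3});
    auto *data = static_cast<float *>(tensor->MutableData());  // readable and writable
    size_t bytes = tensor->Size();                             // byte size of the buffer
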
diff --git a/mindspore/lite/src/executor.cc b/mindspore/lite/src/executor.cc
index 01b02f4b01..3dc546a74a 100644
--- a/mindspore/lite/src/executor.cc
+++ b/mindspore/lite/src/executor.cc
@@ -20,11 +20,11 @@
 #include "src/common/ms_tensor_utils.h"
 
 namespace mindspore::lite {
-int Executor::Run(std::vector<tensor::Tensor *> &inputs, std::vector<tensor::Tensor *> &outputs,
+int Executor::Run(std::vector<tensor::Tensor *> &in_tensors, std::vector<tensor::Tensor *> &out_tensors,
                   std::vector<kernel::LiteKernel *> &kernels, Allocator *allocator,
                   const session::KernelCallBack &before, const session::KernelCallBack &after) {
   MS_ASSERT(nullptr != allocator);
-  for (auto &inTensor : inputs) {
+  for (auto &inTensor : in_tensors) {
     if (inTensor == nullptr) {
       MS_LOG(ERROR) << "Graph input tensor is nullptr";
       return RET_ERROR;
@@ -39,31 +39,31 @@ int Executor::Run(std::vector<tensor::Tensor *> &inputs, std::vector<tensor::Tensor *> &outputs,
     if (before != nullptr) {
-      if (!before(PackToMSTensors(kernel->GetInputs()), PackToMSTensors(kernel->GetOutputs()),
-                  {kernel->Name(), kernel->type_str()})) {
-        MS_LOG(ERROR) << "run kernel before_callback failed, name: " << kernel->Name();
+      if (!before(PackToMSTensors(kernel->in_tensors()), PackToMSTensors(kernel->out_tensors()),
+                  {kernel->name(), kernel->type_str()})) {
+        MS_LOG(ERROR) << "run kernel before_callback failed, name: " << kernel->name();
       }
     }
     auto ret = kernel->Run();
     if (0 != ret) {
-      MS_LOG(ERROR) << "run kernel failed, name: " << kernel->Name();
+      MS_LOG(ERROR) << "run kernel failed, name: " << kernel->name();
       return ret;
     }
     if (after != nullptr) {
-      if (!after(PackToMSTensors(kernel->GetInputs()), PackToMSTensors(kernel->GetOutputs()),
-                 {kernel->Name(), kernel->type_str()})) {
-        MS_LOG(ERROR) << "run kernel after_callback failed, name: " << kernel->Name();
+      if (!after(PackToMSTensors(kernel->in_tensors()), PackToMSTensors(kernel->out_tensors()),
+                 {kernel->name(), kernel->type_str()})) {
+        MS_LOG(ERROR) << "run kernel after_callback failed, name: " << kernel->name();
       }
     }
-    for (auto input_kernel : kernel->GetInKernels()) {
+    for (auto input_kernel : kernel->in_kernels()) {
       MS_ASSERT(input_kernel != nullptr);
       if (input_kernel->is_model_output()) {
         continue;
       }
       ret = input_kernel->DecOutTensorRefCount();
       if (0 != ret) {
-        MS_LOG(WARNING) << "DecOutTensorRefCount for kernel" << kernel->Name() << " failed";
+        MS_LOG(WARNING) << "DecOutTensorRefCount for kernel " << kernel->name() << " failed";
       }
     }
   }
diff --git a/mindspore/lite/src/executor.h b/mindspore/lite/src/executor.h
index 5ee084fa5f..8e0720859a 100644
--- a/mindspore/lite/src/executor.h
+++ b/mindspore/lite/src/executor.h
@@ -29,7 +29,7 @@ class Executor {
 
   int Prepare(std::vector<kernel::LiteKernel *> &kernels) { return 0; }
 
-  int Run(std::vector<tensor::Tensor *> &inputs, std::vector<tensor::Tensor *> &outputs,
+  int Run(std::vector<tensor::Tensor *> &in_tensors, std::vector<tensor::Tensor *> &out_tensors,
           std::vector<kernel::LiteKernel *> &kernels, Allocator *allocator = nullptr,
           const session::KernelCallBack &before = nullptr, const session::KernelCallBack &after = nullptr);
 
@@ -39,9 +39,6 @@ class Executor {
   int TransformTensorLayoutUint8(tensor::Tensor *tensor, schema::Format dst_format, Allocator *allocator = nullptr);
 
   int TransformTensorLayout(tensor::Tensor *tensor, schema::Format dst_format, Allocator *allocator = nullptr);
-
- protected:
-  Context *context = nullptr;
 };
 }  // namespace mindspore::lite
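
Note: Executor::Run above invokes the callbacks with each kernel's tensors plus its name and type (the CallBackParam fields from lite_session.h). A sketch of a tracing callback; as the code above shows, returning false only logs an error, it does not abort the run:

    mindspore::session::KernelCallBack before =
        [](std::vector<mindspore::tensor::MSTensor *> inputs,
           std::vector<mindspore::tensor::MSTensor *> outputs,
           const mindspore::session::CallBackParam &param) {
          std::cout << "running " << param.name_callback_param
                    << " (" << param.type_callback_param << ")" << std::endl;
          return true;
        };
    session->RunGraph(before, nullptr);
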
diff --git a/mindspore/lite/src/kernel_factory.cc b/mindspore/lite/src/kernel_factory.cc
index fea004db99..257ba0fa0b 100644
--- a/mindspore/lite/src/kernel_factory.cc
+++ b/mindspore/lite/src/kernel_factory.cc
@@ -33,8 +33,8 @@
 KernelFactory *KernelFactory::GetInstance() { return &instance; }
 
-LiteKernel *KernelFactory::GetKernel(const std::vector<tensor::Tensor *> &inputs,
-                                     const std::vector<tensor::Tensor *> &outputs, const lite::Primitive *primitive,
+LiteKernel *KernelFactory::GetKernel(const std::vector<tensor::Tensor *> &in_tensors,
+                                     const std::vector<tensor::Tensor *> &out_tensors, const lite::Primitive *primitive,
                                      const Context *ctx, const kernel::KernelKey &key) {
   MS_EXCEPTION_IF_NULL(primitive);
   MS_EXCEPTION_IF_NULL(ctx);
@@ -45,7 +45,7 @@ LiteKernel *KernelFactory::GetKernel(const std::vector<tensor::Tensor *> &in_tensors,
   }
   auto creator = KernelRegistry::GetInstance()->GetCreator(key);
   if (creator != nullptr) {
-    auto kernel = creator(inputs, outputs, parameter, ctx, key, primitive);
+    auto kernel = creator(in_tensors, out_tensors, parameter, ctx, key, primitive);
     return kernel;
   }
   return nullptr;
diff --git a/mindspore/lite/src/kernel_factory.h b/mindspore/lite/src/kernel_factory.h
index 2065e208d0..086bdf7b46 100644
--- a/mindspore/lite/src/kernel_factory.h
+++ b/mindspore/lite/src/kernel_factory.h
@@ -31,8 +31,8 @@ class KernelFactory {
   virtual ~KernelFactory();
 
   static KernelFactory *GetInstance();
-  kernel::LiteKernel *GetKernel(const std::vector<tensor::Tensor *> &inputs,
-                                const std::vector<tensor::Tensor *> &outputs, const lite::Primitive *primitive,
+  kernel::LiteKernel *GetKernel(const std::vector<tensor::Tensor *> &in_tensors,
+                                const std::vector<tensor::Tensor *> &out_tensors, const lite::Primitive *primitive,
                                 const Context *ctx, const kernel::KernelKey &key);
 };
 }  // namespace mindspore::lite
diff --git a/mindspore/lite/src/lite_kernel.cc b/mindspore/lite/src/lite_kernel.cc
index 0119f1029f..fa298575a6 100644
--- a/mindspore/lite/src/lite_kernel.cc
+++ b/mindspore/lite/src/lite_kernel.cc
@@ -20,13 +20,13 @@
 
 namespace mindspore::kernel {
 void LiteKernel::InitOutTensorRefCount() {
-  for (auto *tensor : this->outputs_) {
-    tensor->SetRefCount(this->out_kernel_.size());
+  for (auto *tensor : this->out_tensors_) {
+    tensor->SetRefCount(this->out_kernels_.size());
   }
 }
 
 int LiteKernel::DecOutTensorRefCount() {
-  for (auto *tensor : this->outputs_) {
+  for (auto *tensor : this->out_tensors_) {
     tensor->decRefCount();
     if (0 >= tensor->RefCount()) {
       auto ret = tensor->FreeData();
@@ -43,7 +43,7 @@ std::vector<kernel::LiteKernel *> LiteKernelUtil::SubgraphInputKernels(
     const std::vector<kernel::LiteKernel *> &kernels) {
   std::vector<kernel::LiteKernel *> input_kernels;
   for (const auto kernel : kernels) {
-    for (auto input : kernel->GetInKernels()) {
+    for (auto input : kernel->in_kernels()) {
       auto iter = std::find(kernels.begin(), kernels.end(), input);
       if (iter == kernels.end()) {
         input_kernels.emplace_back(input);
@@ -57,7 +57,7 @@ std::vector<kernel::LiteKernel *> LiteKernelUtil::SubgraphOutputKernels(
     const std::vector<kernel::LiteKernel *> &kernels) {
   std::vector<kernel::LiteKernel *> output_kernels;
   for (const auto kernel : kernels) {
-    for (const auto output : kernel->GetOutKernels()) {
+    for (const auto output : kernel->out_kernels()) {
       auto iter = std::find(kernels.begin(), kernels.end(), output);
       if (iter == kernels.end()) {
         output_kernels.emplace_back(output);
@@ -72,11 +72,11 @@ std::vector<lite::tensor::Tensor *> LiteKernelUtil::SubgraphInputTensors(
   std::vector<lite::tensor::Tensor *> input_tensors;
   std::vector<lite::tensor::Tensor *> all_output_tensors;
   for (const auto &kernel : kernels) {
-    all_output_tensors.insert(all_output_tensors.end(), kernel->GetOutputs().begin(), kernel->GetOutputs().end());
+    all_output_tensors.insert(all_output_tensors.end(), kernel->out_tensors().begin(), kernel->out_tensors().end());
   }
   std::vector<kernel::LiteKernel *> input_kernels = SubgraphInputKernels(kernels);
   for (const auto &kernel : input_kernels) {
-    for (const auto &tensor : kernel->GetInputs()) {
+    for (const auto &tensor : kernel->in_tensors()) {
       auto iter = std::find(all_output_tensors.begin(), all_output_tensors.end(), tensor);
       if (iter == all_output_tensors.end() && tensor->Data() == nullptr) {
         input_tensors.emplace_back(tensor);
@@ -91,11 +91,11 @@ std::vector<lite::tensor::Tensor *> LiteKernelUtil::SubgraphOutputTensors(
   std::vector<lite::tensor::Tensor *> output_tensors;
   std::vector<lite::tensor::Tensor *> all_input_tensors;
   for (const auto &kernel : kernels) {
-    all_input_tensors.insert(all_input_tensors.end(), kernel->GetInputs().begin(), kernel->GetInputs().end());
+    all_input_tensors.insert(all_input_tensors.end(), kernel->in_tensors().begin(), kernel->in_tensors().end());
   }
   std::vector<kernel::LiteKernel *> output_kernels = SubgraphOutputKernels(kernels);
   for (const auto &kernel : output_kernels) {
-    for (const auto &tensor : kernel->GetOutputs()) {
+    for (const auto &tensor : kernel->out_tensors()) {
       auto iter = std::find(all_input_tensors.begin(), all_input_tensors.end(), tensor);
       if (iter == all_input_tensors.end()) {
         output_tensors.emplace_back(tensor);
@@ -111,13 +111,13 @@ void LiteKernelUtil::TopologicalSortKernels(std::vector<kernel::LiteKernel *> &kernels) {
       if (search_kernel == kernel) {
         continue;
       }
-      for (auto *tensor : kernel->GetInputs()) {
-        if (lite::IsContain(search_kernel->GetOutputs(), tensor)) {
+      for (auto *tensor : kernel->in_tensors()) {
+        if (lite::IsContain(search_kernel->out_tensors(), tensor)) {
          kernel->AddInKernel(search_kernel);
        }
      }
-      for (auto *tensor : kernel->GetOutputs()) {
-        if (lite::IsContain(search_kernel->GetInputs(), tensor)) {
+      for (auto *tensor : kernel->out_tensors()) {
+        if (lite::IsContain(search_kernel->in_tensors(), tensor)) {
          kernel->AddOutKernel(search_kernel);
        }
      }
diff --git a/mindspore/lite/src/lite_kernel.h b/mindspore/lite/src/lite_kernel.h
index 4f715d4651..d278b27351 100644
--- a/mindspore/lite/src/lite_kernel.h
+++ b/mindspore/lite/src/lite_kernel.h
@@ -57,102 +57,109 @@ struct KernelKey {
 class LiteKernel {
  public:
   LiteKernel() = default;
-  explicit LiteKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
-                      const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
+  explicit LiteKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &in_tensors,
+                      const std::vector<lite::tensor::Tensor *> &out_tensors, const lite::Context *ctx,
                       const lite::Primitive *primitive)
-      : opParameter(parameter), inputs_(inputs), outputs_(outputs), primitive_(primitive), context_(ctx) {
-    if (opParameter && ctx) {
-      opParameter->thread_num_ = ctx->thread_num_;
+      : op_parameter_(parameter),
+        in_tensors_(in_tensors),
+        out_tensors_(out_tensors),
+        primitive_(primitive),
+        context_(ctx) {
+    if (op_parameter_ && ctx) {
+      op_parameter_->thread_num_ = ctx->thread_num_;
     }
-    this->in_kernel_.clear();
-    this->out_kernel_.clear();
+    this->in_kernels_.clear();
+    this->out_kernels_.clear();
   }
 
-  virtual ~LiteKernel() { delete opParameter; }
+  virtual ~LiteKernel() { delete op_parameter_; }
 
   virtual int Prepare() {
     if (!InferShapeDone()) {
-      (const_cast<lite::Primitive *>(primitive_))->InferShape(inputs_, outputs_);
-      if (need_reinit) {
+      (const_cast<lite::Primitive *>(primitive_))->InferShape(in_tensors_, out_tensors_);
+      if (need_reinit_) {
         Init();
       }
     }
-    auto &outputs = this->GetOutputs();
+    auto &outputs = this->out_tensors();
     for (auto *output : outputs) {
       MS_ASSERT(output != nullptr);
       output->MallocData();
     }
     return RET_OK;
   }
+
   virtual int Init() { return -1; }
+
   virtual int ReSize() { return -1; }
+
   virtual int Run() { return -1; }
 
-  std::string Name() { return this->name; }
-  virtual void train() { train_mode = true; }
-  virtual bool is_train() { return train_mode == true; }
-  virtual void eval() { train_mode = false; }
-  virtual bool is_eval() { return train_mode == false; }
-  void set_name(const std::string &name) { this->name = name; }
+  std::string name() { return this->name_; }
 
-  void set_is_model_output(bool is_model_output) { this->is_model_output_ = is_model_output; }
+  virtual void train() { train_mode_ = true; }
+
+  virtual bool is_train() { return train_mode_; }
+
+  virtual void eval() { train_mode_ = false; }
+
+  virtual bool is_eval() { return !train_mode_; }
 
-  bool is_model_output() { return this->is_model_output_; }
+  void set_name(const std::string &name) { this->name_ = name; }
 
-  schema::PrimitiveType type() { return (schema::PrimitiveType)this->opParameter->type_; }
+  void set_is_model_output(bool is_model_output) { this->is_model_output_ = is_model_output; }
+
+  bool is_model_output() const { return this->is_model_output_; }
 
-  std::string type_str() {
-    return this->opParameter ? schema::EnumNamePrimitiveType((schema::PrimitiveType)this->opParameter->type_)
-                             : "ERROR:undefined primitive!";
+  schema::PrimitiveType Type() {
+    return (this->op_parameter_ != nullptr) ? schema::PrimitiveType(this->op_parameter_->type_)
+                                            : schema::PrimitiveType_NONE;
   }
 
-  void SetInputs(const std::vector<lite::tensor::Tensor *> &inputs) { this->inputs_ = inputs; }
+  std::string type_str() { return schema::EnumNamePrimitiveType(this->Type()); }
 
-  void SetOutputs(const std::vector<lite::tensor::Tensor *> &outputs) { this->outputs_ = outputs; }
+  void set_in_tensors(const std::vector<lite::tensor::Tensor *> &in_tensors) { this->in_tensors_ = in_tensors; }
 
-  std::vector<lite::tensor::Tensor *> &GetInputs() { return this->inputs_; }
+  void set_out_tensors(const std::vector<lite::tensor::Tensor *> &out_tensors) { this->out_tensors_ = out_tensors; }
 
-  std::vector<lite::tensor::Tensor *> &GetOutputs() { return this->outputs_; }
+  std::vector<lite::tensor::Tensor *> &in_tensors() { return this->in_tensors_; }
 
-  void AddInKernel(LiteKernel *kernel) { this->in_kernel_.emplace_back(kernel); }
+  std::vector<lite::tensor::Tensor *> &out_tensors() { return this->out_tensors_; }
 
-  void AddOutKernel(LiteKernel *kernel) { this->out_kernel_.emplace_back(kernel); }
+  void AddInKernel(LiteKernel *kernel) { this->in_kernels_.emplace_back(kernel); }
 
-  std::vector<LiteKernel *> &GetInKernels() { return this->in_kernel_; }
+  void AddOutKernel(LiteKernel *kernel) { this->out_kernels_.emplace_back(kernel); }
 
-  std::vector<LiteKernel *> &GetOutKernels() { return this->out_kernel_; }
+  std::vector<LiteKernel *> &in_kernels() { return this->in_kernels_; }
+
+  std::vector<LiteKernel *> &out_kernels() { return this->out_kernels_; }
 
   void InitOutTensorRefCount();
 
   int DecOutTensorRefCount();
 
-  const KernelKey Desc() const { return desc; }
+  KernelKey desc() const { return desc_; }
 
-  void set_desc(const KernelKey kernel_key) { desc = kernel_key; }
+  void set_desc(const KernelKey kernel_key) { desc_ = kernel_key; }
 
-  void SetNeedReInit() { need_reinit = true; }
+  void set_need_reinit() { need_reinit_ = true; }
 
  protected:
-  bool InferShapeDone() {
-    if (primitive_ != nullptr && !primitive_->GetInferFlag()) {
-      return false;
-    }
-    return true;
-  }
+  bool InferShapeDone() { return !(primitive_ != nullptr && !primitive_->GetInferFlag()); }
 
-  KernelKey desc;
-  std::string name;
-  OpParameter *opParameter = nullptr;
+  KernelKey desc_;
+  std::string name_;
+  OpParameter *op_parameter_ = nullptr;
   const lite::Primitive *primitive_ = nullptr;
   const lite::Context *context_ = nullptr;
   // tensor will free in ~lite_session()
-  std::vector<lite::tensor::Tensor *> inputs_;
-  std::vector<lite::tensor::Tensor *> outputs_;
-  std::vector<LiteKernel *> in_kernel_;
-  std::vector<LiteKernel *> out_kernel_;
-  bool train_mode = false;
-  bool need_reinit = false;
+  std::vector<lite::tensor::Tensor *> in_tensors_;
+  std::vector<lite::tensor::Tensor *> out_tensors_;
+  std::vector<LiteKernel *> in_kernels_;
+  std::vector<LiteKernel *> out_kernels_;
+  bool train_mode_ = false;
+  bool need_reinit_ = false;
   bool is_model_output_ = false;
 };
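
Note: with the accessors renamed above, graph traversal code reads as below (a sketch only; `kernels` stands for any `std::vector<kernel::LiteKernel *>` such as the one the scheduler produces):

    for (auto *kernel : kernels) {
      for (auto *pre : kernel->in_kernels()) {      // was GetInKernels()
        std::cout << pre->name() << " -> " << kernel->name() << std::endl;
      }
      for (auto *tensor : kernel->out_tensors()) {  // was GetOutputs()
        tensor->MallocData();
      }
    }
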
diff --git a/mindspore/lite/src/lite_session.cc b/mindspore/lite/src/lite_session.cc
index 9107bf6197..9d7a7b7f51 100644
--- a/mindspore/lite/src/lite_session.cc
+++ b/mindspore/lite/src/lite_session.cc
@@ -74,46 +74,46 @@ int LiteSession::ConvertTensors(const lite::Model *model) {
       }
     }
-    this->tensors.emplace_back(dstTensor);
+    this->tensors_.emplace_back(dstTensor);
   }
   return RET_OK;
 }
 
 void LiteSession::InitGraphInputTensors(const lite::Model *model) {
   auto meta_graph = model->GetMetaGraph();
-  MS_ASSERT(this->inputs.empty());
+  MS_ASSERT(this->inputs_.empty());
   MS_ASSERT(meta_graph != nullptr);
   for (size_t i = 0; i < meta_graph->inputIndex()->size(); i++) {
     auto in_tensor_idx = size_t(meta_graph->inputIndex()->GetAs<uint32_t>(i));
-    MS_ASSERT(in_tensor_idx < this->tensors.size());
-    auto *in_tensor = this->tensors.at(in_tensor_idx);
+    MS_ASSERT(in_tensor_idx < this->tensors_.size());
+    auto *in_tensor = this->tensors_.at(in_tensor_idx);
     MS_ASSERT(in_tensor != nullptr);
-    this->inputs.emplace_back(in_tensor);
+    this->inputs_.emplace_back(in_tensor);
   }
 }
 
 void LiteSession::InitGraphOutputTensors(const lite::Model *model) {
   auto meta_graph = model->GetMetaGraph();
-  MS_ASSERT(this->outputs.empty());
+  MS_ASSERT(this->outputs_.empty());
   MS_ASSERT(meta_graph != nullptr);
   for (size_t i = 0; i < meta_graph->outputIndex()->size(); i++) {
     auto out_tensor_idx = size_t(meta_graph->outputIndex()->GetAs<uint32_t>(i));
-    MS_ASSERT(out_tensor_idx < this->tensors.size());
-    auto *out_tensor = this->tensors.at(out_tensor_idx);
+    MS_ASSERT(out_tensor_idx < this->tensors_.size());
+    auto *out_tensor = this->tensors_.at(out_tensor_idx);
     MS_ASSERT(out_tensor != nullptr);
-    this->outputs.emplace_back(out_tensor);
+    this->outputs_.emplace_back(out_tensor);
   }
 }
 
 void LiteSession::InitGraphInputMap(const lite::Model *model) {
   auto meta_graph = model->GetMetaGraph();
-  MS_ASSERT(this->input_map.empty());
+  MS_ASSERT(this->input_map_.empty());
   MS_ASSERT(meta_graph != nullptr);
   auto graph_input_node_indexes = GetGraphInputNodes(meta_graph);
   for (auto in_node_index : graph_input_node_indexes) {
     auto *in_node = meta_graph->nodes()->GetAs<schema::CNode>(in_node_index);
     MS_ASSERT(nullptr != in_node);
-    MS_ASSERT(this->input_map.find(in_node->name()->str()) == this->input_map.end());
+    MS_ASSERT(this->input_map_.find(in_node->name()->str()) == this->input_map_.end());
     for (size_t i = 0; i < in_node->inputIndex()->size(); i++) {
       auto in_tensor_index = size_t(in_node->inputIndex()->GetAs<uint32_t>(i));
       bool is_graph_input = false;
@@ -126,25 +126,25 @@ void LiteSession::InitGraphInputMap(const lite::Model *model) {
       if (!is_graph_input) {
         continue;
       }
-      MS_ASSERT(in_tensor_index < this->tensors.size());
-      auto *in_tensor = this->tensors.at(in_tensor_index);
+      MS_ASSERT(in_tensor_index < this->tensors_.size());
+      auto *in_tensor = this->tensors_.at(in_tensor_index);
       MS_ASSERT(in_tensor != nullptr);
       auto *ms_tensor = new tensor::LiteTensor(in_tensor);
       MS_ASSERT(nullptr != ms_tensor);
-      this->input_map[in_node->name()->str()].emplace_back(ms_tensor);
+      this->input_map_[in_node->name()->str()].emplace_back(ms_tensor);
     }
   }
 }
 
 void LiteSession::InitGraphOutputMap(const lite::Model *model) {
   auto meta_graph = model->GetMetaGraph();
-  MS_ASSERT(this->output_map.empty());
+  MS_ASSERT(this->output_map_.empty());
   MS_ASSERT(meta_graph != nullptr);
   auto graph_output_node_indexes = GetGraphOutputNodes(meta_graph);
   for (auto out_node_index : graph_output_node_indexes) {
     auto *out_node = meta_graph->nodes()->GetAs<schema::CNode>(out_node_index);
     MS_ASSERT(nullptr != out_node);
-    MS_ASSERT(this->output_map.find(out_node->name()->str()) == this->output_map.end());
+    MS_ASSERT(this->output_map_.find(out_node->name()->str()) == this->output_map_.end());
     for (size_t i = 0; i < out_node->outputIndex()->size(); i++) {
       auto out_tensor_index = size_t(out_node->outputIndex()->GetAs<uint32_t>(i));
       bool is_graph_output = false;
@@ -157,12 +157,12 @@ void LiteSession::InitGraphOutputMap(const lite::Model *model) {
       if (!is_graph_output) {
         continue;
       }
-      MS_ASSERT(out_tensor_index < this->tensors.size());
-      auto *out_tensor = this->tensors.at(out_tensor_index);
+      MS_ASSERT(out_tensor_index < this->tensors_.size());
+      auto *out_tensor = this->tensors_.at(out_tensor_index);
       MS_ASSERT(out_tensor != nullptr);
       auto *ms_tensor = new tensor::LiteTensor(out_tensor);
       MS_ASSERT(nullptr != ms_tensor);
-      this->output_map[out_node->name()->str()].emplace_back(ms_tensor);
+      this->output_map_[out_node->name()->str()].emplace_back(ms_tensor);
     }
   }
 }
@@ -191,7 +191,7 @@ int LiteSession::CompileGraph(Model *model) {
 
   // scheduler kernels
   Scheduler scheduler(context_);
-  ret = scheduler.Schedule(model, &tensors, &kernels);
+  ret = scheduler.Schedule(model, &tensors_, &kernels_);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "Schedule kernels failed: " << ret;
     return ret;
@@ -202,7 +202,7 @@ int LiteSession::CompileGraph(Model *model) {
 
 std::vector<mindspore::tensor::MSTensor *> LiteSession::GetInputs() const {
   std::vector<mindspore::tensor::MSTensor *> ret;
-  for (auto &iter : this->input_map) {
+  for (auto &iter : this->input_map_) {
    auto &node_input_tensors = iter.second;
    for (auto tensor : node_input_tensors) {
      if (!IsContain(ret, tensor)) {
@@ -219,14 +219,14 @@ int LiteSession::RunGraph(const session::KernelCallBack &before, const session::KernelCallBack &after) {
   context_->running_ = true;
   Executor executor;
   if (before == nullptr && after == nullptr) {
-    return executor.Run(this->inputs, this->outputs, this->kernels, this->context_->allocator.get());
+    return executor.Run(this->inputs_, this->outputs_, this->kernels_, this->context_->allocator.get());
   } else {
-    return executor.Run(this->inputs, this->outputs, this->kernels, this->context_->allocator.get(), before, after);
+    return executor.Run(this->inputs_, this->outputs_, this->kernels_, this->context_->allocator.get(), before, after);
   }
 }
 
 std::unordered_map<std::string, std::vector<mindspore::tensor::MSTensor *>> LiteSession::GetOutputs() const {
-  return this->output_map;
+  return this->output_map_;
 }
 
 int LiteSession::Init(Context *context) {
@@ -252,46 +252,46 @@ int LiteSession::Init(Context *context) {
   return RET_OK;
 }
 
-void LiteSession::BindThread(bool ifBind) {
+void LiteSession::BindThread(bool if_bind) {
   if (this->context_->cpu_bind_mode_ != NO_BIND) {
-    DoAllThreadBind(ifBind, static_cast<CpuBindMode>(this->context_->cpu_bind_mode_));
+    DoAllThreadBind(if_bind, static_cast<CpuBindMode>(this->context_->cpu_bind_mode_));
  }
 }
 
 LiteSession::~LiteSession() {
-  for (auto *tensor : tensors) {
+  for (auto *tensor : tensors_) {
     // weight data can not be freed here; we will free weight data when freeing meta_graph
-    if (tensor->TensorType() == schema::NodeType_ValueNode && !IsContain(this->inputs, tensor)) {
+    if (tensor->TensorType() == schema::NodeType_ValueNode && !IsContain(this->inputs_, tensor)) {
      tensor->SetData(nullptr);
    }
    delete tensor;
  }
   // tensor::Tensor * in input_map and output_map are freed in tensors
-  for (auto iter : this->input_map) {
+  for (auto iter : this->input_map_) {
    for (auto *ms_tensor : iter.second) {
      ((tensor::LiteTensor *)ms_tensor)->SetTensorImpl(nullptr);
      delete ms_tensor;
    }
    iter.second.clear();
  }
-  input_map.clear();
-  for (auto iter : this->output_map) {
+  input_map_.clear();
+  for (auto iter : this->output_map_) {
    for (auto *ms_tensor : iter.second) {
      ((tensor::LiteTensor *)ms_tensor)->SetTensorImpl(nullptr);
      delete ms_tensor;
    }
    iter.second.clear();
  }
-  output_map.clear();
-  for (auto *kernel : kernels) {
+  output_map_.clear();
+  for (auto *kernel : kernels_) {
    delete kernel;
  }
  delete this->context_;
 }
 
 std::vector<mindspore::tensor::MSTensor *> LiteSession::GetInputsByName(const std::string &name) const {
-  auto ret = input_map.find(name);
-  if (ret == input_map.end()) {
+  auto ret = input_map_.find(name);
+  if (ret == input_map_.end()) {
    MS_LOG(WARNING) << "Node " << name << " is not an input node";
    std::vector<mindspore::tensor::MSTensor *> empty_ret;
    return empty_ret;
@@ -300,8 +300,8 @@ std::vector<mindspore::tensor::MSTensor *> LiteSession::GetInputsByName(const std::string &name) const {
 }
 
 std::vector<mindspore::tensor::MSTensor *> LiteSession::GetOutputsByName(const std::string &name) const {
-  auto ret = output_map.find(name);
-  if (ret == output_map.end()) {
+  auto ret = output_map_.find(name);
+  if (ret == output_map_.end()) {
    MS_LOG(WARNING) << "Node " << name << " is not an output node";
    std::vector<mindspore::tensor::MSTensor *> empty_ret;
    return empty_ret;
diff --git a/mindspore/lite/src/lite_session.h b/mindspore/lite/src/lite_session.h
index 05fd11a19e..d66873450d 100644
--- a/mindspore/lite/src/lite_session.h
+++ b/mindspore/lite/src/lite_session.h
@@ -38,7 +38,7 @@ class LiteSession : public session::LiteSession {
 
   int Init(Context *context);
 
-  void BindThread(bool ifBind) override;
+  void BindThread(bool if_bind) override;
 
   int CompileGraph(Model *model) override;
 
@@ -68,16 +68,16 @@ class LiteSession : public session::LiteSession {
  protected:
   Context *context_ = nullptr;
-  std::vector<kernel::LiteKernel *> kernels;
-  std::vector<tensor::Tensor *> tensors;
+  std::vector<kernel::LiteKernel *> kernels_;
+  std::vector<tensor::Tensor *> tensors_;
   // graph input tensors
-  std::vector<tensor::Tensor *> inputs;
+  std::vector<tensor::Tensor *> inputs_;
   // graph output tensors
-  std::vector<tensor::Tensor *> outputs;
+  std::vector<tensor::Tensor *> outputs_;
   // graph input node name -- input tensors
-  std::unordered_map<std::string, std::vector<mindspore::tensor::MSTensor *>> input_map;
+  std::unordered_map<std::string, std::vector<mindspore::tensor::MSTensor *>> input_map_;
   // graph output node name -- output tensors
-  std::unordered_map<std::string, std::vector<mindspore::tensor::MSTensor *>> output_map;
+  std::unordered_map<std::string, std::vector<mindspore::tensor::MSTensor *>> output_map_;
 };
 }  // namespace lite
 }  // namespace mindspore
diff --git a/mindspore/lite/src/model.cc b/mindspore/lite/src/model.cc
index a1176216ce..79c27e8761 100644
--- a/mindspore/lite/src/model.cc
+++ b/mindspore/lite/src/model.cc
@@ -30,9 +30,7 @@ Model *Model::Import(const char *model_buf, size_t size) {
   return model;
 }
 
-Model::~Model() {
-  delete(this->model_impl_);
-}
+Model::~Model() { delete (this->model_impl_); }
 
 lite::Primitive *Model::GetOp(const std::string &name) const {
   MS_EXCEPTION_IF_NULL(model_impl_);
@@ -46,7 +44,7 @@ void Model::FreeMetaGraph() {
 
 const schema::MetaGraph *Model::GetMetaGraph() const {
   MS_EXCEPTION_IF_NULL(model_impl_);
-  return model_impl_->GetMetaGraph();
+  return model_impl_->meta_graph();
 }
 
 ModelImpl *Model::model_impl() {
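
Note: the model lifecycle implied by the renames above, as a sketch. That the serialized graph may be freed once CompileGraph returns is an assumption based on FreeMetaGraph's documentation, not something this patch states:

    auto *model = mindspore::lite::Model::Import(buf, size);  // buf/size: bytes of a .ms file
    session->CompileGraph(model);
    model->FreeMetaGraph();  // assumption: flatbuffer no longer needed after compiling
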
diff --git a/mindspore/lite/src/model_impl.cc b/mindspore/lite/src/model_impl.cc
index 3b2f10c01d..3beab515a6 100644
--- a/mindspore/lite/src/model_impl.cc
+++ b/mindspore/lite/src/model_impl.cc
@@ -47,8 +47,8 @@ ModelImpl *ModelImpl::Import(const char *model_buf, size_t size) {
 }
 
 lite::Primitive *ModelImpl::GetOp(const std::string &name) const {
-  auto iter = ops.find(name);
-  if (iter == ops.end()) {
+  auto iter = ops_.find(name);
+  if (iter == ops_.end()) {
    return nullptr;
  } else {
    return iter->second;
@@ -57,10 +57,10 @@ lite::Primitive *ModelImpl::GetOp(const std::string &name) const {
 
 ModelImpl::~ModelImpl() {
   delete[](this->model_buf_);
-  for (auto iter : ops) {
+  for (auto iter : ops_) {
    delete (iter.second);
  }
-  ops.clear();
+  ops_.clear();
 }
 
 void ModelImpl::FreeMetaGraph() {
@@ -68,168 +68,168 @@ void ModelImpl::FreeMetaGraph() {
   model_buf_ = nullptr;
 }
 
-const schema::MetaGraph *ModelImpl::GetMetaGraph() const { return this->meta_graph; }
+const schema::MetaGraph *ModelImpl::meta_graph() const { return this->meta_graph_; }
 
-lite::Primitive *ModelImpl::CopyPrimitive(const schema::Primitive *srcPrim) {
-  MS_EXCEPTION_IF_NULL(srcPrim);
-  auto op_type = srcPrim->value_type();
+lite::Primitive *ModelImpl::CopyPrimitive(const schema::Primitive *src_prim) {
+  MS_EXCEPTION_IF_NULL(src_prim);
+  auto op_type = src_prim->value_type();
   switch (op_type) {
    case schema::PrimitiveType_SoftMax:
-      return new lite::SoftMax(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::SoftMax(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Activation:
-      return new lite::Activation(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Activation(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Conv2D:
-      return new lite::Conv2D(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Conv2D(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_DeConv2D:
-      return new lite::DeConv2D(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::DeConv2D(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Reduce:
-      return new lite::Reduce(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Reduce(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Pooling:
-      return new lite::Pooling(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Pooling(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_DepthwiseConv2D:
-      return new lite::DepthwiseConv2D(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::DepthwiseConv2D(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_FusedBatchNorm:
-      return new lite::FusedBatchNorm(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::FusedBatchNorm(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_BatchNorm:
-      return new lite::BatchNorm(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::BatchNorm(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_FullConnection:
-      return new lite::FullConnection(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::FullConnection(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Power:
-      return new lite::Power(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Power(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Range:
-      return new lite::Range(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Range(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Mul:
-      return new lite::Mul(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Mul(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Add:
-      return new lite::Add(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Add(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Sub:
-      return new lite::Sub(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Sub(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Div:
-      return new lite::Div(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Div(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_BiasAdd:
-      return new lite::BiasAdd(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::BiasAdd(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_ExpandDims:
-      return new lite::ExpandDims(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::ExpandDims(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_ArgMax:
-      return new lite::ArgMax(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::ArgMax(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_ArgMin:
-      return new lite::ArgMin(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::ArgMin(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Cast:
-      return new lite::Cast(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Cast(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Reshape:
-      return new lite::Reshape(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Reshape(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Scale:
-      return new lite::Scale(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Scale(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Eltwise:
-      return new lite::Eltwise(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Eltwise(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Concat:
-      return new lite::Concat(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Concat(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Fill:
-      return new lite::Fill(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Fill(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Transpose:
-      return new lite::Transpose(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Transpose(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Slice:
-      return new lite::Slice(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Slice(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Squeeze:
-      return new lite::Squeeze(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Squeeze(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Nchw2Nhwc:
-      return new lite::Nchw2Nhwc(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Nchw2Nhwc(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Nhwc2Nchw:
-      return new lite::Nhwc2Nchw(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Nhwc2Nchw(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Flatten:
-      return new lite::Flatten(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Flatten(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Mean:
-      return new lite::Mean(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Mean(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Stack:
-      return new lite::Stack(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Stack(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Crop:
-      return new lite::Crop(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Crop(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_SquaredDifference:
-      return new lite::SquaredDifference(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::SquaredDifference(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_AddN:
-      return new lite::AddN(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::AddN(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Abs:
-      return new lite::Abs(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Abs(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Sin:
-      return new lite::Sin(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Sin(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Cos:
-      return new lite::Cos(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Cos(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Log:
-      return new lite::Log(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Log(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Sqrt:
-      return new lite::Sqrt(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Sqrt(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Rsqrt:
-      return new lite::Rsqrt(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Rsqrt(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Square:
-      return new lite::Square(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Square(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Exp:
-      return new lite::Exp(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Exp(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Gather:
-      return new lite::Gather(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Gather(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_GatherNd:
-      return new lite::GatherNd(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::GatherNd(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_LocalResponseNormalization:
-      return new lite::LocalResponseNormalization(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::LocalResponseNormalization(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Maximum:
-      return new lite::Maximum(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Maximum(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Minimum:
-      return new lite::Minimum(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Minimum(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Pad:
-      return new lite::Pad(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Pad(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_StridedSlice:
-      return new lite::StridedSlice(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::StridedSlice(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Prelu:
-      return new lite::Prelu(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Prelu(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Round:
-      return new lite::Round(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Round(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Reverse:
-      return new lite::Reverse(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Reverse(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_ReverseSequence:
-      return new lite::ReverseSequence(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::ReverseSequence(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_LogicalAnd:
-      return new lite::LogicalAnd(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::LogicalAnd(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_LogicalOr:
-      return new lite::LogicalOr(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::LogicalOr(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_LogicalNot:
-      return new lite::LogicalNot(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::LogicalNot(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_FloorDiv:
-      return new lite::FloorDiv(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::FloorDiv(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_FloorMod:
-      return new lite::FloorMod(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::FloorMod(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Equal:
-      return new lite::Equal(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Equal(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_NotEqual:
-      return new lite::NotEqual(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::NotEqual(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Less:
-      return new lite::Less(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Less(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_LessEqual:
-      return new lite::LessEqual(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::LessEqual(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Greater:
-      return new lite::Greater(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Greater(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_GreaterEqual:
-      return new lite::GreaterEqual(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::GreaterEqual(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Floor:
-      return new lite::Floor(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Floor(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Ceil:
-      return new lite::Ceil(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Ceil(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Split:
-      return new lite::Split(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Split(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_OneHot:
-      return new lite::OneHot(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::OneHot(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_Resize:
-      return new lite::Resize(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::Resize(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_MatMul:
-      return new lite::MatMul(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::MatMul(const_cast<schema::Primitive *>(src_prim));
    case schema::PrimitiveType_QuantDTypeCast:
-      return new lite::QuantDTypeCast(const_cast<schema::Primitive *>(srcPrim));
+      return new lite::QuantDTypeCast(const_cast<schema::Primitive *>(src_prim));
schema::PrimitiveType_EmbeddingLookup: - return new lite::EmbeddingLookup(const_cast(srcPrim)); + return new lite::EmbeddingLookup(const_cast(src_prim)); case schema::PrimitiveType_Elu: - return new lite::Elu(const_cast(srcPrim)); + return new lite::Elu(const_cast(src_prim)); case schema::PrimitiveType_DeDepthwiseConv2D: - return new lite::DeconvDepthwiseConv2D(const_cast(srcPrim)); + return new lite::DeconvDepthwiseConv2D(const_cast(src_prim)); case schema::PrimitiveType_Shape: - return new lite::Shape(const_cast(srcPrim)); + return new lite::Shape(const_cast(src_prim)); default: break; } @@ -237,17 +237,17 @@ lite::Primitive *ModelImpl::CopyPrimitive(const schema::Primitive *srcPrim) { } int ModelImpl::BuildOps() { - if (this->meta_graph == nullptr) { + if (this->meta_graph_ == nullptr) { MS_LOG(ERROR) << "mete_graph is nullptr"; return -1; } - MS_EXCEPTION_IF_NULL(meta_graph->nodes()); - for (size_t i = 0; i < meta_graph->nodes()->size(); i++) { - auto cNode = meta_graph->nodes()->GetAs(i); + MS_EXCEPTION_IF_NULL(meta_graph_->nodes()); + for (size_t i = 0; i < meta_graph_->nodes()->size(); i++) { + auto cNode = meta_graph_->nodes()->GetAs(i); auto name = cNode->name()->str(); auto srcPrim = cNode->primitive(); - this->ops[name] = CopyPrimitive(srcPrim); + this->ops_[name] = CopyPrimitive(srcPrim); // flatbuffers::FlatBufferBuilder fbb(1024); // schema::Conv2DBuilder conv2DBuilder(fbb); // conv2DBuilder.add_padMode(srcPrim->value_as_Conv2D()->padMode()); diff --git a/mindspore/lite/src/model_impl.h b/mindspore/lite/src/model_impl.h index 8a1af3e9dc..82a74e1bf6 100644 --- a/mindspore/lite/src/model_impl.h +++ b/mindspore/lite/src/model_impl.h @@ -30,25 +30,24 @@ class ModelImpl { static ModelImpl *Import(const char *model_buf, size_t size); ModelImpl() = default; explicit ModelImpl(const char *model_buf, size_t size) : model_buf_(model_buf), buf_size_(size) { - meta_graph = schema::GetMetaGraph(model_buf); + meta_graph_ = schema::GetMetaGraph(model_buf); } virtual ~ModelImpl(); lite::Primitive *GetOp(const std::string &name) const; - const schema::MetaGraph *GetMetaGraph() const; + const schema::MetaGraph *meta_graph() const; void FreeMetaGraph(); int BuildOps(); protected: - lite::Primitive *CopyPrimitive(const schema::Primitive *srcPrim); + lite::Primitive *CopyPrimitive(const schema::Primitive *src_prim); protected: const char *model_buf_; size_t buf_size_; - const schema::MetaGraph *meta_graph = nullptr; - std::map ops; + const schema::MetaGraph *meta_graph_ = nullptr; + std::map ops_; }; } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_INCLUDE_MODEL_H_ - diff --git a/mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.cc index d0108c17c6..1f05c73e86 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.cc @@ -33,8 +33,8 @@ using mindspore::schema::PrimitiveType_ArgMin; namespace mindspore::kernel { int ArgMinMaxBaseCPUKernel::Init() { - auto param = reinterpret_cast(opParameter); - switch (opParameter->type_) { + auto param = reinterpret_cast(op_parameter_); + switch (op_parameter_->type_) { case PrimitiveType_ArgMax: param->get_max_ = true; break; @@ -42,7 +42,7 @@ int ArgMinMaxBaseCPUKernel::Init() { param->get_max_ = false; break; default: - MS_LOG(ERROR) << "Unexpected type " << opParameter->type_; + MS_LOG(ERROR) << "Unexpected type " << op_parameter_->type_; return RET_ERROR; } @@ -50,9 +50,9 @@ int 
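The ModelImpl hunks above are the heart of the naming cleanup: private data members gain a trailing underscore (meta_graph_, ops_), the trivial getter is renamed from GetMetaGraph() to a lower_snake_case accessor named after the member, and parameters become lower_snake_case (src_prim). A minimal self-contained sketch of that convention, with illustrative names rather than the repo's classes:

#include <map>
#include <string>

// Illustrative only: the naming rules this patch applies everywhere.
class Model {
 public:
  // Accessor named after the member, lower_snake_case, no "Get" prefix.
  const std::string *meta_graph() const { return meta_graph_; }

  // Parameters are lower_snake_case: src_prim, not srcPrim.
  void AddOp(const std::string &name, int *src_prim) { ops_[name] = src_prim; }

 private:
  // Data members end with a trailing underscore.
  const std::string *meta_graph_ = nullptr;
  std::map<std::string, int *> ops_;
};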
ArgMinMaxBaseCPUKernel::Init() { } int ArgMinMaxBaseCPUKernel::ReSize() { - auto in_shape = inputs_.at(0)->shape(); + auto in_shape = in_tensors_.at(0)->shape(); auto dims_size = in_shape.size(); - auto param = reinterpret_cast(opParameter); + auto param = reinterpret_cast(op_parameter_); int axis = param->axis_ < 0 ? param->axis_ + dims_size : param->axis_; param->axis_ = axis; param->dims_size_ = dims_size; @@ -75,25 +75,25 @@ int ArgMinMaxBaseCPUKernel::ReSize() { } } ComputeStrides(in_shape.data(), param->in_strides_, in_shape.size()); - auto out_shape = outputs_.at(0)->shape(); + auto out_shape = out_tensors_.at(0)->shape(); ComputeStrides(out_shape.data(), param->out_strides_, out_shape.size()); return RET_OK; } int ArgMinMaxBaseCPUKernel::Run() { - auto input = inputs_.at(0); + auto input = in_tensors_.at(0); - auto input_data = reinterpret_cast(inputs_.at(0)->Data()); - auto output_data = outputs_.at(0)->Data(); + auto input_data = reinterpret_cast(in_tensors_.at(0)->Data()); + auto output_data = out_tensors_.at(0)->Data(); auto shape = input->shape().data(); - auto param = reinterpret_cast(opParameter); + auto param = reinterpret_cast(op_parameter_); ArgMinMax(input_data, output_data, reinterpret_cast(shape), param); return RET_OK; } void ArgMinMaxBaseCPUKernel::FreeTmpMemory() { - auto param = reinterpret_cast(opParameter); + auto param = reinterpret_cast(op_parameter_); if (param->arg_elements_ == nullptr) { return; } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.cc index 459364679e..acc9ab8ef2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.cc @@ -30,7 +30,7 @@ using mindspore::schema::PrimitiveType_BatchToSpace; namespace mindspore::kernel { int BatchToSpaceBaseCPUKernel::Init() { - BatchToSpaceParameter *param = reinterpret_cast(this->opParameter); + BatchToSpaceParameter *param = reinterpret_cast(this->op_parameter_); for (int i = 0; i < BATCH_TO_SPACE_CROPS_SIZE; ++i) { if (param->crops_[i] != 0) { no_crop_ = false; @@ -40,7 +40,7 @@ int BatchToSpaceBaseCPUKernel::Init() { } int BatchToSpaceBaseCPUKernel::ReSize() { - if (inputs_[0]->GetFormat() != schema::Format_NHWC) { + if (in_tensors_[0]->GetFormat() != schema::Format_NHWC) { MS_LOG(ERROR) << "batch_to_space only support NHWC now!"; return RET_FORMAT_ERR; } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/concat_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/concat_base.cc index c8195413ba..e28d88b771 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/concat_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/concat_base.cc @@ -32,7 +32,7 @@ namespace mindspore::kernel { int ConcatBaseCPUKernel::Init() { return RET_OK; } int ConcatBaseCPUKernel::ReSize() { - axis_ = concat_param_->axis_ >= 0 ? concat_param_->axis_ : inputs_.front()->shape().size() + concat_param_->axis_; + axis_ = concat_param_->axis_ >= 0 ? 
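ArgMinMaxBaseCPUKernel::ReSize above first folds a negative axis into range (param->axis_ + dims_size) and then fills in_strides_/out_strides_ via ComputeStrides from the tensor shapes. The call sites imply ordinary row-major strides; a plausible implementation consistent with them (a sketch, not the repo's function):

#include <cstddef>

// Row-major strides: for shape {2, 3, 4, 5} this yields {60, 20, 5, 1}.
void ComputeStrides(const int *shape, int *strides, size_t ndim) {
  int stride = 1;
  for (size_t i = ndim; i > 0; --i) {
    strides[i - 1] = stride;
    stride *= shape[i - 1];
  }
}

// Negative axes count from the back: axis -1 on a 4-D tensor maps to 3.
int NormalizeAxis(int axis, int dims_size) { return axis < 0 ? axis + dims_size : axis; }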
concat_param_->axis_ : in_tensors_.front()->shape().size() + concat_param_->axis_; return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/concat_base.h b/mindspore/lite/src/runtime/kernel/arm/base/concat_base.h index 1d058b6b69..a505563cb8 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/concat_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/concat_base.h @@ -31,7 +31,7 @@ class ConcatBaseCPUKernel : public LiteKernel { const std::vector &outputs, const Context *ctx, const lite::Primitive *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { - concat_param_ = reinterpret_cast(opParameter); + concat_param_ = reinterpret_cast(op_parameter_); } virtual ~ConcatBaseCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.cc index 43cc5605c6..2549ff251b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.cc @@ -77,8 +77,8 @@ void ConvolutionBaseCPUKernel::FreeQuantParam() { } int ConvolutionBaseCPUKernel::Init() { - auto input = this->inputs_.front(); - auto output = this->outputs_.front(); + auto input = this->in_tensors_.front(); + auto output = this->out_tensors_.front(); conv_param_->input_batch_ = input->Batch(); conv_param_->input_h_ = input->Height(); conv_param_->input_w_ = input->Width(); @@ -118,9 +118,9 @@ int ConvolutionBaseCPUKernel::SetQuantParam() { return RET_ERROR; } } - auto input_tensor = inputs_.at(kInputIndex); - auto weight_tensor = inputs_.at(kWeightIndex); - auto output_tensor = outputs_.at(kOutputIndex); + auto input_tensor = in_tensors_.at(kInputIndex); + auto weight_tensor = in_tensors_.at(kWeightIndex); + auto output_tensor = out_tensors_.at(kOutputIndex); auto input_quant_arg = input_tensor->GetQuantParams().front(); auto weight_quant_arg = weight_tensor->GetQuantParams().front(); auto output_quant_arg = output_tensor->GetQuantParams().front(); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.h b/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.h index a2845b6897..1394293bed 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.h @@ -40,8 +40,8 @@ class ConvolutionBaseCPUKernel : public LiteKernel { const std::vector &outputs, const Context *ctx, const lite::Primitive *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { - opParameter->thread_num_ = ctx->thread_num_; - conv_param_ = reinterpret_cast(opParameter); + op_parameter_->thread_num_ = ctx->thread_num_; + conv_param_ = reinterpret_cast(op_parameter_); } ~ConvolutionBaseCPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.cc index cfb555e025..a5b284d0c2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.cc @@ -34,27 +34,27 @@ namespace mindspore::kernel { int DepthToSpaceBaseCPUKernel::Init() { return RET_OK; } int DepthToSpaceBaseCPUKernel::ReSize() { - if (inputs_[0]->GetFormat() != schema::Format_NHWC) { + if (in_tensors_[0]->GetFormat() != schema::Format_NHWC) { MS_LOG(ERROR) << "depth_to_space only support NHWC now!"; return 
RET_FORMAT_ERR; } - DepthToSpaceParameter *param = reinterpret_cast(opParameter); + DepthToSpaceParameter *param = reinterpret_cast(op_parameter_); if (param->block_size_ <= 0) { MS_LOG(ERROR) << "Input block_size should > 0!"; return RET_PARAM_INVALID; } - auto shape_size = inputs_[0]->shape().size(); + auto shape_size = in_tensors_[0]->shape().size(); if (shape_size != DIMENSION_4D) { MS_LOG(ERROR) << "Input shape size should be " << DIMENSION_4D; return RET_PARAM_INVALID; } int32_t in_strides[DIMENSION_4D]; - ComputeStrides(const_cast(inputs_[0]->shape().data()), in_strides, shape_size); + ComputeStrides(const_cast(in_tensors_[0]->shape().data()), in_strides, shape_size); param->in_stride_dim0_ = in_strides[0]; param->in_stride_dim1_ = in_strides[1]; param->in_stride_dim2_ = in_strides[2]; int32_t out_strides[DIMENSION_4D]; - ComputeStrides(const_cast(outputs_[0]->shape().data()), out_strides, shape_size); + ComputeStrides(const_cast(out_tensors_[0]->shape().data()), out_strides, shape_size); param->out_stride_dim0_ = out_strides[0]; param->out_stride_dim1_ = out_strides[1]; param->out_stride_dim2_ = out_strides[2]; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.h b/mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.h index bf03869188..e08206c1a9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.h @@ -31,7 +31,7 @@ class FullconnectionBaseCPUKernel : public LiteKernel { const std::vector &outputs, const Context *ctx, const lite::Primitive *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { - fc_param_ = reinterpret_cast(opParameter); + fc_param_ = reinterpret_cast(op_parameter_); } ~FullconnectionBaseCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/layout_transform.cc b/mindspore/lite/src/runtime/kernel/arm/base/layout_transform.cc index a97c392bf5..9af5de693d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/layout_transform.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/layout_transform.cc @@ -59,4 +59,3 @@ LayoutConvertor LayoutTransform(TypeId data_type, schema::Format src_format, sch } } } // namespace mindspore::kernel - diff --git a/mindspore/lite/src/runtime/kernel/arm/base/layout_transform.h b/mindspore/lite/src/runtime/kernel/arm/base/layout_transform.h index 99f37d923f..5296c53066 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/layout_transform.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/layout_transform.h @@ -38,4 +38,3 @@ LayoutConvertor LayoutTransform(TypeId data_type, schema::Format src_format, sch } // namespace mindspore::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_LAYOUT_TRANSFORM_H_ - diff --git a/mindspore/lite/src/runtime/kernel/arm/base/matmul_base.h b/mindspore/lite/src/runtime/kernel/arm/base/matmul_base.h index a1cda81bb4..5d089a4216 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/matmul_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/matmul_base.h @@ -31,7 +31,7 @@ class MatmulBaseCPUKernel : public LiteKernel { const std::vector &outputs, const Context *ctx, const lite::Primitive *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { - params_ = reinterpret_cast(opParameter); + params_ = reinterpret_cast(op_parameter_); } ~MatmulBaseCPUKernel() = default; diff --git 
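DepthToSpaceBaseCPUKernel::ReSize above validates NHWC input, a positive block_size_, and a 4-D shape, then caches input/output strides for the worker. For reference, the usual NHWC DepthToSpace semantics those checks serve, assuming the common DCR channel ordering (a sketch only; the repo's optimized kernel works from the cached strides instead):

// out[n][h*bs + bh][w*bs + bw][c] = in[n][h][w][(bh*bs + bw)*oc + c]
void DepthToSpaceRef(const float *in, float *out, int n, int h, int w, int c, int bs) {
  const int oc = c / (bs * bs);  // output channels after the block move
  for (int b = 0; b < n; ++b)
    for (int i = 0; i < h; ++i)
      for (int j = 0; j < w; ++j)
        for (int k = 0; k < c; ++k) {
          int bh = k / (bs * oc), bw = (k / oc) % bs, ch = k % oc;
          int oh = i * bs + bh, ow = j * bs + bw;
          out[((b * h * bs + oh) * (w * bs) + ow) * oc + ch] = in[((b * h + i) * w + j) * c + k];
        }
}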
a/mindspore/lite/src/runtime/kernel/arm/base/matrix.cc b/mindspore/lite/src/runtime/kernel/arm/base/matrix.cc index a979c70d67..4eb619562f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/matrix.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/matrix.cc @@ -20,13 +20,13 @@ namespace mindspore::kernel { Matrix *TransformMatrixGenerator(int m, int k) { auto matrix = new Matrix; - auto aa = malloc(m * k * sizeof(float)); + auto aa = malloc(m * k * sizeof(float)); matrix->SetData(aa); matrix->SetNum(m, k); -// matrix->data_ = malloc(m * k * sizeof(float)); -// matrix->m_ = m; -// matrix->k_ = k; -// matrix->row_major_ = true; + // matrix->data_ = malloc(m * k * sizeof(float)); + // matrix->m_ = m; + // matrix->k_ = k; + // matrix->row_major_ = true; return matrix; } @@ -80,4 +80,3 @@ void MatrixMultiply(const float *matrix_a, const float *matrix_b, float *matrix_ } } } // namespace mindspore::kernel - diff --git a/mindspore/lite/src/runtime/kernel/arm/base/matrix.h b/mindspore/lite/src/runtime/kernel/arm/base/matrix.h index eb0c5e4cb2..c29d0944ac 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/matrix.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/matrix.h @@ -95,4 +95,3 @@ void MatrixMultiply(const float *matrix_a, const float *matrix_b, float *matrix_ } // namespace mindspore::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_MATRIX_H_ - diff --git a/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.cc index 45e7186b6e..010467116d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.cc @@ -33,9 +33,9 @@ int PoolingBaseCPUKernel::SetQuantParam() { pooling_quant_arg_ = reinterpret_cast(malloc(2 * sizeof(QuantArg *))); pooling_quant_arg_[0] = reinterpret_cast(malloc(sizeof(QuantArg))); pooling_quant_arg_[1] = reinterpret_cast(malloc(sizeof(QuantArg))); - auto *input_tensor = inputs_.at(kInputIndex); + auto *input_tensor = in_tensors_.at(kInputIndex); auto in_quant_arg = input_tensor->GetQuantParams(); - auto *out_tensor = outputs_.at(kOutputIndex); + auto *out_tensor = out_tensors_.at(kOutputIndex); auto out_quant_arg = out_tensor->GetQuantParams(); pooling_quant_arg_[0][0].scale_ = in_quant_arg.front().scale; pooling_quant_arg_[0][0].zp_ = in_quant_arg.front().zeroPoint; @@ -57,15 +57,15 @@ void PoolingBaseCPUKernel::FreeQuantParam() { int PoolingBaseCPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } - MS_ASSERT(inputs_.size() == 1); - MS_ASSERT(outputs_.size() == 1); + MS_ASSERT(in_tensors_.size() == 1); + MS_ASSERT(out_tensors_.size() == 1); pooling_param_->thread_num_ = thread_count_; - MS_ASSERT(this->opParameter != nullptr); - auto in_tensor = this->inputs_.front(); - auto out_tensor = this->outputs_.front(); + MS_ASSERT(this->op_parameter_ != nullptr); + auto in_tensor = this->in_tensors_.front(); + auto out_tensor = this->out_tensors_.front(); MS_ASSERT(in_tensor != nullptr); MS_ASSERT(out_tensor != nullptr); pooling_param_->input_batch_ = in_tensor->Batch(); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.h b/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.h index 4a56a44f9c..25cea6749a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.h @@ -32,7 +32,7 @@ class PoolingBaseCPUKernel : public LiteKernel { const 
std::vector &outputs, const Context *ctx, const lite::Primitive *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { - pooling_param_ = reinterpret_cast(opParameter); + pooling_param_ = reinterpret_cast(op_parameter_); } ~PoolingBaseCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/prelu_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/prelu_base.cc index 1c765d6de3..f590883576 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/prelu_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/prelu_base.cc @@ -27,18 +27,18 @@ using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Prelu; namespace mindspore::kernel { -int PreluBaseCPUKernel::Init() {return RET_OK;} +int PreluBaseCPUKernel::Init() { return RET_OK; } kernel::LiteKernel *CpuPreluInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const std::vector &outputs, + OpParameter *opParameter, const Context *ctx, + const kernel::KernelKey &desc, const lite::Primitive *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; } MS_ASSERT(desc.type == schema::PrimitiveType_Prelu); - auto *kernel = new(std::nothrow) PreluInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive); + auto *kernel = new (std::nothrow) PreluInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive); if (kernel == nullptr) { MS_LOG(ERROR) << "new PreluCPUKernel fail!"; return nullptr; @@ -55,4 +55,3 @@ kernel::LiteKernel *CpuPreluInt8KernelCreator(const std::vector &outputs, const Context *ctx, const lite::Primitive *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { - opParameter->thread_num_ = ctx->thread_num_; - prelu_param_ = reinterpret_cast(opParameter); + op_parameter_->thread_num_ = ctx->thread_num_; + prelu_param_ = reinterpret_cast(op_parameter_); } ~PreluBaseCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/prior_box.cc b/mindspore/lite/src/runtime/kernel/arm/base/prior_box.cc index 680e62e329..a90a2ab07f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/prior_box.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/prior_box.cc @@ -41,11 +41,11 @@ int PriorBoxCPUKernel::Init() { } if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } - MS_ASSERT(inputs_.size() == kInputNum); - MS_ASSERT(outputs_.size() == kOutputNum); + MS_ASSERT(in_tensors_.size() == kInputNum); + MS_ASSERT(out_tensors_.size() == kOutputNum); auto ret = GeneratePriorBox(); @@ -53,11 +53,11 @@ int PriorBoxCPUKernel::Init() { } int PriorBoxCPUKernel::GeneratePriorBox() { - const int fmap_w = inputs_[0]->Width(); - const int fmap_h = inputs_[0]->Height(); + const int fmap_w = in_tensors_[0]->Width(); + const int fmap_h = in_tensors_[0]->Height(); - const int image_w = prior_box_param_->image_size_w > 0 ? prior_box_param_->image_size_w : inputs_[1]->Width(); - const int image_h = prior_box_param_->image_size_h > 0 ? prior_box_param_->image_size_h : inputs_[1]->Height(); + const int image_w = prior_box_param_->image_size_w > 0 ? prior_box_param_->image_size_w : in_tensors_[1]->Width(); + const int image_h = prior_box_param_->image_size_h > 0 ? 
prior_box_param_->image_size_h : in_tensors_[1]->Height(); const float step_w = prior_box_param_->step_w > 0.0f ? prior_box_param_->step_w : static_cast(image_w) / fmap_w; @@ -128,7 +128,7 @@ int PriorBoxCPUKernel::GeneratePriorBox() { } // variance - for (auto i = 0; i < outputs_[0]->Height() / PRIOR_BOX_VAR_NUM; i++) { + for (auto i = 0; i < out_tensors_[0]->Height() / PRIOR_BOX_VAR_NUM; i++) { for (auto j = 0; j < PRIOR_BOX_VAR_NUM; j++) { output_.emplace_back(prior_box_param_->variances[j]); } @@ -138,7 +138,7 @@ int PriorBoxCPUKernel::GeneratePriorBox() { int PriorBoxCPUKernel::PriorBoxImpl(int task_id) { auto src = output_.data(); - auto output = outputs_.at(0); + auto output = out_tensors_.at(0); if (output == nullptr) { return RET_NULL_PTR; } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/prior_box.h b/mindspore/lite/src/runtime/kernel/arm/base/prior_box.h index 98e5453a90..4685090c56 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/prior_box.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/prior_box.h @@ -31,7 +31,7 @@ class PriorBoxCPUKernel : public LiteKernel { const std::vector &outputs, const Context *ctx, const lite::Primitive *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { - prior_box_param_ = reinterpret_cast(opParameter); + prior_box_param_ = reinterpret_cast(op_parameter_); } ~PriorBoxCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.cc b/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.cc index dd82d2af5f..10068184d5 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.cc @@ -30,17 +30,17 @@ using mindspore::schema::PrimitiveType_QuantDTypeCast; namespace mindspore::kernel { int QuantDTypeCastCPUKernel::Init() { - if (inputs_.size() != 1) { - MS_LOG(ERROR) << "inputs number should be 1, but " << inputs_.size() << " is given."; + if (in_tensors_.size() != 1) { + MS_LOG(ERROR) << "inputs number should be 1, but " << in_tensors_.size() << " is given."; return RET_PARAM_INVALID; } - if (outputs_.size() != 1) { - MS_LOG(ERROR) << "outputs number should be 1, but " << inputs_.size() << " is given."; + if (out_tensors_.size() != 1) { + MS_LOG(ERROR) << "outputs number should be 1, but " << out_tensors_.size() << " is given."; return RET_PARAM_INVALID; } - auto in_tensor = inputs_.front(); - auto out_tensor = outputs_.front(); - auto param = reinterpret_cast(opParameter); + auto in_tensor = in_tensors_.front(); + auto out_tensor = out_tensors_.front(); + auto param = reinterpret_cast(op_parameter_); if (param->srcT == kNumberTypeFloat32 && param->dstT == kNumberTypeInt8) { if (in_tensor->data_type() != kNumberTypeFloat32 || out_tensor->data_type() != kNumberTypeInt8) { MS_LOG(ERROR) << "param data type and tensor data type do not match."; @@ -65,7 +65,7 @@ int QuantDTypeCastCPUKernel::Init() { } int QuantDTypeCastCPUKernel::ReSize() { - auto in_tensor = inputs_.front(); + auto in_tensor = in_tensors_.front(); num_unit_ = static_cast(in_tensor->DataSize()); thread_n_num_ = MSMIN(thread_num_, num_unit_); thread_n_stride_ = UP_DIV(num_unit_, thread_n_num_); @@ -78,7 +78,7 @@ int QuantDTypeCastCPUKernel::QuantDTypeCast(int task_id) { return RET_OK; } int thread_offset = task_id * thread_n_stride_; - auto quant_arg = inputs_.front()->GetQuantParams().front(); + auto quant_arg = in_tensors_.front()->GetQuantParams().front(); int ret; if (inverse_) { ret = 
DequantizeInt8(int8_ptr_ + thread_offset, float32_ptr_ + thread_offset, quant_arg.scale, quant_arg.zeroPoint, @@ -111,11 +111,11 @@ int QuantDTypeCastCPUKernel::Run() { return prepare_ret; } if (inverse_) { - int8_ptr_ = reinterpret_cast(inputs_[0]->Data()); - float32_ptr_ = reinterpret_cast(outputs_[0]->Data()); + int8_ptr_ = reinterpret_cast(in_tensors_[0]->Data()); + float32_ptr_ = reinterpret_cast(out_tensors_[0]->Data()); } else { - float32_ptr_ = reinterpret_cast(inputs_[0]->Data()); - int8_ptr_ = reinterpret_cast(outputs_[0]->Data()); + float32_ptr_ = reinterpret_cast(in_tensors_[0]->Data()); + int8_ptr_ = reinterpret_cast(out_tensors_[0]->Data()); } int ret = LiteBackendParallelLaunch(QuantDTypeCastRun, this, thread_n_num_); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.h b/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.h index aeff32e82b..db5e536ada 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.h @@ -30,7 +30,7 @@ class ReshapeBaseCPUKernel : public LiteKernel { const std::vector &outputs, const Context *ctx, const lite::Primitive *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx) { - reshape_param_ = reinterpret_cast(opParameter); + reshape_param_ = reinterpret_cast(op_parameter_); } ~ReshapeBaseCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/resize_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/resize_base.cc index 9d2959dc47..8ae036b326 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/resize_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/resize_base.cc @@ -35,7 +35,7 @@ constexpr int kRank = 4; } // namespace int ResizeBaseCPUKernel::CheckParameters() { - auto parameter = reinterpret_cast(opParameter); + auto parameter = reinterpret_cast(op_parameter_); if (parameter == nullptr) { MS_LOG(ERROR) << "cast ResizeParameter failed."; return RET_NULL_PTR; @@ -65,19 +65,19 @@ int ResizeBaseCPUKernel::CheckParameters() { } int ResizeBaseCPUKernel::CheckInputsOuputs() { - if (inputs_.size() != kInputNum) { - MS_LOG(ERROR) << "Resize input num should be " << kInputNum << ", but got " << inputs_.size(); + if (in_tensors_.size() != kInputNum) { + MS_LOG(ERROR) << "Resize input num should be " << kInputNum << ", but got " << in_tensors_.size(); return RET_ERROR; } - auto input = inputs_.at(0); + auto input = in_tensors_.at(0); if (input == nullptr) { return RET_NULL_PTR; } - if (outputs_.size() != kOutputNum) { - MS_LOG(ERROR) << "Resize output num should be " << kOutputNum << ", but got " << outputs_.size(); + if (out_tensors_.size() != kOutputNum) { + MS_LOG(ERROR) << "Resize output num should be " << kOutputNum << ", but got " << out_tensors_.size(); return RET_ERROR; } - auto output = outputs_.at(0); + auto output = out_tensors_.at(0); if (output == nullptr) { return RET_NULL_PTR; } @@ -94,7 +94,7 @@ int ResizeBaseCPUKernel::Init() { return ret; } - auto input = inputs_.at(0); + auto input = in_tensors_.at(0); auto input_shape = input->shape(); if (input_shape.size() != kRank) { MS_LOG(ERROR) << "Resize op support input rank 4, got " << input_shape.size(); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.cc index bb6a8965b8..8e210aa086 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.cc @@ -25,8 +25,8 @@ using 
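QuantDTypeCastCPUKernel above converts between fp32 and int8 using the scale and zeroPoint taken from the input tensor's quant params, with inverse_ selecting the dequantize direction. Assuming the standard affine int8 scheme those names suggest (the exact rounding and clamping in the repo may differ), the two directions look like:

#include <algorithm>
#include <cmath>
#include <cstdint>

// real = scale * (q - zero_point); e.g. scale 0.5, zp 10, q 14 -> 2.0f
void DequantizeInt8Ref(const int8_t *in, float *out, float scale, int zp, int n) {
  for (int i = 0; i < n; ++i) out[i] = scale * (in[i] - zp);
}

// q = clamp(round(real / scale) + zero_point, -128, 127)
void QuantizeFp32Ref(const float *in, int8_t *out, float scale, int zp, int n) {
  for (int i = 0; i < n; ++i) {
    int q = static_cast<int>(std::round(in[i] / scale)) + zp;
    out[i] = static_cast<int8_t>(std::min(127, std::max(-128, q)));
  }
}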
mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; -using mindspore::lite::RET_OK; using mindspore::lite::RET_NULL_PTR; +using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_SoftMax; namespace mindspore::kernel { @@ -40,7 +40,7 @@ int SoftmaxBaseCPUKernel::Init() { } int SoftmaxBaseCPUKernel::ReSize() { - auto input_tensor = inputs_.front(); + auto input_tensor = in_tensors_.front(); auto in_shape = input_tensor->shape(); auto in_dims = in_shape.size(); int ele_size = 1; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.h b/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.h index 02a973deea..a933e7a25a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.h @@ -28,7 +28,7 @@ class SoftmaxBaseCPUKernel : public LiteKernel { const std::vector &outputs, const lite::Context *ctx, const lite::Primitive *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { - softmax_param_ = reinterpret_cast(opParameter); + softmax_param_ = reinterpret_cast(op_parameter_); } ~SoftmaxBaseCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/split_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/split_base.cc index e523af3bdb..8768226fd3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/split_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/split_base.cc @@ -31,7 +31,7 @@ namespace mindspore::kernel { int SplitBaseCPUKernel::Init() { return RET_OK; } int SplitBaseCPUKernel::ReSize() { - auto in_tensor = inputs_.front(); + auto in_tensor = in_tensors_.front(); auto input_shape = in_tensor->shape(); param->strides_[input_shape.size() - 1] = 1; @@ -61,9 +61,9 @@ int SplitBaseCPUKernel::ReSize() { } kernel::LiteKernel *CpuSplitInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const std::vector &outputs, + OpParameter *opParameter, const Context *ctx, + const kernel::KernelKey &desc, const lite::Primitive *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; @@ -85,9 +85,9 @@ kernel::LiteKernel *CpuSplitInt8KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const std::vector &outputs, + OpParameter *opParameter, const Context *ctx, + const kernel::KernelKey &desc, const lite::Primitive *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; @@ -109,9 +109,9 @@ kernel::LiteKernel *CpuSplitInt32KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const std::vector &outputs, + OpParameter *opParameter, const Context *ctx, + const kernel::KernelKey &desc, const lite::Primitive *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/split_base.h b/mindspore/lite/src/runtime/kernel/arm/base/split_base.h index 0e0531a3ac..d5ae60ddc2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/split_base.h +++ 
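The three Cpu*KernelCreator functions re-indented above all follow one factory shape: reject a null OpParameter, construct the concrete kernel with new (std::nothrow), and return it for registration under its KernelKey. A stripped-down sketch of the pattern with simplified types (the real creators also take the tensor vectors, Context, KernelKey, and Primitive shown in the diff):

#include <iostream>
#include <new>

struct OpParameter { int type_; };
struct LiteKernel { virtual ~LiteKernel() = default; };
struct SplitCPUKernel : LiteKernel {
  explicit SplitCPUKernel(OpParameter *p) : param_(p) {}
  OpParameter *param_;
};

LiteKernel *CpuSplitKernelCreator(OpParameter *op_parameter) {
  if (op_parameter == nullptr) {
    std::cerr << "Input opParameter is nullptr!\n";  // mirrors the MS_LOG(ERROR) guard
    return nullptr;
  }
  auto *kernel = new (std::nothrow) SplitCPUKernel(op_parameter);
  if (kernel == nullptr) {
    std::cerr << "new SplitCPUKernel fail!\n";
    return nullptr;
  }
  return kernel;
}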
b/mindspore/lite/src/runtime/kernel/arm/base/split_base.h @@ -27,10 +27,10 @@ namespace mindspore::kernel { class SplitBaseCPUKernel : public LiteKernel { public: SplitBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { - param = reinterpret_cast(opParameter); + const std::vector &outputs, const Context *ctx, + const lite::Primitive *primitive) + : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { + param = reinterpret_cast(op_parameter_); } ~SplitBaseCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.cc b/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.cc index 6e294121a3..429b5a3f8b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.cc @@ -39,8 +39,8 @@ int StridedSliceCPUKernel::Init() { } int StridedSliceCPUKernel::ReSize() { - auto input = inputs_.at(0); - auto parameter = reinterpret_cast(opParameter); + auto input = in_tensors_.at(0); + auto parameter = reinterpret_cast(op_parameter_); MS_ASSERT(input); MS_ASSERT(parameter); parameter->data_type = input->data_type() == kNumberTypeInt8 ? kDataTypeInt8 : kDataTypeFloat; @@ -54,12 +54,12 @@ int StridedSliceCPUKernel::Run() { return ret; } - auto input = inputs_.at(0); - auto output = outputs_.at(0); + auto input = in_tensors_.at(0); + auto output = out_tensors_.at(0); MS_ASSERT(input); MS_ASSERT(output); - ret = DoStridedSlice(input->Data(), output->Data(), reinterpret_cast(opParameter)); + ret = DoStridedSlice(input->Data(), output->Data(), reinterpret_cast(op_parameter_)); if (ret != RET_OK) { MS_LOG(ERROR) << "StridedSlice error error_code[" << ret << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_3x3_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_3x3_fp16.cc index b294bb30db..9dc2aeae41 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_3x3_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_3x3_fp16.cc @@ -63,7 +63,7 @@ int Convolution3x3FP16CPUKernel::InitWeightBias() { return RET_ERROR; } memset(transformed_filter_addr_, 0, transformed_size); - float *origin_weight = reinterpret_cast(inputs_.at(kWeightIndex)->Data()); + float *origin_weight = reinterpret_cast(in_tensors_.at(kWeightIndex)->Data()); size_t fp16_weight_size = input_channel * output_channel * kernel_h * kernel_w * sizeof(float16_t); fp16_weight_ = reinterpret_cast(malloc(fp16_weight_size)); if (fp16_weight_ == nullptr) { @@ -85,8 +85,8 @@ int Convolution3x3FP16CPUKernel::InitWeightBias() { } memset(bias_data_, 0, new_bias_size); auto fp16_bias_data = reinterpret_cast(bias_data_); - if (inputs_.size() == kInputSize2) { - auto ori_bias_addr = reinterpret_cast(inputs_.at(kBiasIndex)->Data()); + if (in_tensors_.size() == kInputSize2) { + auto ori_bias_addr = reinterpret_cast(in_tensors_.at(kBiasIndex)->Data()); for (int i = 0; i < output_channel; ++i) { fp16_bias_data[i] = (float16_t)ori_bias_addr[i]; } @@ -131,8 +131,7 @@ int Convolution3x3FP16CPUKernel::InitTmpBuffer() { /*=============================tmp_out_============================*/ int new_out_plane = UP_DIV(conv_param_->output_h_, C4NUM) * UP_DIV(conv_param_->output_w_, C4NUM) * C4NUM * C4NUM; - size_t tmp_out_size = - oC8 * C8NUM 
* conv_param_->output_batch_ * new_out_plane * sizeof(float16_t); + size_t tmp_out_size = oC8 * C8NUM * conv_param_->output_batch_ * new_out_plane * sizeof(float16_t); tmp_out_ = reinterpret_cast(malloc(tmp_out_size)); if (tmp_out_ == nullptr) { MS_LOG(ERROR) << "malloc tmp_out_ failed."; @@ -172,7 +171,7 @@ int Convolution3x3FP16CPUKernel::InitTmpBuffer() { } void Convolution3x3FP16CPUKernel::ConfigInputOutput() { - auto input_tensor = inputs_.at(kInputIndex); + auto input_tensor = in_tensors_.at(kInputIndex); auto input_format = input_tensor->GetFormat(); schema::Format execute_format = schema::Format_NHWC4; convert_func_ = LayoutTransformFp16(input_format, execute_format); @@ -184,7 +183,7 @@ void Convolution3x3FP16CPUKernel::ConfigInputOutput() { int Convolution3x3FP16CPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } auto ret = ConvolutionBaseCPUKernel::Init(); @@ -265,7 +264,7 @@ int Convolution3x3FP16CPUKernel::Run() { MS_LOG(ERROR) << "Prepare failed."; return RET_ERROR; } - auto input_tensor = inputs_.at(kInputIndex); + auto input_tensor = in_tensors_.at(kInputIndex); auto ori_input_data = reinterpret_cast(input_tensor->Data()); for (int i = 0; i < input_tensor->ElementsNum(); ++i) { fp16_input_[i] = (float16_t)ori_input_data[i]; @@ -284,10 +283,10 @@ int Convolution3x3FP16CPUKernel::Run() { } // cast fp16 out to fp32 data - auto out_tensor = outputs_.at(kOutputIndex); + auto out_tensor = out_tensors_.at(kOutputIndex); auto output_addr = reinterpret_cast(out_tensor->Data()); for (int j = 0; j < out_tensor->ElementsNum(); ++j) { - output_addr[j] = static_cast(fp16_out_[j]); + output_addr[j] = static_cast(fp16_out_[j]); } return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_3x3_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_3x3_fp16.h index 8b5994be0a..80cde56287 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_3x3_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_3x3_fp16.h @@ -38,7 +38,7 @@ class Convolution3x3FP16CPUKernel : public ConvolutionBaseCPUKernel { free(fp16_weight_); } if (fp16_out_ != nullptr) { - free(fp16_out_); + free(fp16_out_); } if (transformed_filter_addr_ != nullptr) { free(transformed_filter_addr_); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc index c288550e86..1e0c8546b7 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc @@ -68,7 +68,7 @@ int ConvolutionDepthwiseFp16CPUKernel::InitBuffer() { int ConvolutionDepthwiseFp16CPUKernel::InitWeightBias() { // init weight: o, h, w, i; o == group, i == 1 int OC8 = UP_DIV(conv_param_->output_channel_, C8NUM); - auto weight_tensor = inputs_[kWeightIndex]; + auto weight_tensor = in_tensors_[kWeightIndex]; auto origin_weight = reinterpret_cast(weight_tensor->Data()); int pack_weight_size = C8NUM * OC8 * conv_param_->kernel_h_ * conv_param_->kernel_w_; @@ -89,8 +89,8 @@ int ConvolutionDepthwiseFp16CPUKernel::InitWeightBias() { } memset(bias_data_, 0, C8NUM * OC8 * sizeof(float16_t)); auto bias_fp16 = reinterpret_cast(bias_data_); - if (inputs_.size() == kInputSize2) { - auto ori_bias = reinterpret_cast(inputs_.at(kBiasIndex)->Data()); + if (in_tensors_.size() == kInputSize2) { + auto ori_bias = 
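The buffer sizing above relies on the oC8 = UP_DIV(output_channel, C8NUM) idiom: channel counts are rounded up to whole 8-lane blocks so packed fp16 buffers and the memset-zeroed bias always cover full vector registers; 17 output channels, for example, allocate 3 * 8 = 24 slots. In isolation:

#include <cstdlib>
#include <cstring>

#define C8NUM 8
#define UP_DIV(x, y) (((x) + (y) - 1) / (y))

// Allocate a bias buffer padded to whole 8-channel blocks, zeroed so the
// padding lanes contribute nothing (matches the memset before the copy-in).
float *AllocPaddedBias(int out_channel) {
  int oc8 = UP_DIV(out_channel, C8NUM);       // 17 -> 3
  size_t size = oc8 * C8NUM * sizeof(float);  // 24 floats
  float *bias = static_cast<float *>(malloc(size));
  if (bias != nullptr) memset(bias, 0, size);
  return bias;
}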
reinterpret_cast(in_tensors_.at(kBiasIndex)->Data()); for (int i = 0; i < conv_param_->output_channel_; i++) { bias_fp16[i] = (float16_t)ori_bias[i]; } @@ -102,7 +102,7 @@ int ConvolutionDepthwiseFp16CPUKernel::InitWeightBias() { int ConvolutionDepthwiseFp16CPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } // conv base init @@ -176,7 +176,7 @@ int ConvolutionDepthwiseFp16CPUKernel::Run() { return RET_ERROR; } - auto input_tensor = inputs_.at(kInputIndex); + auto input_tensor = in_tensors_.at(kInputIndex); auto input_addr = reinterpret_cast(input_tensor->Data()); // pack input: to nhwc8 PackNHWCFp32ToNHWC8Fp16(input_addr, packed_input_, conv_param_->input_batch_, @@ -188,7 +188,7 @@ int ConvolutionDepthwiseFp16CPUKernel::Run() { return RET_ERROR; } - auto output_addr = reinterpret_cast(outputs_.at(kOutputIndex)->Data()); + auto output_addr = reinterpret_cast(out_tensors_.at(kOutputIndex)->Data()); PackNHWC8Fp16ToNHWCFp32(packed_output_, output_addr, conv_param_->output_batch_, conv_param_->output_h_ * conv_param_->output_w_, conv_param_->output_channel_); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc index 96ed5bc6fa..b7b76191a1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc @@ -43,7 +43,7 @@ int ConvolutionFP16CPUKernel::InitWeightBias() { int pack_weight_size = oc8 * ic4 * C8NUM * C4NUM * kernel_plane; // init weight - float *origin_weight = reinterpret_cast(inputs_.at(kWeightIndex)->Data()); + float *origin_weight = reinterpret_cast(in_tensors_.at(kWeightIndex)->Data()); size_t fp16_weight_size = in_channel * out_channel * kernel_h * kernel_w * sizeof(float16_t); fp16_weight_ = reinterpret_cast(malloc(fp16_weight_size)); if (fp16_weight_ == nullptr) { @@ -70,8 +70,8 @@ int ConvolutionFP16CPUKernel::InitWeightBias() { } memset(bias_data_, 0, oc8 * C8NUM * sizeof(float16_t)); auto fp16_bias_data = reinterpret_cast(bias_data_); - if (inputs_.size() == kInputSize2) { - auto ori_bias = reinterpret_cast(inputs_.at(kBiasIndex)->Data()); + if (in_tensors_.size() == kInputSize2) { + auto ori_bias = reinterpret_cast(in_tensors_.at(kBiasIndex)->Data()); for (int i = 0; i < out_channel; ++i) { fp16_bias_data[i] = (float16_t)ori_bias[i]; } @@ -143,7 +143,7 @@ int ConvolutionFP16CPUKernel::InitTmpBuffer() { } void ConvolutionFP16CPUKernel::ConfigInputOutput() { - auto input_tensor = inputs_.at(kInputIndex); + auto input_tensor = in_tensors_.at(kInputIndex); auto input_format = input_tensor->GetFormat(); schema::Format execute_format = schema::Format_NHWC4; convert_func_ = LayoutTransformFp16(input_format, execute_format); @@ -155,7 +155,7 @@ void ConvolutionFP16CPUKernel::ConfigInputOutput() { int ConvolutionFP16CPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } auto ret = ConvolutionBaseCPUKernel::Init(); @@ -229,7 +229,7 @@ int ConvolutionFP16CPUKernel::Run() { MS_LOG(ERROR) << "Prepare failed."; return RET_ERROR; } - auto input_tensor = inputs_.at(kInputIndex); + auto input_tensor = in_tensors_.at(kInputIndex); auto ori_input_data = reinterpret_cast(input_tensor->Data()); for (int i = 0; i < input_tensor->ElementsNum(); ++i) { fp16_input_[i] = (float16_t)ori_input_data[i]; @@ -248,7 +248,7 @@ int ConvolutionFP16CPUKernel::Run() { } 
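The fp16 kernels above keep fp32 tensors at the graph boundary: Run casts the input element-wise into fp16_input_ before compute and casts fp16_out_ back into the fp32 output tensor afterwards. The round trip in isolation, assuming the ARM __fp16-style float16_t these files target (other platforms would need an fp16 emulation):

#include <cstddef>

#if defined(__ARM_FP16_FORMAT_IEEE)
typedef __fp16 float16_t;

// fp32 -> fp16 on the way in, fp16 -> fp32 on the way out.
void CastFp32ToFp16(const float *in, float16_t *out, size_t n) {
  for (size_t i = 0; i < n; ++i) out[i] = (float16_t)in[i];
}
void CastFp16ToFp32(const float16_t *in, float *out, size_t n) {
  for (size_t i = 0; i < n; ++i) out[i] = static_cast<float>(in[i]);
}
#endif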
// cast fp16 out to fp32 data - auto out_tensor = outputs_.at(kOutputIndex); + auto out_tensor = out_tensors_.at(kOutputIndex); auto output_addr = reinterpret_cast(out_tensor->Data()); for (int j = 0; j < out_tensor->ElementsNum(); ++j) { output_addr[j] = static_cast(fp16_out_[j]); @@ -293,8 +293,8 @@ kernel::LiteKernel *CpuConvFp16KernelCreator(const std::vectorInit(); if (ret != RET_OK) { delete kernel; - MS_LOG(INFO) << "Init fp16 kernel failed, name: " << opParameter->name_ << ", type: " - << schema::EnumNamePrimitiveType(static_cast(opParameter->type_)); + MS_LOG(INFO) << "Init fp16 kernel failed, name: " << opParameter->name_ + << ", type: " << schema::EnumNamePrimitiveType(static_cast(opParameter->type_)); return nullptr; } return kernel; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc index 96aa37c63e..ee3feb8294 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc @@ -45,14 +45,14 @@ DeconvolutionDepthwiseFp16CPUKernel::~DeconvolutionDepthwiseFp16CPUKernel() { } int DeconvolutionDepthwiseFp16CPUKernel::InitSlideParam() { - conv_param_->input_batch_ = outputs_.front()->shape().at(kNHWC_N); - conv_param_->input_h_ = outputs_.front()->shape().at(kNHWC_H); - conv_param_->input_w_ = outputs_.front()->shape().at(kNHWC_W); - conv_param_->input_channel_ = outputs_.front()->shape().at(kNHWC_C); - conv_param_->output_batch_ = inputs_.front()->shape().at(kNHWC_N); - conv_param_->output_h_ = inputs_.front()->shape().at(kNHWC_H); - conv_param_->output_w_ = inputs_.front()->shape().at(kNHWC_W); - conv_param_->output_channel_ = inputs_.front()->shape().at(kNHWC_C); + conv_param_->input_batch_ = out_tensors_.front()->shape().at(kNHWC_N); + conv_param_->input_h_ = out_tensors_.front()->shape().at(kNHWC_H); + conv_param_->input_w_ = out_tensors_.front()->shape().at(kNHWC_W); + conv_param_->input_channel_ = out_tensors_.front()->shape().at(kNHWC_C); + conv_param_->output_batch_ = in_tensors_.front()->shape().at(kNHWC_N); + conv_param_->output_h_ = in_tensors_.front()->shape().at(kNHWC_H); + conv_param_->output_w_ = in_tensors_.front()->shape().at(kNHWC_W); + conv_param_->output_channel_ = in_tensors_.front()->shape().at(kNHWC_C); // init sliding_ window param InitSlidingParam(sliding_, conv_param_, C8NUM); @@ -83,7 +83,7 @@ int DeconvolutionDepthwiseFp16CPUKernel::InitBuffer() { int DeconvolutionDepthwiseFp16CPUKernel::InitWeightBias() { // init weight: o, h, w, i; o == group, i == 1 int OC8 = UP_DIV(conv_param_->output_channel_, C8NUM); - auto weight_tensor = inputs_[kWeightIndex]; + auto weight_tensor = in_tensors_[kWeightIndex]; auto origin_weight = reinterpret_cast(weight_tensor->Data()); int pack_weight_size = C8NUM * OC8 * conv_param_->kernel_h_ * conv_param_->kernel_w_; @@ -103,8 +103,8 @@ int DeconvolutionDepthwiseFp16CPUKernel::InitWeightBias() { return RET_ERROR; } memset(bias_data_, 0, C8NUM * OC8 * sizeof(float16_t)); - if (inputs_.size() == kInputSize2) { - auto ori_bias = reinterpret_cast(inputs_.at(kBiasIndex)->Data()); + if (in_tensors_.size() == kInputSize2) { + auto ori_bias = reinterpret_cast(in_tensors_.at(kBiasIndex)->Data()); for (int i = 0; i < conv_param_->output_channel_; i++) { reinterpret_cast(bias_data_)[i] = (float16_t)ori_bias[i]; } @@ -116,7 +116,7 @@ int DeconvolutionDepthwiseFp16CPUKernel::InitWeightBias() { int 
DeconvolutionDepthwiseFp16CPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } sliding_ = new SlidingWindowParam; @@ -124,7 +124,7 @@ int DeconvolutionDepthwiseFp16CPUKernel::Init() { // conv base init auto ret = ConvolutionBaseCPUKernel::Init(); if (ret != RET_OK) { - return ret; + return ret; } ret = InitWeightBias(); @@ -189,7 +189,7 @@ int DeconvolutionDepthwiseFp16CPUKernel::Run() { return RET_ERROR; } - auto input_tensor = inputs_.at(kInputIndex); + auto input_tensor = in_tensors_.at(kInputIndex); auto input_addr = reinterpret_cast(input_tensor->Data()); // pack input: to nhwc8 PackNHWCFp32ToNHWC8Fp16(input_addr, packed_input_, conv_param_->input_batch_, @@ -201,7 +201,7 @@ int DeconvolutionDepthwiseFp16CPUKernel::Run() { return RET_ERROR; } - auto output_addr = reinterpret_cast(outputs_.at(kOutputIndex)->Data()); + auto output_addr = reinterpret_cast(out_tensors_.at(kOutputIndex)->Data()); PackNHWC8Fp16ToNHWCFp32(packed_output_, output_addr, conv_param_->output_batch_, conv_param_->output_h_ * conv_param_->output_w_, conv_param_->output_channel_); return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/layout_transform_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/layout_transform_fp16.cc index 419c10b06e..a628a55e6d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/layout_transform_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/layout_transform_fp16.cc @@ -20,20 +20,20 @@ namespace mindspore::kernel { LayoutConvertor LayoutTransformFp16(schema::Format src_format, schema::Format dst_format) { - if (src_format == schema::Format_NHWC && dst_format == schema::Format_NC4HW4) { - return PackNHWCToNC4HW4Fp16; - } else if (src_format == schema::Format_NHWC && dst_format == schema::Format_NHWC4) { - return PackNHWCToNHWC4Fp16; - } else if (src_format == schema::Format_NC4HW4 && dst_format == schema::Format_NHWC4) { - return PackNC4HW4ToNHWC4Fp16; - } else if (src_format == schema::Format_NCHW && dst_format == schema::Format_NC4HW4) { - return PackNCHWToNC4HW4Fp16; - } else if (src_format == schema::Format_NC4HW4 && dst_format == schema::Format_NHWC) { - return PackNC4HW4ToNHWCFp16; - } else { - MS_LOG(ERROR) << "Unsupported transform from " << schema::EnumNameFormat(src_format) << " to " - << schema::EnumNameFormat(dst_format); - return nullptr; - } + if (src_format == schema::Format_NHWC && dst_format == schema::Format_NC4HW4) { + return PackNHWCToNC4HW4Fp16; + } else if (src_format == schema::Format_NHWC && dst_format == schema::Format_NHWC4) { + return PackNHWCToNHWC4Fp16; + } else if (src_format == schema::Format_NC4HW4 && dst_format == schema::Format_NHWC4) { + return PackNC4HW4ToNHWC4Fp16; + } else if (src_format == schema::Format_NCHW && dst_format == schema::Format_NC4HW4) { + return PackNCHWToNC4HW4Fp16; + } else if (src_format == schema::Format_NC4HW4 && dst_format == schema::Format_NHWC) { + return PackNC4HW4ToNHWCFp16; + } else { + MS_LOG(ERROR) << "Unsupported transform from " << schema::EnumNameFormat(src_format) << " to " + << schema::EnumNameFormat(dst_format); + return nullptr; + } } } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/activation.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/activation.cc index 756fa678fb..9d614303ae 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/activation.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/activation.cc @@ -37,9 +37,9 @@ int ActivationCPUKernel::Init() 
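LayoutTransformFp16 above is a pure selector from a (src_format, dst_format) pair to one packing routine, returning nullptr (after an error log) for unsupported pairs. The same dispatch can be read as a lookup table; a sketch with stand-in types (the enum values and converter signature here are simplified, not the schema::Format and pack functions of the diff):

#include <map>
#include <utility>

enum Format { kNHWC, kNCHW, kNC4HW4, kNHWC4 };
typedef void (*LayoutConvertor)(const void *src, void *dst, int batch, int plane, int channel);

void PackNHWCToNHWC4(const void *, void *, int, int, int) { /* ... */ }
void PackNHWCToNC4HW4(const void *, void *, int, int, int) { /* ... */ }

LayoutConvertor LayoutTransform(Format src, Format dst) {
  static const std::map<std::pair<Format, Format>, LayoutConvertor> table = {
      {{kNHWC, kNHWC4}, PackNHWCToNHWC4},
      {{kNHWC, kNC4HW4}, PackNHWCToNC4HW4},
  };
  auto it = table.find({src, dst});
  return it == table.end() ? nullptr : it->second;  // nullptr == unsupported pair
}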
{ return RET_OK; } int ActivationCPUKernel::ReSize() { return RET_OK; } int ActivationCPUKernel::DoActivation(int task_id) { - auto input_addr = reinterpret_cast(inputs_.at(0)->Data()); - auto output_addr = reinterpret_cast(outputs_.at(0)->Data()); - auto length = inputs_.at(0)->ElementsNum(); + auto input_addr = reinterpret_cast(in_tensors_.at(0)->Data()); + auto output_addr = reinterpret_cast(out_tensors_.at(0)->Data()); + auto length = in_tensors_.at(0)->ElementsNum(); int stride = UP_DIV(length, thread_count_); int count = MSMIN(stride, length - stride * task_id); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/addn.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/addn.cc index 0a21d38e82..ce3b9a34b0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/addn.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/addn.cc @@ -43,7 +43,7 @@ int AddNCPUKernel::Init() { return RET_OK; } int AddNCPUKernel::ReSize() { return RET_OK; } int AddNCPUKernel::AddNParallelRun(int thread_id) { - int count_per_thread = UP_DIV(elements_num_, opParameter->thread_num_); + int count_per_thread = UP_DIV(elements_num_, op_parameter_->thread_num_); int count = MSMIN(count_per_thread, elements_num_ - thread_id * count_per_thread); auto stride = count_per_thread * thread_id; auto ret = ElementAdd(in1_addr_ + stride, in2_addr_ + stride, out_addr_ + stride, count); @@ -60,29 +60,29 @@ int AddNCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << ret; return ret; } - elements_num_ = inputs_[0]->ElementsNum(); - auto input0_data = reinterpret_cast(inputs_[0]->Data()); - auto input1_data = reinterpret_cast(inputs_[1]->Data()); - auto output_data = reinterpret_cast(outputs_[0]->Data()); - if (elements_num_ < opParameter->thread_num_) { + elements_num_ = in_tensors_[0]->ElementsNum(); + auto input0_data = reinterpret_cast(in_tensors_[0]->Data()); + auto input1_data = reinterpret_cast(in_tensors_[1]->Data()); + auto output_data = reinterpret_cast(out_tensors_[0]->Data()); + if (elements_num_ < op_parameter_->thread_num_) { ElementAdd(input0_data, input1_data, output_data, elements_num_); - for (int i = 2; i < inputs_.size(); ++i) { - ElementAdd(reinterpret_cast(inputs_[i]->Data()), output_data, output_data, elements_num_); + for (int i = 2; i < in_tensors_.size(); ++i) { + ElementAdd(reinterpret_cast(in_tensors_[i]->Data()), output_data, output_data, elements_num_); } return RET_OK; } in1_addr_ = input0_data; in2_addr_ = input1_data; out_addr_ = output_data; - ret = LiteBackendParallelLaunch(AddNLaunch, this, opParameter->thread_num_); + ret = LiteBackendParallelLaunch(AddNLaunch, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "addn launch fail!ret: " << ret; return RET_ERROR; } - for (size_t i = 2; i < inputs_.size(); ++i) { - in1_addr_ = reinterpret_cast(inputs_[i]->Data()); + for (size_t i = 2; i < in_tensors_.size(); ++i) { + in1_addr_ = reinterpret_cast(in_tensors_[i]->Data()); in2_addr_ = output_data; - ret = LiteBackendParallelLaunch(AddNLaunch, this, opParameter->thread_num_); + ret = LiteBackendParallelLaunch(AddNLaunch, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "addn launch fail!ret: " << ret << ", input index: " << i; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/argminmax.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/argminmax.cc index a9cb85147c..8e0badfa90 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/argminmax.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/argminmax.cc @@ -34,7 +34,7 @@ int 
ArgMinMaxCPUKernel::Init() { if (ret != RET_OK) { return ret; } - auto param = reinterpret_cast(opParameter); + auto param = reinterpret_cast(op_parameter_); param->data_type_ = kNumberTypeFloat32; if (!InferShapeDone()) { return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.cc index f3016e9972..3d43b7de84 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.cc @@ -53,7 +53,7 @@ int ArithmeticCPUKernel::Init() { int ArithmeticCPUKernel::ReSize() { FreeTileData(); - auto element_num = outputs_[0]->ElementsNum(); + auto element_num = out_tensors_[0]->ElementsNum(); tile_data0_ = new float[element_num]; tile_data1_ = new float[element_num]; @@ -61,10 +61,10 @@ int ArithmeticCPUKernel::ReSize() { } int ArithmeticCPUKernel::DoArithmetic(int task_id) { - auto input0_data = reinterpret_cast(inputs_[0]->Data()); - auto input1_data1 = reinterpret_cast(inputs_[1]->Data()); - auto output_data = reinterpret_cast(outputs_[0]->Data()); - auto element_num = outputs_[0]->ElementsNum(); + auto input0_data = reinterpret_cast(in_tensors_[0]->Data()); + auto input1_data1 = reinterpret_cast(in_tensors_[1]->Data()); + auto output_data = reinterpret_cast(out_tensors_[0]->Data()); + auto element_num = out_tensors_[0]->ElementsNum(); MS_ASSERT(thread_count_ != 0); int stride = UP_DIV(element_num, thread_count_); @@ -107,8 +107,8 @@ int ArithmeticCPUKernel::Run() { return ret; } if (arithmeticParameter_->broadcasting_) { - auto input_data0 = reinterpret_cast(inputs_[0]->Data()); - auto input_data1 = reinterpret_cast(inputs_[1]->Data()); + auto input_data0 = reinterpret_cast(in_tensors_[0]->Data()); + auto input_data1 = reinterpret_cast(in_tensors_[1]->Data()); TileDimensions(input_data0, input_data1, tile_data0_, tile_data1_, arithmeticParameter_); } int error_code = LiteBackendParallelLaunch(ArithmeticsRun, this, thread_count_); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.h b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.h index 1d3d2f0fa7..d165c182d3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.h @@ -67,7 +67,7 @@ class ArithmeticCPUKernel : public LiteKernel { } break; case PrimitiveType_Add: - switch (arithmeticParameter_->activation_type_) { + switch (arithmeticParameter_->activation_type_) { case schema::ActivationType_RELU: arithmetic_run_ = ElementAddRelu; break; @@ -80,7 +80,7 @@ class ArithmeticCPUKernel : public LiteKernel { } break; case PrimitiveType_Sub: - switch (arithmeticParameter_->activation_type_) { + switch (arithmeticParameter_->activation_type_) { case schema::ActivationType_RELU: arithmetic_run_ = ElementSubRelu; break; @@ -93,7 +93,7 @@ class ArithmeticCPUKernel : public LiteKernel { } break; case PrimitiveType_Div: - switch (arithmeticParameter_->activation_type_) { + switch (arithmeticParameter_->activation_type_) { case schema::ActivationType_RELU: arithmetic_run_ = ElementDivRelu; break; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.cc index e69f0891e7..2e88cc2ca9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.cc @@ -35,7 +35,7 @@ int ArithmeticSelfCPUKernel::Init() { } int ArithmeticSelfCPUKernel::ReSize() { - data_size_ = 
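The DoActivation and AddNParallelRun bodies above slice a flat element range across threads with the same arithmetic: a per-task stride of UP_DIV(length, thread_num) and a tail-clamped count of MSMIN(stride, length - stride * task_id); 10 elements on 3 threads gives counts 4, 4, 2. Spelled out as a standalone helper (UP_DIV/MSMIN expanded; a negative tail count means "no work", as the kernels' early returns assume):

#include <cstdio>

struct Chunk { int offset; int count; };

Chunk ThreadChunk(int length, int thread_num, int task_id) {
  int stride = (length + thread_num - 1) / thread_num;  // UP_DIV
  int count = length - stride * task_id;
  if (count > stride) count = stride;  // MSMIN
  if (count < 0) count = 0;            // task falls beyond the tail
  return {stride * task_id, count};
}

int main() {
  for (int t = 0; t < 3; ++t) {
    Chunk c = ThreadChunk(10, 3, t);
    std::printf("task %d: offset %d count %d\n", t, c.offset, c.count);
  }
}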
inputs_[0]->ElementsNum(); + data_size_ = in_tensors_[0]->ElementsNum(); thread_sz_count_ = MSMIN(thread_count_, data_size_); thread_sz_stride_ = UP_DIV(data_size_, thread_sz_count_); return RET_OK; @@ -76,8 +76,8 @@ int ArithmeticSelfCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << ret; return ret; } - auto input_tensor = inputs_.at(0); - auto out_tensor = outputs_.at(0); + auto input_tensor = in_tensors_.at(0); + auto out_tensor = out_tensors_.at(0); in_ptr_ = reinterpret_cast(input_tensor->Data()); out_ptr_ = reinterpret_cast(out_tensor->Data()); ret = LiteBackendParallelLaunch(ArithmeticSelfRuns, this, thread_sz_count_); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space.cc index e42193d9f9..580605df78 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space.cc @@ -35,9 +35,7 @@ int BatchToSpaceCPUKernel::Init() { return ReSize(); } -int BatchToSpaceCPUKernel::ReSize() { - return BatchToSpaceBaseCPUKernel::ReSize(); -} +int BatchToSpaceCPUKernel::ReSize() { return BatchToSpaceBaseCPUKernel::ReSize(); } int BatchToSpaceCPUKernel::Run() { auto prepare_ret = Prepare(); @@ -45,13 +43,13 @@ int BatchToSpaceCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - auto input = inputs_[0]; - auto output = outputs_[0]; + auto input = in_tensors_[0]; + auto output = out_tensors_[0]; const float *input_data = reinterpret_cast(input->Data()); float *output_data = reinterpret_cast(output->Data()); auto in_shape = input->shape(); auto out_shape = output->shape(); - BatchToSpaceParameter *param = reinterpret_cast(this->opParameter); + BatchToSpaceParameter *param = reinterpret_cast(this->op_parameter_); if (IsNoCrop()) { BatchToSpaceNoCropForNHWC(input_data, output_data, in_shape.data(), out_shape[0], param->block_shape_, diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm.cc index 157c4b76c1..766a98de29 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm.cc @@ -39,7 +39,7 @@ BatchnormCPUKernel::~BatchnormCPUKernel() { } int BatchnormCPUKernel::InitConstTensor() { - auto mean = inputs_[1]; + auto mean = in_tensors_[1]; mean_addr_ = reinterpret_cast(malloc(mean->ElementsNum() * sizeof(float))); if (mean_addr_ == nullptr) { MS_LOG(ERROR) << "Malloc buffer failed."; @@ -47,7 +47,7 @@ int BatchnormCPUKernel::InitConstTensor() { } memcpy(mean_addr_, mean->Data(), mean->ElementsNum() * sizeof(float)); - auto variance = inputs_[2]; + auto variance = in_tensors_[2]; var_addr_ = reinterpret_cast(malloc(variance->ElementsNum() * sizeof(float))); if (var_addr_ == nullptr) { MS_LOG(ERROR) << "Malloc buffer failed."; @@ -59,11 +59,11 @@ int BatchnormCPUKernel::InitConstTensor() { int BatchnormCPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } - auto input_shapes = inputs_[0]->shape(); + auto input_shapes = in_tensors_[0]->shape(); auto n_dim = input_shapes.size(); batchnorm_param_->channel_ = input_shapes[n_dim - 1]; batchnorm_param_->unit_ = 1; @@ -82,7 +82,7 @@ int BatchnormCPUKernel::Init() { } int BatchnormCPUKernel::ReSize() { - auto input_shapes = inputs_[0]->shape(); + auto input_shapes = in_tensors_[0]->shape(); batchnorm_param_->unit_ = 1; for (int i = 0; i < 
input_shapes.size() - 1; i++) { batchnorm_param_->unit_ *= input_shapes[i]; @@ -111,8 +111,8 @@ int BatchnormCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail! Ret error code: " << prepare_ret; return prepare_ret; } - in_addr_ = reinterpret_cast<float *>(inputs_.at(0)->Data()); - out_addr_ = reinterpret_cast<float *>(outputs_.at(0)->Data()); + in_addr_ = reinterpret_cast<float *>(in_tensors_.at(0)->Data()); + out_addr_ = reinterpret_cast<float *>(out_tensors_.at(0)->Data()); int ret = LiteBackendParallelLaunch(BatchNormRun, this, batchnorm_param_->op_parameter_.thread_num_); if (ret != RET_OK) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/bias.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/bias.cc index f071e98104..34e456edc1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/bias.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/bias.cc @@ -29,7 +29,7 @@ using mindspore::schema::PrimitiveType_BiasAdd; namespace mindspore::kernel { int BiasCPUKernel::ReSize() { - auto dims = inputs_[0]->shape(); + auto dims = in_tensors_[0]->shape(); MS_ASSERT(dims.size() <= 5); bias_param_->ndim_ = dims.size(); for (int i = 0; i < bias_param_->ndim_; i++) { @@ -47,10 +47,10 @@ int BiasCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - auto in = reinterpret_cast<float *>(inputs_.at(0)->Data()); - auto bias = reinterpret_cast<float *>(inputs_.at(1)->Data()); - auto out = reinterpret_cast<float *>(outputs_.at(0)->Data()); - size_t data_size = inputs_.at(0)->ElementsNum(); + auto in = reinterpret_cast<float *>(in_tensors_.at(0)->Data()); + auto bias = reinterpret_cast<float *>(in_tensors_.at(1)->Data()); + auto out = reinterpret_cast<float *>(out_tensors_.at(0)->Data()); + size_t data_size = in_tensors_.at(0)->ElementsNum(); auto tile_in = new float[data_size]; auto tile_bias = new float[data_size]; BroadcastAdd(in, bias, tile_in, tile_bias, out, data_size, bias_param_); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/broadcast_to.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/broadcast_to.cc index 3d4c740c05..d9a4af1479 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/broadcast_to.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/broadcast_to.cc @@ -26,13 +26,13 @@ using mindspore::schema::PrimitiveType_BroadcastTo; namespace mindspore::kernel { int BroadcastToCPUKernel::ReSize() { - auto input_shape = inputs_[0]->shape(); + auto input_shape = in_tensors_[0]->shape(); for (size_t i = 0; i < input_shape.size(); ++i) { shape_info_.input_shape_[i] = input_shape[i]; } shape_info_.input_shape_size_ = static_cast<int>(input_shape.size()); - auto output_shape = outputs_[0]->shape(); + auto output_shape = out_tensors_[0]->shape(); for (size_t i = 0; i < output_shape.size(); ++i) { shape_info_.output_shape_[i] = output_shape[i]; } @@ -54,8 +54,8 @@ int BroadcastToCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - auto input_data = reinterpret_cast<float *>(inputs_.at(0)->Data()); - auto output_data = reinterpret_cast<float *>(outputs_.at(0)->Data()); + auto input_data = reinterpret_cast<float *>(in_tensors_.at(0)->Data()); + auto output_data = reinterpret_cast<float *>(out_tensors_.at(0)->Data()); return BroadcastTo(input_data, &shape_info_, output_data); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/cast.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/cast.cc index 1d8a1d5f9e..9a0d5e2655 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/cast.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/cast.cc @@ -48,24 +48,24 @@ int CastCPUKernel::Init() { } int CastCPUKernel::ReSize() { - data_num_ =
inputs_[0]->ElementsNum(); + data_num_ = in_tensors_[0]->ElementsNum(); if (data_num_ == 0) { return RET_OK; } - opParameter->thread_num_ = MSMIN(opParameter->thread_num_, data_num_); - stride_ = UP_DIV(data_num_, opParameter->thread_num_); + op_parameter_->thread_num_ = MSMIN(op_parameter_->thread_num_, data_num_); + stride_ = UP_DIV(data_num_, op_parameter_->thread_num_); return RET_OK; } int CastCPUKernel::DoCast(int thread_id) { - auto input = inputs_.at(0); + auto input = in_tensors_.at(0); int data_num = MSMIN(stride_, data_num_ - thread_id * stride_); if (data_num <= 0) { return RET_OK; } auto offset = thread_id * stride_; - auto output_data = reinterpret_cast<float *>(outputs_.at(0)->Data()); + auto output_data = reinterpret_cast<float *>(out_tensors_.at(0)->Data()); switch (input->data_type()) { case kNumberTypeUInt8: Uint8ToFloat32(reinterpret_cast<uint8_t *>(input->Data()) + offset, output_data + offset, data_num); @@ -89,7 +89,7 @@ int CastCPUKernel::Run() { if (data_num_ == 0) { return RET_OK; } - return LiteBackendParallelLaunch(CastRun, this, opParameter->thread_num_); + return LiteBackendParallelLaunch(CastRun, this, op_parameter_->thread_num_); } kernel::LiteKernel *CpuCastFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs, diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/concat.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/concat.cc index b93348c191..ed83589366 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/concat.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/concat.cc @@ -48,19 +48,19 @@ int ConcatCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - auto input_num = inputs_.size(); + auto input_num = in_tensors_.size(); std::vector<void *> inputs_addr(input_num, nullptr); std::vector<int *> inputs_output_shape(input_num + 1, nullptr); std::vector<std::vector<int>> shapes; for (size_t i = 0; i < input_num; ++i) { - inputs_addr[i] = inputs_[i]->Data(); - shapes.push_back(inputs_[i]->shape()); + inputs_addr[i] = in_tensors_[i]->Data(); + shapes.push_back(in_tensors_[i]->shape()); inputs_output_shape[i] = shapes[i].data(); } - auto output_shape = outputs_.at(0)->shape(); + auto output_shape = out_tensors_.at(0)->shape(); inputs_output_shape[input_num] = output_shape.data(); - auto output_addr = outputs_.at(0)->Data(); + auto output_addr = out_tensors_.at(0)->Data(); Concat(reinterpret_cast<void **>(inputs_addr.data()), input_num, axis_, inputs_output_shape.data(), output_shape.size(), output_addr); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution.cc index 20d3668c94..1e9fe2bc57 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution.cc @@ -52,7 +52,7 @@ int ConvolutionCPUKernel::InitWeightBias() { int pack_weight_size = oc_block_num * oc_block * ic4 * C4NUM * kernel_plane; // init weight - auto origin_weight = reinterpret_cast<float *>(inputs_.at(kWeightIndex)->Data()); + auto origin_weight = reinterpret_cast<float *>(in_tensors_.at(kWeightIndex)->Data()); packed_weight_ = reinterpret_cast<float *>(malloc(pack_weight_size * sizeof(float))); if (packed_weight_ == nullptr) { MS_LOG(ERROR) << "malloc packed weight failed."; @@ -68,11 +68,11 @@ int ConvolutionCPUKernel::InitWeightBias() { return RET_ERROR; } memset(bias_data_, 0, oc_block_num * oc_block * sizeof(float)); - if (inputs_.size() == kInputSize2) { - auto ori_bias = reinterpret_cast<float *>(inputs_.at(kBiasIndex)->Data()); + if (in_tensors_.size() == kInputSize2) { + auto ori_bias =
reinterpret_cast<float *>(in_tensors_.at(kBiasIndex)->Data()); memcpy(bias_data_, ori_bias, out_channel * sizeof(float)); } else { - MS_ASSERT(inputs_.size() == kInputSize1); + MS_ASSERT(in_tensors_.size() == kInputSize1); } return RET_OK; } @@ -120,11 +120,11 @@ int ConvolutionCPUKernel::InitTmpBuffer() { void ConvolutionCPUKernel::ConfigInputOutput() { // set output format - auto output_tensor = outputs_.at(kOutputIndex); + auto output_tensor = out_tensors_.at(kOutputIndex); output_tensor->SetFormat(schema::Format_NHWC); // select trans func for input - auto input_tensor = inputs_.at(kInputIndex); + auto input_tensor = in_tensors_.at(kInputIndex); auto ret = CheckLayout(input_tensor); if (ret != RET_OK) { MS_LOG(ERROR) << "Check layout failed."; @@ -139,7 +139,7 @@ void ConvolutionCPUKernel::ConfigInputOutput() { int ConvolutionCPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } auto ret = ConvolutionBaseCPUKernel::Init(); @@ -193,7 +193,7 @@ int ConvolutionCPUKernel::RunImpl(int task_id) { MS_LOG(ERROR) << "gemm_func is nullptr."; return RET_ERROR; } - auto output_addr = reinterpret_cast<float *>(outputs_.at(kOutputIndex)->Data()); + auto output_addr = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->Data()); ConvFp32(reinterpret_cast<float *>(nhwc4_input_), packed_input_, packed_weight_, reinterpret_cast<float *>(bias_data_), tmp_output_block_, output_addr, task_id, conv_param_, gemm_func_); return RET_OK; @@ -215,7 +215,7 @@ int ConvolutionCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - auto input_tensor = inputs_.at(kInputIndex); + auto input_tensor = in_tensors_.at(kInputIndex); auto ori_input_data = input_tensor->Data(); int in_batch = conv_param_->input_batch_; int in_h = conv_param_->input_h_; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_1x1.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_1x1.cc index d5f11a5104..94820b9480 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_1x1.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_1x1.cc @@ -68,14 +68,14 @@ void Convolution1x1CPUKernel::InitConv1x1MatmulParam() { } int Convolution1x1CPUKernel::InitConv1x1BiasWeight() { - if (inputs_.size() == 3) { + if (in_tensors_.size() == 3) { bias_data_ = malloc(matmul_param_->col_8_ * sizeof(float)); if (bias_data_ == nullptr) { MS_LOG(ERROR) << "Conv1x1 Malloc bias_ptr_ error!"; return RET_ERROR; } memset(bias_data_, 0, matmul_param_->col_8_ * sizeof(float)); - memcpy(bias_data_, inputs_[2]->Data(), conv_param_->output_channel_ * sizeof(float)); + memcpy(bias_data_, in_tensors_[2]->Data(), conv_param_->output_channel_ * sizeof(float)); } else { bias_data_ = nullptr; } @@ -86,7 +86,7 @@ int Convolution1x1CPUKernel::InitConv1x1BiasWeight() { return RET_ERROR; } memset(weight_ptr_, 0, matmul_param_->deep_ * matmul_param_->col_8_ * sizeof(float)); - RowMajor2Col8Major(reinterpret_cast<float *>(inputs_[1]->Data()), weight_ptr_, matmul_param_->col_, + RowMajor2Col8Major(reinterpret_cast<float *>(in_tensors_[1]->Data()), weight_ptr_, matmul_param_->col_, matmul_param_->deep_); return RET_OK; } @@ -103,7 +103,7 @@ int Convolution1x1CPUKernel::InitConv1x1Param() { memset(input_ptr_, 0, matmul_param_->row_ * matmul_param_->deep_ * sizeof(float)); } - thread_count_ = MSMIN(opParameter->thread_num_, UP_DIV(matmul_param_->col_, C8NUM)); + thread_count_ = MSMIN(op_parameter_->thread_num_, UP_DIV(matmul_param_->col_, C8NUM)); thread_stride_ =
UP_DIV(UP_DIV(matmul_param_->col_, C8NUM), thread_count_) * C8NUM; pack_input_ = reinterpret_cast<float *>(malloc(matmul_param_->row_8_ * matmul_param_->deep_ * sizeof(float))); @@ -137,7 +137,7 @@ void Convolution1x1CPUKernel::Pre1x1Trans(float *src_input, float *src_output) { int Convolution1x1CPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } ConvolutionBaseCPUKernel::Init(); @@ -204,8 +204,8 @@ int Convolution1x1CPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - auto src_in = reinterpret_cast<float *>(inputs_[0]->Data()); - auto src_out = reinterpret_cast<float *>(outputs_[0]->Data()); + auto src_in = reinterpret_cast<float *>(in_tensors_[0]->Data()); + auto src_out = reinterpret_cast<float *>(out_tensors_[0]->Data()); for (int batch_index = 0; batch_index < conv_param_->input_batch_; batch_index++) { Pre1x1Trans(src_in + batch_index * conv_param_->input_h_ * conv_param_->input_w_ * conv_param_->input_channel_, diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_3x3.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_3x3.cc index 9bd829bb67..f676a9fd62 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_3x3.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_3x3.cc @@ -70,7 +70,7 @@ int Convolution3x3CPUKernel::InitWeightBias() { return RET_ERROR; } memset(transformed_filter_addr_, 0, transformed_size); - auto weight_data = reinterpret_cast<float *>(inputs_.at(kWeightIndex)->Data()); + auto weight_data = reinterpret_cast<float *>(in_tensors_.at(kWeightIndex)->Data()); ProcessFilter(weight_data, transformed_filter_addr_, conv_param_, oc_block, oc_block_num); // init bias @@ -81,11 +81,11 @@ int Convolution3x3CPUKernel::InitWeightBias() { return RET_ERROR; } memset(bias_data_, 0, new_bias_size); - if (inputs_.size() == kInputSize2) { - auto ori_bias_addr = reinterpret_cast<float *>(inputs_.at(kBiasIndex)->Data()); + if (in_tensors_.size() == kInputSize2) { + auto ori_bias_addr = reinterpret_cast<float *>(in_tensors_.at(kBiasIndex)->Data()); memcpy(bias_data_, ori_bias_addr, output_channel * sizeof(float)); } else { - MS_ASSERT(inputs_.size() == kInputSize1); + MS_ASSERT(in_tensors_.size() == kInputSize1); } return RET_OK; } @@ -149,10 +149,10 @@ int Convolution3x3CPUKernel::InitTmpBuffer() { } void Convolution3x3CPUKernel::ConfigInputOutput() { - auto output_tensor = outputs_.at(kOutputIndex); + auto output_tensor = out_tensors_.at(kOutputIndex); output_tensor->SetFormat(schema::Format_NHWC); - auto input_tensor = inputs_.at(kInputIndex); + auto input_tensor = in_tensors_.at(kInputIndex); auto ret = CheckLayout(input_tensor); if (ret != RET_OK) { MS_LOG(ERROR) << "Check layout failed."; @@ -167,7 +167,7 @@ void Convolution3x3CPUKernel::ConfigInputOutput() { int Convolution3x3CPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } auto ret = ConvolutionBaseCPUKernel::Init(); @@ -224,7 +224,7 @@ int Convolution3x3CPUKernel::RunImpl(int task_id) { MS_LOG(ERROR) << "gemm_func is nullptr."; return RET_ERROR; } - auto output_addr = reinterpret_cast<float *>(outputs_.at(kOutputIndex)->Data()); + auto output_addr = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->Data()); Conv3x3Fp32(reinterpret_cast<float *>(nhwc4_input_), transformed_filter_addr_, reinterpret_cast<float *>(bias_data_), output_addr, tmp_buffer_address_list_, task_id, conv_param_, gemm_func_); return RET_OK; @@ -246,7 +246,7 @@ int Convolution3x3CPUKernel::Run() {
MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - auto input_tensor = inputs_.at(kInputIndex); + auto input_tensor = in_tensors_.at(kInputIndex); auto ori_input_data = input_tensor->Data(); int in_batch = conv_param_->input_batch_; int in_h = conv_param_->input_h_; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.cc index f9ca15665d..49d5a92751 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.cc @@ -49,7 +49,7 @@ ConvolutionDepthwiseCPUKernel::~ConvolutionDepthwiseCPUKernel() { int ConvolutionDepthwiseCPUKernel::InitWeightBias() { // init weight: o, h, w, i; o == group, i == 1 - auto weight_tensor = inputs_[kWeightIndex]; + auto weight_tensor = in_tensors_[kWeightIndex]; auto origin_weight = reinterpret_cast(weight_tensor->Data()); int OC4 = UP_DIV(conv_param_->output_channel_, C4NUM); int pack_weight_size = C4NUM * OC4 * conv_param_->kernel_h_ * conv_param_->kernel_w_; @@ -70,8 +70,8 @@ int ConvolutionDepthwiseCPUKernel::InitWeightBias() { return RET_ERROR; } memset(bias_data_, 0, C4NUM * OC4 * sizeof(float)); - if (inputs_.size() == kInputSize2) { - auto ori_bias = reinterpret_cast(inputs_.at(kBiasIndex)->Data()); + if (in_tensors_.size() == kInputSize2) { + auto ori_bias = reinterpret_cast(in_tensors_.at(kBiasIndex)->Data()); memcpy(bias_data_, ori_bias, conv_param_->output_channel_ * sizeof(float)); } @@ -106,7 +106,7 @@ int ConvolutionDepthwiseCPUKernel::InitBuffer() { int ConvolutionDepthwiseCPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } // conv base init @@ -183,7 +183,7 @@ int ConvolutionDepthwiseCPUKernel::Run() { MS_LOG(ERROR) << "Only support input channel equals output channel."; return RET_ERROR; } - auto input_tensor = inputs_.at(kInputIndex); + auto input_tensor = in_tensors_.at(kInputIndex); auto input_addr = reinterpret_cast(input_tensor->Data()); // pack input: to nhwc4 @@ -194,7 +194,7 @@ int ConvolutionDepthwiseCPUKernel::Run() { packed_input_ = input_addr; } - auto output_addr = reinterpret_cast(outputs_.at(kOutputIndex)->Data()); + auto output_addr = reinterpret_cast(out_tensors_.at(kOutputIndex)->Data()); if (!need_align_) { packed_output_ = output_addr; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3.cc index 798cd72a04..136459b543 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3.cc @@ -29,7 +29,7 @@ using mindspore::schema::PrimitiveType_DepthwiseConv2D; namespace mindspore::kernel { int ConvolutionDepthwise3x3CPUKernel::InitWeightBias() { // init weight: o, h, w, i; o == group, i == 1 - auto weight_tensor = inputs_[kWeightIndex]; + auto weight_tensor = in_tensors_[kWeightIndex]; auto origin_weight = reinterpret_cast(weight_tensor->Data()); // o h w 1 -> o/4 h w 1 4 int OC4 = UP_DIV(conv_param_->output_channel_, C4NUM); @@ -60,8 +60,8 @@ int ConvolutionDepthwise3x3CPUKernel::InitWeightBias() { return RET_ERROR; } memset(bias_data_, 0, C4NUM * OC4 * sizeof(float)); - if (inputs_.size() == kInputSize2) { - auto ori_bias = reinterpret_cast(inputs_.at(kBiasIndex)->Data()); + if (in_tensors_.size() == kInputSize2) { + auto ori_bias = 
reinterpret_cast<float *>(in_tensors_.at(kBiasIndex)->Data()); memcpy(bias_data_, ori_bias, conv_param_->output_channel_ * sizeof(float)); } return RET_OK; @@ -101,7 +101,7 @@ int ConvolutionDepthwise3x3CPUKernel::InitBuffer() { int ConvolutionDepthwise3x3CPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } // conv base init @@ -177,7 +177,7 @@ int ConvolutionDepthwise3x3CPUKernel::Run() { MS_LOG(ERROR) << "Only support input channel equals output channel."; return RET_ERROR; } - auto input_tensor = inputs_.at(kInputIndex); + auto input_tensor = in_tensors_.at(kInputIndex); auto input_addr = reinterpret_cast<float *>(input_tensor->Data()); // pack input: to nhwc4 @@ -188,7 +188,7 @@ int ConvolutionDepthwise3x3CPUKernel::Run() { packed_input_ = input_addr; } - auto output_addr = reinterpret_cast<float *>(outputs_.at(kOutputIndex)->Data()); + auto output_addr = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->Data()); if (!need_align_) { packed_output_ = output_addr; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_slidewindow.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_slidewindow.cc index 15a493542a..3ebf4d3561 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_slidewindow.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_slidewindow.cc @@ -41,7 +41,7 @@ int ConvolutionSWCPUKernel::InitWeightBias() { int pack_weight_size = oc_block_num * oc_block * ic4 * C4NUM * kernel_plane; // ==================================init weight======================================// - auto origin_weight = reinterpret_cast<float *>(inputs_.at(kWeightIndex)->Data()); + auto origin_weight = reinterpret_cast<float *>(in_tensors_.at(kWeightIndex)->Data()); packed_weight_ = reinterpret_cast<float *>(malloc(pack_weight_size * sizeof(float))); if (packed_weight_ == nullptr) { MS_LOG(ERROR) << "malloc packed weight failed."; @@ -65,11 +65,11 @@ int ConvolutionSWCPUKernel::InitWeightBias() { return RET_ERROR; } memset(bias_data_, 0, oc_block_num * oc_block * sizeof(float)); - if (inputs_.size() == kInputSize2) { - auto ori_bias = reinterpret_cast<float *>(inputs_.at(kBiasIndex)->Data()); + if (in_tensors_.size() == kInputSize2) { + auto ori_bias = reinterpret_cast<float *>(in_tensors_.at(kBiasIndex)->Data()); memcpy(bias_data_, ori_bias, out_channel * sizeof(float)); } else { - MS_ASSERT(inputs_.size() == kInputSize1); + MS_ASSERT(in_tensors_.size() == kInputSize1); } return RET_OK; } @@ -102,11 +102,11 @@ int ConvolutionSWCPUKernel::InitTmpBuffer() { void ConvolutionSWCPUKernel::ConfigInputOutput() { // set output format - auto output_tensor = outputs_.at(kOutputIndex); + auto output_tensor = out_tensors_.at(kOutputIndex); output_tensor->SetFormat(schema::Format_NHWC); // select trans func for input - auto input_tensor = inputs_.at(kInputIndex); + auto input_tensor = in_tensors_.at(kInputIndex); auto ret = CheckLayout(input_tensor); if (ret != RET_OK) { MS_LOG(ERROR) << "Check layout failed."; @@ -116,7 +116,7 @@ void ConvolutionSWCPUKernel::ConfigInputOutput() { int ConvolutionSWCPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } auto ret = ConvolutionBaseCPUKernel::Init(); @@ -171,7 +171,7 @@ int ConvolutionSWCPUKernel::ReSize() { } int ConvolutionSWCPUKernel::RunImpl(int task_id) { - auto output_addr = reinterpret_cast<float *>(outputs_.at(kOutputIndex)->Data()); + auto output_addr = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->Data());
ConvSWFp32(reinterpret_cast<float *>(nhwc4_input_), packed_weight_, reinterpret_cast<float *>(bias_data_), tmp_output_block_, output_addr, task_id, conv_param_, slidingWindow_param_); return RET_OK; @@ -193,7 +193,7 @@ int ConvolutionSWCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - auto input_tensor = inputs_.at(kInputIndex); + auto input_tensor = in_tensors_.at(kInputIndex); auto ori_input_data = input_tensor->Data(); int in_batch = conv_param_->input_batch_; int in_h = conv_param_->input_h_; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.cc index 95bbb960e1..d72a3b4dad 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.cc @@ -103,7 +103,7 @@ int ConvolutionWinogradCPUKernel::InitWeightBias() { MS_LOG(ERROR) << "Malloc filter matrix failed."; return RET_ERROR; } - auto weight_tensor = inputs_.at(kWeightIndex); + auto weight_tensor = in_tensors_.at(kWeightIndex); auto weight_data = reinterpret_cast<float *>(weight_tensor->Data()); WinogradFilterTransform(weight_data, trans_weight_, kernel_unit_, input_unit_, conv_param_, oc_block); @@ -111,11 +111,11 @@ int ConvolutionWinogradCPUKernel::InitWeightBias() { size_t new_bias_size = oc4 * C4NUM * sizeof(float); bias_data_ = reinterpret_cast<float *>(malloc(new_bias_size)); memset(bias_data_, 0, new_bias_size); - if (inputs_.size() == kInputSize2) { - auto ori_bias_addr = reinterpret_cast<float *>(inputs_.at(kBiasIndex)->Data()); + if (in_tensors_.size() == kInputSize2) { + auto ori_bias_addr = reinterpret_cast<float *>(in_tensors_.at(kBiasIndex)->Data()); memcpy(bias_data_, ori_bias_addr, output_channel * sizeof(float)); } else { - MS_ASSERT(inputs_.size() == kInputSize1); + MS_ASSERT(in_tensors_.size() == kInputSize1); } return RET_OK; } @@ -218,13 +218,13 @@ int ConvolutionWinogradCPUKernel::InitTmpBuffer() { } int ConvolutionWinogradCPUKernel::ConfigInputOutput() { - auto input_tensor = inputs_.at(kInputIndex); + auto input_tensor = in_tensors_.at(kInputIndex); auto ret = CheckLayout(input_tensor); if (ret != RET_OK) { MS_LOG(ERROR) << "Check layout failed."; return RET_ERROR; } - auto output_tensor = outputs_.at(kOutputIndex); + auto output_tensor = out_tensors_.at(kOutputIndex); output_tensor->SetFormat(schema::Format_NHWC); // choose input transformer function (4x4 unit or 8x8 unit) @@ -248,7 +248,7 @@ int ConvolutionWinogradCPUKernel::ConfigInputOutput() { int ConvolutionWinogradCPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } auto ret = ConvolutionBaseCPUKernel::Init(); @@ -325,7 +325,7 @@ int ConvolutionWinogradCPUKernel::RunImpl(int task_id) { MS_LOG(ERROR) << "gemm_func is nullptr."; return RET_ERROR; } - auto output_addr = reinterpret_cast<float *>(outputs_.at(kOutputIndex)->Data()); + auto output_addr = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->Data()); ConvWinogardFp32(reinterpret_cast<float *>(nhwc4_input_), reinterpret_cast<float *>(trans_weight_->GetData()), reinterpret_cast<const float *>(bias_data_), output_addr, tmp_buffer_address_list_, task_id, conv_param_, input_trans_func_, output_trans_func_, gemm_func_); @@ -348,7 +348,7 @@ int ConvolutionWinogradCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - auto input_tensor = inputs_.at(kInputIndex); + auto input_tensor = in_tensors_.at(kInputIndex); auto ori_input_data =
input_tensor->Data(); int in_batch = conv_param_->input_batch_; int in_h = conv_param_->input_h_; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/crop.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/crop.cc index 6b529637e2..a045e796b7 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/crop.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/crop.cc @@ -43,12 +43,12 @@ int CropLaunch(int thread_id, LiteParallelGroupEnv *penv, void *cdata) { int CropCPUKernel::Init() { return RET_OK; } int CropCPUKernel::CropParallelRun(int thread_id) { - auto input = inputs_[0]; - auto output = outputs_[0]; + auto input = in_tensors_[0]; + auto output = out_tensors_[0]; float *input_data = reinterpret_cast<float *>(input->Data()); float *output_data = reinterpret_cast<float *>(output->Data()); Crop4D(input_data, output_data, input->shape().data(), output->shape().data(), - reinterpret_cast<CropParameter *>(opParameter)); + reinterpret_cast<CropParameter *>(op_parameter_)); return RET_OK; } @@ -58,9 +58,9 @@ int CropCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - auto input = inputs_[0]; - auto output = outputs_[0]; - auto param = reinterpret_cast<CropParameter *>(opParameter); + auto input = in_tensors_[0]; + auto output = out_tensors_[0]; + auto param = reinterpret_cast<CropParameter *>(op_parameter_); if (output->shape()[1] < param->op_parameter_.thread_num_) { float *input_data = reinterpret_cast<float *>(input->Data()); float *output_data = reinterpret_cast<float *>(output->Data()); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.cc index a9e6b15d55..1e6fcf3663 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.cc @@ -64,14 +64,14 @@ int DeConvolutionCPUKernel::ReSize() { } int DeConvolutionCPUKernel::InitWeightBias() { - if (inputs_.size() == 3) { + if (in_tensors_.size() == 3) { bias_data_ = malloc(UP_ROUND(conv_param_->output_channel_, C4NUM) * sizeof(float)); if (bias_data_ == nullptr) { MS_LOG(ERROR) << "deconv malloc bias_data_ error!"; return RET_ERROR; } memset(bias_data_, 0, UP_ROUND(conv_param_->output_channel_, C4NUM) * sizeof(float)); - memcpy(bias_data_, inputs_[2]->Data(), conv_param_->output_channel_ * sizeof(float)); + memcpy(bias_data_, in_tensors_[2]->Data(), conv_param_->output_channel_ * sizeof(float)); } else { bias_data_ = nullptr; } @@ -84,7 +84,7 @@ int DeConvolutionCPUKernel::InitWeightBias() { return RET_ERROR; } memset(weight_ptr_, 0, weight_pack_size); - PackNHWCToC8HWN8Fp32(reinterpret_cast<float *>(inputs_[1]->Data()), weight_ptr_, conv_param_->input_channel_, + PackNHWCToC8HWN8Fp32(reinterpret_cast<float *>(in_tensors_[1]->Data()), weight_ptr_, conv_param_->input_channel_, kernel_plane_, conv_param_->output_channel_); return RET_OK; } @@ -100,7 +100,7 @@ int DeConvolutionCPUKernel::InitParam() { matmul_param_->row_8_ = UP_ROUND(matmul_param_->row_, C8NUM); matmul_param_->col_8_ = UP_ROUND(conv_param_->output_channel_, C8NUM) * kernel_plane_; - thread_count_ = MSMIN(opParameter->thread_num_, UP_DIV(conv_param_->output_channel_, C8NUM)); + thread_count_ = MSMIN(op_parameter_->thread_num_, UP_DIV(conv_param_->output_channel_, C8NUM)); thread_stride_ = UP_DIV(UP_DIV(conv_param_->output_channel_, C8NUM), thread_count_); pack_input_ = reinterpret_cast<float *>(malloc(matmul_param_->row_8_ * matmul_param_->deep_ * sizeof(float))); @@ -174,7 +174,7 @@ int DeConvolutionCPUKernel::DoPostFunc(int task_id) { int DeConvolutionCPUKernel::Init() { if (context_->infer_shape_interrupt_ &&
!context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } ConvolutionBaseCPUKernel::Init(); @@ -199,8 +199,8 @@ int DeConvolutionCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - float *src_in = reinterpret_cast<float *>(inputs_[0]->Data()); - float *src_out = reinterpret_cast<float *>(outputs_[0]->Data()); + float *src_in = reinterpret_cast<float *>(in_tensors_[0]->Data()); + float *src_out = reinterpret_cast<float *>(out_tensors_[0]->Data()); for (int batch_index = 0; batch_index < conv_param_->input_batch_; batch_index++) { input_ptr_ = src_in + batch_index * input_plane_ * conv_param_->input_channel_; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise.cc index 77cc8ac22e..fc5177fb09 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise.cc @@ -46,14 +46,14 @@ DeconvolutionDepthwiseCPUKernel::~DeconvolutionDepthwiseCPUKernel() { } int DeconvolutionDepthwiseCPUKernel::InitSlideParam() { - conv_param_->input_batch_ = outputs_.front()->shape().at(kNHWC_N); - conv_param_->input_h_ = outputs_.front()->shape().at(kNHWC_H); - conv_param_->input_w_ = outputs_.front()->shape().at(kNHWC_W); - conv_param_->input_channel_ = outputs_.front()->shape().at(kNHWC_C); - conv_param_->output_batch_ = inputs_.front()->shape().at(kNHWC_N); - conv_param_->output_h_ = inputs_.front()->shape().at(kNHWC_H); - conv_param_->output_w_ = inputs_.front()->shape().at(kNHWC_W); - conv_param_->output_channel_ = inputs_.front()->shape().at(kNHWC_C); + conv_param_->input_batch_ = out_tensors_.front()->shape().at(kNHWC_N); + conv_param_->input_h_ = out_tensors_.front()->shape().at(kNHWC_H); + conv_param_->input_w_ = out_tensors_.front()->shape().at(kNHWC_W); + conv_param_->input_channel_ = out_tensors_.front()->shape().at(kNHWC_C); + conv_param_->output_batch_ = in_tensors_.front()->shape().at(kNHWC_N); + conv_param_->output_h_ = in_tensors_.front()->shape().at(kNHWC_H); + conv_param_->output_w_ = in_tensors_.front()->shape().at(kNHWC_W); + conv_param_->output_channel_ = in_tensors_.front()->shape().at(kNHWC_C); // init sliding window param sliding_ = new SlidingWindowParam; @@ -63,7 +63,7 @@ int DeconvolutionDepthwiseCPUKernel::InitWeightBias() { // init weight: o, h, w, i; o == group, i == 1 - auto weight_tensor = inputs_[kWeightIndex]; + auto weight_tensor = in_tensors_[kWeightIndex]; auto origin_weight = reinterpret_cast<float *>(weight_tensor->Data()); int OC4 = UP_DIV(conv_param_->output_channel_, C4NUM); int pack_weight_size = C4NUM * OC4 * conv_param_->kernel_h_ * conv_param_->kernel_w_; @@ -84,8 +84,8 @@ int DeconvolutionDepthwiseCPUKernel::InitWeightBias() { return RET_ERROR; } memset(bias_data_, 0, C4NUM * OC4 * sizeof(float)); - if (inputs_.size() == kInputSize2) { - auto ori_bias = reinterpret_cast<float *>(inputs_.at(kBiasIndex)->Data()); + if (in_tensors_.size() == kInputSize2) { + auto ori_bias = reinterpret_cast<float *>(in_tensors_.at(kBiasIndex)->Data()); memcpy(bias_data_, ori_bias, conv_param_->output_channel_ * sizeof(float)); } @@ -121,7 +121,7 @@ int DeconvolutionDepthwiseCPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } InitSlideParam(); @@ -192,7 +192,7 @@ int DeconvolutionDepthwiseCPUKernel::Run()
{ MS_LOG(ERROR) << "Only support input channel equals output channel."; return RET_ERROR; } - auto input_tensor = inputs_.at(kInputIndex); + auto input_tensor = in_tensors_.at(kInputIndex); auto input_addr = reinterpret_cast(input_tensor->Data()); // pack input: to nhwc4 @@ -203,9 +203,9 @@ int DeconvolutionDepthwiseCPUKernel::Run() { packed_input_ = input_addr; } - auto output_addr = reinterpret_cast(outputs_.at(kOutputIndex)->Data()); + auto output_addr = reinterpret_cast(out_tensors_.at(kOutputIndex)->Data()); if (!need_align_) { - memset(output_addr, 0, outputs_.at(kOutputIndex)->ElementsNum() * sizeof(float)); + memset(output_addr, 0, out_tensors_.at(kOutputIndex)->ElementsNum() * sizeof(float)); packed_output_ = output_addr; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space.cc index f1768779f3..fb31ffeab0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space.cc @@ -35,7 +35,7 @@ int DepthToSpaceCPUKernel::Init() { if (ret != RET_OK) { return ret; } - DepthToSpaceParameter *param = reinterpret_cast(opParameter); + DepthToSpaceParameter *param = reinterpret_cast(op_parameter_); param->data_type_size_ = sizeof(float); if (!InferShapeDone()) { return RET_OK; @@ -52,12 +52,12 @@ int DepthToSpaceCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - auto input = inputs_[0]; - auto output = outputs_[0]; + auto input = in_tensors_[0]; + auto output = out_tensors_[0]; const float *input_data = reinterpret_cast(input->Data()); float *output_data = reinterpret_cast(output->Data()); auto in_shape = input->shape(); - DepthToSpaceParameter *param = reinterpret_cast(opParameter); + DepthToSpaceParameter *param = reinterpret_cast(op_parameter_); if (input->GetFormat() == schema::Format_NHWC) { DepthToSpaceForNHWC(input_data, output_data, in_shape.data(), param); return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/elu.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/elu.cc index dc5144b0c7..d8c90fefdd 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/elu.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/elu.cc @@ -26,7 +26,7 @@ using mindspore::schema::PrimitiveType_Elu; namespace mindspore::kernel { int EluCPUKernel::Init() { - elu_parameter_ = reinterpret_cast(opParameter); + elu_parameter_ = reinterpret_cast(op_parameter_); elu_parameter_->thread_num_ = thread_count_; if (!InferShapeDone()) { @@ -37,7 +37,7 @@ int EluCPUKernel::Init() { } int EluCPUKernel::ReSize() { - elu_parameter_->in_size_ = inputs_.front()->ElementsNum(); + elu_parameter_->in_size_ = in_tensors_.front()->ElementsNum(); return RET_OK; } @@ -59,8 +59,8 @@ int EluCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - input_addr = reinterpret_cast(inputs_.front()->Data()); - output_addr = reinterpret_cast(outputs_.front()->Data()); + input_addr = reinterpret_cast(in_tensors_.front()->Data()); + output_addr = reinterpret_cast(out_tensors_.front()->Data()); auto ret = LiteBackendParallelLaunch(EluRun, this, elu_parameter_->thread_num_); if (ret != RET_OK) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup.cc index d41590d06d..c4688822a1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup.cc +++ 
b/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup.cc @@ -26,7 +26,7 @@ using mindspore::schema::PrimitiveType_EmbeddingLookup; namespace mindspore::kernel { int EmbeddingLookupCPUKernel::Init() { - embedding_lookup_parameter_ = reinterpret_cast<EmbeddingLookupParameter *>(opParameter); + embedding_lookup_parameter_ = reinterpret_cast<EmbeddingLookupParameter *>(op_parameter_); embedding_lookup_parameter_->thread_num = thread_count_; if (!InferShapeDone()) { @@ -36,17 +36,17 @@ } int EmbeddingLookupCPUKernel::ReSize() { - embedding_lookup_parameter_->ids_size_ = inputs_.back()->ElementsNum(); + embedding_lookup_parameter_->ids_size_ = in_tensors_.back()->ElementsNum(); embedding_lookup_parameter_->layer_size_ = 1; - auto in_shape = inputs_.front()->shape(); + auto in_shape = in_tensors_.front()->shape(); for (int i = 1; i < in_shape.size(); ++i) { embedding_lookup_parameter_->layer_size_ *= in_shape[i]; } embedding_lookup_parameter_->layer_num_ = 0; - for (int i = 0; i < inputs_.size() - 1; ++i) { - embedding_lookup_parameter_->layer_num_ += inputs_[i]->shape()[0]; + for (int i = 0; i < in_tensors_.size() - 1; ++i) { + embedding_lookup_parameter_->layer_num_ += in_tensors_[i]->shape()[0]; } if (input_addr_ != nullptr) { @@ -112,13 +112,13 @@ int EmbeddingLookupCPUKernel::Run() { return prepare_ret; } int dest_loc = 0; - for (int i = 0; i < inputs_.size() - 1; i++) { - auto input_t = reinterpret_cast<float *>(inputs_.at(i)->Data()); - memcpy(input_addr_ + dest_loc, input_t, sizeof(float) * inputs_.at(i)->ElementsNum()); - dest_loc += inputs_.at(i)->ElementsNum(); + for (int i = 0; i < in_tensors_.size() - 1; i++) { + auto input_t = reinterpret_cast<float *>(in_tensors_.at(i)->Data()); + memcpy(input_addr_ + dest_loc, input_t, sizeof(float) * in_tensors_.at(i)->ElementsNum()); + dest_loc += in_tensors_.at(i)->ElementsNum(); } - output_addr_ = reinterpret_cast<float *>(outputs_.front()->Data()); - ids_addr_ = reinterpret_cast<int *>(inputs_.back()->Data()); + output_addr_ = reinterpret_cast<float *>(out_tensors_.front()->Data()); + ids_addr_ = reinterpret_cast<int *>(in_tensors_.back()->Data()); auto ret = LiteBackendParallelLaunch(EmbeddingLookupRun, this, embedding_lookup_parameter_->thread_num); if (ret != RET_OK) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.cc index 98bf98d2ca..cdb6191697 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.cc @@ -30,7 +30,7 @@ using mindspore::schema::PrimitiveType_ExpandDims; namespace mindspore::kernel { int ExpandDimsCPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } int ret = ReSize(); @@ -38,7 +38,7 @@ } int ExpandDimsCPUKernel::ReSize() { - data_size_ = inputs_.at(0)->ElementsNum(); + data_size_ = in_tensors_.at(0)->ElementsNum(); thread_sz_count_ = MSMIN(thread_count_, data_size_); thread_sz_stride_ = UP_DIV(data_size_, thread_sz_count_); return RET_OK; @@ -74,8 +74,8 @@ int ExpandDimsCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - in_ptr_ = reinterpret_cast<float *>(inputs_.at(0)->Data()); - out_ptr_ = reinterpret_cast<float *>(outputs_.at(0)->Data()); + in_ptr_ = reinterpret_cast<float *>(in_tensors_.at(0)->Data()); + out_ptr_ = reinterpret_cast<float *>(out_tensors_.at(0)->Data()); auto ret = LiteBackendParallelLaunch(ExpandDimsRun, this, thread_sz_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "ExpandDimsRun
error error_code[" << ret << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/fill.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/fill.cc index 32866a54f2..e3e103d6dd 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/fill.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/fill.cc @@ -36,10 +36,10 @@ constexpr int kOutputNum = 1; int FillCPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } - data_size_ = outputs_.front()->ElementsNum(); + data_size_ = out_tensors_.front()->ElementsNum(); thread_sz_count_ = MSMIN(thread_count_, data_size_); thread_sz_stride_ = UP_DIV(data_size_, thread_sz_count_); return RET_OK; @@ -77,8 +77,8 @@ int FillCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - auto fillData = inputs_.at(inputs_.size() - 1); - auto output = outputs_.front(); + auto fillData = in_tensors_.at(in_tensors_.size() - 1); + auto output = out_tensors_.front(); auto fill_data = reinterpret_cast(fillData->Data()); src_data_ = fill_data[0]; out_ptr_ = reinterpret_cast(output->Data()); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/flatten.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/flatten.cc index 30e1ede1ed..a0356f9e0c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/flatten.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/flatten.cc @@ -29,7 +29,7 @@ using mindspore::schema::PrimitiveType_Flatten; namespace mindspore::kernel { int FlattenCPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } @@ -38,7 +38,7 @@ int FlattenCPUKernel::Init() { } int FlattenCPUKernel::ReSize() { - auto output_shape = outputs_[0]->shape(); + auto output_shape = out_tensors_[0]->shape(); flatten_param_->size = sizeof(float); for (int i = 0; i < output_shape.size(); i++) { flatten_param_->size *= output_shape[i]; @@ -52,8 +52,8 @@ int FlattenCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - auto input = reinterpret_cast(inputs_[0]->Data()); - auto output = reinterpret_cast(outputs_[0]->Data()); + auto input = reinterpret_cast(in_tensors_[0]->Data()); + auto output = reinterpret_cast(out_tensors_[0]->Data()); Flatten(input, output, flatten_param_); return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/flatten.h b/mindspore/lite/src/runtime/kernel/arm/fp32/flatten.h index 944259a313..beb74b59e4 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/flatten.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/flatten.h @@ -33,7 +33,7 @@ class FlattenCPUKernel : public LiteKernel { : LiteKernel(parameter, inputs, outputs, ctx, primitive) { flatten_param_ = reinterpret_cast(parameter); } - ~FlattenCPUKernel() override = default;; + ~FlattenCPUKernel() override = default; int Init() override; int ReSize() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection.cc index e5a17176cf..1299b3e932 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection.cc @@ -45,12 +45,12 @@ int FullconnectionCPUKernel::ReSize() { return RET_OK; } int FullconnectionCPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } - fc_param_->row_ = (inputs_[0]->shape())[0]; - 
fc_param_->col_ = (inputs_[1]->shape())[0]; - fc_param_->deep_ = (inputs_[1]->shape())[1]; + fc_param_->row_ = (in_tensors_[0]->shape())[0]; + fc_param_->col_ = (in_tensors_[1]->shape())[0]; + fc_param_->deep_ = (in_tensors_[1]->shape())[1]; fc_param_->row_8_ = UP_ROUND(fc_param_->row_, 8); fc_param_->col_8_ = UP_ROUND(fc_param_->col_, 8); @@ -60,8 +60,8 @@ int FullconnectionCPUKernel::Init() { bias_ptr_ = reinterpret_cast<float *>(malloc(fc_param_->col_8_ * sizeof(float))); memset(bias_ptr_, 0, fc_param_->col_8_ * sizeof(float)); - if (inputs_.size() == 3) { - memcpy(bias_ptr_, inputs_[2]->Data(), fc_param_->col_ * sizeof(float)); + if (in_tensors_.size() == 3) { + memcpy(bias_ptr_, in_tensors_[2]->Data(), fc_param_->col_ * sizeof(float)); } a_c8_ptr_ = reinterpret_cast<float *>(malloc(fc_param_->row_8_ * fc_param_->deep_ * sizeof(float))); @@ -75,7 +75,7 @@ int FullconnectionCPUKernel::Init() { return RET_MEMORY_FAILED; } memset(b_r8_ptr_, 0, fc_param_->col_8_ * fc_param_->deep_ * sizeof(float)); - RowMajor2Col8Major(reinterpret_cast<float *>(inputs_[1]->Data()), b_r8_ptr_, fc_param_->col_, fc_param_->deep_); + RowMajor2Col8Major(reinterpret_cast<float *>(in_tensors_[1]->Data()), b_r8_ptr_, fc_param_->col_, fc_param_->deep_); c_r8x8_ptr_ = reinterpret_cast<float *>(malloc(fc_param_->row_8_ * fc_param_->col_8_ * sizeof(float))); if (c_r8x8_ptr_ == nullptr) { @@ -114,8 +114,8 @@ int FullconnectionCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - auto a_ptr = reinterpret_cast<float *>(inputs_.at(0)->Data()); - auto output_ptr = reinterpret_cast<float *>(outputs_.at(0)->Data()); + auto a_ptr = reinterpret_cast<float *>(in_tensors_.at(0)->Data()); + auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(0)->Data()); RowMajor2Col8Major(a_ptr, a_c8_ptr_, fc_param_->row_, fc_param_->deep_); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm.cc index 9f39c30eae..9b5e28d870 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm.cc @@ -47,7 +47,7 @@ FusedBatchnormCPUKernel::~FusedBatchnormCPUKernel() { } int FusedBatchnormCPUKernel::InitConstTensor() { - auto scale = inputs_[1]; + auto scale = in_tensors_[1]; scale_addr_ = reinterpret_cast<float *>(malloc(scale->ElementsNum() * sizeof(float))); if (scale_addr_ == nullptr) { MS_LOG(ERROR) << "Malloc buffer failed."; @@ -55,7 +55,7 @@ int FusedBatchnormCPUKernel::InitConstTensor() { } memcpy(scale_addr_, scale->Data(), scale->ElementsNum() * sizeof(float)); - auto offset = inputs_[2]; + auto offset = in_tensors_[2]; offset_addr_ = reinterpret_cast<float *>(malloc(offset->ElementsNum() * sizeof(float))); if (offset_addr_ == nullptr) { MS_LOG(ERROR) << "Malloc buffer failed."; @@ -63,7 +63,7 @@ int FusedBatchnormCPUKernel::InitConstTensor() { } memcpy(offset_addr_, offset->Data(), offset->ElementsNum() * sizeof(float)); - auto mean = inputs_[3]; + auto mean = in_tensors_[3]; mean_addr_ = reinterpret_cast<float *>(malloc(mean->ElementsNum() * sizeof(float))); if (mean_addr_ == nullptr) { MS_LOG(ERROR) << "Malloc buffer failed."; @@ -71,7 +71,7 @@ int FusedBatchnormCPUKernel::InitConstTensor() { } memcpy(mean_addr_, mean->Data(), mean->ElementsNum() * sizeof(float)); - auto variance = inputs_[4]; + auto variance = in_tensors_[4]; var_addr_ = reinterpret_cast<float *>(malloc(variance->ElementsNum() * sizeof(float))); if (var_addr_ == nullptr) { MS_LOG(ERROR) << "Malloc buffer failed."; @@ -83,10 +83,10 @@ int FusedBatchnormCPUKernel::InitConstTensor()
{ int FusedBatchnormCPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } - auto input_shapes = inputs_[0]->shape(); + auto input_shapes = in_tensors_[0]->shape(); auto n_dim = input_shapes.size(); batchnorm_param_->channel_ = input_shapes[n_dim - 1]; batchnorm_param_->unit_ = 1; @@ -105,7 +105,7 @@ int FusedBatchnormCPUKernel::Init() { } int FusedBatchnormCPUKernel::ReSize() { - auto input_shapes = inputs_[0]->shape(); + auto input_shapes = in_tensors_[0]->shape(); batchnorm_param_->unit_ = 1; for (int i = 0; i < input_shapes.size() - 1; i++) { batchnorm_param_->unit_ *= input_shapes[i]; @@ -134,8 +134,8 @@ int FusedBatchnormCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail! Ret error code: " << prepare_ret; return prepare_ret; } - in_addr_ = reinterpret_cast<float *>(inputs_.at(0)->Data()); - out_addr_ = reinterpret_cast<float *>(outputs_.at(0)->Data()); + in_addr_ = reinterpret_cast<float *>(in_tensors_.at(0)->Data()); + out_addr_ = reinterpret_cast<float *>(out_tensors_.at(0)->Data()); int ret = LiteBackendParallelLaunch(FusedBatchNormRun, this, batchnorm_param_->op_parameter_.thread_num_); if (ret != RET_OK) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/gather.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/gather.cc index d75c836d74..add33c961e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/gather.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/gather.cc @@ -30,17 +30,17 @@ using mindspore::schema::PrimitiveType_Gather; namespace mindspore::kernel { int GatherCPUKernel::Init() { - axis_ = (reinterpret_cast<GatherParameter *>(opParameter))->axis_; - batchDims_ = (reinterpret_cast<GatherParameter *>(opParameter))->batchDims_; + axis_ = (reinterpret_cast<GatherParameter *>(op_parameter_))->axis_; + batchDims_ = (reinterpret_cast<GatherParameter *>(op_parameter_))->batchDims_; return RET_OK; } int GatherCPUKernel::ReSize() { return RET_OK; } int GatherCPUKernel::DoGather(int task_id) { - auto input_tensor = inputs_.at(0); - auto indices_tensor = inputs_.at(1); - auto out_tensor = outputs_.at(0); + auto input_tensor = in_tensors_.at(0); + auto indices_tensor = in_tensors_.at(1); + auto out_tensor = out_tensors_.at(0); auto input_ptr = reinterpret_cast<float *>(input_tensor->Data()); auto indices_ptr = reinterpret_cast<int *>(indices_tensor->Data()); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd.cc index 3d1c2a245e..e76585980e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd.cc @@ -39,10 +39,10 @@ GatherNdCPUKernel::~GatherNdCPUKernel() { } int GatherNdCPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } - auto indices_tensor = inputs_.at(1); + auto indices_tensor = in_tensors_.at(1); auto indices_shape = indices_tensor->shape(); int indices_rank = indices_shape.size(); count_ = 1; @@ -64,9 +64,9 @@ int GatherNdCPUKernel::Init() { } int GatherNdCPUKernel::ReSize() { - auto in_shape = inputs_.front()->shape(); + auto in_shape = in_tensors_.front()->shape(); int in_rank = in_shape.size(); - auto indices_tensor = inputs_.at(1); + auto indices_tensor = in_tensors_.at(1); auto indices_shape = indices_tensor->shape(); int indices_rank = indices_shape.size(); int idx_lastshape = indices_shape[indices_rank - 1]; @@ -121,8 +121,8 @@ int GatherNdCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - in_ptr_ =
reinterpret_cast<float *>(inputs_.front()->Data()); - out_ptr_ = reinterpret_cast<float *>(outputs_.front()->Data()); + in_ptr_ = reinterpret_cast<float *>(in_tensors_.front()->Data()); + out_ptr_ = reinterpret_cast<float *>(out_tensors_.front()->Data()); auto ret = LiteBackendParallelLaunch(GatherNdRun, this, thread_sz_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "gatherNd error error_code[" << ret << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm.cc index 3a62427a29..1695e3ae03 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm.cc @@ -34,8 +34,8 @@ int LocalResponseNormCPUKernel::Init() { return RET_OK; } int LocalResponseNormCPUKernel::ReSize() { return RET_OK; } int LocalResponseNormCPUKernel::DoLocalResponseNorm(int task_id) { - auto input_tensor = inputs_.front(); - auto out_tensor = outputs_.front(); + auto input_tensor = in_tensors_.front(); + auto out_tensor = out_tensors_.front(); auto input_ptr = reinterpret_cast<float *>(input_tensor->Data()); auto output_ptr = reinterpret_cast<float *>(out_tensor->Data()); @@ -55,7 +55,7 @@ int LocalResponseNormCPUKernel::DoLocalResponseNorm(int task_id) { output_ptr += stride * task_id * channel; auto error_code = LocalResponseNorm(input_ptr, count, channel, output_ptr, - reinterpret_cast<LocalResponseNormParameter *>(opParameter)); + reinterpret_cast<LocalResponseNormParameter *>(op_parameter_)); if (error_code != RET_OK) { MS_LOG(ERROR) << "DoLocalResponseNorm error task_id[" << task_id << "] error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/lstm.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/lstm.cc index ea01c6a93a..a5b800b671 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/lstm.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/lstm.cc @@ -28,14 +28,14 @@ using mindspore::schema::PrimitiveType_Lstm; namespace mindspore::kernel { int LstmCPUKernel::InitParam() { - auto input = inputs_.front(); + auto input = in_tensors_.front(); MS_ASSERT(input != nullptr); std::vector<int> in_shape = input->shape(); lstm_parm_->seq_len_ = in_shape[0]; lstm_parm_->batch_ = in_shape[1]; lstm_parm_->input_size_ = in_shape[2]; - auto weight_i = inputs_[1]; + auto weight_i = in_tensors_[1]; MS_ASSERT(weight_i != nullptr); std::vector<int> w_shape = weight_i->shape(); lstm_parm_->hidden_size_ = w_shape[1] / 4; @@ -57,7 +57,7 @@ int LstmCPUKernel::InitBuffer() { int LstmCPUKernel::InitWeightBias() { // copy weight_i and weight_h - auto weight_i = inputs_.at(1); + auto weight_i = in_tensors_.at(1); MS_ASSERT(weight_i != nullptr); weight_i_ptr_ = reinterpret_cast<float *>(malloc(weight_i->ElementsNum() * sizeof(float))); if (weight_i_ptr_ == nullptr) { @@ -66,7 +66,7 @@ int LstmCPUKernel::InitWeightBias() { } memcpy(weight_i_ptr_, weight_i->Data(), weight_i->ElementsNum() * sizeof(float)); - auto weight_h = inputs_.at(2); + auto weight_h = in_tensors_.at(2); MS_ASSERT(weight_h != nullptr); weight_h_ptr_ = reinterpret_cast<float *>(malloc(weight_h->ElementsNum() * sizeof(float))); if (weight_h_ptr_ == nullptr) { @@ -83,7 +83,7 @@ int LstmCPUKernel::InitWeightBias() { return RET_ERROR; } - auto bias_data = reinterpret_cast<float *>(inputs_.at(3)->Data()); + auto bias_data = reinterpret_cast<float *>(in_tensors_.at(3)->Data()); int state_bias_offset = 4 * lstm_parm_->hidden_size_; for (int i = 0; i < state_bias_offset; i++) { bias_ptr_[i] = bias_data[i] + bias_data[i + state_bias_offset]; @@ -100,7 +100,7 @@ int LstmCPUKernel::InitWeightBias() { int
LstmCPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } auto ret = InitParam(); @@ -146,21 +146,21 @@ int LstmCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - auto input = inputs_.at(kInputIndex); + auto input = in_tensors_.at(kInputIndex); MS_ASSERT(input != nullptr); - auto hidden_state = inputs_.at(4); + auto hidden_state = in_tensors_.at(4); MS_ASSERT(hidden_state != nullptr); - auto cell_state = inputs_.at(5); + auto cell_state = in_tensors_.at(5); MS_ASSERT(cell_state != nullptr); - auto output = outputs_.at(0); + auto output = out_tensors_.at(0); MS_ASSERT(output != nullptr); auto input_ptr = reinterpret_cast<float *>(input->Data()); auto output_ptr = reinterpret_cast<float *>(output->Data()); - auto output_hidden_state = outputs_[1]; + auto output_hidden_state = out_tensors_[1]; memcpy(output_hidden_state->Data(), hidden_state->Data(), hidden_state->ElementsNum() * sizeof(float)); - auto output_cell_state = outputs_[2]; + auto output_cell_state = out_tensors_[2]; memcpy(output_cell_state->Data(), cell_state->Data(), cell_state->ElementsNum() * sizeof(float)); Lstm(output_ptr, input_ptr, weight_i_ptr_, weight_h_ptr_, bias_ptr_, diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/lstm.h b/mindspore/lite/src/runtime/kernel/arm/fp32/lstm.h index 8fdd4dd8d9..bb6221c1f8 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/lstm.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/lstm.h @@ -28,7 +28,7 @@ class LstmCPUKernel : public LiteKernel { const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx, const lite::Primitive *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) { - lstm_parm_ = reinterpret_cast<LstmParameter *>(opParameter); + lstm_parm_ = reinterpret_cast<LstmParameter *>(op_parameter_); } ~LstmCPUKernel() override { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/matmul.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/matmul.cc index 61eea4fd40..dc515468e0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/matmul.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/matmul.cc @@ -34,12 +34,12 @@ int MatmulCPUKernel::ReSize() { return RET_OK; } int MatmulCPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } int batch = 1; - auto a_shape = inputs_[0]->shape(); - auto c_shape = outputs_[0]->shape(); + auto a_shape = in_tensors_[0]->shape(); + auto c_shape = out_tensors_[0]->shape(); for (int i = 0; i < a_shape.size() - 2; ++i) { batch *= a_shape[i]; } @@ -97,9 +97,9 @@ int MatmulCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - auto a_ptr = reinterpret_cast<float *>(inputs_[0]->Data()); - auto b_ptr = reinterpret_cast<float *>(inputs_[1]->Data()); - auto c_ptr = reinterpret_cast<float *>(outputs_[0]->Data()); + auto a_ptr = reinterpret_cast<float *>(in_tensors_[0]->Data()); + auto b_ptr = reinterpret_cast<float *>(in_tensors_[1]->Data()); + auto c_ptr = reinterpret_cast<float *>(out_tensors_[0]->Data()); auto a_stride = params_->row_ * params_->deep_; auto b_stride = params_->deep_ * params_->col_; auto c_stride = params_->row_ * params_->col_; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/nchw2nhwc.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/nchw2nhwc.cc index 74a77bf579..8bcc61f2f1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/nchw2nhwc.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/nchw2nhwc.cc @@ -33,8 +33,8 @@ int Nchw2NhwcCPUKernel::Run() { MS_LOG(ERROR)
<< "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - auto input = inputs_[0]; - auto output = outputs_[0]; + auto input = in_tensors_[0]; + auto output = out_tensors_[0]; if (input->shape().size() == 4) { PackNCHWToNHWCFp32(input->Data(), output->Data(), output->Batch(), output->Height() * output->Width(), diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/nhwc2nchw.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/nhwc2nchw.cc index 634961aa71..5926611681 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/nhwc2nchw.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/nhwc2nchw.cc @@ -33,8 +33,8 @@ int Nhwc2NchwCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - auto input = inputs_[0]; - auto output = outputs_[0]; + auto input = in_tensors_[0]; + auto output = out_tensors_[0]; if (input->shape().size() == 4) { PackNHWCToNCHWFp32(input->Data(), output->Data(), output->Batch(), output->Height() * output->Width(), diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot.cc index 5cb964a3ea..ed9253626a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot.cc @@ -36,17 +36,17 @@ constexpr size_t kOutputNum = 1; int OneHotCPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } // indices depth on_value off_value - if (inputs_.size() != kInputNum || outputs_.size() != kOutputNum) { - MS_LOG(ERROR) << "OneHot input size should be " << kInputNum << ", got " << inputs_.size() - << ", output size should be" << kOutputNum << ", got " << outputs_.size(); + if (in_tensors_.size() != kInputNum || out_tensors_.size() != kOutputNum) { + MS_LOG(ERROR) << "OneHot input size should be " << kInputNum << ", got " << in_tensors_.size() + << ", output size should be" << kOutputNum << ", got " << out_tensors_.size(); return RET_ERROR; } - auto indices = inputs_.at(0); + auto indices = in_tensors_.at(0); if (indices == nullptr) { MS_LOG(ERROR) << "OneHot inputs[0] indices nullptr"; return RET_NULL_PTR; @@ -64,7 +64,7 @@ int OneHotCPUKernel::Init() { } thread_num_ = context_->thread_num_; - const int indices_rank = static_cast(inputs_.at(0)->shape().size()); + const int indices_rank = static_cast(in_tensors_.at(0)->shape().size()); if (axis_ < 0) { axis_ += indices_rank + 1; } @@ -87,8 +87,8 @@ int RunOneHot(int task_id, LiteParallelGroupEnv *penv, void *cdata) { } int OneHotCPUKernel::OneHotImpl(int task_id) { - auto indices_data = static_cast(inputs_.at(0)->Data()); - auto output = outputs_.at(0); + auto indices_data = static_cast(in_tensors_.at(0)->Data()); + auto output = out_tensors_.at(0); if (output == nullptr) { MS_LOG(ERROR) << "OneHot output nullptr"; return RET_NULL_PTR; @@ -99,20 +99,20 @@ int OneHotCPUKernel::OneHotImpl(int task_id) { if (ret != RET_OK) { return ret; } - auto one_hot_param = reinterpret_cast(opParameter); + auto one_hot_param = reinterpret_cast(op_parameter_); ret = OneHot(indices_data, output_data, one_hot_param, task_id, thread_num_); return ret; } int OneHotCPUKernel::GetParams() { - auto one_hot_param = reinterpret_cast(opParameter); + auto one_hot_param = reinterpret_cast(op_parameter_); if (one_hot_param == nullptr) { MS_LOG(ERROR) << "cast OneHotParameter nullptr"; return RET_NULL_PTR; } - auto depth_tensor = inputs_.at(1); + auto depth_tensor = in_tensors_.at(1); if (depth_tensor == nullptr) { 
MS_LOG(ERROR) << "OneHot inputs[1] depth nullptr"; return RET_NULL_PTR; @@ -123,7 +123,7 @@ int OneHotCPUKernel::GetParams() { } one_hot_param->depth_ = *depth; - auto on_value_tensor = inputs_.at(2); + auto on_value_tensor = in_tensors_.at(2); if (on_value_tensor == nullptr) { MS_LOG(ERROR) << "OneHot inputs[2] on_value nullptr"; return RET_NULL_PTR; @@ -134,7 +134,7 @@ int OneHotCPUKernel::GetParams() { } one_hot_param->on_value_ = *on_value; - auto off_value_tensor = inputs_.at(3); + auto off_value_tensor = in_tensors_.at(3); if (off_value_tensor == nullptr) { MS_LOG(ERROR) << "OneHot inputs[3] off_value nullptr"; return RET_NULL_PTR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/pad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/pad.cc index 28be57faaa..3f8d9c43d4 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/pad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/pad.cc @@ -37,17 +37,17 @@ constexpr int kOutputNum = 1; int PadCPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } - if (inputs_.size() != kInputNum || outputs_.size() != kOutputNum) { - MS_LOG(ERROR) << "Pad input size should be " << kInputNum << ", got " << inputs_.size() << ", output size should be" - << kOutputNum << ", got " << outputs_.size(); + if (in_tensors_.size() != kInputNum || out_tensors_.size() != kOutputNum) { + MS_LOG(ERROR) << "Pad input size should be " << kInputNum << ", got " << in_tensors_.size() + << ", output size should be" << kOutputNum << ", got " << out_tensors_.size(); return RET_ERROR; } - auto input = inputs_.at(0); - auto output = outputs_.at(0); + auto input = in_tensors_.at(0); + auto output = out_tensors_.at(0); if (input == nullptr || output == nullptr) { MS_LOG(ERROR) << "Pad input or output nullptr"; return RET_NULL_PTR; @@ -77,8 +77,8 @@ int PadImpl(int task_id, LiteParallelGroupEnv *penv, void *cdata) { } int PadCPUKernel::RunImpl(int task_id) { - auto input = inputs_.at(0); - auto output = outputs_.at(0); + auto input = in_tensors_.at(0); + auto output = out_tensors_.at(0); auto input_data = reinterpret_cast(input->Data()); auto output_data = reinterpret_cast(output->Data()); @@ -94,7 +94,7 @@ int PadCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - auto output = outputs_.at(0); + auto output = out_tensors_.at(0); int output_size = output->DataSize(); auto output_data = reinterpret_cast(output->Data()); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/pooling.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/pooling.cc index 45255f7ce8..fec2494876 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/pooling.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/pooling.cc @@ -30,7 +30,7 @@ using mindspore::schema::PrimitiveType_Pooling; namespace mindspore::kernel { int PoolingCPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } auto ret = PoolingBaseCPUKernel::Init(); @@ -51,8 +51,8 @@ int PoolingCPUKernel::ReSize() { } int PoolingCPUKernel::RunImpl(int task_id) { - auto input_ptr = reinterpret_cast(inputs_.at(kInputIndex)->Data()); - auto output_ptr = reinterpret_cast(outputs_.at(kOutputIndex)->Data()); + auto input_ptr = reinterpret_cast(in_tensors_.at(kInputIndex)->Data()); + auto output_ptr = reinterpret_cast(out_tensors_.at(kOutputIndex)->Data()); if (pooling_param_->max_pooling_) { MaxPooling(input_ptr, output_ptr, pooling_param_, 
task_id); } else { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/power.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/power.cc index 467b92efb6..ab881b96bd 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/power.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/power.cc @@ -55,15 +55,15 @@ int PowerCPUKernel::Run() { } int PowerCPUKernel::RunImpl(int task_id) { - auto x_addr = reinterpret_cast(inputs_[0]->Data()); - auto output_addr = reinterpret_cast(outputs_[0]->Data()); - auto size = inputs_[0]->ElementsNum(); + auto x_addr = reinterpret_cast(in_tensors_[0]->Data()); + auto output_addr = reinterpret_cast(out_tensors_[0]->Data()); + auto size = in_tensors_[0]->ElementsNum(); int stride = UP_DIV(size, thread_count_); int len = MSMIN(stride, size - stride * task_id); float *exp_addr = nullptr; bool broadcast = true; - if (inputs_.size() == 2) { - exp_addr = reinterpret_cast(inputs_[1]->Data()); + if (in_tensors_.size() == 2) { + exp_addr = reinterpret_cast(in_tensors_[1]->Data()); broadcast = false; } float *cur_exp; @@ -82,8 +82,7 @@ kernel::LiteKernel *CpuPowerFp32KernelCreator(const std::vectorthread_num_), - power_(reinterpret_cast(opParameter)->power_), - scale_(reinterpret_cast(opParameter)->scale_), - shift_(reinterpret_cast(opParameter)->shift_) {} + power_(reinterpret_cast(op_parameter_)->power_), + scale_(reinterpret_cast(op_parameter_)->scale_), + shift_(reinterpret_cast(op_parameter_)->shift_) {} ~PowerCPUKernel() override = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/prelu.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/prelu.cc index c6d0555a1e..7682d903cd 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/prelu.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/prelu.cc @@ -51,10 +51,10 @@ int PReluCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - auto input = inputs_.at(0); + auto input = in_tensors_.at(0); prelu_param_->input_num_ = input->ElementsNum(); input_data = reinterpret_cast(input->Data()); - output_data = reinterpret_cast(outputs_.at(0)->Data()); + output_data = reinterpret_cast(out_tensors_.at(0)->Data()); auto ret = LiteBackendParallelLaunch(PReluRun, this, prelu_param_->thread_num_); if (ret != RET_OK) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/prelu.h b/mindspore/lite/src/runtime/kernel/arm/fp32/prelu.h index 23ac43100f..cf0a0c5180 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/prelu.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/prelu.h @@ -32,7 +32,7 @@ class PReluCPUKernel : public LiteKernel { const std::vector &outputs, const lite::Context *ctx, const lite::Primitive *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { - prelu_param_ = (reinterpret_cast(opParameter)); + prelu_param_ = (reinterpret_cast(op_parameter_)); primitive_ = primitive; } ~PReluCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/range.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/range.cc index 880ccd5c99..b4972e219a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/range.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/range.cc @@ -43,10 +43,10 @@ int RangeCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - size_t start = (reinterpret_cast(opParameter))->start_; - size_t limit = (reinterpret_cast(opParameter))->limit_; - size_t delta = (reinterpret_cast(opParameter))->delta_; - auto output_ptr = 
reinterpret_cast(outputs_.at(0)->Data()); + size_t start = (reinterpret_cast(op_parameter_))->start_; + size_t limit = (reinterpret_cast(op_parameter_))->limit_; + size_t delta = (reinterpret_cast(op_parameter_))->delta_; + auto output_ptr = reinterpret_cast(out_tensors_.at(0)->Data()); Range(output_ptr, start, limit, delta); return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/rank.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/rank.cc index e70b862fd1..16212071aa 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/rank.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/rank.cc @@ -43,8 +43,8 @@ int RankCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - auto output_ptr = reinterpret_cast(outputs_.at(0)->Data()); - auto in_shape = inputs_[0]->shape(); + auto output_ptr = reinterpret_cast(out_tensors_.at(0)->Data()); + auto in_shape = in_tensors_[0]->shape(); auto rank = in_shape.size(); Rank(output_ptr, rank); return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reduce.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/reduce.cc index d05b8a17c0..256369f218 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/reduce.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reduce.cc @@ -43,20 +43,20 @@ constexpr size_t kOutputNum = 1; } // namespace int ReduceCPUKernel::CheckInputsOutputs() { - if (inputs_.size() != kInputNum) { - MS_LOG(ERROR) << "Reduce inputs size should be " << kInputNum << " but got " << inputs_.size(); + if (in_tensors_.size() != kInputNum) { + MS_LOG(ERROR) << "Reduce inputs size should be " << kInputNum << " but got " << in_tensors_.size(); return RET_ERROR; } - if (outputs_.size() != kOutputNum) { - MS_LOG(ERROR) << "Reduce outputs size should be " << kOutputNum << " but got " << outputs_.size(); + if (out_tensors_.size() != kOutputNum) { + MS_LOG(ERROR) << "Reduce outputs size should be " << kOutputNum << " but got " << out_tensors_.size(); return RET_ERROR; } - auto input = inputs_.at(0); + auto input = in_tensors_.at(0); if (input == nullptr) { MS_LOG(ERROR) << "Reduce input is nullptr"; return RET_NULL_PTR; } - auto output = outputs_.at(0); + auto output = out_tensors_.at(0); if (output == nullptr) { MS_LOG(ERROR) << "Reduce output is nullptr"; return RET_NULL_PTR; @@ -65,7 +65,7 @@ int ReduceCPUKernel::CheckInputsOutputs() { } int ReduceCPUKernel::CheckParameters() { - size_t input_rank = inputs_.at(0)->shape().size(); + size_t input_rank = in_tensors_.at(0)->shape().size(); if (static_cast(num_axes_) > input_rank) { MS_LOG(ERROR) << "Reduce num of reduce axes " << num_axes_ << " larger than input rank " << input_rank; return RET_ERROR; @@ -92,7 +92,7 @@ int ReduceCPUKernel::CheckParameters() { int ReduceCPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } auto ret = CheckInputsOutputs(); @@ -162,8 +162,8 @@ int ReduceCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; return prepare_ret; } - tmp_shape_ = inputs_.at(0)->shape(); - src_data_ = static_cast(inputs_.at(0)->Data()); + tmp_shape_ = in_tensors_.at(0)->shape(); + src_data_ = static_cast(in_tensors_.at(0)->Data()); for (int i = 0; i < data_buffers_.size(); ++i) { dst_data_ = data_buffers_[i]; int axis = axes_[i]; @@ -195,7 +195,7 @@ int ReduceCPUKernel::Run() { inner_size_ *= tmp_shape_[i]; } axis_size_ = tmp_shape_[last_reduce_axis]; - dst_data_ = reinterpret_cast(outputs_.at(0)->Data()); + dst_data_ = 
reinterpret_cast(out_tensors_.at(0)->Data()); auto error_code = LiteBackendParallelLaunch(ReduceImpl, this, context_->thread_num_); if (error_code != RET_OK) { MS_LOG(ERROR) << "Reduce run error, error_code[" << error_code << "]"; @@ -206,7 +206,7 @@ int ReduceCPUKernel::Run() { } int ReduceCPUKernel::MallocTmpBuffer() { - auto input_shape = inputs_.at(0)->shape(); + auto input_shape = in_tensors_.at(0)->shape(); for (auto i = 0; i < num_axes_ - 1; i++) { int axis = axes_[i]; size_t size = 1; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reshape.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/reshape.cc index 347fc72ad0..1a28fe871c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/reshape.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reshape.cc @@ -41,9 +41,9 @@ int ReshapeCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << ret; return ret; } - auto input_ptr = inputs_.at(kInputIndex)->Data(); - auto output_ptr = outputs_.at(kOutputIndex)->Data(); - size_t data_size = inputs_.at(kInputIndex)->Size(); + auto input_ptr = in_tensors_.at(kInputIndex)->Data(); + auto output_ptr = out_tensors_.at(kOutputIndex)->Data(); + size_t data_size = in_tensors_.at(kInputIndex)->Size(); Reshape(input_ptr, output_ptr, data_size); return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/resize.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/resize.cc index 6273744307..372f95fa1f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/resize.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/resize.cc @@ -49,12 +49,12 @@ int ResizeImpl(int task_id, LiteParallelGroupEnv *penv, void *cdata) { } int ResizeCPUKernel::RunImpl(int task_id) { - auto input = inputs_.at(0); + auto input = in_tensors_.at(0); auto input_data = reinterpret_cast(input->Data()); if (input_data == nullptr) { return RET_NULL_PTR; } - auto output_data = reinterpret_cast(outputs_.at(0)->Data()); + auto output_data = reinterpret_cast(out_tensors_.at(0)->Data()); if (output_data == nullptr) { return RET_NULL_PTR; } @@ -66,7 +66,7 @@ int ResizeCPUKernel::RunImpl(int task_id) { int ret = 0; switch (method_) { case static_cast(schema::ResizeMethod_BILINEAR): { - ret = ResizeBilinear(input_data, output_data, input_shape.data(), outputs_[0]->shape().data(), + ret = ResizeBilinear(input_data, output_data, input_shape.data(), out_tensors_[0]->shape().data(), align_corners_, task_id, context_->thread_num_); break; } @@ -75,7 +75,7 @@ int ResizeCPUKernel::RunImpl(int task_id) { MS_LOG(ERROR) << "ResizeNearestNeighbor not support align_corners."; return RET_ERROR; } - ret = ResizeNearestNeighbor(input_data, output_data, input_shape.data(), outputs_[0]->shape().data(), task_id, + ret = ResizeNearestNeighbor(input_data, output_data, input_shape.data(), out_tensors_[0]->shape().data(), task_id, context_->thread_num_); break; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.cc index 80ddfcf320..1c93f8aabb 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.cc @@ -31,15 +31,15 @@ namespace mindspore::kernel { int ReverseCPUKernel::Stride(int index) { int i, stride = 1; - for (i = index + 1; i < inputs_[0]->shape().size(); ++i) { - stride *= inputs_[0]->shape()[i]; + for (i = index + 1; i < in_tensors_[0]->shape().size(); ++i) { + stride *= in_tensors_[0]->shape()[i]; } return stride; } int ReverseCPUKernel::ReSize() { - auto *param = reinterpret_cast(opParameter); - auto 
input_shape = inputs_[0]->shape(); + auto *param = reinterpret_cast(op_parameter_); + auto input_shape = in_tensors_[0]->shape(); if (param->num_axis_ > input_shape.size()) { MS_LOG(ERROR) << "Reverse dims : " << param->num_axis_ << "is greater than input shape size :" << input_shape.size(); @@ -90,10 +90,10 @@ int ReverseCPUKernel::ReSize() { int ReverseCPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } - data_size_ = inputs_.at(0)->ElementsNum(); + data_size_ = in_tensors_.at(0)->ElementsNum(); thread_sz_count_ = MSMIN(thread_count_, data_size_); thread_sz_stride_ = UP_DIV(data_size_, thread_sz_count_); int ret = ReSize(); @@ -130,8 +130,8 @@ int ReverseCPUKernel::Run() { MS_LOG(ERROR) << "Prepare failed."; return RET_ERROR; } - in_ptr_ = reinterpret_cast(inputs_[0]->Data()); - out_ptr_ = reinterpret_cast(outputs_[0]->Data()); + in_ptr_ = reinterpret_cast(in_tensors_[0]->Data()); + out_ptr_ = reinterpret_cast(out_tensors_[0]->Data()); ret = LiteBackendParallelLaunch(ReverseRun, this, thread_sz_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "Reverse run error error_code[" << ret << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_sequence.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_sequence.cc index 3ed50ee3e5..56f8e185dc 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_sequence.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_sequence.cc @@ -25,17 +25,17 @@ using mindspore::schema::PrimitiveType_ReverseSequence; namespace mindspore::kernel { int ReverseSequenceCPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } - auto input0 = inputs_.at(0); - auto input1 = inputs_.at(1); - auto output = outputs_.at(0); + auto input0 = in_tensors_.at(0); + auto input1 = in_tensors_.at(1); + auto output = out_tensors_.at(0); MS_ASSERT(input0 != nullptr); MS_ASSERT(input1 != nullptr); MS_ASSERT(output != nullptr); - auto para = reinterpret_cast(opParameter); + auto para = reinterpret_cast(op_parameter_); ConvertAxisToPositive(input0->shape(), &(para->batch_axis_)); ConvertAxisToPositive(input0->shape(), &(para->seq_axis_)); @@ -93,10 +93,10 @@ int ReverseSequenceCPUKernel::Run() { MS_LOG(ERROR) << "Prepare failed."; return RET_ERROR; } - float *input0 = reinterpret_cast(inputs_.at(0)->Data()); - int *input1 = reinterpret_cast(inputs_.at(1)->Data()); - float *output = reinterpret_cast(outputs_.at(0)->Data()); - ReverseSequence(input0, input1, output, reinterpret_cast(opParameter)); + float *input0 = reinterpret_cast(in_tensors_.at(0)->Data()); + int *input1 = reinterpret_cast(in_tensors_.at(1)->Data()); + float *output = reinterpret_cast(out_tensors_.at(0)->Data()); + ReverseSequence(input0, input1, output, reinterpret_cast(op_parameter_)); return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling.cc index 5ce2b03e24..5848cca8d7 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling.cc @@ -62,12 +62,12 @@ int ROIPoolingCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail! 
ret: " << ret; return ret; } - in_ptr_ = reinterpret_cast(inputs_.front()->Data()); - out_ptr_ = reinterpret_cast(outputs_.front()->Data()); - roi_ptr_ = reinterpret_cast(inputs_.at(1)->Data()); - in_shape_ = reinterpret_cast(inputs_.front()->shape().data()); - out_shape_ = reinterpret_cast(outputs_.front()->shape().data()); - dim_ = inputs_.front()->shape().size(); + in_ptr_ = reinterpret_cast(in_tensors_.front()->Data()); + out_ptr_ = reinterpret_cast(out_tensors_.front()->Data()); + roi_ptr_ = reinterpret_cast(in_tensors_.at(1)->Data()); + in_shape_ = reinterpret_cast(in_tensors_.front()->shape().data()); + out_shape_ = reinterpret_cast(out_tensors_.front()->shape().data()); + dim_ = in_tensors_.front()->shape().size(); thread_count_ = 1; ret = LiteBackendParallelLaunch(ROIPoolingRun, this, thread_count_); if (ret != RET_OK) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/scale.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/scale.cc index 0bcf08d9d5..2e32a31399 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/scale.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/scale.cc @@ -47,8 +47,8 @@ void ScaleCPUKernel::FreeTmpBuffer() { int ScaleCPUKernel::InitScaleOffset() { FreeTmpBuffer(); - auto scale_tensor = inputs_.at(1); - float *scale_ptr = reinterpret_cast(inputs_.at(1)->Data()); + auto scale_tensor = in_tensors_.at(1); + float *scale_ptr = reinterpret_cast(in_tensors_.at(1)->Data()); if (scale_ptr != nullptr) { scale_param_->const_scale_ = true; scale_ = reinterpret_cast(malloc(scale_tensor->ElementsNum() * sizeof(float))); @@ -62,8 +62,8 @@ int ScaleCPUKernel::InitScaleOffset() { scale_ = nullptr; } - if (inputs_.size() == 3) { - auto offset_tensor = inputs_.at(2); + if (in_tensors_.size() == 3) { + auto offset_tensor = in_tensors_.at(2); offset_ = reinterpret_cast(malloc(offset_tensor->ElementsNum() * sizeof(float))); if (offset_ == nullptr) { MS_LOG(ERROR) << "Malloc buffer failed."; @@ -79,9 +79,9 @@ int ScaleCPUKernel::InitScaleOffset() { } int ScaleCPUKernel::InitParameter() { - auto in_tensor = inputs_.at(0); + auto in_tensor = in_tensors_.at(0); auto in_shape = in_tensor->shape(); - auto scale_tensor = inputs_.at(1); + auto scale_tensor = in_tensors_.at(1); auto scale_shape = scale_tensor->shape(); if (scale_shape.size() + scale_param_->axis_ > in_shape.size()) { @@ -108,8 +108,8 @@ int ScaleCPUKernel::InitParameter() { } int ScaleCPUKernel::Init() { - if (inputs_.size() < 2 || inputs_.size() > 3) { - MS_LOG(ERROR) << "inputs to Scale operator should be 2 or 3, but " << inputs_.size() << " is given."; + if (in_tensors_.size() < 2 || in_tensors_.size() > 3) { + MS_LOG(ERROR) << "inputs to Scale operator should be 2 or 3, but " << in_tensors_.size() << " is given."; return RET_ERROR; } @@ -159,16 +159,16 @@ int ScaleCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << ret; return ret; } - auto in_tensor = inputs_.front(); + auto in_tensor = in_tensors_.front(); input_ptr_ = reinterpret_cast(in_tensor->Data()); if (scale_ == nullptr) { - auto scale_tensor = inputs_[1]; + auto scale_tensor = in_tensors_[1]; scale_ = reinterpret_cast(scale_tensor->Data()); } - auto out_tensor = outputs_.front(); + auto out_tensor = out_tensors_.front(); output_ptr_ = reinterpret_cast(out_tensor->Data()); - ret = LiteBackendParallelLaunch(ScaleRun, this, opParameter->thread_num_); + ret = LiteBackendParallelLaunch(ScaleRun, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "Scale error error_code[" << ret << "]"; return RET_ERROR; diff --git 
a/mindspore/lite/src/runtime/kernel/arm/fp32/scale.h b/mindspore/lite/src/runtime/kernel/arm/fp32/scale.h index 38ed517746..c168c48e5b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/scale.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/scale.h @@ -29,7 +29,7 @@ class ScaleCPUKernel : public LiteKernel { const std::vector &outputs, const lite::Context *ctx, const lite::Primitive *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) { - scale_param_ = reinterpret_cast(opParameter); + scale_param_ = reinterpret_cast(op_parameter_); } ~ScaleCPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.cc index a800c184d1..4bc4f0b372 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.cc @@ -44,12 +44,12 @@ int ScatterNDCPUKernel::Init() { } int ScatterNDCPUKernel::ReSize() { -auto shape = inputs_.at(kScatterShapeIndex); - auto indices = inputs_.at(kScatterIndicesIndex); - auto update = inputs_.at(kScatterUpdateIndex); +auto shape = in_tensors_.at(kScatterShapeIndex); + auto indices = in_tensors_.at(kScatterIndicesIndex); + auto update = in_tensors_.at(kScatterUpdateIndex); update_ptr_ = reinterpret_cast(update->Data()); - output_ptr_ = reinterpret_cast(outputs_.at(0)->Data()); + output_ptr_ = reinterpret_cast(out_tensors_.at(0)->Data()); // check indices shape auto shape_rank = shape->ElementsNum(); @@ -119,7 +119,7 @@ auto shape = inputs_.at(kScatterShapeIndex); output_unit_offsets_.push_back(tmp_stride); } - thread_n_num_ = MSMIN(opParameter->thread_num_, num_unit_); + thread_n_num_ = MSMIN(op_parameter_->thread_num_, num_unit_); thread_n_stride_ = UP_DIV(num_unit_, thread_n_num_); return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/shape.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/shape.cc index c16d2ac102..93c02053bd 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/shape.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/shape.cc @@ -40,8 +40,8 @@ int ShapeCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << ret; return RET_ERROR; } - auto out_tensor = outputs_.front(); - auto in_tensor = inputs_.front(); + auto out_tensor = out_tensors_.front(); + auto in_tensor = in_tensors_.front(); if (in_tensor == nullptr || out_tensor == nullptr) { MS_LOG(ERROR) << "null pointer dereferencing."; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/slice.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/slice.cc index 53024fab8a..aef42743dd 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/slice.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/slice.cc @@ -40,8 +40,8 @@ int SliceLaunch(int thread_id, LiteParallelGroupEnv *penv, void *cdata) { } // namespace int SliceCPUKernel::ReSize() { - auto *param = reinterpret_cast(opParameter); - auto input_shape = inputs_[0]->shape(); + auto *param = reinterpret_cast(op_parameter_); + auto input_shape = in_tensors_[0]->shape(); if (input_shape.size() != param->param_length_) { MS_LOG(ERROR) << "Input begin's lenth " << param->param_length_ << "is not equal to input shape size " << input_shape.size(); @@ -66,9 +66,9 @@ int SliceCPUKernel::Init() { } int SliceCPUKernel::SliceParallelRun(int thread_id) { - const float *input_data = reinterpret_cast(inputs_[0]->Data()); - float *output_data = reinterpret_cast(outputs_[0]->Data()); - SliceParameter *param = reinterpret_cast(opParameter); + const float 
*input_data = reinterpret_cast(in_tensors_[0]->Data()); + float *output_data = reinterpret_cast(out_tensors_[0]->Data()); + SliceParameter *param = reinterpret_cast(op_parameter_); DoSlice(input_data, output_data, param); return RET_OK; } @@ -79,7 +79,7 @@ int SliceCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << ret; return ret; } - SliceParameter *param = reinterpret_cast(opParameter); + SliceParameter *param = reinterpret_cast(op_parameter_); for (int i = 0; i < param->param_length_; ++i) { if (param->size_[i] < 0) { param->size_[i] = param->shape_[i] - param->begin_[i]; @@ -91,8 +91,8 @@ int SliceCPUKernel::Run() { PadSliceParameterTo4D(param); } - const float *input_data = reinterpret_cast(inputs_[0]->Data()); - float *output_data = reinterpret_cast(outputs_[0]->Data()); + const float *input_data = reinterpret_cast(in_tensors_[0]->Data()); + float *output_data = reinterpret_cast(out_tensors_[0]->Data()); if (param->size_[1] < param->op_parameter_.thread_num_) { DoSliceNoParallel(input_data, output_data, param); return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/softmax.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/softmax.cc index 31ce2db21a..2b813e7125 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/softmax.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/softmax.cc @@ -52,7 +52,7 @@ int SoftmaxCPUKernel::ReSize() { softmax_param_->axis_ += n_dim; axis = softmax_param_->axis_; } - auto in_shape = inputs_.front()->shape(); + auto in_shape = in_tensors_.front()->shape(); int out_plane_size = 1; for (int i = 0; i < axis; ++i) { out_plane_size *= in_shape[i]; @@ -75,8 +75,8 @@ int SoftmaxCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << ret; return RET_ERROR; } - auto input_ptr = reinterpret_cast(inputs_.at(kInputIndex)->Data()); - auto output_ptr = reinterpret_cast(outputs_.at(kOutputIndex)->Data()); + auto input_ptr = reinterpret_cast(in_tensors_.at(kInputIndex)->Data()); + auto output_ptr = reinterpret_cast(out_tensors_.at(kOutputIndex)->Data()); Softmax(input_ptr, output_ptr, sum_data_, softmax_param_); return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch.cc index 07dbca5987..cfc1ebb80b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch.cc @@ -32,7 +32,7 @@ using mindspore::schema::PrimitiveType_SpaceToBatch; namespace mindspore::kernel { int SpaceToBatchCPUKernel::Init() { - SpaceToBatchParameter *param = reinterpret_cast(this->opParameter); + SpaceToBatchParameter *param = reinterpret_cast(this->op_parameter_); for (int i = 0; i < SPACE_TO_BATCH_PADDINGS_SIZE; ++i) { if (param->paddings_[i] != 0) { param->need_paddings_ = true; @@ -48,11 +48,11 @@ int SpaceToBatchCPUKernel::Init() { } int SpaceToBatchCPUKernel::ReSize() { - if (inputs_[0]->GetFormat() != schema::Format_NHWC) { + if (in_tensors_[0]->GetFormat() != schema::Format_NHWC) { MS_LOG(ERROR) << "space_to_batch only support NHWC now!"; return RET_FORMAT_ERR; } - SpaceToBatchParameter *param = reinterpret_cast(this->opParameter); + SpaceToBatchParameter *param = reinterpret_cast(this->op_parameter_); param->num_elements_ = EnumElement(param->in_shape_, param->n_dims_); param->num_elements_padded_ = EnumElement(param->padded_in_shape_, param->n_dims_); return RET_OK; @@ -64,11 +64,11 @@ int SpaceToBatchCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << ret; return ret; } - auto input 
= inputs_[0]; - auto output = outputs_[0]; + auto input = in_tensors_[0]; + auto output = out_tensors_[0]; input_ptr_ = reinterpret_cast(input->Data()); output_ptr_ = reinterpret_cast(output->Data()); - SpaceToBatchParameter *param = reinterpret_cast(this->opParameter); + SpaceToBatchParameter *param = reinterpret_cast(this->op_parameter_); float *tmp_space[3] = {nullptr, nullptr, nullptr}; if (param->need_paddings_) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.cc index e5f74741fa..dbf106df09 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.cc @@ -32,7 +32,7 @@ using mindspore::schema::PrimitiveType_SpaceToDepth; namespace mindspore::kernel { int SpaceToDepthCPUKernel::Init() { - SpaceToDepthParameter *param = reinterpret_cast(opParameter); + SpaceToDepthParameter *param = reinterpret_cast(op_parameter_); if (param->block_size_ <= 0) { MS_LOG(ERROR) << "Input block_size should > 0!"; return RET_PARAM_INVALID; @@ -45,13 +45,13 @@ int SpaceToDepthCPUKernel::Init() { } int SpaceToDepthCPUKernel::ReSize() { -if (inputs_[0]->GetFormat() != schema::Format_NHWC) { +if (in_tensors_[0]->GetFormat() != schema::Format_NHWC) { MS_LOG(ERROR) << "space_to_depth only support NHWC now!"; return RET_FORMAT_ERR; } - num_unit_ = static_cast(inputs_[0]->shape().at(kNHWC_H)); - thread_h_num_ = MSMIN(opParameter->thread_num_, num_unit_); + num_unit_ = static_cast(in_tensors_[0]->shape().at(kNHWC_H)); + thread_h_num_ = MSMIN(op_parameter_->thread_num_, num_unit_); thread_h_stride_ = UP_DIV(num_unit_, thread_h_num_); return RET_OK; } @@ -62,9 +62,9 @@ int SpaceToDepthCPUKernel::SpaceToDepth(int task_id) { return RET_OK; } int thread_offset = task_id * thread_h_stride_; - auto in_shape = inputs_[0]->shape(); - auto out_shape = outputs_[0]->shape(); - SpaceToDepthParameter *param = reinterpret_cast(opParameter); + auto in_shape = in_tensors_[0]->shape(); + auto out_shape = out_tensors_[0]->shape(); + SpaceToDepthParameter *param = reinterpret_cast(op_parameter_); auto ret = SpaceToDepthForNHWC(input_ptr_, output_ptr_, in_shape.data(), out_shape.data(), in_shape.size(), param->block_size_, thread_offset, thread_offset + num_unit_thread); if (ret != RET_OK) { @@ -90,10 +90,10 @@ int SpaceToDepthCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << ret; return ret; } - input_ptr_ = reinterpret_cast(inputs_[0]->Data()); - output_ptr_ = reinterpret_cast(outputs_[0]->Data()); - if (inputs_[0]->GetFormat() == schema::Format_NHWC) { - ret = LiteBackendParallelLaunch(SpaceToDepthRun, this, thread_h_num_); + input_ptr_ = reinterpret_cast(in_tensors_[0]->Data()); + output_ptr_ = reinterpret_cast(out_tensors_[0]->Data()); + if (in_tensors_[0]->GetFormat() == schema::Format_NHWC) { + ret = LiteBackendParallelLaunch(SpaceToDepthRun, this, thread_h_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "SpaceToDepth error error_code[" << ret << "]"; return ret; @@ -103,7 +103,7 @@ int SpaceToDepthCPUKernel::Run() { MS_LOG(ERROR) << "Only support NHWC now!"; return RET_ERROR; } - return RET_OK; + return RET_OK; } kernel::LiteKernel *CpuSpaceToDepthFp32KernelCreator(const std::vector &inputs, diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense.cc index b48f1c4f80..87229b194e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense.cc +++ 
b/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense.cc @@ -54,11 +54,11 @@ int SparseToDenseCPUKernel::Run() { MS_LOG(ERROR) << "Prepare failed."; return RET_ERROR; } - auto input = inputs_.at(0); - auto input1 = inputs_.at(1); - auto input2 = inputs_.at(2); - auto input3 = inputs_.at(3); - auto output0 = outputs_.at(0); + auto input = in_tensors_.at(0); + auto input1 = in_tensors_.at(1); + auto input2 = in_tensors_.at(2); + auto input3 = in_tensors_.at(3); + auto output0 = out_tensors_.at(0); input_data_ = reinterpret_cast(input->Data()); total_number_ = reinterpret_cast(input1->Data()); @@ -66,7 +66,7 @@ int SparseToDenseCPUKernel::Run() { dnum_ = reinterpret_cast(input3->Data()); sp_num_ = static_cast(input->ElementsNum() / 2); - output_data = reinterpret_cast(outputs_.at(0)->Data()); + output_data = reinterpret_cast(out_tensors_.at(0)->Data()); std::vector temp_shape = output0->shape(); output_shape_ = reinterpret_cast(temp_shape.data()); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense.h b/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense.h index 9d2a589976..3bdb5379a2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense.h @@ -32,7 +32,7 @@ class SparseToDenseCPUKernel : public LiteKernel { const std::vector &outputs, const lite::Context *ctx, const lite::Primitive *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { - s2d_param_ = (reinterpret_cast(opParameter)); + s2d_param_ = (reinterpret_cast(op_parameter_)); } ~SparseToDenseCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/split.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/split.cc index 1eba6ca5ab..4fc192c131 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/split.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/split.cc @@ -53,8 +53,8 @@ int SplitCPUKernel::Split(int task_id) { return RET_OK; } int thread_offset = task_id * thread_n_stride_; - auto ret = DoSplit(input_ptr_, output_ptr_.data(), inputs_.front()->shape().data(), thread_offset, num_unit_thread, - param); + auto ret = + DoSplit(input_ptr_, output_ptr_.data(), in_tensors_.front()->shape().data(), thread_offset, num_unit_thread, param); if (ret != RET_OK) { MS_LOG(ERROR) << "Split error task_id[" << task_id << "] error_code[" << ret << "]"; return RET_ERROR; @@ -78,10 +78,10 @@ int SplitCPUKernel::Run() { MS_LOG(ERROR) << "Prepare failed."; return RET_ERROR; } - auto in_tensor = inputs_.front(); + auto in_tensor = in_tensors_.front(); input_ptr_ = reinterpret_cast(in_tensor->Data()); for (int i = 0; i < param->num_split_; i++) { - output_ptr_.push_back(reinterpret_cast(outputs_.at(i)->Data())); + output_ptr_.push_back(reinterpret_cast(out_tensors_.at(i)->Data())); } ret = LiteBackendParallelLaunch(SplitRun, this, thread_n_num_); if (ret != RET_OK) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/squeeze.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/squeeze.cc index 96c9670d88..6aef3b92dd 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/squeeze.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/squeeze.cc @@ -42,9 +42,9 @@ int SqueezeCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << ret; return ret; } - auto input_ptr = reinterpret_cast(inputs_.front()->Data()); - auto output_ptr = reinterpret_cast(outputs_.front()->Data()); - size_t data_size = inputs_.front()->Size(); + auto input_ptr = 
reinterpret_cast(in_tensors_.front()->Data()); + auto output_ptr = reinterpret_cast(out_tensors_.front()->Data()); + size_t data_size = in_tensors_.front()->Size(); ret = DoSqueeze(input_ptr, output_ptr, data_size); if (ret != RET_OK) { MS_LOG(ERROR) << "Do squeeze failed."; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/stack.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/stack.cc index fabd8d424d..3334983361 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/stack.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/stack.cc @@ -27,8 +27,8 @@ using mindspore::schema::PrimitiveType_Stack; namespace mindspore::kernel { int StackCPUKernel::ReSize() { - StackParameter *param = reinterpret_cast(opParameter); - auto input0_shape = inputs_[0]->shape(); + StackParameter *param = reinterpret_cast(op_parameter_); + auto input0_shape = in_tensors_[0]->shape(); axis_ = param->axis_ < 0 ? param->axis_ + input0_shape.size() : param->axis_; return RET_OK; } @@ -47,12 +47,12 @@ int StackCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << ret; return ret; } - size_t inputs_num = inputs_.size(); - auto input0_shape = inputs_[0]->shape(); - auto *output_data = reinterpret_cast(outputs_[0]->Data()); + size_t inputs_num = in_tensors_.size(); + auto input0_shape = in_tensors_[0]->shape(); + auto *output_data = reinterpret_cast(out_tensors_[0]->Data()); float *inputs[inputs_num]; for (size_t i = 0; i < inputs_num; ++i) { - inputs[i] = reinterpret_cast(inputs_[i]->Data()); + inputs[i] = reinterpret_cast(in_tensors_[i]->Data()); } DoStack(inputs, inputs_num, input0_shape.data(), input0_shape.size(), axis_, output_data); return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/tile.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/tile.cc index 9b6b6336fb..398a747227 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/tile.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/tile.cc @@ -26,13 +26,13 @@ using mindspore::schema::PrimitiveType_Tile; namespace mindspore::kernel { int TileCPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } - auto tile_parameter_ = reinterpret_cast(opParameter); + auto tile_parameter_ = reinterpret_cast(op_parameter_); for (int i = 0; i < tile_parameter_->in_dim_; ++i) { - tile_parameter_->in_shape_[i] = inputs_[0]->shape()[i]; - tile_parameter_->out_shape_[i] = outputs_[0]->shape()[i]; + tile_parameter_->in_shape_[i] = in_tensors_[0]->shape()[i]; + tile_parameter_->out_shape_[i] = out_tensors_[0]->shape()[i]; } ComputeStrides(tile_parameter_->in_shape_, tile_parameter_->in_strides_, tile_parameter_->in_dim_); ComputeStrides(tile_parameter_->out_shape_, tile_parameter_->out_strides_, tile_parameter_->in_dim_); @@ -55,10 +55,10 @@ int TileCPUKernel::Run() { MS_LOG(ERROR) << "Prepare failed."; return RET_ERROR; } - auto input_addr = reinterpret_cast(inputs_.at(0)->Data()); - auto output_addr = reinterpret_cast(outputs_.at(0)->Data()); + auto input_addr = reinterpret_cast(in_tensors_.at(0)->Data()); + auto output_addr = reinterpret_cast(out_tensors_.at(0)->Data()); - Tile(input_addr, output_addr, reinterpret_cast(opParameter)); + Tile(input_addr, output_addr, reinterpret_cast(op_parameter_)); return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/topk.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/topk.cc index ece7442b49..742ce66653 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/topk.cc +++ 
b/mindspore/lite/src/runtime/kernel/arm/fp32/topk.cc @@ -26,11 +26,11 @@ using mindspore::schema::PrimitiveType_TopK; namespace mindspore::kernel { int TopKCPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } - TopkParameter *parameter = reinterpret_cast(opParameter); - lite::tensor::Tensor *input = inputs_.at(0); + TopkParameter *parameter = reinterpret_cast(op_parameter_); + lite::tensor::Tensor *input = in_tensors_.at(0); parameter->last_dim_size_ = input->shape()[input->shape().size() - 1]; parameter->loop_num_ = 1; for (int i = 0; i < input->shape().size() - 1; ++i) { @@ -53,11 +53,11 @@ int TopKCPUKernel::Run() { MS_LOG(ERROR) << "Prepare failed."; return RET_ERROR; } - auto input_data = reinterpret_cast(inputs_.at(0)->Data()); - auto output_data = reinterpret_cast(outputs_.at(0)->Data()); - auto output_index = reinterpret_cast(outputs_.at(1)->Data()); + auto input_data = reinterpret_cast(in_tensors_.at(0)->Data()); + auto output_data = reinterpret_cast(out_tensors_.at(0)->Data()); + auto output_index = reinterpret_cast(out_tensors_.at(1)->Data()); - Topk(input_data, output_data, output_index, reinterpret_cast(opParameter)); + Topk(input_data, output_data, output_index, reinterpret_cast(op_parameter_)); return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/topk.h b/mindspore/lite/src/runtime/kernel/arm/fp32/topk.h index f07d2847fc..9dd54a95af 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/topk.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/topk.h @@ -28,7 +28,7 @@ class TopKCPUKernel : public LiteKernel { const lite::Primitive *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~TopKCPUKernel() override { - TopkParameter *parameter = reinterpret_cast(opParameter); + TopkParameter *parameter = reinterpret_cast(op_parameter_); free(parameter->topk_node_list_); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.cc index 2a919ad39f..f08eb0b4e4 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.cc @@ -39,9 +39,9 @@ int TransposeCPUKernel::Init() { } int TransposeCPUKernel::ReSize() { - auto &inTensor = inputs_.front(); - auto &outTensor = outputs_.front(); - auto param = reinterpret_cast(opParameter); + auto &inTensor = in_tensors_.front(); + auto &outTensor = out_tensors_.front(); + auto param = reinterpret_cast(op_parameter_); auto in_shape = inTensor->shape(); auto out_shape = outTensor->shape(); param->strides_[param->num_axes_ - 1] = 1; @@ -60,10 +60,10 @@ int TransposeCPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << ret; return ret; } - MS_ASSERT(inputs_.size() == TransposeInputNum); - MS_ASSERT(outputs_.size() == TransposeOutputNum); - auto &inTensor = inputs_.front(); - auto &outTensor = outputs_.front(); + MS_ASSERT(in_tensors_.size() == TransposeInputNum); + MS_ASSERT(out_tensors_.size() == TransposeOutputNum); + auto &inTensor = in_tensors_.front(); + auto &outTensor = out_tensors_.front(); if (inTensor == nullptr || outTensor == nullptr) { MS_LOG(ERROR) << "null pointer dreferencing."; return RET_ERROR; @@ -75,7 +75,8 @@ int TransposeCPUKernel::Run() { auto *input_shape = &in_shape.front(); auto *output_shape = &out_shape.front(); - ret = DoTranspose(in_data, out_data, input_shape, output_shape, reinterpret_cast(opParameter)); + ret = + DoTranspose(in_data, out_data, input_shape, 
output_shape, reinterpret_cast(op_parameter_)); return ret; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/unique.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/unique.cc index 37b28f59d1..c72ecd7b12 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/unique.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/unique.cc @@ -33,16 +33,16 @@ int UniqueCPUKernel::Run() { MS_LOG(ERROR) << "Prepare failed."; return RET_ERROR; } - auto input = reinterpret_cast(inputs_.at(0)->Data()); - auto output0 = reinterpret_cast(outputs_.at(0)->Data()); - auto output1 = reinterpret_cast(outputs_.at(1)->Data()); + auto input = reinterpret_cast(in_tensors_.at(0)->Data()); + auto output0 = reinterpret_cast(out_tensors_.at(0)->Data()); + auto output1 = reinterpret_cast(out_tensors_.at(1)->Data()); int output0_len = 0; - Unique(input, inputs_.at(0)->ElementsNum(), output0, &output0_len, output1); + Unique(input, in_tensors_.at(0)->ElementsNum(), output0, &output0_len, output1); - std::vector out_shape = outputs_.at(0)->shape(); + std::vector out_shape = out_tensors_.at(0)->shape(); out_shape[out_shape.size() - 1] = output0_len; - outputs_.at(0)->set_shape(out_shape); + out_tensors_.at(0)->set_shape(out_shape); return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/unsqueeze.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/unsqueeze.cc index cd1034fcc0..af3e156244 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/unsqueeze.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/unsqueeze.cc @@ -29,7 +29,7 @@ using mindspore::schema::PrimitiveType_Unsqueeze; namespace mindspore::kernel { int UnsqueezeCPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } int ret = ReSize(); @@ -37,7 +37,7 @@ int UnsqueezeCPUKernel::Init() { } int UnsqueezeCPUKernel::ReSize() { - data_size_ = inputs_.at(0)->ElementsNum(); + data_size_ = in_tensors_.at(0)->ElementsNum(); thread_sz_count_ = MSMIN(thread_count_, data_size_); thread_sz_stride_ = UP_DIV(data_size_, thread_sz_count_); return RET_OK; @@ -73,8 +73,8 @@ int UnsqueezeCPUKernel::Run() { MS_LOG(ERROR) << "Prepare failed."; return RET_ERROR; } - in_ptr_ = reinterpret_cast(inputs_.at(0)->Data()); - out_ptr_ = reinterpret_cast(outputs_.at(0)->Data()); + in_ptr_ = reinterpret_cast(in_tensors_.at(0)->Data()); + out_ptr_ = reinterpret_cast(out_tensors_.at(0)->Data()); ret = LiteBackendParallelLaunch(UnsqueezeRun, this, thread_sz_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "UnsqueezeRun error error_code[" << ret << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/unstack.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/unstack.cc index 1d18036953..5254bf2402 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/unstack.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/unstack.cc @@ -25,14 +25,14 @@ using mindspore::schema::PrimitiveType_Unstack; namespace mindspore::kernel { int UnstackCPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } - auto input = inputs_.at(0); + auto input = in_tensors_.at(0); MS_ASSERT(input != nullptr); size_t shape_size = input->shape().size(); - auto para = reinterpret_cast(opParameter); + auto para = reinterpret_cast(op_parameter_); para->pre_dims_ = 1; para->axis_dim_ = 1; para->after_dims_ = 1; @@ -49,7 +49,7 @@ int UnstackCPUKernel::Init() { } } - output_addr_array_ = reinterpret_cast(malloc(sizeof(float *) * outputs_.size())); + 
output_addr_array_ = reinterpret_cast(malloc(sizeof(float *) * out_tensors_.size())); if (output_addr_array_ == nullptr) { MS_LOG(ERROR) << "Failed to malloc memory"; return lite::RET_ERROR; @@ -65,12 +65,12 @@ int UnstackCPUKernel::Run() { MS_LOG(ERROR) << "Prepare failed."; return RET_ERROR; } - float *input = reinterpret_cast(inputs_.at(0)->Data()); - size_t out_num = outputs_.size(); + float *input = reinterpret_cast(in_tensors_.at(0)->Data()); + size_t out_num = out_tensors_.size(); for (size_t i = 0; i < out_num; i++) { - output_addr_array_[i] = reinterpret_cast(outputs_.at(i)->Data()); + output_addr_array_[i] = reinterpret_cast(out_tensors_.at(i)->Data()); } - Unistack(input, output_addr_array_, reinterpret_cast(opParameter)); + Unistack(input, output_addr_array_, reinterpret_cast(op_parameter_)); return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/where.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/where.cc index ad59ff334d..d307db840e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/where.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/where.cc @@ -53,9 +53,9 @@ int WhereCPUKernel::Run() { MS_LOG(ERROR) << "Prepare failed."; return RET_ERROR; } - auto input = inputs_.at(0); - auto input1 = inputs_.at(1); - auto input2 = inputs_.at(2); + auto input = in_tensors_.at(0); + auto input1 = in_tensors_.at(1); + auto input2 = in_tensors_.at(2); int num = input->ElementsNum(); int num1_ = input1->ElementsNum(); int num2_ = input2->ElementsNum(); @@ -63,7 +63,7 @@ int WhereCPUKernel::Run() { input_data = reinterpret_cast(input->Data()); input_data1 = reinterpret_cast(input1->Data()); input_data2 = reinterpret_cast(input2->Data()); - output_data = reinterpret_cast(outputs_.at(0)->Data()); + output_data = reinterpret_cast(out_tensors_.at(0)->Data()); int num_max = num > num1_ ? num : (num1_ > num2_ ? 
num1_ : num2_); where_param_->num_ = num; where_param_->num1_ = num1_; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/where.h b/mindspore/lite/src/runtime/kernel/arm/fp32/where.h index d8bb43de25..61d03bd000 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/where.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/where.h @@ -32,7 +32,7 @@ class WhereCPUKernel : public LiteKernel { const std::vector &outputs, const lite::Context *ctx, const lite::Primitive *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { - where_param_ = reinterpret_cast(opParameter); + where_param_ = reinterpret_cast(op_parameter_); } ~WhereCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike.cc index fc897e43c5..f0a65e07cc 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike.cc @@ -38,9 +38,9 @@ int ZerosLikeCPUKernel::Run() { MS_LOG(ERROR) << "Prepare failed."; return RET_ERROR; } - auto input = inputs_.at(0); + auto input = in_tensors_.at(0); auto input_data = reinterpret_cast(input->Data()); - auto output_data = reinterpret_cast(outputs_.at(0)->Data()); + auto output_data = reinterpret_cast(out_tensors_.at(0)->Data()); ApproximateZerosLike(input_data, output_data, input->ElementsNum()); return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.cc index 121e16279c..8ded0494d7 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.cc @@ -29,9 +29,9 @@ using mindspore::schema::PrimitiveType_Add; namespace mindspore::kernel { int QuantizedAddCPUKernel::Init() { - lite::tensor::Tensor *input0 = inputs_.at(0); - lite::tensor::Tensor *input1 = inputs_.at(1); - lite::tensor::Tensor *output = outputs_.at(0); + lite::tensor::Tensor *input0 = in_tensors_.at(0); + lite::tensor::Tensor *input1 = in_tensors_.at(1); + lite::tensor::Tensor *output = out_tensors_.at(0); MS_ASSERT(input0); MS_ASSERT(input1); MS_ASSERT(output); @@ -81,27 +81,27 @@ int QuantizedAddCPUKernel::Run() { MS_LOG(ERROR) << "Prepare failed."; return RET_ERROR; } - input0_data_ = static_cast(inputs_.at(0)->Data()); - input1_data_ = static_cast(inputs_.at(1)->Data()); - output_data_ = static_cast(outputs_.at(0)->Data()); + input0_data_ = static_cast(in_tensors_.at(0)->Data()); + input1_data_ = static_cast(in_tensors_.at(1)->Data()); + output_data_ = static_cast(out_tensors_.at(0)->Data()); - elements_num_ = inputs_.at(0)->ElementsNum(); + elements_num_ = in_tensors_.at(0)->ElementsNum(); count_unit_ = thread_count_ > 1 ? 
UP_DIV(elements_num_, thread_count_) : elements_num_; - if (inputs_.at(0)->ElementsNum() != inputs_.at(1)->ElementsNum()) { - input0_data_ = static_cast(ctx_->allocator->Malloc(outputs_.at(0)->Size())); - input1_data_ = static_cast(ctx_->allocator->Malloc(outputs_.at(0)->Size())); + if (in_tensors_.at(0)->ElementsNum() != in_tensors_.at(1)->ElementsNum()) { + input0_data_ = static_cast(ctx_->allocator->Malloc(out_tensors_.at(0)->Size())); + input1_data_ = static_cast(ctx_->allocator->Malloc(out_tensors_.at(0)->Size())); ArithmeticParameter tile_para = {0}; - tile_para.ndim_ = outputs_.at(0)->shape().size(); + tile_para.ndim_ = out_tensors_.at(0)->shape().size(); for (size_t i = 0; i < tile_para.ndim_; i++) { - tile_para.in_shape0_[i] = inputs_.at(0)->DimensionSize(i); - tile_para.in_shape1_[i] = inputs_.at(1)->DimensionSize(i); - tile_para.out_shape_[i] = outputs_.at(0)->DimensionSize(i); + tile_para.in_shape0_[i] = in_tensors_.at(0)->DimensionSize(i); + tile_para.in_shape1_[i] = in_tensors_.at(1)->DimensionSize(i); + tile_para.out_shape_[i] = out_tensors_.at(0)->DimensionSize(i); } - TileDimensionsUint8(static_cast(inputs_.at(0)->Data()), static_cast(inputs_.at(1)->Data()), - reinterpret_cast(input0_data_), reinterpret_cast(input1_data_), - &tile_para); + TileDimensionsUint8(static_cast(in_tensors_.at(0)->Data()), + static_cast(in_tensors_.at(1)->Data()), reinterpret_cast(input0_data_), + reinterpret_cast(input1_data_), &tile_para); ret = LiteBackendParallelLaunch(AddInt8Run, this, thread_count_); ctx_->allocator->Free(input0_data_); ctx_->allocator->Free(input1_data_); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.cc index a35d1575f9..80a9e15281 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.cc @@ -29,14 +29,14 @@ int ArgMinMaxInt8CPUKernel::Init() { if (ret != RET_OK) { return ret; } - auto param = reinterpret_cast(opParameter); + auto param = reinterpret_cast(op_parameter_); param->data_type_ = kNumberTypeInt8; - auto *input_tensor = inputs_.at(kInputIndex); + auto *input_tensor = in_tensors_.at(kInputIndex); auto in_quant_args = input_tensor->GetQuantParams(); in_quant_arg_.scale_ = in_quant_args.front().scale; in_quant_arg_.zp_ = in_quant_args.front().zeroPoint; - auto *out_tensor = outputs_.at(kOutputIndex); + auto *out_tensor = out_tensors_.at(kOutputIndex); auto out_quant_args = out_tensor->GetQuantParams(); out_quant_arg_.scale_ = out_quant_args.front().scale; out_quant_arg_.zp_ = out_quant_args.front().zeroPoint; @@ -46,9 +46,7 @@ int ArgMinMaxInt8CPUKernel::Init() { return ReSize(); } -int ArgMinMaxInt8CPUKernel::ReSize() { - return ArgMinMaxBaseCPUKernel::ReSize(); -} +int ArgMinMaxInt8CPUKernel::ReSize() { return ArgMinMaxBaseCPUKernel::ReSize(); } int ArgMinMaxInt8CPUKernel::Run() { auto ret = Prepare(); @@ -56,31 +54,31 @@ int ArgMinMaxInt8CPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << ret; return ret; } - auto input = inputs_.at(0); + auto input = in_tensors_.at(0); - const int8_t *input_data = reinterpret_cast(inputs_.at(0)->Data()); - int8_t *output_data = reinterpret_cast(outputs_.at(0)->Data()); + const int8_t *input_data = reinterpret_cast(in_tensors_.at(0)->Data()); + int8_t *output_data = reinterpret_cast(out_tensors_.at(0)->Data()); auto in_shape = input->shape().data(); - auto param = reinterpret_cast(opParameter); + auto param = reinterpret_cast(op_parameter_); if 
(param->topk_ == 1) { ArgMinMaxQuant(input_data, output_data, in_shape, param, &in_quant_arg_, &out_quant_arg_); return RET_OK; } switch (param->axis_) { - case 0: - ArgMinMaxDim0(input_data, output_data, in_shape, param, &in_quant_arg_, &out_quant_arg_); - break; - case 1: - ArgMinMaxDim1(input_data, output_data, in_shape, param, &in_quant_arg_, &out_quant_arg_); - break; - case 2: - ArgMinMaxDim2(input_data, output_data, in_shape, param, &in_quant_arg_, &out_quant_arg_); - break; - case 3: - ArgMinMaxDim3(input_data, output_data, in_shape, param, &in_quant_arg_, &out_quant_arg_); - break; + case 0: + ArgMinMaxDim0(input_data, output_data, in_shape, param, &in_quant_arg_, &out_quant_arg_); + break; + case 1: + ArgMinMaxDim1(input_data, output_data, in_shape, param, &in_quant_arg_, &out_quant_arg_); + break; + case 2: + ArgMinMaxDim2(input_data, output_data, in_shape, param, &in_quant_arg_, &out_quant_arg_); + break; + case 3: + ArgMinMaxDim3(input_data, output_data, in_shape, param, &in_quant_arg_, &out_quant_arg_); + break; } FreeTmpMemory(); return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.h index b8a8762637..b195ce4c95 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.h @@ -24,7 +24,7 @@ namespace mindspore::kernel { class ArgMinMaxInt8CPUKernel : public ArgMinMaxBaseCPUKernel { public: ArgMinMaxInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, - const std::vector &outputs, const lite::Context *ctx, + const std::vector &outputs, const lite::Context *ctx, const lite::Primitive *primitive) : ArgMinMaxBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} @@ -33,6 +33,7 @@ class ArgMinMaxInt8CPUKernel : public ArgMinMaxBaseCPUKernel { int Init() override; int ReSize() override; int Run() override; + private: QuantArg in_quant_arg_; QuantArg out_quant_arg_; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.cc index 6614d26396..c5ab89807f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.cc @@ -24,16 +24,16 @@ using mindspore::kernel::KERNEL_ARCH::kCPU; using mindspore::lite::KernelRegistrar; -using mindspore::lite::RET_PARAM_INVALID; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; +using mindspore::lite::RET_PARAM_INVALID; using mindspore::schema::PrimitiveType_Equal; -using mindspore::schema::PrimitiveType_NotEqual; -using mindspore::schema::PrimitiveType_LessEqual; using mindspore::schema::PrimitiveType_Greater; using mindspore::schema::PrimitiveType_GreaterEqual; using mindspore::schema::PrimitiveType_Less; +using mindspore::schema::PrimitiveType_LessEqual; +using mindspore::schema::PrimitiveType_NotEqual; namespace mindspore::kernel { namespace { @@ -49,7 +49,7 @@ int ArithmeticsInt8Launch(int thread_id, LiteParallelGroupEnv *penv, void *cdata } // namespace void ArithmeticInt8CPUKernel::FreeTileData() { - auto param = reinterpret_cast(opParameter); + auto param = reinterpret_cast(op_parameter_); if (!param->broadcasting_) { return; } @@ -77,7 +77,7 @@ ArithmeticInt8CPUKernel::~ArithmeticInt8CPUKernel() { } int ArithmeticInt8CPUKernel::Init() { - switch (opParameter->type_) { + switch (op_parameter_->type_) { case PrimitiveType_Equal: arithmetic_run_ = ElementEqual; break; @@ -97,7 +97,7 @@ 
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.cc index 6614d26396..c5ab89807f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.cc @@ -24,16 +24,16 @@ using mindspore::kernel::KERNEL_ARCH::kCPU; using mindspore::lite::KernelRegistrar; -using mindspore::lite::RET_PARAM_INVALID; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; +using mindspore::lite::RET_PARAM_INVALID; using mindspore::schema::PrimitiveType_Equal; -using mindspore::schema::PrimitiveType_NotEqual; -using mindspore::schema::PrimitiveType_LessEqual; using mindspore::schema::PrimitiveType_Greater; using mindspore::schema::PrimitiveType_GreaterEqual; using mindspore::schema::PrimitiveType_Less; +using mindspore::schema::PrimitiveType_LessEqual; +using mindspore::schema::PrimitiveType_NotEqual; namespace mindspore::kernel { namespace { @@ -49,7 +49,7 @@ int ArithmeticsInt8Launch(int thread_id, LiteParallelGroupEnv *penv, void *cdata } // namespace void ArithmeticInt8CPUKernel::FreeTileData() { - auto param = reinterpret_cast<ArithmeticParameter *>(opParameter); + auto param = reinterpret_cast<ArithmeticParameter *>(op_parameter_); if (!param->broadcasting_) { return; } @@ -77,7 +77,7 @@ ArithmeticInt8CPUKernel::~ArithmeticInt8CPUKernel() { } int ArithmeticInt8CPUKernel::Init() { - switch (opParameter->type_) { + switch (op_parameter_->type_) { case PrimitiveType_Equal: arithmetic_run_ = ElementEqual; break; @@ -97,7 +97,7 @@ int ArithmeticInt8CPUKernel::Init() { arithmetic_run_ = ElementGreaterEqual; break; default: - MS_LOG(ERROR) << "Error Operator type " << opParameter->type_; + MS_LOG(ERROR) << "Error Operator type " << op_parameter_->type_; arithmetic_run_ = nullptr; return RET_PARAM_INVALID; } @@ -110,8 +110,8 @@ int ArithmeticInt8CPUKernel::Init() { int ArithmeticInt8CPUKernel::ReSize() { FreeTileData(); - auto data_size = outputs_[0]->Size(); - auto param = reinterpret_cast<ArithmeticParameter *>(opParameter); + auto data_size = out_tensors_[0]->Size(); + auto param = reinterpret_cast<ArithmeticParameter *>(op_parameter_); if (param->broadcasting_) { if (context_->allocator != nullptr) { tile_data0_ = reinterpret_cast<int8_t *>(context_->allocator->Malloc(data_size)); @@ -128,14 +128,14 @@ int ArithmeticInt8CPUKernel::ReSize() { } int ArithmeticInt8CPUKernel::DoArithmetic(int thread_id) { - auto input0_data = reinterpret_cast<int8_t *>(inputs_[0]->Data()); - auto input1_data1 = reinterpret_cast<int8_t *>(inputs_[1]->Data()); - auto output_data = reinterpret_cast(outputs_[0]->Data()); - auto element_num = outputs_[0]->ElementsNum(); - auto param = reinterpret_cast<ArithmeticParameter *>(opParameter); + auto input0_data = reinterpret_cast<int8_t *>(in_tensors_[0]->Data()); + auto input1_data1 = reinterpret_cast<int8_t *>(in_tensors_[1]->Data()); + auto output_data = reinterpret_cast(out_tensors_[0]->Data()); + auto element_num = out_tensors_[0]->ElementsNum(); + auto param = reinterpret_cast<ArithmeticParameter *>(op_parameter_); if (param->broadcasting_ && arithmetic_run_ != nullptr) { MS_ASSERT(opParameter->thread_num_ != 0); - int stride = UP_DIV(element_num, opParameter->thread_num_); + int stride = UP_DIV(element_num, op_parameter_->thread_num_); int count = MSMIN(stride, element_num - stride * thread_id); if (count <= 0) { return RET_OK; @@ -166,13 +166,13 @@ int ArithmeticInt8CPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << ret; return ret; } - auto param = reinterpret_cast<ArithmeticParameter *>(opParameter); + auto param = reinterpret_cast<ArithmeticParameter *>(op_parameter_); if (param->broadcasting_) { - auto input_data0 = reinterpret_cast<int8_t *>(inputs_[0]->Data()); - auto input_data1 = reinterpret_cast<int8_t *>(inputs_[1]->Data()); + auto input_data0 = reinterpret_cast<int8_t *>(in_tensors_[0]->Data()); + auto input_data1 = reinterpret_cast<int8_t *>(in_tensors_[1]->Data()); TileDimensionsInt8(input_data0, input_data1, tile_data0_, tile_data1_, param); } - int error_code = LiteBackendParallelLaunch(ArithmeticsInt8Launch, this, opParameter->thread_num_); + int error_code = LiteBackendParallelLaunch(ArithmeticsInt8Launch, this, op_parameter_->thread_num_); if (error_code != RET_OK) { MS_LOG(ERROR) << "Arithmetic launch function fail!
ret: " << error_code; return RET_ERROR; @@ -195,8 +195,8 @@ kernel::LiteKernel *CpuArithmeticInt8KernelCreator(const std::vectorInit(); if (ret != RET_OK) { - MS_LOG(ERROR) << "Init kernel failed, name: " << parameter->name_ << ", type: " - << schema::EnumNamePrimitiveType(static_cast(parameter->type_)); + MS_LOG(ERROR) << "Init kernel failed, name: " << parameter->name_ + << ", type: " << schema::EnumNamePrimitiveType(static_cast(parameter->type_)); delete kernel; return nullptr; } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc index bc59a7f2d0..c1e8fef140 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc @@ -29,16 +29,16 @@ using mindspore::lite::RET_OK; namespace mindspore::kernel { int ArithmeticSelfInt8CPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } int ret = ReSize(); - auto *input_tensor = inputs_.at(kInputIndex); + auto *input_tensor = in_tensors_.at(kInputIndex); auto in_quant_args = input_tensor->GetQuantParams(); para_->quant_arg_.in_args_.scale_ = in_quant_args.front().scale; para_->quant_arg_.in_args_.zp_ = in_quant_args.front().zeroPoint * (-1); - auto *out_tensor = outputs_.at(kOutputIndex); + auto *out_tensor = out_tensors_.at(kOutputIndex); auto out_quant_args = out_tensor->GetQuantParams(); para_->quant_arg_.out_args_.scale_ = out_quant_args.front().scale; para_->quant_arg_.out_args_.zp_ = out_quant_args.front().zeroPoint; @@ -61,7 +61,7 @@ int ArithmeticSelfInt8CPUKernel::Init() { } int ArithmeticSelfInt8CPUKernel::ReSize() { - data_size_ = inputs_[0]->ElementsNum(); + data_size_ = in_tensors_[0]->ElementsNum(); thread_sz_count_ = MSMIN(thread_count_, data_size_); thread_sz_stride_ = UP_DIV(data_size_, thread_sz_count_); return RET_OK; @@ -102,8 +102,8 @@ int ArithmeticSelfInt8CPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << ret; return ret; } - auto input_tensor = inputs_.at(0); - auto out_tensor = outputs_.at(0); + auto input_tensor = in_tensors_.at(0); + auto out_tensor = out_tensors_.at(0); in_ptr_ = reinterpret_cast(input_tensor->Data()); out_ptr_ = reinterpret_cast(out_tensor->Data()); ret = LiteBackendParallelLaunch(ArithmeticSelfInt8Runs, this, thread_sz_count_); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.h index a507313a40..c25cd12274 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.h @@ -25,17 +25,17 @@ #include "include/context.h" using mindspore::lite::Context; -using mindspore::schema::PrimitiveType_Ceil; using mindspore::schema::PrimitiveType_Abs; -using mindspore::schema::PrimitiveType_Sin; +using mindspore::schema::PrimitiveType_Ceil; using mindspore::schema::PrimitiveType_Cos; +using mindspore::schema::PrimitiveType_Floor; using mindspore::schema::PrimitiveType_Log; -using mindspore::schema::PrimitiveType_Sqrt; -using mindspore::schema::PrimitiveType_Rsqrt; -using mindspore::schema::PrimitiveType_Square; using mindspore::schema::PrimitiveType_LogicalNot; -using mindspore::schema::PrimitiveType_Floor; using mindspore::schema::PrimitiveType_Round; +using mindspore::schema::PrimitiveType_Rsqrt; +using mindspore::schema::PrimitiveType_Sin; +using 
mindspore::schema::PrimitiveType_Sqrt; +using mindspore::schema::PrimitiveType_Square; namespace mindspore::kernel { class ArithmeticSelfInt8CPUKernel : public LiteKernel { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.cc index 8bd5e1d5cb..c9c8d0f306 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.cc @@ -29,12 +29,12 @@ int BatchToSpaceInt8CPUKernel::Init() { if (ret != RET_OK) { return ret; } - auto *input_tensor = inputs_.at(kInputIndex); + auto *input_tensor = in_tensors_.at(kInputIndex); auto in_quant_args = input_tensor->GetQuantParams(); in_quant_arg_.scale_ = in_quant_args.front().scale; in_quant_arg_.zp_ = in_quant_args.front().zeroPoint; - auto *out_tensor = outputs_.at(kOutputIndex); + auto *out_tensor = out_tensors_.at(kOutputIndex); auto out_quant_args = out_tensor->GetQuantParams(); out_quant_arg_.scale_ = out_quant_args.front().scale; out_quant_arg_.zp_ = out_quant_args.front().zeroPoint; @@ -44,9 +44,7 @@ int BatchToSpaceInt8CPUKernel::Init() { return ReSize(); } -int BatchToSpaceInt8CPUKernel::ReSize() { - return BatchToSpaceBaseCPUKernel::ReSize(); -} +int BatchToSpaceInt8CPUKernel::ReSize() { return BatchToSpaceBaseCPUKernel::ReSize(); } int BatchToSpaceInt8CPUKernel::Run() { auto ret = Prepare(); @@ -54,13 +52,13 @@ int BatchToSpaceInt8CPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << ret; return ret; } - auto input = inputs_[0]; - auto output = outputs_[0]; + auto input = in_tensors_[0]; + auto output = out_tensors_[0]; const int8_t *input_data = reinterpret_cast(input->Data()); int8_t *output_data = reinterpret_cast(output->Data()); auto in_shape = input->shape(); auto out_shape = output->shape(); - BatchToSpaceParameter *param = reinterpret_cast(this->opParameter); + BatchToSpaceParameter *param = reinterpret_cast(this->op_parameter_); if (in_quant_arg_.scale_ == out_quant_arg_.scale_ && in_quant_arg_.zp_ == out_quant_arg_.zp_) { if (IsNoCrop()) { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.h index 94bb228084..19b0ba4290 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.h @@ -32,6 +32,7 @@ class BatchToSpaceInt8CPUKernel : public BatchToSpaceBaseCPUKernel { int Init() override; int ReSize() override; int Run() override; + private: QuantArg in_quant_arg_; QuantArg out_quant_arg_; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/bias_add_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/bias_add_int8.cc index 1c6afa9d4b..e14d195fda 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/bias_add_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/bias_add_int8.cc @@ -27,11 +27,11 @@ using mindspore::schema::PrimitiveType_BiasAdd; namespace mindspore::kernel { int BiasAddInt8CPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } - auto bias_param = reinterpret_cast(opParameter); - auto dims = inputs_[0]->shape(); + auto bias_param = reinterpret_cast(op_parameter_); + auto dims = in_tensors_[0]->shape(); bias_param->ndim_ = dims.size(); for (int i = 0; i < bias_param->ndim_; i++) { bias_param->in_shape0_[i] = dims[i]; @@ -50,17 +50,18 @@ int BiasAddInt8CPUKernel::Run() { 
MS_LOG(ERROR) << "Prepare failed."; return RET_ERROR; } - auto in = reinterpret_cast(inputs_.at(0)->Data()); - auto bias = reinterpret_cast(inputs_.at(1)->Data()); - auto out = reinterpret_cast(outputs_.at(0)->Data()); - size_t data_size = inputs_.at(0)->ElementsNum(); + auto in = reinterpret_cast(in_tensors_.at(0)->Data()); + auto bias = reinterpret_cast(in_tensors_.at(1)->Data()); + auto out = reinterpret_cast(out_tensors_.at(0)->Data()); + size_t data_size = in_tensors_.at(0)->ElementsNum(); auto tile_in = static_cast(ctx_->allocator->Malloc(data_size)); auto tile_bias = static_cast(ctx_->allocator->Malloc(data_size)); if (tile_in == nullptr || tile_bias == nullptr) { MS_LOG(ERROR) << "Failed to malloc momery"; return NNACL_ERR; } - BroadcastAddInt8(in, bias, tile_in, tile_bias, out, data_size, reinterpret_cast(opParameter)); + BroadcastAddInt8(in, bias, tile_in, tile_bias, out, data_size, + reinterpret_cast(op_parameter_)); ctx_->allocator->Free(tile_in); ctx_->allocator->Free(tile_bias); return NNACL_OK; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.cc index 2b40b59d4c..a4690721ed 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.cc @@ -28,7 +28,7 @@ namespace mindspore::kernel { int ConcatInt8CPUKernel::Init() { ConcatBaseCPUKernel::Init(); - auto input_num = inputs_.size(); + auto input_num = in_tensors_.size(); concat_param_->quant_arg_.in_args_ = reinterpret_cast(ctx_->allocator->Malloc(sizeof(QuantArg) * input_num)); if (concat_param_->quant_arg_.in_args_ == nullptr) { @@ -36,13 +36,13 @@ int ConcatInt8CPUKernel::Init() { return RET_ERROR; } for (size_t i = 0; i < input_num; i++) { - auto *input_tensor = inputs_.at(i); + auto *input_tensor = in_tensors_.at(i); auto quant_args = input_tensor->GetQuantParams(); concat_param_->quant_arg_.in_args_[i].scale_ = quant_args.front().scale; concat_param_->quant_arg_.in_args_[i].zp_ = quant_args.front().zeroPoint; } - auto output_tensor = outputs_.at(kOutputIndex); + auto output_tensor = out_tensors_.at(kOutputIndex); auto quant_args = output_tensor->GetQuantParams(); concat_param_->quant_arg_.out_args_.scale_ = quant_args.front().scale; concat_param_->quant_arg_.out_args_.zp_ = quant_args.front().zeroPoint; @@ -64,20 +64,20 @@ int ConcatInt8CPUKernel::ReSize() { if (concat_param_->input_shapes_ != nullptr) { ctx_->allocator->Free(concat_param_->input_shapes_); } - auto input_num = inputs_.size(); + auto input_num = in_tensors_.size(); concat_param_->input_num_ = input_num; concat_param_->input_shapes_ = reinterpret_cast(ctx_->allocator->Malloc(sizeof(int *) * input_num)); for (size_t i = 0; i < input_num; i++) { - concat_param_->input_shapes_[i] = reinterpret_cast(inputs_.at(i)->shape().data()); + concat_param_->input_shapes_[i] = reinterpret_cast(in_tensors_.at(i)->shape().data()); } before_axis_size = 1; for (int i = 0; i < axis_; i++) { - before_axis_size *= outputs_.at(kOutputIndex)->DimensionSize(i); + before_axis_size *= out_tensors_.at(kOutputIndex)->DimensionSize(i); } int64_t after_axis_size = 1; - auto output_tensor = outputs_.at(kOutputIndex); + auto output_tensor = out_tensors_.at(kOutputIndex); int output_dim = output_tensor->shape().size(); concat_param_->output_shapes_ = output_tensor->shape().data(); for (size_t i = axis_ + 1; i < output_dim; i++) { @@ -103,9 +103,9 @@ int ConcatInt8CPUKernel::Run() { return RET_ERROR; } for (size_t i = 0; i < input_num; i++) { - 
input_data_[i] = static_cast(inputs_.at(i)->Data()); + input_data_[i] = static_cast(in_tensors_.at(i)->Data()); } - output_data_ = reinterpret_cast(outputs_.at(0)->Data()); + output_data_ = reinterpret_cast(out_tensors_.at(0)->Data()); ret = LiteBackendParallelLaunch(ConcatInt8Run, this, thread_count_); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.cc index 530e1275c3..3e3b7a6927 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.cc @@ -78,7 +78,7 @@ int Convolution3x3Int8CPUKernel::InitWeightBias() { return RET_ERROR; } memset(transformed_filter_addr_, 0, transformed_size); - auto weight_data = reinterpret_cast(inputs_.at(kWeightIndex)->Data()); + auto weight_data = reinterpret_cast(in_tensors_.at(kWeightIndex)->Data()); ProcessFilterUint8(weight_data, transformed_filter_addr_, conv_param_); // init bias @@ -89,11 +89,11 @@ int Convolution3x3Int8CPUKernel::InitWeightBias() { return RET_ERROR; } memset(bias_data_, 0, new_bias_size); - if (inputs_.size() == kInputSize2) { - auto ori_bias_addr = reinterpret_cast(inputs_.at(kBiasIndex)->Data()); + if (in_tensors_.size() == kInputSize2) { + auto ori_bias_addr = reinterpret_cast(in_tensors_.at(kBiasIndex)->Data()); memcpy(bias_data_, ori_bias_addr, output_channel * sizeof(int32_t)); } else { - MS_ASSERT(inputs_.size() == kInputSize1); + MS_ASSERT(in_tensors_.size() == kInputSize1); } return RET_OK; } @@ -156,13 +156,13 @@ int Convolution3x3Int8CPUKernel::InitTmpBuffer() { } void Convolution3x3Int8CPUKernel::ConfigInputOutput() { - auto output_tensor = outputs_.at(kOutputIndex); + auto output_tensor = out_tensors_.at(kOutputIndex); output_tensor->SetFormat(schema::Format_NHWC); } int Convolution3x3Int8CPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } auto ret = ConvolutionBaseCPUKernel::Init(); @@ -219,7 +219,7 @@ int Convolution3x3Int8CPUKernel::ReSize() { } int Convolution3x3Int8CPUKernel::RunImpl(int task_id) { - auto output_addr = reinterpret_cast(outputs_.at(kOutputIndex)->Data()); + auto output_addr = reinterpret_cast(out_tensors_.at(kOutputIndex)->Data()); Conv3x3Int8(input_data_, transformed_filter_addr_, reinterpret_cast(bias_data_), output_addr, tile_buffer_, block_unit_buffer_, tmp_dst_buffer_, tmp_out_, task_id, conv_param_); return RET_OK; @@ -241,7 +241,7 @@ int Convolution3x3Int8CPUKernel::Run() { MS_LOG(ERROR) << "Prepare failed."; return RET_ERROR; } - auto input_addr = reinterpret_cast(inputs_.at(kInputIndex)->Data()); + auto input_addr = reinterpret_cast(in_tensors_.at(kInputIndex)->Data()); PackInputToC8Int8(input_addr, input_data_, conv_param_); int error_code = LiteBackendParallelLaunch(Convolution3x3Int8Impl, this, thread_count_); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.cc index 52e4ab6e53..06edc256e1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.cc @@ -49,7 +49,7 @@ ConvolutionDepthwiseInt8CPUKernel::~ConvolutionDepthwiseInt8CPUKernel() { int ConvolutionDepthwiseInt8CPUKernel::InitWeightBias() { // init weight, int8 -> int16 // o, h, w, i -> o/8, h, w, i, 8; o == group, i == 1 - auto origin_weight = 
reinterpret_cast(inputs_[kWeightIndex]->Data()); + auto origin_weight = reinterpret_cast(in_tensors_[kWeightIndex]->Data()); int OC4 = UP_DIV(conv_param_->output_channel_, C4NUM); int pack_weight_size = C4NUM * OC4 * conv_param_->kernel_h_ * conv_param_->kernel_w_; packed_weight_ = reinterpret_cast(malloc(pack_weight_size * sizeof(int16_t))); @@ -67,8 +67,8 @@ int ConvolutionDepthwiseInt8CPUKernel::InitWeightBias() { return RET_ERROR; } memset(bias_data_, 0, C4NUM * OC4 * sizeof(int32_t)); - if (inputs_.size() == kInputSize2) { - auto ori_bias = reinterpret_cast(inputs_.at(kBiasIndex)->Data()); + if (in_tensors_.size() == kInputSize2) { + auto ori_bias = reinterpret_cast(in_tensors_.at(kBiasIndex)->Data()); memcpy(bias_data_, ori_bias, conv_param_->output_channel_ * sizeof(int32_t)); } return RET_OK; @@ -100,7 +100,7 @@ int ConvolutionDepthwiseInt8CPUKernel::InitBuffer() { int ConvolutionDepthwiseInt8CPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } // conv base init @@ -185,11 +185,11 @@ int ConvolutionDepthwiseInt8CPUKernel::Run() { } // pack input, assume input format: NHWC -> NHWC4 - auto input_tensor = inputs_.at(kInputIndex); + auto input_tensor = in_tensors_.at(kInputIndex); auto input_addr = reinterpret_cast(input_tensor->Data()); PackDepthwiseInt8Input(input_addr, packed_input_, conv_param_); - auto output_addr = reinterpret_cast(outputs_.at(kOutputIndex)->Data()); + auto output_addr = reinterpret_cast(out_tensors_.at(kOutputIndex)->Data()); if (!need_align_) { packed_output_ = output_addr; } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.cc index 8aac86c288..99ecb17c77 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.cc @@ -73,7 +73,7 @@ int ConvolutionInt8CPUKernel::InitWeightBias() { int32_t input_zp = conv_param_->conv_quant_arg_.quant_args_[0][0].zp_; // init weight - auto origin_weight = reinterpret_cast(inputs_.at(kWeightIndex)->Data()); + auto origin_weight = reinterpret_cast(in_tensors_.at(kWeightIndex)->Data()); packed_weight_ = reinterpret_cast(malloc(pack_weight_size)); if (packed_weight_ == nullptr) { MS_LOG(ERROR) << "malloc packed_weight_ failed."; @@ -91,11 +91,11 @@ int ConvolutionInt8CPUKernel::InitWeightBias() { return RET_ERROR; } memset(bias_data_, 0, oc4 * C4NUM * sizeof(int32_t)); - if (inputs_.size() == kInputSize2) { - auto ori_bias = reinterpret_cast(inputs_.at(kBiasIndex)->Data()); + if (in_tensors_.size() == kInputSize2) { + auto ori_bias = reinterpret_cast(in_tensors_.at(kBiasIndex)->Data()); memcpy(bias_data_, ori_bias, out_channel * sizeof(int32_t)); } else { - MS_ASSERT(inputs_.size() == kInputSize1); + MS_ASSERT(in_tensors_.size() == kInputSize1); } auto *bias_data = reinterpret_cast(bias_data_); int c4_kernel_plane_size = kernel_plane * ic4 * C4NUM; @@ -172,7 +172,7 @@ int ConvolutionInt8CPUKernel::InitWeightBiasOpt() { int32_t input_zp = conv_param_->conv_quant_arg_.quant_args_[0][0].zp_; // init weight - auto origin_weight = reinterpret_cast(inputs_.at(kWeightIndex)->Data()); + auto origin_weight = reinterpret_cast(in_tensors_.at(kWeightIndex)->Data()); packed_weight_ = reinterpret_cast(malloc(pack_weight_size)); if (packed_weight_ == nullptr) { MS_LOG(ERROR) << "malloc packed_weight_ failed."; @@ -190,11 +190,11 @@ int ConvolutionInt8CPUKernel::InitWeightBiasOpt() { return 
RET_ERROR; } memset(bias_data_, 0, oc4 * C4NUM * sizeof(int32_t)); - if (inputs_.size() == kInputSize2) { - auto ori_bias = reinterpret_cast(inputs_.at(kBiasIndex)->Data()); + if (in_tensors_.size() == kInputSize2) { + auto ori_bias = reinterpret_cast(in_tensors_.at(kBiasIndex)->Data()); memcpy(bias_data_, ori_bias, out_channel * sizeof(int32_t)); } else { - MS_ASSERT(inputs_.size() == kInputSize1); + MS_ASSERT(in_tensors_.size() == kInputSize1); } auto *bias_data = reinterpret_cast(bias_data_); int c4_kernel_plane_size = kernel_plane * ic4 * C4NUM; @@ -258,9 +258,9 @@ int ConvolutionInt8CPUKernel::InitTmpBufferOpt() { } void ConvolutionInt8CPUKernel::ConfigInputOutput() { - auto output_tensor = outputs_.at(kOutputIndex); + auto output_tensor = out_tensors_.at(kOutputIndex); output_tensor->SetFormat(schema::Format_NHWC); - auto input_tensor = inputs_.at(kInputIndex); + auto input_tensor = in_tensors_.at(kInputIndex); auto ret = CheckLayout(input_tensor); if (ret != RET_OK) { MS_LOG(ERROR) << "Check layout failed."; @@ -270,7 +270,7 @@ void ConvolutionInt8CPUKernel::ConfigInputOutput() { int ConvolutionInt8CPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } auto ret = ConvolutionBaseCPUKernel::Init(); @@ -359,7 +359,7 @@ int ConvolutionInt8CPUKernel::ReSize() { } int ConvolutionInt8CPUKernel::RunImpl(int task_id) { - auto output_addr = reinterpret_cast(outputs_.at(kOutputIndex)->Data()); + auto output_addr = reinterpret_cast(out_tensors_.at(kOutputIndex)->Data()); if (support_optimize_) { ConvInt8Opt(reinterpret_cast(nhwc4_input_), packed_input_, packed_weight_, reinterpret_cast(bias_data_), tmp_dst_, tmp_out_, output_addr, input_sum_, task_id, @@ -388,7 +388,7 @@ int ConvolutionInt8CPUKernel::Run() { MS_LOG(ERROR) << "Prepare failed."; return RET_ERROR; } - auto input_tensor = inputs_.at(kInputIndex); + auto input_tensor = in_tensors_.at(kInputIndex); auto ori_input_data = input_tensor->Data(); int in_batch = conv_param_->input_batch_; int in_h = conv_param_->input_h_; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.cc index 3b50f8d69f..04ed3ba4bd 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.cc @@ -31,12 +31,12 @@ int CropInt8CPUKernel::Init() { if (ret != RET_OK) { return ret; } - auto *input_tensor = inputs_.at(kInputIndex); + auto *input_tensor = in_tensors_.at(kInputIndex); auto in_quant_args = input_tensor->GetQuantParams(); crop_para_->quant_arg.in_args_.scale_ = in_quant_args.front().scale; crop_para_->quant_arg.in_args_.zp_ = in_quant_args.front().zeroPoint; - auto *out_tensor = outputs_.at(kOutputIndex); + auto *out_tensor = out_tensors_.at(kOutputIndex); auto out_quant_args = out_tensor->GetQuantParams(); crop_para_->quant_arg.out_args_.scale_ = out_quant_args.front().scale; crop_para_->quant_arg.out_args_.zp_ = out_quant_args.front().zeroPoint; @@ -50,9 +50,9 @@ int CropInt8CPUKernel::Init() { } int CropInt8CPUKernel::ReSize() { - auto *input_tensor = inputs_.at(kInputIndex); + auto *input_tensor = in_tensors_.at(kInputIndex); crop_para_->in_shape_ = input_tensor->shape().data(); - auto *out_tensor = outputs_.at(kOutputIndex); + auto *out_tensor = out_tensors_.at(kOutputIndex); crop_para_->out_shape_ = out_tensor->shape().data(); auto input_dim = input_tensor->shape().size(); MS_ASSERT(input_dim <= CROP_OFFSET_MAX_SIZE); @@ -98,8 +98,8 
@@ int CropInt8Run(int task_id, LiteParallelGroupEnv *penv, void *cdata) { } int CropInt8CPUKernel::DoExecute(int task_id) { - auto input_tensor = inputs_.at(kInputIndex); - auto out_tensor = outputs_.at(kOutputIndex); + auto input_tensor = in_tensors_.at(kInputIndex); + auto out_tensor = out_tensors_.at(kOutputIndex); int8_t *input_data = reinterpret_cast<int8_t *>(input_tensor->Data()); int8_t *output_data = reinterpret_cast<int8_t *>(out_tensor->Data()); Crop(input_data, output_data, task_id, crop_para_); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.h index 598dde3490..2583c8708f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.h @@ -32,8 +32,8 @@ class CropInt8CPUKernel : public CropBaseCPUKernel { const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx, const lite::Primitive *primitive) : CropBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) { - crop_para_ = reinterpret_cast<CropParameter *>(opParameter); - crop_para_->thread_count_ = opParameter->thread_num_; + crop_para_ = reinterpret_cast<CropParameter *>(op_parameter_); + crop_para_->thread_count_ = op_parameter_->thread_num_; } ~CropInt8CPUKernel() = default;
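InitWeightBias() below packs depthwise weights into channel blocks of four, so buffer sizes are computed from the channel count rounded up to a multiple of C4NUM. A standalone sketch of that size arithmetic (the 10-channel 3x3 figures are hypothetical):

#include <cstdio>

// Same size arithmetic as pack_weight_size in InitWeightBias() below:
// channels are padded up to a multiple of 4 before packing.
#define UP_DIV(x, y) (((x) + (y) - 1) / (y))
#define C4NUM 4
int main() {
  const int out_channel = 10, kernel_h = 3, kernel_w = 3;
  const int oc4 = UP_DIV(out_channel, C4NUM);         // 10 channels -> 3 blocks
  printf("%d\n", C4NUM * oc4 * kernel_h * kernel_w);  // 12 * 9 = 108
  return 0;
}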
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.cc index b2c91baaa2..9d52058536 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.cc @@ -53,7 +53,7 @@ DeconvolutionDepthwiseInt8CPUKernel::~DeconvolutionDepthwiseInt8CPUKernel() { int DeconvolutionDepthwiseInt8CPUKernel::InitWeightBias() { // init weight: int8 -> int16 // o, h, w, i -> o/8, h, w, i, 8; o == group, i == 1 - auto origin_weight = reinterpret_cast<int8_t *>(inputs_[kWeightIndex]->Data()); + auto origin_weight = reinterpret_cast<int8_t *>(in_tensors_[kWeightIndex]->Data()); int OC4 = UP_DIV(conv_param_->output_channel_, C4NUM); int pack_weight_size = C4NUM * OC4 * conv_param_->kernel_h_ * conv_param_->kernel_w_; packed_weight_ = reinterpret_cast<int16_t *>(malloc(pack_weight_size * sizeof(int16_t))); @@ -71,22 +71,22 @@ int DeconvolutionDepthwiseInt8CPUKernel::InitWeightBias() { return RET_ERROR; } memset(bias_data_, 0, C4NUM * OC4 * sizeof(int32_t)); - if (inputs_.size() == kInputSize2) { - auto ori_bias = reinterpret_cast<int32_t *>(inputs_.at(kBiasIndex)->Data()); + if (in_tensors_.size() == kInputSize2) { + auto ori_bias = reinterpret_cast<int32_t *>(in_tensors_.at(kBiasIndex)->Data()); memcpy(bias_data_, ori_bias, conv_param_->output_channel_ * sizeof(int32_t)); } return RET_OK; } int DeconvolutionDepthwiseInt8CPUKernel::InitSlideParam() { - conv_param_->input_batch_ = outputs_.front()->shape().at(kNHWC_N); - conv_param_->input_h_ = outputs_.front()->shape().at(kNHWC_H); - conv_param_->input_w_ = outputs_.front()->shape().at(kNHWC_W); + conv_param_->input_batch_ = out_tensors_.front()->shape().at(kNHWC_N); + conv_param_->input_h_ = out_tensors_.front()->shape().at(kNHWC_H); + conv_param_->input_w_ = out_tensors_.front()->shape().at(kNHWC_W); conv_param_->input_channel_ = C4NUM; - conv_param_->output_batch_ = inputs_.front()->shape().at(kNHWC_N); - conv_param_->output_h_ = inputs_.front()->shape().at(kNHWC_H); - conv_param_->output_w_ = inputs_.front()->shape().at(kNHWC_W); - conv_param_->output_channel_ = inputs_.front()->shape().at(kNHWC_C); + conv_param_->output_batch_ = in_tensors_.front()->shape().at(kNHWC_N); + conv_param_->output_h_ = in_tensors_.front()->shape().at(kNHWC_H); + conv_param_->output_w_ = in_tensors_.front()->shape().at(kNHWC_W); + conv_param_->output_channel_ = in_tensors_.front()->shape().at(kNHWC_C); // init sliding window param InitSlidingParamConvDw(sliding, conv_param_, C4NUM); @@ -138,7 +138,7 @@ int DeconvolutionDepthwiseInt8CPUKernel::InitBuffer() { int DeconvolutionDepthwiseInt8CPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } sliding = new SlidingWindowParam; @@ -222,13 +222,13 @@ int DeconvolutionDepthwiseInt8CPUKernel::Run() { } // pack input, assume input format: NHWC -> NHWC4 - auto input_tensor = inputs_.at(kInputIndex); + auto input_tensor = in_tensors_.at(kInputIndex); auto input_addr = reinterpret_cast<int8_t *>(input_tensor->Data()); PackDepthwiseInt8Input(input_addr, packed_input_, conv_param_); - auto output_addr = reinterpret_cast<int8_t *>(outputs_.at(kOutputIndex)->Data()); + auto output_addr = reinterpret_cast<int8_t *>(out_tensors_.at(kOutputIndex)->Data()); if (!need_align_) { - memset(output_addr, 0, outputs_.at(kOutputIndex)->ElementsNum() * sizeof(int8_t)); + memset(output_addr, 0, out_tensors_.at(kOutputIndex)->ElementsNum() * sizeof(int8_t)); packed_output_ = output_addr; } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.cc index 13d078d5e0..7be9ad5ea0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.cc @@ -58,13 +58,13 @@ int DeConvInt8CPUKernel::InitParam() { fc_param_->col_8_ = UP_ROUND(conv_param_->output_channel_, C8NUM) * conv_param_->kernel_h_ * conv_param_->kernel_w_; size_t oc8 = UP_DIV(conv_param_->output_channel_, C8NUM); - thread_count_ = MSMIN(opParameter->thread_num_, oc8); + thread_count_ = MSMIN(op_parameter_->thread_num_, oc8); thread_stride_ = UP_DIV(oc8, thread_count_) * C8NUM; return RET_OK; } int DeConvInt8CPUKernel::InitBiasWeight() { - if (inputs_.size() == 3) { + if (in_tensors_.size() == 3) { size_t size = UP_ROUND(conv_param_->output_channel_, C8NUM) * sizeof(int32_t); bias_data_ = malloc(size); if (bias_data_ == nullptr) { @@ -72,7 +72,7 @@ int DeConvInt8CPUKernel::InitBiasWeight() { return RET_ERROR; } memset(bias_data_, 0, size); - memcpy(bias_data_, inputs_[0]->Data(), conv_param_->output_channel_ * sizeof(int32_t)); + memcpy(bias_data_, in_tensors_[0]->Data(), conv_param_->output_channel_ * sizeof(int32_t)); } else { bias_data_ = nullptr; } @@ -86,7 +86,7 @@ int DeConvInt8CPUKernel::InitBiasWeight() { return RET_ERROR; } memset(weight_ptr_, 0, size); - PackNHWCToC8HWN8Int8(inputs_[1]->Data(), weight_ptr_, conv_param_->input_channel_, + PackNHWCToC8HWN8Int8(in_tensors_[1]->Data(), weight_ptr_, conv_param_->input_channel_, conv_param_->kernel_h_ * conv_param_->kernel_w_, conv_param_->output_channel_); return RET_OK; } @@ -116,7 +116,7 @@ int DeConvInt8CPUKernel::InitData() { int DeConvInt8CPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } ConvolutionBaseCPUKernel::Init(); @@ -205,8 +205,8 @@ int DeConvInt8CPUKernel::Run() { MS_LOG(ERROR) << "Prepare failed."; return RET_ERROR; } - int8_t *src_in = reinterpret_cast(inputs_[0]->Data()); - int8_t *src_out = reinterpret_cast(outputs_[0]->Data()); + int8_t *src_in = reinterpret_cast(in_tensors_[0]->Data()); + int8_t *src_out =
reinterpret_cast(out_tensors_[0]->Data()); for (int batch_index = 0; batch_index < conv_param_->input_batch_; batch_index++) { RowMajor2Col8MajorInt8(src_in + batch_index * fc_param_->row_ * conv_param_->input_channel_, input_ptr_, diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.cc index eea77b3d3b..5caa990b61 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.cc @@ -30,15 +30,15 @@ int DepthToSpaceInt8CPUKernel::Init() { if (ret != RET_OK) { return ret; } - DepthToSpaceParameter *param = reinterpret_cast(opParameter); + DepthToSpaceParameter *param = reinterpret_cast(op_parameter_); param->data_type_size_ = sizeof(int8_t); - auto *input_tensor = inputs_.at(kInputIndex); + auto *input_tensor = in_tensors_.at(kInputIndex); auto in_quant_args = input_tensor->GetQuantParams(); in_quant_arg_.scale_ = in_quant_args.front().scale; in_quant_arg_.zp_ = in_quant_args.front().zeroPoint; - auto *out_tensor = outputs_.at(kOutputIndex); + auto *out_tensor = out_tensors_.at(kOutputIndex); auto out_quant_args = out_tensor->GetQuantParams(); out_quant_arg_.scale_ = out_quant_args.front().scale; out_quant_arg_.zp_ = out_quant_args.front().zeroPoint; @@ -48,9 +48,7 @@ int DepthToSpaceInt8CPUKernel::Init() { return ReSize(); } -int DepthToSpaceInt8CPUKernel::ReSize() { - return DepthToSpaceBaseCPUKernel::ReSize(); -} +int DepthToSpaceInt8CPUKernel::ReSize() { return DepthToSpaceBaseCPUKernel::ReSize(); } int DepthToSpaceInt8CPUKernel::Run() { auto ret = Prepare(); @@ -58,12 +56,12 @@ int DepthToSpaceInt8CPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << ret; return ret; } - auto input = inputs_[0]; - auto output = outputs_[0]; + auto input = in_tensors_[0]; + auto output = out_tensors_[0]; const int8_t *input_data = reinterpret_cast(input->Data()); int8_t *output_data = reinterpret_cast(output->Data()); auto in_shape = input->shape(); - DepthToSpaceParameter *param = reinterpret_cast(opParameter); + DepthToSpaceParameter *param = reinterpret_cast(op_parameter_); if (in_quant_arg_.scale_ == out_quant_arg_.scale_ && in_quant_arg_.zp_ == out_quant_arg_.zp_) { DepthToSpaceForNHWC(input_data, output_data, in_shape.data(), param); } else { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.h index 728e31fadb..d0cf906600 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.h @@ -33,6 +33,7 @@ class DepthToSpaceInt8CPUKernel : public DepthToSpaceBaseCPUKernel { int Init() override; int ReSize() override; int Run() override; + private: QuantArg in_quant_arg_; QuantArg out_quant_arg_; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/fullconnection_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/fullconnection_int8.cc index 2975aa8253..b8bef7be23 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/fullconnection_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/fullconnection_int8.cc @@ -26,12 +26,12 @@ using mindspore::lite::RET_OK; namespace mindspore::kernel { int FullconnectionInt8CPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } - fc_param_->row_ = (inputs_[0]->shape())[0]; - fc_param_->col_ = (inputs_[1]->shape())[0]; - 
fc_param_->deep_ = (inputs_[1]->shape())[1]; + fc_param_->row_ = (in_tensors_[0]->shape())[0]; + fc_param_->col_ = (in_tensors_[1]->shape())[0]; + fc_param_->deep_ = (in_tensors_[1]->shape())[1]; fc_param_->row_8_ = UP_ROUND(fc_param_->row_, 8); fc_param_->col_8_ = UP_ROUND(fc_param_->col_, 8); @@ -50,7 +50,7 @@ int FullconnectionInt8CPUKernel::Init() { return RET_MEMORY_FAILED; } memset(b_r8_ptr_, 0, fc_param_->col_8_ * fc_param_->deep_ * sizeof(int8_t)); - auto weight_data = reinterpret_cast(inputs_[1]->Data()); + auto weight_data = reinterpret_cast(in_tensors_[1]->Data()); RowMajor2Col8MajorInt8(weight_data, b_r8_ptr_, fc_param_->col_, fc_param_->deep_); c_r8x8_ptr_ = reinterpret_cast(ctx_->allocator->Malloc(fc_param_->row_8_ * fc_param_->col_8_ * sizeof(int))); if (!c_r8x8_ptr_) { @@ -63,21 +63,21 @@ int FullconnectionInt8CPUKernel::Init() { return RET_MEMORY_FAILED; } memset(bias_ptr_, 0, bias_len); - if (inputs_.size() == 3) { - memcpy(bias_ptr_, inputs_[2]->Data(), bias_len); + if (in_tensors_.size() == 3) { + memcpy(bias_ptr_, in_tensors_[2]->Data(), bias_len); } - auto input_tensor = inputs_[0]; + auto input_tensor = in_tensors_[0]; auto params = input_tensor->GetQuantParams(); MS_ASSERT(params.size() == 1); quant_params_.input.zp_ = params.front().zeroPoint; quant_params_.input.scale_ = params.front().scale; - auto weight_tensor = inputs_[1]; + auto weight_tensor = in_tensors_[1]; params = weight_tensor->GetQuantParams(); MS_ASSERT(params.size() == 1); quant_params_.weight.zp_ = params.front().zeroPoint; quant_params_.weight.scale_ = params.front().scale; - auto output_tensor = outputs_[0]; + auto output_tensor = out_tensors_[0]; params = output_tensor->GetQuantParams(); MS_ASSERT(params.size() == 1); quant_params_.output.zp_ = params.front().zeroPoint; @@ -122,8 +122,8 @@ int FullconnectionInt8CPUKernel::Run() { MS_LOG(ERROR) << "Prepare failed."; return RET_ERROR; } - auto a_ptr = reinterpret_cast(inputs_[0]->Data()); - auto output_ptr = reinterpret_cast(outputs_[0]->Data()); + auto a_ptr = reinterpret_cast(in_tensors_[0]->Data()); + auto output_ptr = reinterpret_cast(out_tensors_[0]->Data()); auto &p = quant_params_; RowMajor2Col8MajorInt8(a_ptr, a_c8_ptr_, fc_param_->row_, fc_param_->deep_); LiteBackendParallelLaunch(FcInt8Run, this, thread_count_); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.cc index 3c2b688238..e34330e9f1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.cc @@ -30,8 +30,8 @@ using mindspore::schema::ActivationType_HSWISH; namespace mindspore::kernel { int HswishInt8CPUKernel::Init() { - lite::tensor::Tensor *input = inputs_.at(0); - lite::tensor::Tensor *output = outputs_.at(0); + lite::tensor::Tensor *input = in_tensors_.at(0); + lite::tensor::Tensor *output = out_tensors_.at(0); MS_ASSERT(input); MS_ASSERT(output); @@ -67,9 +67,9 @@ void HswishInt8CPUKernel::MultiplierInt32ToInt16(int32_t input, int16_t *output) int HswishInt8CPUKernel::ReSize() { return RET_OK; } int HswishInt8CPUKernel::DoActivation(int task_id) { - auto input_addr = reinterpret_cast(inputs_.at(0)->Data()); - auto output_addr = reinterpret_cast(outputs_.at(0)->Data()); - auto length = inputs_.at(0)->ElementsNum(); + auto input_addr = reinterpret_cast(in_tensors_.at(0)->Data()); + auto output_addr = reinterpret_cast(out_tensors_.at(0)->Data()); + auto length = in_tensors_.at(0)->ElementsNum(); int stride = UP_DIV(length, 
thread_count_); int count = MSMIN(stride, length - stride * task_id); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.cc index c9597214c1..41d481ced3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.cc @@ -32,12 +32,12 @@ MatmulInt8CPUKernel::~MatmulInt8CPUKernel() { int MatmulInt8CPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } int batch = 1; - auto x_shape = inputs_[0]->shape(); - auto o_shape = outputs_[0]->shape(); + auto x_shape = in_tensors_[0]->shape(); + auto o_shape = out_tensors_[0]->shape(); for (int i = 0; i < x_shape.size() - 2; ++i) { batch *= x_shape[i]; } @@ -66,17 +66,17 @@ int MatmulInt8CPUKernel::Init() { } memset(c_r8x8_ptr_, 0, params_->row_8_ * params_->col_8_ * sizeof(int)); - auto input_tensor = inputs_[0]; + auto input_tensor = in_tensors_[0]; auto params = input_tensor->GetQuantParams(); MS_ASSERT(params.size() == 1); quant_params_.input.zp_ = params.front().zeroPoint; quant_params_.input.scale_ = params.front().scale; - auto weight_tensor = inputs_[1]; + auto weight_tensor = in_tensors_[1]; params = weight_tensor->GetQuantParams(); MS_ASSERT(params.size() == 1); quant_params_.weight.zp_ = params.front().zeroPoint; quant_params_.weight.scale_ = params.front().scale; - auto output_tensor = outputs_[0]; + auto output_tensor = out_tensors_[0]; params = output_tensor->GetQuantParams(); MS_ASSERT(params.size() == 1); quant_params_.output.zp_ = params.front().zeroPoint; @@ -118,9 +118,9 @@ int MatmulInt8CPUKernel::Run() { MS_LOG(ERROR) << "Prepare failed."; return RET_ERROR; } - auto a_ptr = reinterpret_cast(inputs_[0]->Data()); - auto b_ptr = reinterpret_cast(inputs_[1]->Data()); - auto c_ptr = reinterpret_cast(outputs_[0]->Data()); + auto a_ptr = reinterpret_cast(in_tensors_[0]->Data()); + auto b_ptr = reinterpret_cast(in_tensors_[1]->Data()); + auto c_ptr = reinterpret_cast(out_tensors_[0]->Data()); auto a_stride = params_->row_ * params_->deep_; auto b_stride = params_->deep_ * params_->col_; auto c_stride = params_->row_ * params_->col_; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.cc index eab7550f60..ce3db4fa8a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.cc @@ -30,9 +30,9 @@ using mindspore::schema::PrimitiveType_Mul; namespace mindspore::kernel { int MulInt8CPUKernel::Init() { - lite::tensor::Tensor *input0 = inputs_.at(0); - lite::tensor::Tensor *input1 = inputs_.at(1); - lite::tensor::Tensor *output = outputs_.at(0); + lite::tensor::Tensor *input0 = in_tensors_.at(0); + lite::tensor::Tensor *input1 = in_tensors_.at(1); + lite::tensor::Tensor *output = out_tensors_.at(0); MS_ASSERT(input0); MS_ASSERT(input1); MS_ASSERT(output); @@ -67,25 +67,25 @@ int MulInt8CPUKernel::Run() { MS_LOG(ERROR) << "Prepare failed."; return RET_ERROR; } - input0_data_ = static_cast(inputs_.at(0)->Data()); - input1_data_ = static_cast(inputs_.at(1)->Data()); - output_data_ = static_cast(outputs_.at(0)->Data()); + input0_data_ = static_cast(in_tensors_.at(0)->Data()); + input1_data_ = static_cast(in_tensors_.at(1)->Data()); + output_data_ = static_cast(out_tensors_.at(0)->Data()); - elements_num_ = inputs_.at(0)->ElementsNum(); + elements_num_ = in_tensors_.at(0)->ElementsNum(); 
count_unit_ = thread_count_ > 1 ? UP_DIV(elements_num_, thread_count_) : elements_num_; - if (inputs_.at(0)->ElementsNum() != inputs_.at(1)->ElementsNum()) { - input0_data_ = static_cast(ctx_->allocator->Malloc(outputs_.at(0)->Size())); - input1_data_ = static_cast(ctx_->allocator->Malloc(outputs_.at(0)->Size())); + if (in_tensors_.at(0)->ElementsNum() != in_tensors_.at(1)->ElementsNum()) { + input0_data_ = static_cast(ctx_->allocator->Malloc(out_tensors_.at(0)->Size())); + input1_data_ = static_cast(ctx_->allocator->Malloc(out_tensors_.at(0)->Size())); ArithmeticParameter tile_para = {0}; - tile_para.ndim_ = outputs_.at(0)->shape().size(); + tile_para.ndim_ = out_tensors_.at(0)->shape().size(); for (size_t i = 0; i < tile_para.ndim_; i++) { - tile_para.in_shape0_[i] = inputs_.at(0)->DimensionSize(i); - tile_para.in_shape1_[i] = inputs_.at(1)->DimensionSize(i); - tile_para.out_shape_[i] = outputs_.at(0)->DimensionSize(i); + tile_para.in_shape0_[i] = in_tensors_.at(0)->DimensionSize(i); + tile_para.in_shape1_[i] = in_tensors_.at(1)->DimensionSize(i); + tile_para.out_shape_[i] = out_tensors_.at(0)->DimensionSize(i); } - TileDimensionsInt8(static_cast(inputs_.at(0)->Data()), static_cast(inputs_.at(1)->Data()), - input0_data_, input1_data_, &tile_para); + TileDimensionsInt8(static_cast(in_tensors_.at(0)->Data()), + static_cast(in_tensors_.at(1)->Data()), input0_data_, input1_data_, &tile_para); ret = LiteBackendParallelLaunch(MulInt8Run, this, thread_count_); ctx_->allocator->Free(input0_data_); ctx_->allocator->Free(input1_data_); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.cc index a5307f71bf..825f1da9ff 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.cc @@ -47,8 +47,8 @@ int PadInt8CPUKernel::SetQuantParam() { return RET_MEMORY_FAILED; } - auto *input_tensor = inputs_.at(kInputIndex); - auto *out_tensor = outputs_.at(kOutputIndex); + auto *input_tensor = in_tensors_.at(kInputIndex); + auto *out_tensor = out_tensors_.at(kOutputIndex); auto in_quant_arg = input_tensor->GetQuantParams(); auto out_quant_arg = out_tensor->GetQuantParams(); @@ -69,8 +69,8 @@ int PadInt8CPUKernel::SetQuantParam() { } int PadInt8CPUKernel::InitPadParam() { - auto in_dims = inputs_[0]->shape(); - auto out_dims = outputs_[0]->shape(); + auto in_dims = in_tensors_[0]->shape(); + auto out_dims = out_tensors_[0]->shape(); int ndims = in_dims.size(); int in[] = {1, 1, 1, 1}; @@ -94,7 +94,7 @@ int PadInt8CPUKernel::ReSize() { int PadInt8CPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } int error_code = InitPadParam(); @@ -117,10 +117,10 @@ int PadInt8CPUKernel::Run() { MS_LOG(ERROR) << "Prepare failed."; return RET_ERROR; } - int8_t *in_data = reinterpret_cast(inputs_[0]->Data()); - int8_t *out_data = reinterpret_cast(outputs_[0]->Data()); + int8_t *in_data = reinterpret_cast(in_tensors_[0]->Data()); + int8_t *out_data = reinterpret_cast(out_tensors_[0]->Data()); - memset(out_data, pad_param_->pad_quant_arg_.constant_value_[0], outputs_[0]->ElementsNum() * sizeof(int8_t)); + memset(out_data, pad_param_->pad_quant_arg_.constant_value_[0], out_tensors_[0]->ElementsNum() * sizeof(int8_t)); PadConstant4D(in_data, out_data, in_dims_, out_dims_, pad_param_->paddings_); return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.h 
b/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.h index 4bc7b5f302..bda548944c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.h @@ -30,8 +30,8 @@ class PadInt8CPUKernel : public LiteKernel { const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx, const lite::Primitive *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) { - opParameter->thread_num_ = ctx->thread_num_; - pad_param_ = reinterpret_cast<PadParameter *>(opParameter); + op_parameter_->thread_num_ = ctx->thread_num_; + pad_param_ = reinterpret_cast<PadParameter *>(op_parameter_); } ~PadInt8CPUKernel() override { FreeQuantParam(); };
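The pooling and activation kernels below split work across threads by task_id: each task takes a contiguous stride of UP_DIV(length, thread_num) elements and the final chunk is clamped with MSMIN. A sketch of that partition with hypothetical sizes:

#include <algorithm>
#include <cstdio>

// Partition 10 elements across 4 tasks the way DoActivation() below does:
// stride = UP_DIV(10, 4) = 3, so tasks get 3, 3, 3, and 1 elements.
#define UP_DIV(x, y) (((x) + (y) - 1) / (y))
int main() {
  const int length = 10, thread_num = 4;
  const int stride = UP_DIV(length, thread_num);
  for (int task_id = 0; task_id < thread_num; ++task_id) {
    const int count = std::min(stride, length - stride * task_id);
    if (count <= 0) continue;
    printf("task %d: offset %d, count %d\n", task_id, stride * task_id, count);
  }
  return 0;
}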
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.cc index 36b10c7344..8a91f71414 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_OK; namespace mindspore::kernel { int PoolingInt8CPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } auto ret = PoolingBaseCPUKernel::Init(); @@ -60,8 +60,8 @@ int PoolingInt8CPUKernel::ReSize() { } int PoolingInt8CPUKernel::RunImpl(int task_id) { - auto input_data = reinterpret_cast<int8_t *>(inputs_.at(kInputIndex)->Data()); - auto output_data = reinterpret_cast<int8_t *>(outputs_.at(kOutputIndex)->Data()); + auto input_data = reinterpret_cast<int8_t *>(in_tensors_.at(kInputIndex)->Data()); + auto output_data = reinterpret_cast<int8_t *>(out_tensors_.at(kOutputIndex)->Data()); if (pooling_param_->max_pooling_) { MaxPoolingInt8(input_data, output_data, pooling_param_, task_id); } else { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/prelu_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/prelu_int8.cc index 2f4d36a6fc..1edc5762f7 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/prelu_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/prelu_int8.cc @@ -30,19 +30,19 @@ using mindspore::schema::PrimitiveType_Prelu; namespace mindspore::kernel { int PreluInt8CPUKernel::Init() { if (context_->infer_shape_interrupt_ && !context_->running_) { - SetNeedReInit(); + set_need_reinit(); return RET_OK; } PreluBaseCPUKernel::Init(); - auto *input_tensor = inputs_.at(kInputIndex); + auto *input_tensor = in_tensors_.at(kInputIndex); auto in_quant_args = input_tensor->GetQuantParams(); quant_prelu_parm_->quant_arg.in_args_.scale_ = in_quant_args.front().scale; quant_prelu_parm_->quant_arg.in_args_.zp_ = in_quant_args.front().zeroPoint; auto input_dim = input_tensor->shape().size(); MS_ASSERT(input_dim <= CROP_OFFSET_MAX_SIZE); quant_prelu_parm_->input_dim_ = input_dim; - quant_prelu_parm_->element_num = inputs_[0]->Size(); - auto *out_tensor = outputs_.at(kOutputIndex); + quant_prelu_parm_->element_num = in_tensors_[0]->Size(); + auto *out_tensor = out_tensors_.at(kOutputIndex); auto out_quant_args = out_tensor->GetQuantParams(); quant_prelu_parm_->quant_arg.out_args_.scale_ = out_quant_args.front().scale; quant_prelu_parm_->quant_arg.out_args_.zp_ = out_quant_args.front().zeroPoint; @@ -69,8 +69,8 @@ int PreluInt8Run(int task_id, LiteParallelGroupEnv *penv, void *cdata) { } int PreluInt8CPUKernel::DoExecute(int task_id) { - auto input_tensor = inputs_.at(kInputIndex); - auto out_tensor = outputs_.at(kOutputIndex); + auto input_tensor = in_tensors_.at(kInputIndex); + auto out_tensor = out_tensors_.at(kOutputIndex); int8_t *input_data = reinterpret_cast<int8_t *>(input_tensor->Data()); int8_t *output_data = reinterpret_cast<int8_t *>(out_tensor->Data()); prelu(input_data, output_data, quant_prelu_parm_, task_id); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/prelu_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/prelu_int8.h index c50378e63d..a47244f5fb 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/prelu_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/prelu_int8.h @@ -32,7 +32,7 @@ class PreluInt8CPUKernel : public PreluBaseCPUKernel { const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx, const lite::Primitive *primitive) : PreluBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) { - quant_prelu_parm_ = reinterpret_cast(opParameter); + quant_prelu_parm_ = reinterpret_cast(op_parameter_); } ~PreluInt8CPUKernel() override {} diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/relux_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/relux_int8.cc index fb321c78f9..8292653610 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/relux_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/relux_int8.cc @@ -28,8 +28,8 @@ using mindspore::schema::ActivationType_RELU; namespace mindspore::kernel { int ReluXInt8CPUKernel::Init() { - lite::tensor::Tensor *input = inputs_.at(0); - lite::tensor::Tensor *output = outputs_.at(0); + lite::tensor::Tensor *input = in_tensors_.at(0); + lite::tensor::Tensor *output = out_tensors_.at(0); MS_ASSERT(input); MS_ASSERT(output); @@ -47,11 +47,11 @@ int ReluXInt8CPUKernel::Init() { int ReluXInt8CPUKernel::ReSize() { return RET_OK; } int ReluXInt8CPUKernel::DoActivation(int task_id) { - auto input_addr = reinterpret_cast<int8_t *>(inputs_.at(0)->Data()); - auto output_addr = reinterpret_cast<int8_t *>(outputs_.at(0)->Data()); - auto length = inputs_.at(0)->ElementsNum(); + auto input_addr = reinterpret_cast<int8_t *>(in_tensors_.at(0)->Data()); + auto output_addr = reinterpret_cast<int8_t *>(out_tensors_.at(0)->Data()); + auto length = in_tensors_.at(0)->ElementsNum(); - int stride = UP_DIV(length, opParameter->thread_num_); + int stride = UP_DIV(length, op_parameter_->thread_num_); int count = MSMIN(stride, length - stride * task_id); ReluXInt8(input_addr + stride * task_id, count, output_addr + stride * task_id, &quant_arg_); @@ -74,7 +74,7 @@ int ReluXInt8CPUKernel::Run() { MS_LOG(ERROR) << "Prepare fail!ret: " << ret; return ret; } - int error_code = LiteBackendParallelLaunch(ReluXInt8Run, this, opParameter->thread_num_); + int error_code = LiteBackendParallelLaunch(ReluXInt8Run, this, op_parameter_->thread_num_); if (error_code != RET_OK) { MS_LOG(ERROR) << "ReluXInt8Run function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/reshape_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/reshape_int8.cc index 6102fb51a2..5736e84ed0 100--- a/mindspore/lite/src/runtime/kernel/arm/int8/reshape_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/reshape_int8.cc @@ -28,12 +28,12 @@ namespace mindspore::kernel { int ReshapeInt8CPUKernel::Init() { ReshapeBaseCPUKernel::Init(); - auto *input_tensor = inputs_.at(kInputIndex); + auto *input_tensor = in_tensors_.at(kInputIndex); auto in_quant_args = input_tensor->GetQuantParams(); reshape_param_->quant_para_.in_args_.scale_ = in_quant_args.front().scale; reshape_param_->quant_para_.in_args_.zp_ = in_quant_args.front().zeroPoint; - auto *out_tensor = outputs_.at(kOutputIndex); + auto *out_tensor = out_tensors_.at(kOutputIndex); auto out_quant_args = out_tensor->GetQuantParams();
reshape_param_->quant_para_.out_args_.scale_ = out_quant_args.front().scale;
   reshape_param_->quant_para_.out_args_.zp_ = out_quant_args.front().zeroPoint;
@@ -52,15 +52,15 @@ int ReshapeInt8CPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare fail!ret: " << ret;
     return ret;
   }
-  MS_ASSERT(inputs_.size() == 1);
-  MS_ASSERT(outputs_.size() == 1);
-  input_data_ = static_cast<int8_t *>(inputs_.at(kInputIndex)->Data());
-  output_data_ = static_cast<int8_t *>(outputs_.at(kOutputIndex)->Data());
+  MS_ASSERT(in_tensors_.size() == 1);
+  MS_ASSERT(out_tensors_.size() == 1);
+  input_data_ = static_cast<int8_t *>(in_tensors_.at(kInputIndex)->Data());
+  output_data_ = static_cast<int8_t *>(out_tensors_.at(kOutputIndex)->Data());
-  elements_num_ = inputs_.at(kInputIndex)->ElementsNum();
-  count_unit_ = opParameter->thread_num_ > 1 ? UP_DIV(elements_num_, opParameter->thread_num_) : elements_num_;
+  elements_num_ = in_tensors_.at(kInputIndex)->ElementsNum();
+  count_unit_ = op_parameter_->thread_num_ > 1 ? UP_DIV(elements_num_, op_parameter_->thread_num_) : elements_num_;
-  ret = LiteBackendParallelLaunch(ReshapeInt8Run, this, opParameter->thread_num_);
+  ret = LiteBackendParallelLaunch(ReshapeInt8Run, this, op_parameter_->thread_num_);
   return ret;
 }
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.cc
index 14aca7d6b6..191e7e5ec1 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.cc
@@ -45,10 +45,10 @@ int ResizeInt8CPUKernel::Init() {
   MS_ASSERT(quant_in_);
   quant_out_ = new (std::nothrow) QuantArg;
   MS_ASSERT(quant_out_);
-  auto input = inputs_.at(0);
+  auto input = in_tensors_.at(0);
   quant_in_->zp_ = input->GetQuantParams().front().zeroPoint;
   quant_in_->scale_ = input->GetQuantParams().front().scale;
-  auto output = outputs_.at(0);
+  auto output = out_tensors_.at(0);
   quant_out_->zp_ = output->GetQuantParams().front().zeroPoint;
   quant_out_->scale_ = output->GetQuantParams().front().scale;
@@ -73,12 +73,12 @@ int ResizeInt8Impl(int task_id, LiteParallelGroupEnv *penv, void *cdata) {
 }
 int ResizeInt8CPUKernel::RunImpl(int task_id) {
-  auto input = inputs_.at(0);
+  auto input = in_tensors_.at(0);
   auto input_data = reinterpret_cast<const int8_t *>(input->Data());
   if (input_data == nullptr) {
     return RET_NULL_PTR;
   }
-  auto output_data = reinterpret_cast<int8_t *>(outputs_.at(0)->Data());
+  auto output_data = reinterpret_cast<int8_t *>(out_tensors_.at(0)->Data());
   if (output_data == nullptr) {
     return RET_NULL_PTR;
   }
@@ -91,8 +91,8 @@ int ResizeInt8CPUKernel::RunImpl(int task_id) {
   int ret = 0;
   switch (method_) {
     case static_cast<int>(schema::ResizeMethod_BILINEAR): {
-      ret = ResizeBilinearInt8(input_data, output_data, input_shape.data(), outputs_[0]->shape().data(), align_corners_,
-                               quant_in_, quant_out_, multiplier_, task_id, context_->thread_num_);
+      ret = ResizeBilinearInt8(input_data, output_data, input_shape.data(), out_tensors_[0]->shape().data(),
+                               align_corners_, quant_in_, quant_out_, multiplier_, task_id, context_->thread_num_);
       break;
     }
@@ -100,11 +100,12 @@ int ResizeInt8CPUKernel::RunImpl(int task_id) {
       bool same_zp = quant_in_->zp_ == quant_out_->zp_;
       bool same_scale = abs(quant_out_->scale_ - quant_in_->scale_) < 1e-6;
       if (same_zp && same_scale) {
-        ret = ResizeNearestNeighborInt8Simple(input_data, output_data, input_shape.data(), outputs_[0]->shape().data(),
-                                              align_corners_, task_id, context_->thread_num_);
+        ret =
+          ResizeNearestNeighborInt8Simple(input_data, output_data, input_shape.data(), out_tensors_[0]->shape().data(),
+                                          align_corners_, task_id, context_->thread_num_);
       } else {
         ret =
-          ResizeNearestNeighborInt8(input_data, output_data, input_shape.data(), outputs_[0]->shape().data(),
+          ResizeNearestNeighborInt8(input_data, output_data, input_shape.data(), out_tensors_[0]->shape().data(),
                                     align_corners_, multiplier_, quant_in_, quant_out_, task_id, context_->thread_num_);
       }
       break;
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.cc
index f35aafa8c0..00d2047fbc 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.cc
@@ -31,8 +31,8 @@ using mindspore::schema::ActivationType_SIGMOID;
 namespace mindspore::kernel {
 int SigmoidInt8CPUKernel::Init() {
-  lite::tensor::Tensor *input = inputs_.at(0);
-  lite::tensor::Tensor *output = outputs_.at(0);
+  lite::tensor::Tensor *input = in_tensors_.at(0);
+  lite::tensor::Tensor *output = out_tensors_.at(0);
   MS_ASSERT(input);
   MS_ASSERT(output);
@@ -68,11 +68,11 @@ void SigmoidInt8CPUKernel::MultiplierInt32ToInt16(int32_t input, int16_t *output
 int SigmoidInt8CPUKernel::ReSize() { return RET_OK; }
 int SigmoidInt8CPUKernel::DoActivation(int task_id) {
-  auto input_addr = reinterpret_cast<int8_t *>(inputs_.at(0)->Data());
-  auto output_addr = reinterpret_cast<int8_t *>(outputs_.at(0)->Data());
-  auto length = inputs_.at(0)->ElementsNum();
+  auto input_addr = reinterpret_cast<int8_t *>(in_tensors_.at(0)->Data());
+  auto output_addr = reinterpret_cast<int8_t *>(out_tensors_.at(0)->Data());
+  auto length = in_tensors_.at(0)->ElementsNum();
-  int stride = UP_DIV(length, opParameter->thread_num_);
+  int stride = UP_DIV(length, op_parameter_->thread_num_);
   int count = MSMIN(stride, length - stride * task_id);
   SigmoidInt8(input_addr + stride * task_id, count, output_addr + stride * task_id, &quant_arg_);
@@ -95,7 +95,7 @@ int SigmoidInt8CPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare fail!ret: " << ret;
     return ret;
   }
-  int error_code = LiteBackendParallelLaunch(SigmoidInt8Run, this, opParameter->thread_num_);
+  int error_code = LiteBackendParallelLaunch(SigmoidInt8Run, this, op_parameter_->thread_num_);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "SigmoidInt8Run function error error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.cc
index c16a2ad24f..40ef9c10f4 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.cc
@@ -31,14 +31,14 @@ int SoftmaxInt8CPUKernel::Init() {
     return ret;
   }
-  auto *input_tensor = inputs_.at(kInputIndex);
+  auto *input_tensor = in_tensors_.at(kInputIndex);
   MS_ASSERT(input_tensor);
   auto in_quant_args = input_tensor->GetQuantParams();
   quant_params_.in_quant_args_.scale_ = in_quant_args.front().scale;
   quant_params_.in_quant_args_.zp_ = in_quant_args.front().zeroPoint;
-  auto *out_tensor = outputs_.at(kOutputIndex);
+  auto *out_tensor = out_tensors_.at(kOutputIndex);
   MS_ASSERT(out_tensor);
   auto out_quant_args = out_tensor->GetQuantParams();
@@ -79,11 +79,11 @@ int SoftmaxInt8CPUKernel::ReSize() {
 }
 int SoftmaxInt8CPUKernel::DoSoftmax(int task_id) {
-  MS_ASSERT(inputs_.size() == 1);
-  MS_ASSERT(outputs_.size() == 1);
+  MS_ASSERT(in_tensors_.size() == 1);
+  MS_ASSERT(out_tensors_.size() == 1);
-  auto input_ptr = reinterpret_cast<int8_t *>(inputs_.at(0)->Data());
-  auto output_ptr = reinterpret_cast<int8_t *>(outputs_.at(0)->Data());
+  auto input_ptr = reinterpret_cast<int8_t *>(in_tensors_.at(0)->Data());
+  auto output_ptr = reinterpret_cast<int8_t *>(out_tensors_.at(0)->Data());
   int outter_size = 1, inner_size = 1;
   for (int i = 0; i < softmax_param_->axis_; i++) {
@@ -124,7 +124,7 @@ int SoftmaxInt8CPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare fail!ret: " << ret;
     return RET_ERROR;
   }
-  auto input_ptr = reinterpret_cast<int8_t *>(inputs_.at(0)->Data());
+  auto input_ptr = reinterpret_cast<int8_t *>(in_tensors_.at(0)->Data());
   int ele_size = softmax_param_->element_size_;
   for (int i = 0; i < ele_size; i++) {
     float input_scaled = ((input_ptr[i] - quant_params_.in_quant_args_.zp_) * quant_params_.in_quant_args_.scale_);
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.cc
index ac47e910d7..ab8f7dfe69 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.cc
@@ -32,14 +32,14 @@ int SplitInt8CPUKernel::Init() {
   if (ret != RET_OK) {
     return ret;
   }
-  auto in_tensor = inputs_.at(kInputIndex);
+  auto in_tensor = in_tensors_.at(kInputIndex);
   auto in_quant_args = in_tensor->GetQuantParams();
   param->quant_arg_.in_args_.scale_ = in_quant_args.front().scale;
   param->quant_arg_.in_args_.zp_ = in_quant_args.front().zeroPoint;
   MS_ASSERT(param->num_split_ == outputs_.size());
   for (int i = 0; i < param->num_split_; i++) {
-    auto *out_tensor = outputs_.at(i);
+    auto *out_tensor = out_tensors_.at(i);
     auto out_quant_args = out_tensor->GetQuantParams();
     param->quant_arg_.out_args_[i].scale_ = out_quant_args.front().scale;
     param->quant_arg_.out_args_[i].zp_ = out_quant_args.front().zeroPoint;
@@ -63,7 +63,7 @@ int SplitInt8CPUKernel::Split(int task_id) {
   }
   int thread_offset = task_id * thread_n_stride_;
   auto ret =
-    DoSplit(input_ptr_, output_ptr_.data(), inputs_.front()->shape().data(), thread_offset, num_unit_thread, param);
+    DoSplit(input_ptr_, output_ptr_.data(), in_tensors_.front()->shape().data(), thread_offset, num_unit_thread, param);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "Split error task_id[" << task_id << "] error_code[" << ret << "]";
     return RET_ERROR;
@@ -87,11 +87,11 @@ int SplitInt8CPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare failed.";
     return ret;
   }
-  auto in_tensor = inputs_.at(kInputIndex);
+  auto in_tensor = in_tensors_.at(kInputIndex);
   input_ptr_ = reinterpret_cast<int8_t *>(in_tensor->Data());
   MS_ASSERT(param->num_split_ == outputs_.size());
   for (int i = 0; i < param->num_split_; i++) {
-    output_ptr_.push_back(reinterpret_cast<int8_t *>(outputs_.at(i)->Data()));
+    output_ptr_.push_back(reinterpret_cast<int8_t *>(out_tensors_.at(i)->Data()));
   }
   ret = LiteBackendParallelLaunch(SplitInt8Run, this, thread_n_num_);
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.cc
index 1e9a1b3fde..fdec4d0947 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.cc
@@ -34,7 +34,7 @@ int SqueezeInt8CPUKernel::Init() {
     return init_ret;
   }
   quant_Squeeze_parm_ = new (std::nothrow) SqueezeQuantArg;
-  auto input_num = inputs_.size();
+  auto input_num = in_tensors_.size();
   quant_Squeeze_parm_->input_num_ = input_num;
   quant_Squeeze_parm_->input_sizes_ = reinterpret_cast<int *>(malloc(sizeof(int) * input_num));
   if (quant_Squeeze_parm_->input_sizes_ == nullptr) {
@@ -59,7 +59,7 @@ int SqueezeInt8CPUKernel::Init() {
   }
   for (size_t i = 0; i < input_num; i++) {
-    auto *input_tensor = inputs_.at(i);
+    auto *input_tensor = in_tensors_.at(i);
     auto quant_args = input_tensor->GetQuantParams();
     MS_ASSERT(quant_args.size() == 1);
     quant_Squeeze_parm_->in_quant_args_[i].scale_ = quant_args.front().scale;
@@ -67,7 +67,7 @@
   }
   MS_ASSERT(outputs_.size() == 1);
-  auto output_tensor = outputs_.at(0);
+  auto output_tensor = out_tensors_.at(0);
   MS_ASSERT(output_tensor != nullptr);
   auto quant_args = output_tensor->GetQuantParams();
   MS_ASSERT(quant_args.size() == 1);
@@ -81,9 +81,9 @@ int SqueezeInt8CPUKernel::Init() {
 }
 int SqueezeInt8CPUKernel::ReSize() {
-  auto input_num = inputs_.size();
+  auto input_num = in_tensors_.size();
   for (size_t i = 0; i < input_num; i++) {
-    auto *input_tensor = inputs_.at(i);
+    auto *input_tensor = in_tensors_.at(i);
     MS_ASSERT(input_tensor != nullptr);
     auto input_size = input_tensor->shape().size();
     MS_ASSERT(input_size != NULL);
@@ -95,14 +95,14 @@ int SqueezeInt8CPUKernel::ReSize() {
     ::memcpy(quant_Squeeze_parm_->input_shapes_[i], input_tensor->shape().data(), sizeof(int) * input_size);
     for (size_t j = 0; j < input_size; j++) {
-      auto *input_tensor_tmp = inputs_.at(i);
+      auto *input_tensor_tmp = in_tensors_.at(i);
       auto input_shape = input_tensor_tmp->shape()[j];
       quant_Squeeze_parm_->input_sizes_[i] *= input_shape;
     }
   }
-  MS_ASSERT(outputs_.size() == 1);
-  auto output_tensor = outputs_.at(0);
+  MS_ASSERT(out_tensors_.size() == 1);
+  auto output_tensor = out_tensors_.at(0);
   MS_ASSERT(output_tensor != nullptr);
   auto output_shape = output_tensor->shape();
   MS_ASSERT(output_shape != NULL);
@@ -130,9 +130,9 @@ int SqueezeInt8CPUKernel::Run() {
   for (size_t i = 0; i < input_dim; i++) {
     auto input_size = quant_Squeeze_parm_->input_sizes_[i];
     inputs_array[i] = reinterpret_cast<int8_t *>(malloc(sizeof(int8_t) * input_size));
-    auto input_type = inputs_[i]->data_type();
+    auto input_type = in_tensors_[i]->data_type();
     if (input_type == kNumberTypeUInt8) {
-      uint8_t *input_tmp = reinterpret_cast<uint8_t *>(inputs_[i]->Data());
+      uint8_t *input_tmp = reinterpret_cast<uint8_t *>(in_tensors_[i]->Data());
       for (size_t j = 0; j < input_size; j++) {
         inputs_array[i][j] = (int8_t)(input_tmp[j] - 128);
       }
@@ -141,11 +141,11 @@
       }
       quant_Squeeze_parm_->out_quant_args_.zp_ -= 128;
     } else {
-      ::memcpy(inputs_array[i], inputs_.at(i)->Data(), sizeof(int8_t) * input_size);
+      ::memcpy(inputs_array[i], in_tensors_.at(i)->Data(), sizeof(int8_t) * input_size);
     }
   }
-  int8_t *output_addr = reinterpret_cast<int8_t *>(outputs_.at(0)->Data());
-  auto output_type = outputs_[0]->data_type();
+  int8_t *output_addr = reinterpret_cast<int8_t *>(out_tensors_.at(0)->Data());
+  auto output_type = out_tensors_[0]->data_type();
   if (output_type == kNumberTypeUInt8) {
     auto output_size = quant_Squeeze_parm_->output_size_;
     for (size_t i = 0; i < output_size; i++) {
@@ -171,12 +171,12 @@ int SqueezeInt8Run(int task_id, LiteParallelGroupEnv *penv, void *cdata) {
 }
 int SqueezeInt8CPUKernel::DoExecute(int task_id) {
-  auto input_tensor = inputs_.at(kInputIndex);
-  auto out_tensor = outputs_.at(kOutputIndex);
+  auto input_tensor = in_tensors_.at(kInputIndex);
+  auto out_tensor = out_tensors_.at(kOutputIndex);
   int8_t *input_data = reinterpret_cast<int8_t *>(input_tensor->Data());
   int8_t *output_data = reinterpret_cast<int8_t *>(out_tensor->Data());
-  size_t data_size = inputs_.front()->Size();
+  size_t data_size = in_tensors_.front()->Size();
   Squeeze(&input_data, output_data, task_id, quant_Squeeze_parm_, para_, data_size);
   return RET_OK;
 }
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.cc
index 0e96c8c9be..38e63be704 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.cc
@@ -26,11 +26,11 @@ using mindspore::schema::PrimitiveType_TopK;
 namespace mindspore::kernel {
 int TopKInt8CPUKernel::Init() {
   if (context_->infer_shape_interrupt_ && !context_->running_) {
-    SetNeedReInit();
+    set_need_reinit();
     return RET_OK;
   }
-  TopkParameter *parameter = reinterpret_cast<TopkParameter *>(opParameter);
-  lite::tensor::Tensor *input = inputs_.at(0);
+  TopkParameter *parameter = reinterpret_cast<TopkParameter *>(op_parameter_);
+  lite::tensor::Tensor *input = in_tensors_.at(0);
   parameter->last_dim_size_ = input->shape()[input->shape().size() - 1];
   parameter->loop_num_ = 1;
   for (int i = 0; i < input->shape().size() - 1; ++i) {
@@ -53,11 +53,11 @@ int TopKInt8CPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare failed.";
     return ret;
   }
-  int8_t *input_data = reinterpret_cast<int8_t *>(inputs_.at(0)->Data());
-  int8_t *output_data = reinterpret_cast<int8_t *>(outputs_.at(0)->Data());
-  int32_t *output_index = reinterpret_cast<int32_t *>(outputs_.at(1)->Data());
+  int8_t *input_data = reinterpret_cast<int8_t *>(in_tensors_.at(0)->Data());
+  int8_t *output_data = reinterpret_cast<int8_t *>(out_tensors_.at(0)->Data());
+  int32_t *output_index = reinterpret_cast<int32_t *>(out_tensors_.at(1)->Data());
-  TopkInt8(input_data, output_data, output_index, reinterpret_cast<TopkParameter *>(opParameter));
+  TopkInt8(input_data, output_data, output_index, reinterpret_cast<TopkParameter *>(op_parameter_));
   return RET_OK;
 }
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.h
index 513455c1e7..1fc8de9ae7 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.h
@@ -28,7 +28,7 @@ class TopKInt8CPUKernel : public LiteKernel {
                const lite::Primitive *primitive)
       : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
   ~TopKInt8CPUKernel() override {
-    TopkParameter *parameter = reinterpret_cast<TopkParameter *>(opParameter);
+    TopkParameter *parameter = reinterpret_cast<TopkParameter *>(op_parameter_);
     free(parameter->topk_node_list_);
   }
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.cc
index 450034e93f..1e1de7bec4 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.cc
@@ -30,10 +30,10 @@ using mindspore::schema::PrimitiveType_Unsqueeze;
 namespace mindspore::kernel {
 int Unsqueezeint8CPUKernel::Init() {
   if (context_->infer_shape_interrupt_ && !context_->running_) {
-    SetNeedReInit();
+    set_need_reinit();
     return RET_OK;
   }
-  auto *input_tensor = inputs_.at(0);
+  auto *input_tensor = in_tensors_.at(0);
   auto quant_args = input_tensor->GetQuantParams();
   MS_ASSERT(quant_args.size() == 1);
   Unsq_para_->quant_arg.in_quant_args_.scale_ = quant_args.front().scale;
@@ -49,7 +49,7 @@
 }
 int Unsqueezeint8CPUKernel::ReSize() {
-  data_size_ = inputs_.at(0)->ElementsNum();
+  data_size_ = in_tensors_.at(0)->ElementsNum();
   thread_sz_count_ = MSMIN(thread_count_, data_size_);
   thread_sz_stride_ = UP_DIV(data_size_, thread_sz_count_);
   return RET_OK;
@@ -61,9 +61,9 @@ int Unsqueezeint8CPUKernel::DoUnsqueeze(int task_id) {
     return RET_OK;
   }
-  auto input_ptr = reinterpret_cast(inputs_.front()->Data());
-  auto output_ptr = reinterpret_cast(outputs_.front()->Data());
-  size_t data_size = outputs_.front()->Size();
+  auto input_ptr = reinterpret_cast(in_tensors_.front()->Data());
+  auto
output_ptr = reinterpret_cast(out_tensors_.front()->Data()); + size_t data_size = out_tensors_.front()->Size(); int ret = Unsqueeze(input_ptr, output_ptr, Unsq_para_, data_size, task_id); if (ret != RET_OK) { @@ -89,8 +89,8 @@ int Unsqueezeint8CPUKernel::Run() { MS_LOG(ERROR) << "Prepare failed."; return ret; } - in_ptr_ = reinterpret_cast(inputs_.at(0)->Data()); - out_ptr_ = reinterpret_cast(outputs_.at(0)->Data()); + in_ptr_ = reinterpret_cast(in_tensors_.at(0)->Data()); + out_ptr_ = reinterpret_cast(out_tensors_.at(0)->Data()); ret = LiteBackendParallelLaunch(UnsqueezeIn8Run, this, thread_sz_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "UnsqueezeRun error error_code[" << ret << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.h index 4c32cd05ef..ba785211e8 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.h @@ -31,8 +31,8 @@ class Unsqueezeint8CPUKernel : public LiteKernel { const std::vector &outputs, const Context *ctx, const lite::Primitive *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { - Unsq_para_ = reinterpret_cast(opParameter); - Unsq_para_->thread_count_ = opParameter->thread_num_; + Unsq_para_ = reinterpret_cast(op_parameter_); + Unsq_para_->thread_count_ = op_parameter_->thread_num_; } ~Unsqueezeint8CPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc index 36acee6869..81b5da24e1 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc @@ -32,36 +32,36 @@ using mindspore::lite::KernelRegistrar; namespace mindspore::kernel { std::vector ArithmeticOpenCLKernel::InitGlobalSize() const { - const size_t global_x = outputs_[0]->Width(); - const size_t global_y = outputs_[0]->Height(); - const size_t global_z = UP_ROUND_DIV(outputs_[0]->Channel(), 4); + const size_t global_x = out_tensors_[0]->Width(); + const size_t global_y = out_tensors_[0]->Height(); + const size_t global_z = UP_ROUND_DIV(out_tensors_[0]->Channel(), 4); std::vector global = {global_x, global_y, global_z}; return global; } void ArithmeticOpenCLKernel::Image2dGetWorkGroupSize() { - size_t H = outputs_[0]->Batch() * outputs_[0]->Height(); - size_t W = outputs_[0]->Width() * UP_DIV(outputs_[0]->Channel(), C4NUM); + size_t H = out_tensors_[0]->Batch() * out_tensors_[0]->Height(); + size_t W = out_tensors_[0]->Width() * UP_DIV(out_tensors_[0]->Channel(), C4NUM); local_size_ = {16, 16}; global_size_ = {W, H}; } void ArithmeticOpenCLKernel::BufferGetWorkGroupSize() { - uint32_t element_num = outputs_[0]->ElementsC4Num(); + uint32_t element_num = out_tensors_[0]->ElementsC4Num(); global_size_ = {element_num}; } -int ArithmeticOpenCLKernel::GetImageSize(size_t idx, std::vector* img_size) { - size_t CO4 = UP_DIV(outputs_[0]->Channel(), C4NUM); - int H = outputs_[0]->Batch() * outputs_[0]->Height(); - int W = outputs_[0]->Width() * CO4; +int ArithmeticOpenCLKernel::GetImageSize(size_t idx, std::vector *img_size) { + size_t CO4 = UP_DIV(out_tensors_[0]->Channel(), C4NUM); + int H = out_tensors_[0]->Batch() * out_tensors_[0]->Height(); + int W = out_tensors_[0]->Width() * CO4; size_t im_dst_x, im_dst_y; - if (inputs_[0]->GetFormat() == schema::Format_NHWC4) { + if (in_tensors_[0]->GetFormat() 
== schema::Format_NHWC4) { im_dst_x = W; im_dst_y = H; } else { - im_dst_y = outputs_[0]->Batch() * outputs_[0]->Height() * CO4; - im_dst_x = outputs_[0]->Width(); + im_dst_y = out_tensors_[0]->Batch() * out_tensors_[0]->Height() * CO4; + im_dst_x = out_tensors_[0]->Width(); } #ifdef ENABLE_FP16 size_t img_dtype = CL_HALF_FLOAT; @@ -78,12 +78,12 @@ int ArithmeticOpenCLKernel::Init() { runtime_ = lite::opencl::OpenCLRuntime::GetInstance(); std::string kernel_name; - if (inputs_[1]->TensorType() == schema::NodeType_ValueNode && inputs_[1]->Data() != nullptr) { + if (in_tensors_[1]->TensorType() == schema::NodeType_ValueNode && in_tensors_[1]->Data() != nullptr) { element_flag_ = false; kernel_name = "BoardcastArith"; } else { element_flag_ = true; - switch (opParameter->type_) { + switch (op_parameter_->type_) { case PrimitiveType_Mul: kernel_name = "ElementMul"; break; @@ -97,12 +97,11 @@ int ArithmeticOpenCLKernel::Init() { kernel_name = "ElementDiv"; break; default: - MS_LOG(ERROR) << "Error Operator type " << opParameter->type_; + MS_LOG(ERROR) << "Error Operator type " << op_parameter_->type_; break; } } - #ifdef PROGRAM_WITH_IL runtime_->CreateKernelFromIL(kernel_(), kernel_name); #else @@ -112,24 +111,24 @@ int ArithmeticOpenCLKernel::Init() { runtime_->LoadSource(program_name, source); runtime_->BuildKernel(kernel_, program_name, kernel_name, build_options); #endif - outputs_[0]->SetFormat(schema::Format_NHWC4); + out_tensors_[0]->SetFormat(schema::Format_NHWC4); Image2dGetWorkGroupSize(); return 0; } int ArithmeticOpenCLKernel::Run() { - MS_LOG(DEBUG) << this->Name() << " Running!"; + MS_LOG(DEBUG) << this->name() << " Running!"; auto runtime_ = lite::opencl::OpenCLRuntime::GetInstance(); int arg_idx = 0; - uint32_t element_num = outputs_[0]->ElementsC4Num(); + uint32_t element_num = out_tensors_[0]->ElementsC4Num(); - runtime_->SetKernelArg(kernel_, arg_idx++, inputs_[0]->Data()); + runtime_->SetKernelArg(kernel_, arg_idx++, in_tensors_[0]->Data()); if (element_flag_) { - runtime_->SetKernelArg(kernel_, arg_idx++, inputs_[1]->Data()); + runtime_->SetKernelArg(kernel_, arg_idx++, in_tensors_[1]->Data()); } else { - float value = static_cast(inputs_[1]->Data())[0]; - switch (opParameter->type_) { + float value = static_cast(in_tensors_[1]->Data())[0]; + switch (op_parameter_->type_) { case PrimitiveType_Mul: weight_ = value; break; @@ -143,15 +142,15 @@ int ArithmeticOpenCLKernel::Run() { weight_ = 1 / value; break; default: - MS_LOG(ERROR) << "Error Operator type " << opParameter->type_; + MS_LOG(ERROR) << "Error Operator type " << op_parameter_->type_; break; } runtime_->SetKernelArg(kernel_, arg_idx++, weight_); runtime_->SetKernelArg(kernel_, arg_idx++, bias_); } - runtime_->SetKernelArg(kernel_, arg_idx++, outputs_[0]->Data()); - int H = outputs_[0]->Batch() * outputs_[0]->Height(); - int W = outputs_[0]->Width() * UP_DIV(outputs_[0]->Channel(), C4NUM); + runtime_->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->Data()); + int H = out_tensors_[0]->Batch() * out_tensors_[0]->Height(); + int W = out_tensors_[0]->Width() * UP_DIV(out_tensors_[0]->Channel(), C4NUM); cl_int2 output_shape{W, H}; runtime_->SetKernelArg(kernel_, arg_idx++, output_shape); runtime_->RunKernel(kernel_, global_size_, local_size_, nullptr); diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.cc index 536bd84b0d..0d0758d912 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.cc +++ 
b/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.cc @@ -29,14 +29,14 @@ using mindspore::schema::PrimitiveType_Concat; namespace mindspore::kernel { int ConcatOpenCLKernel::GetImageSize(size_t idx, std::vector *img_size) { - size_t CO4 = UP_DIV(outputs_[0]->Channel(), C4NUM); + size_t CO4 = UP_DIV(out_tensors_[0]->Channel(), C4NUM); size_t im_dst_x, im_dst_y; - if (inputs_[0]->GetFormat() == schema::Format_NHWC4) { - im_dst_x = outputs_[0]->Width() * CO4; - im_dst_y = outputs_[0]->Height(); + if (in_tensors_[0]->GetFormat() == schema::Format_NHWC4) { + im_dst_x = out_tensors_[0]->Width() * CO4; + im_dst_y = out_tensors_[0]->Height(); } else { - im_dst_y = outputs_[0]->Height() * CO4; - im_dst_x = outputs_[0]->Width(); + im_dst_y = out_tensors_[0]->Height() * CO4; + im_dst_x = out_tensors_[0]->Width(); } #ifdef ENABLE_FP16 size_t img_dtype = CL_HALF_FLOAT; @@ -49,11 +49,11 @@ int ConcatOpenCLKernel::GetImageSize(size_t idx, std::vector *img_size) return 1; } int ConcatOpenCLKernel::Init() { - if (inputs_[0]->shape().size() != 4) { + if (in_tensors_[0]->shape().size() != 4) { MS_LOG(ERROR) << "only support dim=4"; } - auto param = reinterpret_cast(this->opParameter); + auto param = reinterpret_cast(this->op_parameter_); MS_LOG(INFO) << "concat at axis=: " << param->axis_; if (param->axis_ != 0 && param->axis_ != 3) { MS_LOG(ERROR) << "only support axis=0 or axis=3"; @@ -62,7 +62,7 @@ int ConcatOpenCLKernel::Init() { if (param->axis_ == 0) { return 0; } - if (inputs_.size() == 2) { + if (in_tensors_.size() == 2) { std::set build_options; std::string source = concat_source_fp32; std::string program_name = "Concat"; @@ -72,7 +72,7 @@ int ConcatOpenCLKernel::Init() { ocl_runtime->BuildKernel(kernel_, program_name, kernel_name, build_options); } - if (inputs_.size() == 3) { + if (in_tensors_.size() == 3) { std::set build_options; std::string source = concat_source_fp32; std::string program_name = "Concat3input"; @@ -92,19 +92,20 @@ int ConcatOpenCLKernel::Run_axis0() { auto allocator_ = ocl_runtime->GetAllocator(); cl::CommandQueue *command_queue = ocl_runtime->GetDefaultCommandQueue(); - for (auto &tensor : inputs_) { + for (auto &tensor : in_tensors_) { auto buffer = static_cast(allocator_->GetDeviceBuffer(tensor->Data())); ocl_runtime->MapBuffer(*buffer, CL_MAP_READ, tensor->Size(), command_queue, true); } - for (auto &tensor : outputs_) { + for (auto &tensor : out_tensors_) { auto buffer = static_cast(allocator_->GetDeviceBuffer(tensor->Data())); ocl_runtime->MapBuffer(*buffer, CL_MAP_WRITE, tensor->Size(), command_queue, true); } - memcpy(outputs_[0]->Data(), inputs_[0]->Data(), inputs_[0]->Size()); - memcpy(reinterpret_cast(outputs_[0]->Data()) + inputs_[0]->Size(), inputs_[1]->Data(), inputs_[1]->Size()); + memcpy(out_tensors_[0]->Data(), in_tensors_[0]->Data(), in_tensors_[0]->Size()); + memcpy(reinterpret_cast(out_tensors_[0]->Data()) + in_tensors_[0]->Size(), in_tensors_[1]->Data(), + in_tensors_[1]->Size()); - for (auto tensors : {&inputs_, &outputs_}) { + for (auto tensors : {&in_tensors_, &out_tensors_}) { for (auto &tensor : *tensors) { auto buffer = static_cast(allocator_->GetDeviceBuffer(tensor->Data())); ocl_runtime->UnmapBuffer(*buffer, tensor->Data()); @@ -147,16 +148,16 @@ void ConcatGetWorkGroup(const std::vector &global, std::vector * local->push_back(z); } int ConcatOpenCLKernel::Run() { - MS_LOG(DEBUG) << this->Name() << " Running!"; - auto param = reinterpret_cast(this->opParameter); + MS_LOG(DEBUG) << this->name() << " Running!"; + auto param = 
reinterpret_cast(this->op_parameter_); if (param->axis_ == 0) { return Run_axis0(); } auto ocl_runtime = lite::opencl::OpenCLRuntime::GetInstance(); - auto input0_shape = inputs_[0]->shape(); - auto input1_shape = inputs_[1]->shape(); - auto output_shape = outputs_[0]->shape(); + auto input0_shape = in_tensors_[0]->shape(); + auto input1_shape = in_tensors_[1]->shape(); + auto output_shape = out_tensors_[0]->shape(); cl_int2 input0_shape2_ = {DivideRoundUp(input0_shape[3], 4), DivideRoundUp(input1_shape[3], 4)}; // change cl_int4 output_shape_ = {output_shape[0], output_shape[1], output_shape[2], DivideRoundUp(output_shape[3], 4)}; @@ -169,20 +170,20 @@ int ConcatOpenCLKernel::Run() { // ConcatGetWorkGroup(global, &local, 512); int arg_cn = 0; - if (inputs_.size() == 2) { - ocl_runtime->SetKernelArg(kernel_, arg_cn++, outputs_[0]->Data()); - ocl_runtime->SetKernelArg(kernel_, arg_cn++, inputs_[0]->Data()); - ocl_runtime->SetKernelArg(kernel_, arg_cn++, inputs_[1]->Data()); + if (in_tensors_.size() == 2) { + ocl_runtime->SetKernelArg(kernel_, arg_cn++, out_tensors_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[1]->Data()); ocl_runtime->SetKernelArg(kernel_, arg_cn++, input0_shape2_); ocl_runtime->SetKernelArg(kernel_, arg_cn++, output_shape_); - } else if (inputs_.size() == 3) { - auto input2_shape = inputs_[2]->shape(); + } else if (in_tensors_.size() == 3) { + auto input2_shape = in_tensors_[2]->shape(); cl_int3 input0_shape3_ = {DivideRoundUp(input0_shape[3], 4), DivideRoundUp(input1_shape[3], 4), DivideRoundUp(input2_shape[3], 4)}; - ocl_runtime->SetKernelArg(kernel_, arg_cn++, outputs_[0]->Data()); - ocl_runtime->SetKernelArg(kernel_, arg_cn++, inputs_[0]->Data()); - ocl_runtime->SetKernelArg(kernel_, arg_cn++, inputs_[1]->Data()); - ocl_runtime->SetKernelArg(kernel_, arg_cn++, inputs_[2]->Data()); + ocl_runtime->SetKernelArg(kernel_, arg_cn++, out_tensors_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[1]->Data()); + ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[2]->Data()); ocl_runtime->SetKernelArg(kernel_, arg_cn++, input0_shape3_); ocl_runtime->SetKernelArg(kernel_, arg_cn++, output_shape_); } diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc index b4ea3fb933..3a67a66ac5 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc @@ -31,7 +31,7 @@ using mindspore::schema::PrimitiveType_DeConv2D; namespace mindspore::kernel { int Conv2dTransposeOpenCLKernel::Init() { - ConvParameter *param = reinterpret_cast(opParameter); + ConvParameter *param = reinterpret_cast(op_parameter_); if (param->kernel_h_ != 2 || param->kernel_w_ != 2 || param->stride_h_ != 2 || param->stride_w_ != 2) { MS_LOG(ERROR) << "only support kh=kw=2 and stride_h=stride_w=2."; return 1; @@ -66,7 +66,7 @@ int Conv2dTransposeOpenCLKernel::Init() { padWeight_ = reinterpret_cast(allocator->MapBuffer(padWeight_, CL_MAP_WRITE, nullptr, true)); PadWeight(); allocator->UnmapBuffer(padWeight_); - outputs_[0]->SetFormat(schema::Format_NHWC4); + out_tensors_[0]->SetFormat(schema::Format_NHWC4); MS_LOG(DEBUG) << kernel_name << " Init Done!"; return 0; } @@ -75,14 +75,14 @@ int Conv2dTransposeOpenCLKernel::ReSize() { return 0; } 
void Conv2dTransposeOpenCLKernel::PadWeight() { // OHWI to OHWI4(I)4(O) - ConvParameter *param = reinterpret_cast(opParameter); + ConvParameter *param = reinterpret_cast(op_parameter_); int ci = param->input_channel_; int co = param->output_channel_; int kh = param->kernel_h_; int kw = param->kernel_w_; int div_ci = UP_DIV(ci, 4); int div_co = UP_DIV(co, 4); - auto origin_weight = reinterpret_cast(inputs_.at(kWeightIndex)->Data()); + auto origin_weight = reinterpret_cast(in_tensors_.at(kWeightIndex)->Data()); int index = 0; for (int co_i = 0; co_i < div_co; co_i++) { for (int kw_i = 0; kw_i < kw; kw_i++) { @@ -107,23 +107,23 @@ void Conv2dTransposeOpenCLKernel::PadWeight() { } int Conv2dTransposeOpenCLKernel::Run() { - MS_LOG(DEBUG) << this->Name() << " Running!"; - std::vector shapex = inputs_[0]->shape(); + MS_LOG(DEBUG) << this->name() << " Running!"; + std::vector shapex = in_tensors_[0]->shape(); int n = shapex[0]; if (n > 1) { MS_LOG(ERROR) << "Conv2dTranspose n > 1 not supported!"; return 1; } - ConvParameter *param = reinterpret_cast(opParameter); + ConvParameter *param = reinterpret_cast(op_parameter_); int ci = param->input_channel_; int co = param->output_channel_; int kh = param->kernel_h_; int kw = param->kernel_w_; int pad = param->pad_h_; - int oh = outputs_[0]->shape()[1]; - int ow = outputs_[0]->shape()[2]; - int h = inputs_[0]->shape()[1]; - int w = inputs_[0]->shape()[2]; + int oh = out_tensors_[0]->shape()[1]; + int ow = out_tensors_[0]->shape()[2]; + int h = in_tensors_[0]->shape()[1]; + int w = in_tensors_[0]->shape()[2]; auto ocl_runtime = lite::opencl::OpenCLRuntime::GetInstance(); cl::ImageFormat image_format; @@ -137,9 +137,9 @@ int Conv2dTransposeOpenCLKernel::Run() { } cl_int in_error_code, in_error_code_weight, in_error_code_bias, out_error_code; cl::Image2D img_x(*ocl_runtime->Context(), CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, image_format, w * ci / 4, h, 0, - inputs_[0]->Data(), &in_error_code); + in_tensors_[0]->Data(), &in_error_code); cl::Image2D img_bias(*ocl_runtime->Context(), CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, image_format, co / 4, 1, 0, - inputs_[2]->Data(), &in_error_code_bias); + in_tensors_[2]->Data(), &in_error_code_bias); cl::Image2D out_mem(*ocl_runtime->Context(), CL_MEM_WRITE_ONLY, image_format, ow * co / 4, oh, 0, nullptr, &out_error_code); // local size should less than MAX_GROUP_SIZE @@ -164,7 +164,8 @@ int Conv2dTransposeOpenCLKernel::Run() { ocl_runtime->RunKernel(kernel_, global, local, nullptr); auto origin = cl::array{0, 0, 0}; auto region = cl::array{(size_t)(ow * co / 4), (size_t)(oh), 1}; - ocl_runtime->GetDefaultCommandQueue()->enqueueReadImage(out_mem, CL_TRUE, origin, region, 0, 0, outputs_[0]->Data()); + ocl_runtime->GetDefaultCommandQueue()->enqueueReadImage(out_mem, CL_TRUE, origin, region, 0, 0, + out_tensors_[0]->Data()); return 0; } diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/convolution.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/convolution.cc index 9fc71ba389..90eb85ec4c 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/convolution.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/convolution.cc @@ -43,10 +43,10 @@ int ConvolutionOpenCLKernel::Init() { } std::string ConvolutionOpenCLKernel::CodeGen() { - auto param = reinterpret_cast(opParameter); + auto param = reinterpret_cast(op_parameter_); - auto input_tensor = inputs_[0]; - auto output_tensor = outputs_[0]; + auto input_tensor = in_tensors_[0]; + auto output_tensor = out_tensors_[0]; const size_t CI = 
input_tensor->Channel(); const size_t CI_SLICES = UP_DIV(CI, C4NUM); const size_t CI_ALIGN = UP_DIV(CI, C4NUM) * C4NUM; @@ -170,7 +170,7 @@ int ConvolutionOpenCLKernel::InitBuffer() { auto allocator = ocl_runtime->GetAllocator(); // weight: OHWI -> OHWIIO - auto weight_tensor = inputs_[1]; + auto weight_tensor = in_tensors_[1]; auto weight_shape = weight_tensor->shape(); size_t CO = weight_shape[0]; size_t KH = weight_shape[1]; @@ -202,7 +202,7 @@ int ConvolutionOpenCLKernel::InitBuffer() { allocator->UnmapBuffer(packed_weight_); // align bias - auto bias_tensor = inputs_[2]; + auto bias_tensor = in_tensors_[2]; size_t packed_bias_size = CO_SLICES * CO_TILE * sizeof(float); packed_bias_ = reinterpret_cast(allocator->Malloc(packed_bias_size)); packed_bias_ = reinterpret_cast(allocator->MapBuffer(packed_bias_, CL_MAP_WRITE, nullptr, true)); @@ -227,10 +227,10 @@ static int GetBiggestDivider(int x, int y) { int ConvolutionOpenCLKernel::GetGlobalLocal(std::vector *global, std::vector *local) { auto ocl_runtime = lite::opencl::OpenCLRuntime::GetInstance(); - auto param = reinterpret_cast(opParameter); - param->output_h_ = outputs_[0]->Height(); - param->output_w_ = outputs_[0]->Width(); - param->output_channel_ = outputs_[0]->Channel(); + auto param = reinterpret_cast(op_parameter_); + param->output_h_ = out_tensors_[0]->Height(); + param->output_w_ = out_tensors_[0]->Width(); + param->output_channel_ = out_tensors_[0]->Channel(); constexpr size_t work_group_size[] = {4, 4, 1}; auto max_work_item_sizes = ocl_runtime->GetWorkItemSize(); @@ -261,21 +261,21 @@ int ConvolutionOpenCLKernel::GetGlobalLocal(std::vector *global, std::ve } int ConvolutionOpenCLKernel::GetImageSize(size_t idx, std::vector *img_size) { - size_t CO_SLICES = UP_DIV(outputs_[0]->Channel(), C4NUM); + size_t CO_SLICES = UP_DIV(out_tensors_[0]->Channel(), C4NUM); size_t im_dst_x, im_dst_y; - if (inputs_[0]->GetFormat() == schema::Format_NHWC4) { - if (outputs_[0]->Width() * CO_SLICES < 65536) { + if (in_tensors_[0]->GetFormat() == schema::Format_NHWC4) { + if (out_tensors_[0]->Width() * CO_SLICES < 65536) { { - im_dst_x = outputs_[0]->Width() * CO_SLICES; - im_dst_y = outputs_[0]->Height(); + im_dst_x = out_tensors_[0]->Width() * CO_SLICES; + im_dst_y = out_tensors_[0]->Height(); } } else { - im_dst_x = outputs_[0]->Height() * CO_SLICES; - im_dst_y = outputs_[0]->Width(); + im_dst_x = out_tensors_[0]->Height() * CO_SLICES; + im_dst_y = out_tensors_[0]->Width(); } } else { - im_dst_y = outputs_[0]->Height() * CO_SLICES; - im_dst_x = outputs_[0]->Width(); + im_dst_y = out_tensors_[0]->Height() * CO_SLICES; + im_dst_x = out_tensors_[0]->Width(); } #ifdef ENABLE_FP16 size_t img_dtype = CL_HALF_FLOAT; @@ -290,14 +290,14 @@ int ConvolutionOpenCLKernel::GetImageSize(size_t idx, std::vector *img_s } int ConvolutionOpenCLKernel::Run() { - MS_LOG(DEBUG) << this->Name() << " Running!"; + MS_LOG(DEBUG) << this->name() << " Running!"; auto ocl_runtime = lite::opencl::OpenCLRuntime::GetInstance(); int arg_cn = 0; - ocl_runtime->SetKernelArg(kernel_, arg_cn++, inputs_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, arg_cn++, in_tensors_[0]->Data()); ocl_runtime->SetKernelArg(kernel_, arg_cn++, packed_weight_); ocl_runtime->SetKernelArg(kernel_, arg_cn++, packed_bias_); - ocl_runtime->SetKernelArg(kernel_, arg_cn++, outputs_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, arg_cn++, out_tensors_[0]->Data()); std::vector global; std::vector local; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc 
b/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc index 58c343ac90..401a95afe8 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc @@ -42,8 +42,8 @@ namespace mindspore::kernel { int DepthwiseConv2dOpenCLKernel::Init() { auto ocl_runtime = lite::opencl::OpenCLRuntime::GetInstance(); std::string kernel_name = "DepthwiseConv2d"; - auto in_format = inputs_[0]->GetFormat(); - outputs_[0]->SetFormat(in_format); + auto in_format = in_tensors_[0]->GetFormat(); + out_tensors_[0]->SetFormat(in_format); if (in_format != schema::Format_NHWC4 && in_format != schema::Format_NC4HW4) { MS_LOG(ERROR) << "input format(" << in_format << ") " << "format not support!"; @@ -58,7 +58,7 @@ int DepthwiseConv2dOpenCLKernel::Init() { } else if (in_format == schema::Format_NHWC4) { kernel_name += "_NHWC4"; } - auto parameter = reinterpret_cast(opParameter); + auto parameter = reinterpret_cast(op_parameter_); if (parameter->kernel_h_ == 1) { kernel_name += "_1x1"; } @@ -81,37 +81,37 @@ int DepthwiseConv2dOpenCLKernel::Init() { } int DepthwiseConv2dOpenCLKernel::InitBuffer() { - auto parameter = reinterpret_cast(opParameter); + auto parameter = reinterpret_cast(op_parameter_); auto ocl_runtime = lite::opencl::OpenCLRuntime::GetInstance(); auto allocator = ocl_runtime->GetAllocator(); // weight: o, h, w, i; o == group, i == 1 - auto origin_weight = reinterpret_cast(inputs_.at(kWeightIndex)->Data()); - int CO4 = UP_DIV(outputs_[0]->Channel(), C4NUM); + auto origin_weight = reinterpret_cast(in_tensors_.at(kWeightIndex)->Data()); + int CO4 = UP_DIV(out_tensors_[0]->Channel(), C4NUM); int pack_weight_size = C4NUM * CO4 * parameter->kernel_h_ * parameter->kernel_w_; packed_weight_ = reinterpret_cast(allocator->Malloc(pack_weight_size * sizeof(FLOAT_t))); packed_weight_ = reinterpret_cast(allocator->MapBuffer(packed_weight_, CL_MAP_WRITE, nullptr, true)); int plane = parameter->kernel_h_ * parameter->kernel_w_; #ifdef ENABLE_FP16 - PackNCHWToNC4HW4Fp16(origin_weight, packed_weight_, 1, plane, outputs_[0]->Channel()); + PackNCHWToNC4HW4Fp16(origin_weight, packed_weight_, 1, plane, out_tensors_[0]->Channel()); #else - PackNCHWToNC4HW4Fp32(origin_weight, packed_weight_, 1, plane, outputs_[0]->Channel()); + PackNCHWToNC4HW4Fp32(origin_weight, packed_weight_, 1, plane, out_tensors_[0]->Channel()); #endif allocator->UnmapBuffer(packed_weight_); // init bias - if (inputs_.size() == kInputSize2) { + if (in_tensors_.size() == kInputSize2) { bias_data_ = reinterpret_cast(allocator->Malloc(C4NUM * CO4 * sizeof(FLOAT_t))); bias_data_ = reinterpret_cast(allocator->MapBuffer(bias_data_, CL_MAP_WRITE, nullptr, true)); size_t up_co_size = C4NUM * CO4 * sizeof(FLOAT_t); memset(bias_data_, 0, up_co_size); - auto ori_bias = reinterpret_cast(inputs_.at(kBiasIndex)->Data()); - memcpy(bias_data_, ori_bias, outputs_[0]->Channel() * sizeof(FLOAT_t)); + auto ori_bias = reinterpret_cast(in_tensors_.at(kBiasIndex)->Data()); + memcpy(bias_data_, ori_bias, out_tensors_[0]->Channel() * sizeof(FLOAT_t)); allocator->UnmapBuffer(bias_data_); } else { - MS_ASSERT(inputs_.size() == kInputSize1); + MS_ASSERT(in_tensors_.size() == kInputSize1); } return RET_OK; } @@ -119,14 +119,14 @@ int DepthwiseConv2dOpenCLKernel::InitBuffer() { int DepthwiseConv2dOpenCLKernel::ReSize() { return RET_OK; } int DepthwiseConv2dOpenCLKernel::GetImageSize(size_t idx, std::vector *img_size) { - size_t CO4 = UP_DIV(outputs_[0]->Channel(), C4NUM); + size_t CO4 = 
UP_DIV(out_tensors_[0]->Channel(), C4NUM); size_t im_dst_x, im_dst_y; - if (inputs_[0]->GetFormat() == schema::Format_NHWC4) { - im_dst_x = outputs_[0]->Width() * CO4; - im_dst_y = outputs_[0]->Height(); + if (in_tensors_[0]->GetFormat() == schema::Format_NHWC4) { + im_dst_x = out_tensors_[0]->Width() * CO4; + im_dst_y = out_tensors_[0]->Height(); } else { - im_dst_y = outputs_[0]->Height() * CO4; - im_dst_x = outputs_[0]->Width(); + im_dst_y = out_tensors_[0]->Height() * CO4; + im_dst_x = out_tensors_[0]->Width(); } #ifdef ENABLE_FP16 size_t img_dtype = CL_HALF_FLOAT; @@ -140,27 +140,27 @@ int DepthwiseConv2dOpenCLKernel::GetImageSize(size_t idx, std::vector *i } int DepthwiseConv2dOpenCLKernel::GetGlobalSize(size_t idx, std::vector *global_size) { - size_t CO4 = UP_DIV(outputs_[0]->Channel(), C4NUM); - std::vector global = {(size_t)outputs_[0]->Width(), (size_t)outputs_[0]->Height(), CO4}; + size_t CO4 = UP_DIV(out_tensors_[0]->Channel(), C4NUM); + std::vector global = {(size_t)out_tensors_[0]->Width(), (size_t)out_tensors_[0]->Height(), CO4}; *global_size = std::move(global); return RET_OK; } int DepthwiseConv2dOpenCLKernel::GetLocalSize(size_t idx, const std::vector &global_size, std::vector *local_size) { - size_t CO4 = UP_DIV(outputs_[0]->Channel(), C4NUM); + size_t CO4 = UP_DIV(out_tensors_[0]->Channel(), C4NUM); std::vector local = {1, 1, CO4}; *local_size = std::move(local); return RET_OK; } int DepthwiseConv2dOpenCLKernel::Run() { - MS_LOG(DEBUG) << this->Name() << " Running!"; - auto parameter = reinterpret_cast(opParameter); + MS_LOG(DEBUG) << this->name() << " Running!"; + auto parameter = reinterpret_cast(op_parameter_); auto ocl_runtime = lite::opencl::OpenCLRuntime::GetInstance(); - size_t CO4 = UP_DIV(outputs_[0]->Channel(), C4NUM); - size_t CI4 = UP_DIV(inputs_[0]->Channel(), C4NUM); - std::vector global = {(size_t)outputs_[0]->Width(), (size_t)outputs_[0]->Height(), CO4}; + size_t CO4 = UP_DIV(out_tensors_[0]->Channel(), C4NUM); + size_t CI4 = UP_DIV(in_tensors_[0]->Channel(), C4NUM); + std::vector global = {(size_t)out_tensors_[0]->Width(), (size_t)out_tensors_[0]->Height(), CO4}; std::vector local; GetLocalSize(0, global, &local); @@ -169,9 +169,9 @@ int DepthwiseConv2dOpenCLKernel::Run() { cl_int2 stride = {parameter->stride_h_, parameter->stride_w_}; cl_int2 padding = {-parameter->pad_h_, -parameter->pad_w_}; cl_int2 dilation = {parameter->dilation_h_, parameter->dilation_w_}; - cl_int4 src_size = {inputs_[0]->Width(), inputs_[0]->Height(), (cl_int)CI4, inputs_[0]->Batch()}; - cl_int4 dst_size = {(cl_int)outputs_[0]->Width(), (cl_int)outputs_[0]->Height(), (cl_int)CO4, - (cl_int)outputs_[0]->Batch()}; + cl_int4 src_size = {in_tensors_[0]->Width(), in_tensors_[0]->Height(), (cl_int)CI4, in_tensors_[0]->Batch()}; + cl_int4 dst_size = {(cl_int)out_tensors_[0]->Width(), (cl_int)out_tensors_[0]->Height(), (cl_int)CO4, + (cl_int)out_tensors_[0]->Batch()}; ocl_runtime->SetKernelArg(kernel_, 1, packed_weight_); ocl_runtime->SetKernelArg(kernel_, 2, bias_data_); @@ -182,8 +182,8 @@ int DepthwiseConv2dOpenCLKernel::Run() { ocl_runtime->SetKernelArg(kernel_, 8, dilation); ocl_runtime->SetKernelArg(kernel_, 9, src_size); ocl_runtime->SetKernelArg(kernel_, 10, dst_size); - ocl_runtime->SetKernelArg(kernel_, 0, inputs_[0]->Data()); - ocl_runtime->SetKernelArg(kernel_, 4, outputs_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, 0, in_tensors_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, 4, out_tensors_[0]->Data()); ocl_runtime->RunKernel(kernel_, global, local, nullptr); 
return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc index cef0926aed..674e3c0b86 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc @@ -50,14 +50,14 @@ int MatMulOpenCLKernel::Init() { ocl_runtime->LoadSource(program_name, source); ocl_runtime->BuildKernel(kernel_, program_name, kernel_name, build_options); #endif - auto weight_format = inputs_[1]->GetFormat(); + auto weight_format = in_tensors_[1]->GetFormat(); if (weight_format != schema::Format_NHWC) { MS_LOG(ERROR) << "weight format(" << weight_format << ") " << "format not support!"; return 1; } - int ci = inputs_[1]->shape()[3]; - int co = inputs_[1]->shape()[0]; + int ci = in_tensors_[1]->shape()[3]; + int co = in_tensors_[1]->shape()[0]; sizeCI = {ci, UP_DIV(ci, 4)}; sizeCO = {co, UP_DIV(co, 4)}; auto allocator = ocl_runtime->GetAllocator(); @@ -68,7 +68,7 @@ int MatMulOpenCLKernel::Init() { PadWeight(); allocator->UnmapBuffer(padWeight_); allocator->UnmapBuffer(bias_); - outputs_[0]->SetFormat(schema::Format_NHWC4); + out_tensors_[0]->SetFormat(schema::Format_NHWC4); MS_LOG(DEBUG) << kernel_name << " Init Done!"; return 0; } @@ -76,7 +76,7 @@ int MatMulOpenCLKernel::Init() { int MatMulOpenCLKernel::ReSize() { return 0; } void MatMulOpenCLKernel::PadWeight() { - auto origin_weight = reinterpret_cast(inputs_.at(kWeightIndex)->Data()); + auto origin_weight = reinterpret_cast(in_tensors_.at(kWeightIndex)->Data()); int divCI = sizeCI.s[1]; int divCO = sizeCO.s[1]; int index = 0; @@ -96,7 +96,7 @@ void MatMulOpenCLKernel::PadWeight() { } } if (hasBias_) { - memcpy(bias_, inputs_[2]->Data(), sizeof(FLOAT_T) * sizeCO.s[0]); + memcpy(bias_, in_tensors_[2]->Data(), sizeof(FLOAT_T) * sizeCO.s[0]); for (int i = sizeCO.s[0]; i < sizeCO.s[1] * 4; i++) { bias_[i] = 0; } @@ -108,8 +108,8 @@ void MatMulOpenCLKernel::PadWeight() { } int MatMulOpenCLKernel::Run() { - MS_LOG(DEBUG) << this->Name() << " Running!"; - std::vector shapex = inputs_[0]->shape(); + MS_LOG(DEBUG) << this->name() << " Running!"; + std::vector shapex = in_tensors_[0]->shape(); int n = shapex[0]; if (n > 1) { MS_LOG(ERROR) << "MatMul n > 1 not supported!"; @@ -131,7 +131,7 @@ int MatMulOpenCLKernel::Run() { } cl_int in_error_code, in_error_code_weight, in_error_code_bias, out_error_code; cl::Image2D img_input(*ocl_runtime->Context(), CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, image_format, sizeCI.s[1], 1, - 0, inputs_[0]->Data(), &in_error_code); + 0, in_tensors_[0]->Data(), &in_error_code); cl::Image2D img_bias(*ocl_runtime->Context(), CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, image_format, sizeCO.s[1], 1, 0, bias_, &in_error_code_bias); cl::Image2D img_out(*ocl_runtime->Context(), CL_MEM_WRITE_ONLY, image_format, sizeCO.s[1], 1, 0, nullptr, @@ -147,7 +147,8 @@ int MatMulOpenCLKernel::Run() { ocl_runtime->RunKernel(kernel_, global, local, nullptr); auto origin = cl::array{0, 0, 0}; auto region = cl::array{(size_t)(sizeCO.s[1]), 1, 1}; - ocl_runtime->GetDefaultCommandQueue()->enqueueReadImage(img_out, CL_TRUE, origin, region, 0, 0, outputs_[0]->Data()); + ocl_runtime->GetDefaultCommandQueue()->enqueueReadImage(img_out, CL_TRUE, origin, region, 0, 0, + out_tensors_[0]->Data()); return 0; } diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.cc index 0e7d8dc27c..5d5c0cfb4d 100644 --- 
a/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.cc @@ -73,25 +73,25 @@ int PoolingOpenCLKernel::Init() { ocl_runtime->LoadSource(program_name, source); ocl_runtime->BuildKernel(kernel_, program_name, kernel_name, build_options); #endif - outputs_[0]->SetFormat(schema::Format_NHWC4); + out_tensors_[0]->SetFormat(schema::Format_NHWC4); MS_LOG(DEBUG) << kernel_name << " Init Done!"; return RET_OK; } std::vector PoolingOpenCLKernel::InitGlobalSize() const { - const size_t global_x = outputs_[0]->Height(); - const size_t global_y = outputs_[0]->Width(); - const size_t global_z = UP_DIV(outputs_[0]->Channel(), C4NUM); + const size_t global_x = out_tensors_[0]->Height(); + const size_t global_y = out_tensors_[0]->Width(); + const size_t global_z = UP_DIV(out_tensors_[0]->Channel(), C4NUM); std::vector global = {global_x, global_y, global_z}; return global; } int PoolingOpenCLKernel::GetImageSize(size_t idx, std::vector *img_size) { - size_t CO4 = UP_DIV(outputs_[0]->Channel(), C4NUM); + size_t CO4 = UP_DIV(out_tensors_[0]->Channel(), C4NUM); size_t im_dst_x, im_dst_y; - im_dst_x = outputs_[0]->Width() * CO4; - im_dst_y = outputs_[0]->Height(); + im_dst_x = out_tensors_[0]->Width() * CO4; + im_dst_y = out_tensors_[0]->Height(); #ifdef ENABLE_FP16 size_t img_dtype = CL_HALF_FLOAT; #else @@ -108,21 +108,21 @@ int PoolingOpenCLKernel::InitBuffer() { return RET_OK; } int PoolingOpenCLKernel::ReSize() { return RET_OK; } int PoolingOpenCLKernel::Run() { - MS_LOG(DEBUG) << this->Name() << " Running!"; + MS_LOG(DEBUG) << this->name() << " Running!"; auto ocl_runtime = lite::opencl::OpenCLRuntime::GetInstance(); // attribute - int slices = UP_DIV(outputs_[0]->Channel(), C4NUM); - cl_int4 input_shape = {inputs_[0]->Height(), inputs_[0]->Width(), inputs_[0]->Channel(), slices}; - cl_int4 output_shape = {outputs_[0]->Height(), outputs_[0]->Width(), outputs_[0]->Channel(), slices}; + int slices = UP_DIV(out_tensors_[0]->Channel(), C4NUM); + cl_int4 input_shape = {in_tensors_[0]->Height(), in_tensors_[0]->Width(), in_tensors_[0]->Channel(), slices}; + cl_int4 output_shape = {out_tensors_[0]->Height(), out_tensors_[0]->Width(), out_tensors_[0]->Channel(), slices}; cl_int2 stride = {parameter_->stride_h_, parameter_->stride_w_}; cl_int2 kernel_size = {parameter_->window_h_, parameter_->window_w_}; cl_int2 padding = {parameter_->pad_u_, parameter_->pad_l_}; // binding parameters int arg_idx = 0; - ocl_runtime->SetKernelArg(kernel_, arg_idx++, inputs_[0]->Data()); - ocl_runtime->SetKernelArg(kernel_, arg_idx++, outputs_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, arg_idx++, in_tensors_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, arg_idx++, out_tensors_[0]->Data()); ocl_runtime->SetKernelArg(kernel_, arg_idx++, input_shape); ocl_runtime->SetKernelArg(kernel_, arg_idx++, output_shape); ocl_runtime->SetKernelArg(kernel_, arg_idx++, stride); diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc index 10e6176ff5..f8389b3449 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc @@ -46,7 +46,7 @@ int SoftmaxOpenCLKernel::Init() { ocl_runtime->LoadSource(program_name, source); ocl_runtime->BuildKernel(kernel_, program_name, kernel_name, build_options); #endif - outputs_[0]->SetFormat(schema::Format_NHWC4); + out_tensors_[0]->SetFormat(schema::Format_NHWC4); MS_LOG(DEBUG) << 
kernel_name << " Init Done!"; return 0; } @@ -55,21 +55,22 @@ int SoftmaxOpenCLKernel::InitBuffer() { return 0; } int SoftmaxOpenCLKernel::ReSize() { return 0; } int SoftmaxOpenCLKernel::Run() { - MS_LOG(DEBUG) << this->Name() << " Running!"; + MS_LOG(DEBUG) << this->name() << " Running!"; auto ocl_runtime = lite::opencl::OpenCLRuntime::GetInstance(); auto allocator = ocl_runtime->GetAllocator(); // global and local workers - const uint32_t grid_x = inputs_[0]->shape()[2]; // W - const uint32_t grid_y = inputs_[0]->shape()[1]; // H + const uint32_t grid_x = in_tensors_[0]->shape()[2]; // W + const uint32_t grid_y = in_tensors_[0]->shape()[1]; // H const uint32_t grid_z = 1; std::vector global = {grid_x, grid_y, grid_z}; std::vector local = {1, 1, 1}; // input and output - cl::Buffer *input = reinterpret_cast(allocator->GetDeviceBuffer(inputs_[0]->Data())); - cl::Buffer *output = reinterpret_cast(allocator->GetDeviceBuffer(outputs_[0]->Data())); - cl_int4 input_size = {inputs_[0]->shape()[0], inputs_[0]->shape()[1], inputs_[0]->shape()[2], inputs_[0]->shape()[3]}; + cl::Buffer *input = reinterpret_cast(allocator->GetDeviceBuffer(in_tensors_[0]->Data())); + cl::Buffer *output = reinterpret_cast(allocator->GetDeviceBuffer(out_tensors_[0]->Data())); + cl_int4 input_size = {in_tensors_[0]->shape()[0], in_tensors_[0]->shape()[1], in_tensors_[0]->shape()[2], + in_tensors_[0]->shape()[3]}; int arg_idx = 0; ocl_runtime->SetKernelArg(kernel_, arg_idx++, *input); ocl_runtime->SetKernelArg(kernel_, arg_idx++, *output); diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.cc index 677f99739b..246597e917 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.cc @@ -50,17 +50,17 @@ int TransposeOpenCLKernel::Init() { ocl_runtime->LoadSource(program_name, source); ocl_runtime->BuildKernel(kernel_, program_name, kernel_name, build_options); #endif - auto input_format = inputs_[0]->GetFormat(); + auto input_format = in_tensors_[0]->GetFormat(); if (input_format != schema::Format_NHWC4) { MS_LOG(ERROR) << "input format(" << input_format << ") " << "format not support!"; return RET_ERROR; } - if ((inputs_[0]->Height() * inputs_[0]->Width()) % 4 != 0) { + if ((in_tensors_[0]->Height() * in_tensors_[0]->Width()) % 4 != 0) { MS_LOG(ERROR) << "input H * W % 4 != 0 not support!"; return RET_ERROR; } - outputs_[0]->SetFormat(schema::Format_NCHW); + out_tensors_[0]->SetFormat(schema::Format_NCHW); MS_LOG(DEBUG) << kernel_name << " Init Done!"; return RET_OK; } @@ -69,8 +69,8 @@ int TransposeOpenCLKernel::ReSize() { return 0; } int TransposeOpenCLKernel::GetImageSize(size_t idx, std::vector *img_size) { size_t im_dst_x, im_dst_y; - im_dst_x = UP_DIV(outputs_[0]->Height() * outputs_[0]->Width(), C4NUM); - im_dst_y = outputs_[0]->Channel(); + im_dst_x = UP_DIV(out_tensors_[0]->Height() * out_tensors_[0]->Width(), C4NUM); + im_dst_y = out_tensors_[0]->Channel(); #ifdef ENABLE_FP16 size_t img_dtype = CL_HALF_FLOAT; #else @@ -83,8 +83,8 @@ int TransposeOpenCLKernel::GetImageSize(size_t idx, std::vector *img_siz } int TransposeOpenCLKernel::Run() { - MS_LOG(DEBUG) << this->Name() << " Running!"; - std::vector shapex = inputs_[0]->shape(); + MS_LOG(DEBUG) << this->name() << " Running!"; + std::vector shapex = in_tensors_[0]->shape(); int h = shapex[1]; int w = shapex[2]; int c = shapex[3]; @@ -97,8 +97,8 @@ int TransposeOpenCLKernel::Run() { cl_int2 HW = {h * w, 
hw4}; cl_int2 C = {c, c4}; - ocl_runtime->SetKernelArg(kernel_, 0, inputs_[0]->Data()); - ocl_runtime->SetKernelArg(kernel_, 1, outputs_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, 0, in_tensors_[0]->Data()); + ocl_runtime->SetKernelArg(kernel_, 1, out_tensors_[0]->Data()); ocl_runtime->SetKernelArg(kernel_, 2, HW); ocl_runtime->SetKernelArg(kernel_, 3, C); ocl_runtime->RunKernel(kernel_, global, local, nullptr); diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.h index f79f9a273a..e5f5ecdc2f 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.h @@ -23,7 +23,6 @@ #include "src/runtime/opencl/opencl_runtime.h" #include "src/runtime/kernel/opencl/opencl_kernel.h" - namespace mindspore::kernel { class TransposeOpenCLKernel : public OpenCLKernel { public: @@ -36,6 +35,7 @@ class TransposeOpenCLKernel : public OpenCLKernel { int ReSize() override; int Run() override; int GetImageSize(size_t idx, std::vector *img_size) override; + private: cl::Kernel kernel_; }; diff --git a/mindspore/lite/src/runtime/opencl/opencl_executor.cc b/mindspore/lite/src/runtime/opencl/opencl_executor.cc index fec99cd4c0..1103281ba7 100644 --- a/mindspore/lite/src/runtime/opencl/opencl_executor.cc +++ b/mindspore/lite/src/runtime/opencl/opencl_executor.cc @@ -44,7 +44,7 @@ int OpenCLExecutor::Run(std::vector &inputs, std::vector(kernel); - auto &outputs = kernel->GetOutputs(); + auto &outputs = kernel->out_tensors(); for (auto i = 0; i < outputs.size(); ++i) { auto *output = outputs.at(i); MS_ASSERT(nullptr != output); @@ -59,29 +59,29 @@ int OpenCLExecutor::Run(std::vector &inputs, std::vectorset_allocator(allocator); } session::CallBackParam callbackParam; - callbackParam.name_callback_param = kernel->Name(); + callbackParam.name_callback_param = kernel->name(); if (before != nullptr) { - if (!before(PackToMSTensors(kernel->GetInputs()), PackToMSTensors(kernel->GetOutputs()), callbackParam)) { - MS_LOG(ERROR) << "run kernel before_callback failed, name: " << kernel->Name(); + if (!before(PackToMSTensors(kernel->in_tensors()), PackToMSTensors(kernel->out_tensors()), callbackParam)) { + MS_LOG(ERROR) << "run kernel before_callback failed, name: " << kernel->name(); } } auto ret = kernel->Run(); if (0 != ret) { - MS_LOG(ERROR) << "run kernel failed, name: " << kernel->Name(); + MS_LOG(ERROR) << "run kernel failed, name: " << kernel->name(); return ret; } if (after != nullptr) { - if (!after(PackToMSTensors(kernel->GetInputs()), PackToMSTensors(kernel->GetOutputs()), callbackParam)) { - MS_LOG(ERROR) << "run kernel after_callback failed, name: " << kernel->Name(); + if (!after(PackToMSTensors(kernel->in_tensors()), PackToMSTensors(kernel->out_tensors()), callbackParam)) { + MS_LOG(ERROR) << "run kernel after_callback failed, name: " << kernel->name(); } } - for (auto input_kernel : kernel->GetInKernels()) { + for (auto input_kernel : kernel->in_kernels()) { MS_EXCEPTION_IF_NULL(input_kernel); ret = input_kernel->DecOutTensorRefCount(); if (0 != ret) { - MS_LOG(WARNING) << "DecOutTensorRefCount for kernel" << kernel->Name() << " failed"; + MS_LOG(WARNING) << "DecOutTensorRefCount for kernel" << kernel->name() << " failed"; } } } diff --git a/mindspore/lite/src/scheduler.cc b/mindspore/lite/src/scheduler.cc index 426c656b06..39917b91a6 100644 --- a/mindspore/lite/src/scheduler.cc +++ b/mindspore/lite/src/scheduler.cc @@ -107,10 +107,10 @@ void 
Scheduler::ConstructSubgraphs(std::vector *kernels) { std::vector sub_kernels; std::vector> sub_kernels_list; - kernel::KERNEL_ARCH prev_arch = kernels->front()->Desc().arch; + kernel::KERNEL_ARCH prev_arch = kernels->front()->desc().arch; for (uint32_t i = 0; i < kernel_count; ++i) { auto curr_kernel = kernels->at(i); - auto curr_arch = curr_kernel->Desc().arch; + auto curr_arch = curr_kernel->desc().arch; if (curr_arch == prev_arch) { sub_kernels.emplace_back(curr_kernel); } @@ -124,10 +124,10 @@ void Scheduler::ConstructSubgraphs(std::vector *kernels) { std::vector subgraph_kernels; for (auto temp_kernels : sub_kernels_list) { - kernel::KERNEL_ARCH arch = temp_kernels.front()->Desc().arch; + kernel::KERNEL_ARCH arch = temp_kernels.front()->desc().arch; if (arch == kernel::KERNEL_ARCH::kCPU) { for (auto kernel : temp_kernels) { - for (auto tensor : kernel->GetOutputs()) { + for (auto tensor : kernel->out_tensors()) { tensor->set_allocator(context_->allocator.get()); } } @@ -152,12 +152,12 @@ kernel::LiteKernel *Scheduler::CreateSubKernel(const std::vector output_kernels{tail_kernel}; std::vector input_tensors; std::vector output_tensors; - for (auto tensor : head_kernel->GetInputs()) { + for (auto tensor : head_kernel->in_tensors()) { if (tensor->Data() == nullptr) { input_tensors.emplace_back(tensor); } } - for (auto tensor : tail_kernel->GetOutputs()) { + for (auto tensor : tail_kernel->out_tensors()) { if (tensor->Data() == nullptr) { output_tensors.emplace_back(tensor); } @@ -178,20 +178,16 @@ kernel::LiteKernel *Scheduler::CreateSubKernel(const std::vector &kernels) { return 0; } - -int Scheduler::MergeKernels(std::vector *kernels) { return 0; } - -kernel::LiteKernel *Scheduler::ScheduleNode(const std::vector &inputs, - const std::vector &outputs, +kernel::LiteKernel *Scheduler::ScheduleNode(const std::vector &in_tensors, + const std::vector &out_tensors, const lite::Primitive *primitive) { // todo: support NPU, APU MS_ASSERT(nullptr != primitive); - auto data_type = inputs.front()->data_type(); + auto data_type = in_tensors.front()->data_type(); kernel::KernelKey desc{kernel::KERNEL_ARCH::kCPU, data_type, primitive->Type()}; if (context_->device_ctx_.type == DT_GPU) { desc.arch = kernel::KERNEL_ARCH::kGPU; - auto *kernel = KernelFactory::GetInstance()->GetKernel(inputs, outputs, primitive, context_, desc); + auto *kernel = KernelFactory::GetInstance()->GetKernel(in_tensors, out_tensors, primitive, context_, desc); if (nullptr != kernel) { kernel->set_desc(desc); return kernel; @@ -203,16 +199,16 @@ kernel::LiteKernel *Scheduler::ScheduleNode(const std::vector if (data_type == kNumberTypeFloat32) { // check if support fp16 kernel::KernelKey key{desc.arch, kNumberTypeFloat16, desc.type}; - kernel = KernelFactory::GetInstance()->GetKernel(inputs, outputs, primitive, context_, key); + kernel = KernelFactory::GetInstance()->GetKernel(in_tensors, out_tensors, primitive, context_, key); if (kernel != nullptr) { MS_LOG(DEBUG) << "Get fp16 op success."; kernel->set_desc(desc); return kernel; } MS_LOG(DEBUG) << "Get fp16 op failed, back to fp32 op."; - kernel = KernelFactory::GetInstance()->GetKernel(inputs, outputs, primitive, context_, desc); + kernel = KernelFactory::GetInstance()->GetKernel(in_tensors, out_tensors, primitive, context_, desc); } else { - kernel = KernelFactory::GetInstance()->GetKernel(inputs, outputs, primitive, context_, desc); + kernel = KernelFactory::GetInstance()->GetKernel(in_tensors, out_tensors, primitive, context_, desc); } if (kernel != nullptr) { 
kernel->set_desc(desc); diff --git a/mindspore/lite/src/scheduler.h b/mindspore/lite/src/scheduler.h index 38a8cca466..4aed217c99 100644 --- a/mindspore/lite/src/scheduler.h +++ b/mindspore/lite/src/scheduler.h @@ -25,19 +25,13 @@ namespace mindspore::lite { class Scheduler { public: - explicit Scheduler(const Context *ctx) { - context_ = const_cast(ctx); - } + explicit Scheduler(const Context *ctx) { context_ = const_cast(ctx); } int Schedule(const lite::Model *model, std::vector *tensors, std::vector *kernels); protected: - kernel::LiteKernel *ScheduleNode(const std::vector &inputs, - const std::vector &outputs, const lite::Primitive *primitive); - // find schedule able kernels and save in markedKernelGroup - int MarkKernels(const std::vector &kernels); - // use SubGraphKernel to replace group in kernels - int MergeKernels(std::vector *kernels); + kernel::LiteKernel *ScheduleNode(const std::vector &in_tensors, + const std::vector &out_tensors, const lite::Primitive *primitive); private: int InitOp2Kernel(const lite::Model *model, std::vector *tensors, @@ -49,7 +43,6 @@ class Scheduler { kernel::LiteKernel *CreateSubKernel(const std::vector &kernels, kernel::KERNEL_ARCH arch); protected: - std::vector> markedKernelGroup; Context *context_ = nullptr; }; } // namespace mindspore::lite diff --git a/mindspore/lite/test/common/common_test.cc b/mindspore/lite/test/common/common_test.cc index 1c4b699619..2b7444134d 100644 --- a/mindspore/lite/test/common/common_test.cc +++ b/mindspore/lite/test/common/common_test.cc @@ -24,13 +24,13 @@ extern "C" { namespace mindspore { -void Common::SetUpTestCase() {} +void CommonTest::SetUpTestCase() {} -void Common::TearDownTestCase() {} +void CommonTest::TearDownTestCase() {} -void Common::SetUp() {} +void CommonTest::SetUp() {} -void Common::TearDown() {} +void CommonTest::TearDown() {} } // namespace mindspore diff --git a/mindspore/lite/test/common/common_test.h b/mindspore/lite/test/common/common_test.h index 09fb487f15..1c848ed724 100644 --- a/mindspore/lite/test/common/common_test.h +++ b/mindspore/lite/test/common/common_test.h @@ -23,7 +23,7 @@ #include #include "gtest/gtest.h" namespace mindspore { -class Common : public testing::Test { +class CommonTest : public testing::Test { public: // TestCase only enter once static void SetUpTestCase(); diff --git a/mindspore/lite/test/st/benchmark_test.cc b/mindspore/lite/test/st/benchmark_test.cc index 7454565a04..bc28a4a79a 100644 --- a/mindspore/lite/test/st/benchmark_test.cc +++ b/mindspore/lite/test/st/benchmark_test.cc @@ -20,7 +20,7 @@ namespace mindspore { namespace lite { -class BenchmarkTest : public mindspore::Common { +class BenchmarkTest : public mindspore::CommonTest { public: BenchmarkTest() {} }; diff --git a/mindspore/lite/test/st/converter_test.cc b/mindspore/lite/test/st/converter_test.cc index 25754f6da4..31a6669103 100644 --- a/mindspore/lite/test/st/converter_test.cc +++ b/mindspore/lite/test/st/converter_test.cc @@ -20,7 +20,7 @@ namespace mindspore { namespace lite { -class ConverterTest : public mindspore::Common { +class ConverterTest : public mindspore::CommonTest { public: ConverterTest() {} }; diff --git a/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc b/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc index a2b6bd5f89..6d2505f957 100644 --- a/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc +++ b/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc @@ -30,7 +30,7 @@ using Tensor = mindspore::dataset::Tensor; using DataType = mindspore::dataset::DataType; using 
diff --git a/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc b/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc
index a2b6bd5f89..6d2505f957 100644
--- a/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc
+++ b/mindspore/lite/test/ut/src/dataset/de_tensor_test.cc
@@ -30,7 +30,7 @@
 using Tensor = mindspore::dataset::Tensor;
 using DataType = mindspore::dataset::DataType;
 using TensorShape = mindspore::dataset::TensorShape;
-class MindDataTestTensorDE : public mindspore::Common {
+class MindDataTestTensorDE : public mindspore::CommonTest {
  public:
   MindDataTestTensorDE() {}
 };
diff --git a/mindspore/lite/test/ut/src/dataset/eager_test.cc b/mindspore/lite/test/ut/src/dataset/eager_test.cc
index 2621bd99b0..a4c31ac8e2 100644
--- a/mindspore/lite/test/ut/src/dataset/eager_test.cc
+++ b/mindspore/lite/test/ut/src/dataset/eager_test.cc
@@ -30,7 +30,7 @@
 using mindspore::dataset::api::vision::Resize;
 using Execute = mindspore::dataset::api::Execute;
 using Path = mindspore::dataset::Path;
-class MindDataTestEager : public mindspore::Common {
+class MindDataTestEager : public mindspore::CommonTest {
  public:
   MindDataTestEager() {}
 };
diff --git a/mindspore/lite/test/ut/src/graph_test.cc b/mindspore/lite/test/ut/src/graph_test.cc
index c1fa4cbd8e..8cadde3cca 100644
--- a/mindspore/lite/test/ut/src/graph_test.cc
+++ b/mindspore/lite/test/ut/src/graph_test.cc
@@ -28,7 +28,7 @@
 #include "mindspore/lite/schema/inner/anf_ir_generated.h"
 namespace mindspore {
-class TestLiteInference : public mindspore::Common {
+class TestLiteInference : public mindspore::CommonTest {
  public:
   TestLiteInference() {}
 };
diff --git a/mindspore/lite/test/ut/src/infer_test.cc b/mindspore/lite/test/ut/src/infer_test.cc
index 04c30e241b..47c58abdf8 100644
--- a/mindspore/lite/test/ut/src/infer_test.cc
+++ b/mindspore/lite/test/ut/src/infer_test.cc
@@ -25,7 +25,7 @@
 #include "mindspore/core/utils/log_adapter.h"
 namespace mindspore {
-class InferTest : public mindspore::Common {
+class InferTest : public mindspore::CommonTest {
  public:
   InferTest() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/common/pack_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/common/pack_tests.cc
index c3283f335f..a87c8f2cb1 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/common/pack_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/common/pack_tests.cc
@@ -22,7 +22,7 @@
 #include "mindspore/lite/src/runtime/kernel/arm/nnacl/pack.h"
 namespace mindspore {
-class TestPack : public mindspore::Common {
+class TestPack : public mindspore::CommonTest {
  public:
   TestPack() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/common/strided_slice_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/common/strided_slice_tests.cc
index 60e2acff00..11eb9c65f2 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/common/strided_slice_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/common/strided_slice_tests.cc
@@ -20,7 +20,7 @@
 #include "mindspore/lite/src/kernel_registry.h"
 namespace mindspore {
-class TestStridedSlice : public mindspore::Common {
+class TestStridedSlice : public mindspore::CommonTest {
  public:
   TestStridedSlice() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp16/convolution_fp16_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp16/convolution_fp16_tests.cc
index 552240f730..8151d6dcca 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp16/convolution_fp16_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp16/convolution_fp16_tests.cc
@@ -25,7 +25,7 @@
 #include "src/runtime/kernel/arm/nnacl/fp16/conv_fp16.h"
 namespace mindspore {
-class TestConvolutionFp16 : public mindspore::Common {
+class TestConvolutionFp16 : public mindspore::CommonTest {
  public:
   TestConvolutionFp16() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/activation_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/activation_fp32_test.cc
index f09caab0b5..19c0257b9b 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/activation_fp32_test.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/activation_fp32_test.cc
@@ -22,7 +22,7 @@
 namespace mindspore {
-class TestActivationFp32 : public mindspore::Common {
+class TestActivationFp32 : public mindspore::CommonTest {
  public:
   TestActivationFp32() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/argminmax_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/argminmax_fp32_test.cc
index e7e2099d51..bdf057fca2 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/argminmax_fp32_test.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/argminmax_fp32_test.cc
@@ -21,7 +21,7 @@
 namespace mindspore {
-class TestArgMinMaxTestFp32 : public mindspore::Common {
+class TestArgMinMaxTestFp32 : public mindspore::CommonTest {
  public:
   TestArgMinMaxTestFp32() = default;
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batch_to_space_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batch_to_space_fp32_test.cc
index e5b75c5ed4..ebaf1ae920 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batch_to_space_fp32_test.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batch_to_space_fp32_test.cc
@@ -20,7 +20,7 @@
 namespace mindspore {
-class BatchToSpaceTestFp32 : public mindspore::Common {
+class BatchToSpaceTestFp32 : public mindspore::CommonTest {
  public:
   BatchToSpaceTestFp32() = default;
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batchnorm_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batchnorm_fp32_tests.cc
index 7bd03d842d..dc36f5e933 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batchnorm_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batchnorm_fp32_tests.cc
@@ -21,7 +21,7 @@
 #include "mindspore/lite/src/lite_kernel.h"
 namespace mindspore {
-class TestBatchnormFp32 : public mindspore::Common {
+class TestBatchnormFp32 : public mindspore::CommonTest {
  public:
   TestBatchnormFp32() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/conv1x1_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/conv1x1_fp32_tests.cc
index 3d57b070c3..465cc28df3 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/conv1x1_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/conv1x1_fp32_tests.cc
@@ -26,7 +26,7 @@
 namespace mindspore {
 using mindspore::lite::tensor::Tensor;
-class TestConv1x1Fp32 : public mindspore::Common {
+class TestConv1x1Fp32 : public mindspore::CommonTest {
  public:
   TestConv1x1Fp32() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32_tests.cc
index 86acfe7b57..97966621ab 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32_tests.cc
@@ -23,7 +23,7 @@
 #include "mindspore/lite/src/ops/ops.h"
 namespace mindspore {
-class TestConvolutionDwFp32 : public mindspore::Common {
+class TestConvolutionDwFp32 : public mindspore::CommonTest {
  public:
   TestConvolutionDwFp32() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc
index 89704cfa9f..1a64bb0833 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc
@@ -17,7 +17,7 @@
 #include "mindspore/lite/src/runtime/kernel/arm/nnacl/fp32/crop.h"
 namespace mindspore {
-class CropTestFp32 : public mindspore::Common {
+class CropTestFp32 : public mindspore::CommonTest {
  public:
   CropTestFp32() = default;
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc
index df598547e2..df7c0b19d3 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc
@@ -23,7 +23,7 @@
 #include "mindspore/lite/src/runtime/kernel/arm/nnacl/fp32/deconv.h"
 namespace mindspore {
-class TestDeConvolutionFp32 : public mindspore::Common {
+class TestDeConvolutionFp32 : public mindspore::CommonTest {
  public:
   TestDeConvolutionFp32() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/depth_to_space_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/depth_to_space_fp32_test.cc
index 2c22d10249..be9303ea02 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/depth_to_space_fp32_test.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/depth_to_space_fp32_test.cc
@@ -20,7 +20,7 @@
 namespace mindspore {
-class DepthToSpaceTestFp32 : public mindspore::Common {
+class DepthToSpaceTestFp32 : public mindspore::CommonTest {
  public:
   DepthToSpaceTestFp32() = default;
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/elu_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/elu_fp32_test.cc
index acefdb8733..9bc94e9eaf 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/elu_fp32_test.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/elu_fp32_test.cc
@@ -24,7 +24,7 @@
 namespace mindspore {
 using mindspore::lite::tensor::Tensor;
-class TestEluFp32 : public mindspore::Common {
+class TestEluFp32 : public mindspore::CommonTest {
  public:
   TestEluFp32() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/embedding_lookup_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/embedding_lookup_fp32_test.cc
index 02aff1d460..45ed9b8689 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/embedding_lookup_fp32_test.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/embedding_lookup_fp32_test.cc
@@ -24,7 +24,7 @@
 namespace mindspore {
 using mindspore::lite::tensor::Tensor;
-class TestEmbeddingLookupFp32 : public mindspore::Common {
+class TestEmbeddingLookupFp32 : public mindspore::CommonTest {
  public:
   TestEmbeddingLookupFp32() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/fullconnection_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/fullconnection_fp32_tests.cc
index fd4ed81a14..a2e5ef95c9 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/fullconnection_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/fullconnection_fp32_tests.cc
@@ -26,7 +26,7 @@
 namespace mindspore {
 using mindspore::lite::tensor::Tensor;
-class TestFcFp32 : public mindspore::Common {
+class TestFcFp32 : public mindspore::CommonTest {
  public:
   TestFcFp32() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc
index 41312db0b1..2e54fe362d 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc
@@ -22,7 +22,7 @@
 #include "mindspore/lite/src/ops/ops.h"
 namespace mindspore {
-class LstmFp32 : public mindspore::Common {
+class LstmFp32 : public mindspore::CommonTest {
  public:
   LstmFp32() {}
 };
@@ -131,7 +131,7 @@ void CompareOutput(lite::tensor::Tensor *output, std::vector<float> data) {
   }
   std::cout << std::endl;

-  Common::CompareOutputData(reinterpret_cast<float *>(output->Data()), data.data(), output->ElementsNum(), 0.0001);
+  CommonTest::CompareOutputData(reinterpret_cast<float *>(output->Data()), data.data(), output->ElementsNum(), 0.0001);
 }

 TEST_F(LstmFp32, LstmForwardFp32Accuracy) {
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc
index b4890a0cb0..5f55cfbbc5 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc
@@ -22,7 +22,7 @@
 #include "src/lite_kernel.h"
 namespace mindspore {
-class TestMatMulFp32 : public mindspore::Common {
+class TestMatMulFp32 : public mindspore::CommonTest {
  public:
   TestMatMulFp32() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/power_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/power_fp32_tests.cc
index 4b3538316f..49e4e01e5b 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/power_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/power_fp32_tests.cc
@@ -20,7 +20,7 @@
 #include "src/lite_kernel.h"
 namespace mindspore {
-class TestPowerFp32 : public mindspore::Common {
+class TestPowerFp32 : public mindspore::CommonTest {
  public:
   TestPowerFp32() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reduce_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reduce_fp32_tests.cc
index fbb947615f..3d2be11e0f 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reduce_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reduce_fp32_tests.cc
@@ -20,7 +20,7 @@
 namespace mindspore {
-class TestReduceFp32 : public mindspore::Common {
+class TestReduceFp32 : public mindspore::CommonTest {
  public:
   TestReduceFp32() = default;
   int tid = 0;
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_bilinear_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_bilinear_fp32_tests.cc
index 3c5e98f398..b9e316260b 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_bilinear_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_bilinear_fp32_tests.cc
@@ -20,7 +20,7 @@
 namespace mindspore {
-class TestResizeBilinearFp32 : public mindspore::Common {
+class TestResizeBilinearFp32 : public mindspore::CommonTest {
  public:
   TestResizeBilinearFp32() = default;
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_nearest_neighbor_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_nearest_neighbor_fp32_tests.cc
index ea59c40db2..65cb508489 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_nearest_neighbor_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_nearest_neighbor_fp32_tests.cc
@@ -19,7 +19,7 @@
 namespace mindspore {
-class TestResizeNearestNeighborFp32 : public mindspore::Common {
+class TestResizeNearestNeighborFp32 : public mindspore::CommonTest {
  public:
   TestResizeNearestNeighborFp32() = default;
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/roi_pooling_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/roi_pooling_fp32_tests.cc
index 3808ac9752..6ea1aa39c8 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/roi_pooling_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/roi_pooling_fp32_tests.cc
@@ -20,7 +20,7 @@
 #include "src/lite_kernel.h"
 namespace mindspore {
-class TestROIPoolingFp32 : public mindspore::Common {
+class TestROIPoolingFp32 : public mindspore::CommonTest {
  public:
   TestROIPoolingFp32() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_batch_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_batch_fp32_tests.cc
index 696647687d..bee3ae52e6 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_batch_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_batch_fp32_tests.cc
@@ -23,7 +23,7 @@
 namespace mindspore {
-class SpaceToBatchTestFp32 : public mindspore::Common {
+class SpaceToBatchTestFp32 : public mindspore::CommonTest {
  public:
   SpaceToBatchTestFp32() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc
index ac0258b1fa..e8d1b92980 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc
@@ -24,7 +24,7 @@
 namespace mindspore {
-class SpaceToDepthTestFp32 : public mindspore::Common {
+class SpaceToDepthTestFp32 : public mindspore::CommonTest {
  public:
   SpaceToDepthTestFp32() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/strassen_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/strassen_fp32_tests.cc
index 442f490c0a..022bd4fab7 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/strassen_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/strassen_fp32_tests.cc
@@ -23,7 +23,7 @@
 #include "mindspore/lite/src/runtime/kernel/arm/nnacl/conv_parameter.h"
 namespace mindspore {
-class TestStrassenFp32 : public mindspore::Common {
+class TestStrassenFp32 : public mindspore::CommonTest {
  public:
   TestStrassenFp32() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/topk_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/topk_fp32_tests.cc
index fd005cff64..d86c3f2a1a 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/topk_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/topk_fp32_tests.cc
@@ -21,7 +21,7 @@
 #include "mindspore/lite/src/kernel_registry.h"
 namespace mindspore {
-class TestTopKFp32 : public mindspore::Common {
+class TestTopKFp32 : public mindspore::CommonTest {
  public:
   TestTopKFp32() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/activation_grad_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/activation_grad_fp32_tests.cc
index e1fd748bb3..11a87e7366 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/activation_grad_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/activation_grad_fp32_tests.cc
@@ -27,7 +27,7 @@
 #include "mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.h"
 namespace mindspore {
-class TestActGradFp32 : public mindspore::Common {
+class TestActGradFp32 : public mindspore::CommonTest {
  public:
   TestActGradFp32() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/arithmetic_grad_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/arithmetic_grad_fp32_tests.cc
index 7ca2daf26a..e4b78e4b2e 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/arithmetic_grad_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/arithmetic_grad_fp32_tests.cc
@@ -26,7 +26,7 @@
 namespace mindspore {
-class TestArithmeticGradFp32 : public mindspore::Common {
+class TestArithmeticGradFp32 : public mindspore::CommonTest {
  public:
   TestArithmeticGradFp32() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/bias_grad_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/bias_grad_fp32_tests.cc
index 8146950ab6..5c52b57ff7 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/bias_grad_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/bias_grad_fp32_tests.cc
@@ -23,7 +23,7 @@
 namespace mindspore {
-class TestBiasGradFp32 : public mindspore::Common {
+class TestBiasGradFp32 : public mindspore::CommonTest {
  public:
   TestBiasGradFp32() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/convolution_grad_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/convolution_grad_fp32_tests.cc
index 1d114587ec..1ccd2f023c 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/convolution_grad_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/convolution_grad_fp32_tests.cc
@@ -27,7 +27,7 @@
 #include "mindspore/lite/src/kernel_registry.h"
 namespace mindspore {
-class TestConvolutionGradFp32 : public mindspore::Common {
+class TestConvolutionGradFp32 : public mindspore::CommonTest {
  public:
   TestConvolutionGradFp32() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/pooling_grad_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/pooling_grad_fp32_tests.cc
index 5194cab5ac..858fffa78e 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/pooling_grad_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/pooling_grad_fp32_tests.cc
@@ -26,7 +26,7 @@
 #include "src/runtime/kernel/arm/nnacl/fp32_grad/pooling_grad.h"
 namespace mindspore {
-class TestPoolingGradFp32 : public mindspore::Common {
+class TestPoolingGradFp32 : public mindspore::CommonTest {
  public:
   TestPoolingGradFp32() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_crossentropy_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_crossentropy_fp32_tests.cc
index c35bac8fa4..eee16499fe 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_crossentropy_fp32_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_crossentropy_fp32_tests.cc
@@ -23,7 +23,7 @@
 namespace mindspore {
-class TestSoftmaxCrossEntropyFp32 : public mindspore::Common {
+class TestSoftmaxCrossEntropyFp32 : public mindspore::CommonTest {
  public:
   TestSoftmaxCrossEntropyFp32() {}
 };
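Several hunks above and below route comparisons through CommonTest::CompareOutputData(output, expect, num, tolerance). The patch never shows its body; a plausible reference implementation of such a helper (an assumption, not the actual common_test.h code) is an element-wise absolute-difference check:

    #include <cmath>
    #include <cstdio>

    // Sketch of a CompareOutputData-style helper: element-wise comparison
    // against an absolute tolerance. The signature mirrors the call sites in
    // the patch; the real implementation in common_test.h may differ.
    bool CompareOutputData(const float *output, const float *expect, int num, float tolerance) {
      for (int i = 0; i < num; ++i) {
        if (std::fabs(output[i] - expect[i]) > tolerance) {
          std::printf("mismatch at %d: got %f, expect %f\n", i, output[i], expect[i]);
          return false;
        }
      }
      return true;
    }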
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/add_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/add_int8_tests.cc
index 40f108d4a3..710afbcb25 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/add_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/add_int8_tests.cc
@@ -22,7 +22,7 @@
 #include "mindspore/lite/include/context.h"
 namespace mindspore {
-class TestQuantizedAdd : public mindspore::Common {
+class TestQuantizedAdd : public mindspore::CommonTest {
  public:
   TestQuantizedAdd() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc
index ecc5bcb3f2..1fce620899 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc
@@ -24,7 +24,7 @@
 namespace mindspore {
-class TestArithmeticSelfInt8 : public mindspore::Common {
+class TestArithmeticSelfInt8 : public mindspore::CommonTest {
  public:
   TestArithmeticSelfInt8() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/concat_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/concat_int8_tests.cc
index cc7084b25b..3cd925a86c 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/concat_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/concat_int8_tests.cc
@@ -24,7 +24,7 @@
 namespace mindspore {
-class TestConcatInt8 : public mindspore::Common {
+class TestConcatInt8 : public mindspore::CommonTest {
  public:
   TestConcatInt8() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/crop_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/crop_int8_tests.cc
index 6ee9e96976..0d02008f26 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/crop_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/crop_int8_tests.cc
@@ -24,7 +24,7 @@
 namespace mindspore {
-class TestCropInt8 : public mindspore::Common {
+class TestCropInt8 : public mindspore::CommonTest {
  public:
   TestCropInt8() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc
index 227a1cd938..ff42754f05 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc
@@ -31,7 +31,7 @@
 using mindspore::lite::tensor::QuantArg;
 using mindspore::lite::tensor::Tensor;
 using mindspore::schema::Format_NHWC;
 using mindspore::schema::NodeType_Parameter;
-class TestDeconvInt8 : public mindspore::Common {
+class TestDeconvInt8 : public mindspore::CommonTest {
  public:
   TestDeconvInt8() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/fullconnection_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/fullconnection_int8_tests.cc
index 51a3796271..52666f6272 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/fullconnection_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/fullconnection_int8_tests.cc
@@ -23,7 +23,7 @@
 namespace mindspore {
 using lite::tensor::Tensor;
-class TestFcInt8 : public mindspore::Common {
+class TestFcInt8 : public mindspore::CommonTest {
  public:
   TestFcInt8() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/hswish_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/hswish_int8_tests.cc
index e9caaec997..a564bf87e1 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/hswish_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/hswish_int8_tests.cc
@@ -24,7 +24,7 @@
 #include "mindspore/lite/include/context.h"
 namespace mindspore {
-class TestHSwishInt8 : public mindspore::Common {
+class TestHSwishInt8 : public mindspore::CommonTest {
  public:
   TestHSwishInt8() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc
index fdce152ef2..d556da8d1a 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc
@@ -22,7 +22,7 @@
 #include "mindspore/lite/src/lite_kernel.h"
 namespace mindspore {
-class TestMatmulInt8 : public mindspore::Common {
+class TestMatmulInt8 : public mindspore::CommonTest {
  public:
   TestMatmulInt8() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/mul_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/mul_int8_tests.cc
index 7185c13bfa..9830e165c6 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/mul_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/mul_int8_tests.cc
@@ -24,7 +24,7 @@
 namespace mindspore {
-class TestMulInt8 : public mindspore::Common {
+class TestMulInt8 : public mindspore::CommonTest {
  public:
   TestMulInt8() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/pad_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/pad_int8_tests.cc
index 3bd462cc8a..f3b8c27ae8 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/pad_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/pad_int8_tests.cc
@@ -26,7 +26,7 @@
 namespace mindspore {
 using mindspore::lite::tensor::QuantArg;
 using mindspore::lite::tensor::Tensor;
-class TestPadInt8 : public mindspore::Common {
+class TestPadInt8 : public mindspore::CommonTest {
  public:
   TestPadInt8() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/prelu_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/prelu_int8_tests.cc
index 297316a7b5..e987a74c33 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/prelu_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/prelu_int8_tests.cc
@@ -24,7 +24,7 @@
 namespace mindspore {
-class TestPreluInt8 : public mindspore::Common {
+class TestPreluInt8 : public mindspore::CommonTest {
  public:
   TestPreluInt8() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc
index 665d18a217..ad89c45a0b 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc
@@ -24,7 +24,7 @@
 namespace mindspore {
-class QuantDTypeCastTestFp32 : public mindspore::Common {
+class QuantDTypeCastTestFp32 : public mindspore::CommonTest {
  public:
   QuantDTypeCastTestFp32() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/relux_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/relux_int8_tests.cc
index 70de924b24..920902b200 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/relux_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/relux_int8_tests.cc
@@ -22,7 +22,7 @@
 #include "mindspore/lite/include/context.h"
 namespace mindspore {
-class TestReluXInt8 : public mindspore::Common {
+class TestReluXInt8 : public mindspore::CommonTest {
  public:
   TestReluXInt8() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reshape_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reshape_int8_tests.cc
index 33dcc3050d..9a2131dc73 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reshape_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reshape_int8_tests.cc
@@ -24,7 +24,7 @@
 namespace mindspore {
-class TestReshapeInt8 : public mindspore::Common {
+class TestReshapeInt8 : public mindspore::CommonTest {
  public:
   TestReshapeInt8() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_bilinear_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_bilinear_int8_tests.cc
index 8860be9f34..ed0b269dd6 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_bilinear_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_bilinear_int8_tests.cc
@@ -27,7 +27,7 @@
 namespace mindspore {
 using mindspore::lite::tensor::QuantArg;
 using mindspore::lite::tensor::Tensor;
-class TestResizeBilinearInt8 : public mindspore::Common {
+class TestResizeBilinearInt8 : public mindspore::CommonTest {
  public:
   TestResizeBilinearInt8() = default;
   void TearDown() override;
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_nearest_neighbor_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_nearest_neighbor_int8_tests.cc
index 07b8d352e2..ffc3790c54 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_nearest_neighbor_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_nearest_neighbor_int8_tests.cc
@@ -25,7 +25,7 @@
 namespace mindspore {
 using mindspore::lite::tensor::QuantArg;
 using mindspore::lite::tensor::Tensor;
-class TestResizeNearestNeighborInt8 : public mindspore::Common {
+class TestResizeNearestNeighborInt8 : public mindspore::CommonTest {
  public:
   TestResizeNearestNeighborInt8() = default;
   void Prepare(const std::vector<int> &in_shape, const std::vector<int> &out_shape, int8_t *input_data,
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sigmoid_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sigmoid_int8_tests.cc
index 361c7be45f..ff8e414162 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sigmoid_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sigmoid_int8_tests.cc
@@ -21,7 +21,7 @@
 #include "mindspore/lite/include/context.h"
 namespace mindspore {
-class TestSigmoidInt8 : public mindspore::Common {
+class TestSigmoidInt8 : public mindspore::CommonTest {
  public:
   TestSigmoidInt8() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/softmax_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/softmax_int8_tests.cc
index 20a5ff88a4..ff9a1e7528 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/softmax_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/softmax_int8_tests.cc
@@ -23,7 +23,7 @@
 namespace mindspore {
-class TestSoftmaxInt8 : public mindspore::Common {
+class TestSoftmaxInt8 : public mindspore::CommonTest {
  public:
   TestSoftmaxInt8() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/split_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/split_int8_tests.cc
index 70d7e5c313..a224b9cdad 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/split_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/split_int8_tests.cc
@@ -24,7 +24,7 @@
 namespace mindspore {
-class TestSplitInt8 : public mindspore::Common {
+class TestSplitInt8 : public mindspore::CommonTest {
  public:
   TestSplitInt8() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/squeeze_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/squeeze_int8_tests.cc
index a2c2a76481..6bb7fe9cac 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/squeeze_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/squeeze_int8_tests.cc
@@ -24,7 +24,7 @@
 namespace mindspore {
-class TestSqueezeInt8 : public mindspore::Common {
+class TestSqueezeInt8 : public mindspore::CommonTest {
  public:
   TestSqueezeInt8() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/topk_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/topk_int8_tests.cc
index ea9bf54dbc..302a3b388e 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/topk_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/topk_int8_tests.cc
@@ -21,7 +21,7 @@
 #include "mindspore/lite/src/kernel_registry.h"
 namespace mindspore {
-class TestTopKInt8 : public mindspore::Common {
+class TestTopKInt8 : public mindspore::CommonTest {
  public:
   TestTopKInt8() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/unsqueeze_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/unsqueeze_int8_tests.cc
index b8fb3423c1..41acb55188 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/unsqueeze_int8_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/unsqueeze_int8_tests.cc
@@ -24,7 +24,7 @@
 namespace mindspore {
-class TestUnsqueezeInt8 : public mindspore::Common {
+class TestUnsqueezeInt8 : public mindspore::CommonTest {
  public:
   TestUnsqueezeInt8() {}
 };
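Background for the int8 test files renamed above: several of them (deconv, pad, resize) pull in lite::tensor::QuantArg, which carries the scale and zero point of an affine-quantized tensor. Assuming the usual affine scheme, the mapping those kernels implement is q = round(x / scale) + zero_point, clamped to [-128, 127]. A small self-contained sketch of that math (generic C++, not MindSpore's QuantArg API):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Affine int8 quantization parameters, QuantArg-style.
    struct QuantParam {
      float scale;
      int32_t zero_point;
    };

    int8_t Quantize(float x, const QuantParam &q) {
      int32_t v = static_cast<int32_t>(std::lround(x / q.scale)) + q.zero_point;
      return static_cast<int8_t>(std::clamp(v, -128, 127));  // clamp to int8 range
    }

    float Dequantize(int8_t v, const QuantParam &q) {
      return (static_cast<int32_t>(v) - q.zero_point) * q.scale;
    }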
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/common_utils_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/common_utils_test.cc
index 7b47ee3fe5..057b4774b6 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/common_utils_test.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/common_utils_test.cc
@@ -20,7 +20,7 @@
 namespace mindspore {
 namespace kernel {
-class CommonUtilTest : public mindspore::Common {
+class CommonUtilTest : public mindspore::CommonTest {
  public:
   CommonUtilTest() = default;
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/arithmetic_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/arithmetic_tests.cc
index 5d9e99dd0b..701f4a5950 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/arithmetic_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/arithmetic_tests.cc
@@ -151,7 +151,7 @@ void TestCase(const std::vector<int> &shape_a, const std::vector<int> &shape_b)
   lite::opencl::OpenCLRuntime::DeleteInstance();
 }

-class TestArithmeticOpenCL : public mindspore::Common {
+class TestArithmeticOpenCL : public mindspore::CommonTest {
  public:
   TestArithmeticOpenCL() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/avg_pooling_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/avg_pooling_tests.cc
index 075cce119e..3fce02a894 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/avg_pooling_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/avg_pooling_tests.cc
@@ -24,7 +24,7 @@
 namespace mindspore {
-class TestAvgPoolingOpenCL : public mindspore::Common {};
+class TestAvgPoolingOpenCL : public mindspore::CommonTest {};

 void InitAvgPoolingParam(PoolingParameter *param) {
   param->input_batch_ = 1;
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/concat_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/concat_tests.cc
index 0cb6156ae5..3c8f0cda51 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/concat_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/concat_tests.cc
@@ -104,7 +104,7 @@ void ConcatComputeByCPU_3input_dim4_axis3(float *input0, float *input1, float *i
 }
 namespace mindspore {
-class TestConcatOpenCL : public mindspore::Common {
+class TestConcatOpenCL : public mindspore::CommonTest {
  public:
   TestConcatOpenCL() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/conv2d_transpose_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/conv2d_transpose_tests.cc
index a16485d2f1..b811012fdd 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/conv2d_transpose_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/conv2d_transpose_tests.cc
@@ -24,7 +24,7 @@
 #include "mindspore/core/utils/log_adapter.h"
 namespace mindspore {
-class TestConv2dTransposeOpenCL : public mindspore::Common {
+class TestConv2dTransposeOpenCL : public mindspore::CommonTest {
  public:
   TestConv2dTransposeOpenCL() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/convolution_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/convolution_tests.cc
index 86efc7886d..468d30cd27 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/convolution_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/convolution_tests.cc
@@ -28,7 +28,7 @@
 using mindspore::kernel::SubGraphOpenCLKernel;
 namespace mindspore {
-class TestConvolutionOpenCL : public mindspore::Common {};
+class TestConvolutionOpenCL : public mindspore::CommonTest {};

 void LoadData(void *dst, size_t dst_size, const std::string &file_path) {
   if (file_path.empty()) {
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/depthwise_conv2d_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/depthwise_conv2d_tests.cc
index 59e4d93288..8f6cdf2f01 100755
--- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/depthwise_conv2d_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/depthwise_conv2d_tests.cc
@@ -38,7 +38,7 @@
 bool IMAGE2D_OPEN = true;

 namespace mindspore {
-class TestConvolutionDwOpenCL : public mindspore::Common {
+class TestConvolutionDwOpenCL : public mindspore::CommonTest {
  public:
   TestConvolutionDwOpenCL(){}
 };
@@ -152,7 +152,7 @@ void DepthWiseTestMain(ConvParameter *conv_param, float_t *input_data, float_t *
   }
   std::cout << std::endl;
   // compare
-  Common::CompareOutputData(packed_output, packed_correct_data, packed_output_size, 0.00001);
+  CommonTest::CompareOutputData(packed_output, packed_correct_data, packed_output_size, 0.00001);
   SAFE_DELETE_ARRAY(packed_correct_data)
 }
@@ -529,7 +529,7 @@ TEST_F(TestConvolutionDwOpenCL, ConvDwNoPadFp32) {
   }
   std::cout << std::endl;
   // compare
-  Common::CompareOutputData(packed_output, packed_correct_data, packed_output_size, 0.00001);
+  CommonTest::CompareOutputData(packed_output, packed_correct_data, packed_output_size, 0.00001);

   inputs[1]->SetData(nullptr);
   inputs[2]->SetData(nullptr);
@@ -701,7 +701,7 @@ TEST_F(TestConvolutionDwOpenCL, ConvDwPadFp32) {
   }
   std::cout << std::endl;
   // compare
-  Common::CompareOutputData(packed_output, packed_correct_data, packed_output_size, 0.00001);
+  CommonTest::CompareOutputData(packed_output, packed_correct_data, packed_output_size, 0.00001);

   inputs[1]->SetData(nullptr);
   inputs[2]->SetData(nullptr);
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/matmul_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/matmul_tests.cc
index 50d46b2a68..1e7088f459 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/matmul_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/matmul_tests.cc
@@ -23,7 +23,7 @@
 #include "mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.h"
 namespace mindspore {
-class TestMatMulOpenCL : public mindspore::Common {
+class TestMatMulOpenCL : public mindspore::CommonTest {
  public:
   TestMatMulOpenCL() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/max_pooling_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/max_pooling_tests.cc
index d90be66a4c..5e809c6a0c 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/max_pooling_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/max_pooling_tests.cc
@@ -24,7 +24,7 @@
 namespace mindspore {
-class TestMaxPoolingOpenCL : public mindspore::Common {};
+class TestMaxPoolingOpenCL : public mindspore::CommonTest {};

 void InitParameter(PoolingParameter *param) {
   param->window_h_ = 2;
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/opencl_kernel_tests.h b/mindspore/lite/test/ut/src/runtime/kernel/opencl/opencl_kernel_tests.h
index bcde788efe..61ffc86412 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/opencl_kernel_tests.h
+++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/opencl_kernel_tests.h
@@ -26,7 +26,7 @@
 namespace mindspore {
-class TestOpenCLKernel : public mindspore::Common {
+class TestOpenCLKernel : public mindspore::CommonTest {
  public:
   TestOpenCLKernel() {}
 };
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/softmax_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/softmax_tests.cc
index 684190bde2..6d81702c07 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/softmax_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/softmax_tests.cc
@@ -24,7 +24,7 @@
 namespace mindspore {
-class TestSoftmaxOpenCL : public mindspore::Common {};
+class TestSoftmaxOpenCL : public mindspore::CommonTest {};

 void InitSoftaxParam(SoftmaxParameter *param) { param->axis_ = -1; }
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/transpose_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/transpose_tests.cc
index 0f432ba599..c1ffbd8995 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/transpose_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/transpose_tests.cc
@@ -23,7 +23,7 @@
 #include "mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.h"
 namespace mindspore {
-class TestTransposeOpenCL : public mindspore::Common {
+class TestTransposeOpenCL : public mindspore::CommonTest {
  public:
   TestTransposeOpenCL() {}
 };
diff --git a/mindspore/lite/test/ut/src/train_test.cc b/mindspore/lite/test/ut/src/train_test.cc
deleted file mode 100644
index e64d5a9dfd..0000000000
--- a/mindspore/lite/test/ut/src/train_test.cc
+++ /dev/null
@@ -1,287 +0,0 @@
-/**
- * Copyright 2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include
-#include
-#include
-#include "utils/base_ref_utils.h"
-#include "mindspore/lite/schema/inner/model_generated.h"
-#include "mindspore/lite/src/train/model_impl.h"
-#include "mindspore/lite/include/model.h"
-#include "mindspore/lite/src/train/train_session.h"
-#include "common/common_test.h"
-#include "mindspore/core/utils/log_adapter.h"
-
-namespace mindspore {
-class TrainTest : public mindspore::Common {
- public:
-  TrainTest() {}
-};
-
-TEST_F(TrainTest, TestConvNode) {
-  auto meta_graph = std::make_shared<schema::MetaGraphT>();
-  meta_graph->name = "graph";
-
-  auto node = std::make_unique<schema::CNodeT>();
-  node->inputIndex = {0, 1};
-  node->outputIndex = {2};
-  node->primitive = std::make_unique<schema::PrimitiveT>();
-  node->primitive->value.type = schema::PrimitiveType_Conv2D;
-  auto primitive = new schema::Conv2DT;
-  primitive->padMode = schema::PadMode_SAME;
-  primitive->channelIn = 3;
-  primitive->channelOut = 32;
-  primitive->format = schema::Format_NHWC;
-  primitive->strideH = 1;
-  primitive->strideW = 1;
-  primitive->kernelH = 3;
-  primitive->kernelW = 3;
-  primitive->dilateH = 1;
-  primitive->dilateW = 1;
-  node->primitive->value.value = primitive;
-  node->name = "Conv2D";
-  meta_graph->nodes.emplace_back(std::move(node));
-  meta_graph->inputIndex = {0};
-  meta_graph->outputIndex = {2};
-
-  auto input0 = std::make_unique<schema::TensorT>();
-  input0->nodeType = schema::NodeType::NodeType_Parameter;  // todo use ValueNode?
-  input0->format = schema::Format_NHWC;
-  input0->dataType = TypeId::kNumberTypeFloat32;
-  input0->dims = {1, 28, 28, 3};
-  input0->offset = -1;
-  meta_graph->allTensors.emplace_back(std::move(input0));
-
-  auto weight = std::make_unique<schema::TensorT>();
-  weight->nodeType = schema::NodeType::NodeType_ValueNode;
-  weight->format = schema::Format_KHWC;
-  weight->dataType = TypeId::kNumberTypeFloat32;
-  weight->dims = {32, 3, 3, 3};
-
-  auto buf = new char *[1];
-  //================================================================
-  size_t weight_size;
-  std::string weight_path = "./convfp32_weight_32_3_3_3.bin";
-  ReadFile(weight_path.c_str(), &weight_size, buf);
-  ASSERT_NE(nullptr, buf[0]);
-  auto weight_data_temp = reinterpret_cast<float *>(buf[0]);
-  ASSERT_NE(nullptr, weight_data_temp);
-  weight->data.resize(sizeof(float) * 32 * 3 * 3 * 3);
-
-  //================================================================
-  memcpy(weight->data.data(), weight_data_temp, weight_size);
-  weight->offset = -1;
-  meta_graph->allTensors.emplace_back(std::move(weight));
-
-  auto output = std::make_unique<schema::TensorT>();
-  output->nodeType = schema::NodeType::NodeType_Parameter;
-  output->format = schema::Format_NHWC;
-  output->dataType = TypeId::kNumberTypeFloat32;
-  output->dims = {1, 28, 28, 32};
-  output->offset = -1;
-  meta_graph->allTensors.emplace_back(std::move(output));
-
-  flatbuffers::FlatBufferBuilder builder(1024);
-  auto offset = schema::MetaGraph::Pack(builder, meta_graph.get());
-  builder.Finish(offset);
-  size_t size = builder.GetSize();
-  const char *content = reinterpret_cast<const char *>(builder.GetBufferPointer());
-
-  auto model = lite::Model::Import(content, size);
-  ASSERT_NE(nullptr, model);
-  auto session = new session::TrainSession();  // inference::MSSession::CreateSession(kCPUDevice, 0);
-  ASSERT_NE(nullptr, session);
-  auto graphId = session->CompileGraph(NOT_NULL(model->GetModelImpl()));
-
-  auto inTensor = new tensor::Tensor(TypeId::kNumberTypeFloat32, {1, 28, 28, 3});
-  ASSERT_NE(nullptr, inTensor);
-  ASSERT_EQ(sizeof(float) * (28 * 28 * 3), inTensor->Size());
-  auto ret = inTensor->MallocData();
-  ASSERT_EQ(0, ret);
-  auto data = inTensor->Data();
-  //===================================================
-  size_t input_size;
-  std::string input_path = "./convfp32_input_1_28_28_3.bin";
-  ReadFile(input_path.c_str(), &input_size, buf);
-  ASSERT_NE(nullptr, buf[0]);
-  auto input_data = reinterpret_cast<float *>(buf[0]);
-  ASSERT_NE(nullptr, input_data);
-  //===================================================
-  memcpy(data, input_data, input_size);
-  std::vector<std::shared_ptr<tensor::Tensor>> inputs;
-  inputs.emplace_back(inTensor);
-  VectorRef outputsRef;
-  session->RunGraph(graphId, inputs, &outputsRef);
-  auto outputs = TransformVectorRefToMultiTensor(outputsRef);
-  ASSERT_EQ(1, outputs.size());
-  ASSERT_EQ(1, outputs.front().size());
-  auto runOutput = outputs.front().front();
-  ASSERT_NE(nullptr, runOutput);
-  ASSERT_EQ(28 * 28 * 32, runOutput->ElementsNum());
-  ASSERT_EQ(TypeId::kNumberTypeFloat32, runOutput->data_type());
-  auto *outData = reinterpret_cast<float *>(runOutput->MutableData());
-  //===================================================
-  size_t output_size;
-  std::string output_path = "./convfp32_out_1_28_28_32.bin";
-  ReadFile(output_path.c_str(), &output_size, buf);
-  ASSERT_NE(nullptr, buf[0]);
-  auto output_data = reinterpret_cast<float *>(buf[0]);
-  ASSERT_NE(nullptr, output_data);
-  //===================================================
-  ASSERT_EQ(output_size, runOutput->Size());
-  for (size_t i = 0; i < runOutput->ElementsNum(); i++) {
-    ASSERT_EQ(output_data[i], outData[i]);
-  }
-  MS_LOG(INFO) << "Passed";
-}
-
-// TEST_F(TrainTest, TestMultiNode) {
-//   auto msGraph = std::make_shared();
-//   msGraph->name = "graph";
-//   auto msSubgraph = std::make_unique();
-//   msSubgraph->name = "subGraph";
-//
-//   auto conv = std::make_unique();
-//   conv->inputIndex = {0, 1};
-//   conv->outputIndex = {2};
-//   conv->attr.type = schema::OpT_Conv2D;
-//   auto conv_attr = new schema::Conv2DT;
-//   conv_attr->padMode = schema::PadMode_SAME;
-//   conv_attr->format = schema::Format_NHWC;
-//   conv_attr->strideH = 1;
-//   conv_attr->strideW = 1;
-//   conv_attr->kernelH = 3;
-//   conv_attr->kernelW = 3;
-//   conv_attr->dilateH = 1;
-//   conv_attr->dilateW = 1;
-//
-//   conv->attr.value = conv_attr;
-//   conv->name = "Conv2D";
-//   conv->fmkType = schema::FmkType_CAFFE;
-//   msSubgraph->nodes.emplace_back(std::move(conv));
-//
-//   auto matMul1 = std::make_unique();
-//   matMul1->inputIndex = {2, 3};
-//   matMul1->outputIndex = {4};
-//   matMul1->attr.type = schema::OpT_MatMul;
-//   auto matMul_attr1 = new schema::MatMulT;
-//   matMul_attr1->transposeA = false;
-//   matMul_attr1->transposeB = true;
-//   matMul1->attr.value = matMul_attr1;
-//   matMul1->name = "matmul1";
-//   matMul1->fmkType = schema::FmkType_CAFFE;
-//   msSubgraph->nodes.emplace_back(std::move(matMul1));
-//
-//   auto matMul2 = std::make_unique();
-//   matMul2->inputIndex = {4, 5};
-//   matMul2->outputIndex = {6};
-//   matMul2->attr.type = schema::OpT_MatMul;
-//   auto matMul_attr2 = new schema::MatMulT;
-//   matMul_attr2->transposeA = false;
-//   matMul_attr2->transposeB = true;
-//   matMul2->attr.value = matMul_attr2;
-//   matMul2->name = "matmul2";
-//   matMul2->fmkType = schema::FmkType_CAFFE;
-//   msSubgraph->nodes.emplace_back(std::move(matMul2));
-//
-//   msSubgraph->inputIndex = {0};
-//   msSubgraph->outputIndex = {6};
-//
-//   auto input0 = std::make_unique();
-//   input0->refCount = schema::MSCONST_WEIGHT_REFCOUNT;
-//   input0->format = schema::Format_NHWC;
-//   input0->dataType = TypeId::kNumberTypeFloat32;
-//   input0->dims = {1, 5, 5, 3};
-//   input0->offset = -1;
-//   msSubgraph->allTensors.emplace_back(std::move(input0));
-//
-//   auto conv_weight = std::make_unique();
-//   conv_weight->refCount = schema::MSCONST_WEIGHT_REFCOUNT;
-//   conv_weight->format = schema::Format_KHWC;
-//   conv_weight->dataType = TypeId::kNumberTypeFloat32;
-//   conv_weight->dims = {8, 3, 3, 3};
-//   conv_weight->data.resize(8*3*3*3*sizeof(float));
-//   msSubgraph->allTensors.emplace_back(std::move(conv_weight));
-//
-//   auto conv_output = std::make_unique();
-//   conv_output->refCount = 0;
-//   conv_output->format = schema::Format_NHWC;
-//   conv_output->dataType = TypeId::kNumberTypeFloat32;
-//   conv_output->dims = {1, 5, 5, 8};
-//   msSubgraph->allTensors.emplace_back(std::move(conv_output));
-//
-//   auto add_weight = std::make_unique();
-//   add_weight->refCount = schema::MSCONST_WEIGHT_REFCOUNT;
-//   add_weight->format = schema::Format_NHWC;
-//   add_weight->dataType = TypeId::kNumberTypeFloat32;
-//   add_weight->dims = {1, 5, 5, 8};
-//   add_weight->data.resize(5*5*8*sizeof(float));
-//   msSubgraph->allTensors.emplace_back(std::move(add_weight));
-//
-//   auto add_output = std::make_unique();
-//   add_output->refCount = 0;
-//   add_output->format = schema::Format_NHWC;
-//   add_output->dataType = TypeId::kNumberTypeFloat32;
-//   add_output->dims = {1, 5, 5, 8};
-//   msSubgraph->allTensors.emplace_back(std::move(add_output));
-//
-//   auto mul_weight = std::make_unique();
-//   mul_weight->refCount = schema::MSCONST_WEIGHT_REFCOUNT;
-//   mul_weight->format = schema::Format_NHWC;
-//   mul_weight->dataType = TypeId::kNumberTypeFloat32;
-//   mul_weight->dims = {1, 5, 5, 8};
-//   mul_weight->data.resize(5*5*8*sizeof(float));
-//   msSubgraph->allTensors.emplace_back(std::move(mul_weight));
-//
-//   auto mul_output = std::make_unique();
-//   mul_output->refCount = 0;
-//   mul_output->format = schema::Format_NHWC;
-//   mul_output->dataType = TypeId::kNumberTypeFloat32;
-//   mul_output->dims = {1, 5, 5, 8};
-//   msSubgraph->allTensors.emplace_back(std::move(mul_output));
-//   msGraph->subgraphs.emplace_back(std::move(msSubgraph));
-//
-//   flatbuffers::FlatBufferBuilder builder(1024);
-//   auto offset = schema::GraphDef::Pack(builder, msGraph.get());
-//   builder.Finish(offset);
-//   size_t size = builder.GetSize();
-//   const char *content = (char *)builder.GetBufferPointer();
-//   const std::string strstub = "";
-//
-//   auto func_graph = inference::LoadModel(content, size, strstub);
-//   ASSERT_NE(nullptr, func_graph);
-//   auto session = inference::MSSession::CreateSession(kCPUDevice, 0);
-//   ASSERT_NE(nullptr, session);
-//   auto graphId = session->CompileGraph(func_graph);
-//
-//   auto inTensor =
-//       std::shared_ptr<inference::MSTensor>(inference::MSTensor::CreateTensor(TypeId::kNumberTypeFloat32, {1, 5, 5, 3}));
-//   ASSERT_NE(nullptr, inTensor);
-//   ASSERT_EQ(sizeof(float) * (5 * 5 * 3), inTensor->Size());
-//   (void)inTensor->MutableData();
-//
-//   std::vector<std::shared_ptr<inference::MSTensor>> inputs;
-//   inputs.emplace_back(inTensor);
-//   auto outputs = session->RunGraph(graphId, inputs);
-//   ASSERT_EQ(1, outputs.size());
-//   ASSERT_EQ(1, outputs.front().size());
-//   auto runOutput = outputs.front().front();
-//   ASSERT_NE(nullptr, runOutput);
-//   ASSERT_EQ(5 * 5 * 8, runOutput->ElementsNum());
-//   ASSERT_EQ(TypeId::kNumberTypeFloat32, runOutput->data_type());
-//   MS_LOG(INFO) << "Passed";
-// }
-}  // namespace mindspore
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h
index 200e322c7c..0eb680ed0f 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h
@@ -22,7 +22,7 @@
 #include "schema/inner/model_generated.h"
 namespace mindspore {
-class TestTfliteParser : public Common {
+class TestTfliteParser : public CommonTest {
  public:
   TestTfliteParser() = default;
   void TearDown() override;
diff --git a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_activation_fusion_test.cc b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_activation_fusion_test.cc
index 6fac6dedc4..24d494c46c 100644
--- a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_activation_fusion_test.cc
+++ b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_activation_fusion_test.cc
@@ -27,7 +27,7 @@
 #include "src/common/anf_exporter/anf_exporter.h"
 namespace mindspore {
-class ConvActivationFusionTest : public mindspore::Common {
+class ConvActivationFusionTest : public mindspore::CommonTest {
  public:
   ConvActivationFusionTest() = default;
 };
diff --git a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_biasadd_fusion_test.cc b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_biasadd_fusion_test.cc
index ef9fd87115..32f203275e 100644
--- a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_biasadd_fusion_test.cc
+++ b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_biasadd_fusion_test.cc
@@ -27,7 +27,7 @@
 #include "src/common/anf_exporter/anf_exporter.h"
 namespace mindspore {
-class ConvBiasAddFusionTest : public mindspore::Common {
+class ConvBiasAddFusionTest : public mindspore::CommonTest {
  public:
   ConvBiasAddFusionTest() = default;
 };
diff --git a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_bn_fusion_test.cc b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_bn_fusion_test.cc
index e2ce0d9e90..347b4498e1 100644
--- a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_bn_fusion_test.cc
+++ b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_bn_fusion_test.cc
@@ -27,7 +27,7 @@
 #include "src/common/anf_exporter/anf_exporter.h"
 namespace mindspore {
-class ConvBNFusionTest : public mindspore::Common {
+class ConvBNFusionTest : public mindspore::CommonTest {
  public:
   ConvBNFusionTest() = default;
 };
diff --git a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc
index 1eca3a469c..92f119c74a 100644
--- a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc
+++ b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc
@@ -27,7 +27,7 @@
 #include "src/common/anf_exporter/anf_exporter.h"
 namespace mindspore {
-class ConvScaleFusionTest : public mindspore::Common {
+class ConvScaleFusionTest : public mindspore::CommonTest {
  public:
   ConvScaleFusionTest() = default;
 };
diff --git a/mindspore/lite/tools/converter/converter_flags.cc b/mindspore/lite/tools/converter/converter_flags.cc
index 3fd7a9fc45..66a081d6a4 100644
--- a/mindspore/lite/tools/converter/converter_flags.cc
+++ b/mindspore/lite/tools/converter/converter_flags.cc
@@ -18,14 +18,13 @@
 #include
 #include "tools/converter/converter_flags.h"

-
 namespace mindspore {
 namespace lite {
 namespace converter {
 Flags::Flags() {
-  AddFlag(&Flags::fmkIn, "fmk", "Input model framework type. TF | CAFFE | ONNX | MS | TFLITE", "");
-  AddFlag(&Flags::modelFile, "modelFile",
-          "Input model file path. TF: *.pb | CAFFE: *.prototxt | ONNX: *.onnx | MS: *.ms", "");
+  AddFlag(&Flags::fmkIn, "fmk", "Input model framework type. TFLITE | CAFFE | MS", "");
+  AddFlag(&Flags::modelFile, "modelFile", "Input model file path. TFLITE: *.tflite | CAFFE: *.prototxt | MS: *.mindir",
+          "");
   AddFlag(&Flags::outputFile, "outputFile", "Output model file path. Will add .ms automatically", "");
   AddFlag(&Flags::weightFile, "weightFile",
           "Input model weight file path. Needed when fmk is CAFFE. CAFFE: *.caffemodel", "");
@@ -79,18 +78,14 @@ int Flags::Init(int argc, const char **argv) {
     MS_LOG(ERROR) << "INPUT INVALID: inputInferenceType is invalid: %s", this->inputInferenceTypeIn.c_str();
     return 1;
   }
-  if (this->fmkIn == "TF") {
-    this->fmk = FmkType_TF;
-  } else if (this->fmkIn == "CAFFE") {
+  if (this->fmkIn == "CAFFE") {
     this->fmk = FmkType_CAFFE;
-  } else if (this->fmkIn == "ONNX") {
-    this->fmk = FmkType_ONNX;
   } else if (this->fmkIn == "MS") {
     this->fmk = FmkType_MS;
   } else if (this->fmkIn == "TFLITE") {
     this->fmk = FmkType_TFLITE;
   } else {
-    MS_LOG(ERROR) << "INPUT ILLEGAL: fmk must be TF|CAFFE|ONNX|MS";
+    MS_LOG(ERROR) << "INPUT ILLEGAL: fmk must be TFLITE|CAFFE|MS";
     return 1;
   }
@@ -111,66 +106,8 @@ int Flags::Init(int argc, const char **argv) {
     return 1;
   }

-  // auto status = ValidateAwareQuantizerCLI();
-  // if (status != RET_OK) {
-  //   MS_PRINT_ERROR("Parse aware quantization command line failed: %d", status);
-  //   return status;
-  // }
-  // status = ValidateWeighQuantCLI();
-  // if (status != RET_OK) {
-  //   MS_PRINT_ERROR("ValidateWeighQuantCLI failed: %d", status);
-  //   return status;
-  // }
   return 0;
 }
-
-// bool Flags::ValidateString(const string pattern, const string input) {
-//   std::regex repPattern(pattern, std::regex_constants::extended);
-//   std::match_results regResult;
-//   return regex_match(input, regResult, repPattern);
-//}
-
-// int Flags::ValidateAwareQuantizerCLI() {
-//   // check input inference type
-//   if (this->inputInferenceType == DataType_DT_FLOAT) {
-//     if (this->mean.empty()) {
-//       MS_PRINT_ERROR("mean value shound not be null!")
-//       return RET_PARAM_INVALID;
-//     }
-//     if (this->stdDev.empty()) {
-//       MS_PRINT_ERROR("standard deviation value shound not be null!")
-//       return RET_PARAM_INVALID;
-//     }
-//     const std::string pattern = "^[+-]?([0-9]*\.?[0-9]+|[0-9]+\.?[0-9]*)([eE][+-]?[0-9]+)?$";
-//     if (!ValidateString(pattern, this->mean)) {
-//       MS_PRINT_ERROR("invalid input mean values: %s", this->mean.c_str());
-//       return RET_PARAM_INVALID;
-//     }
-//     if (!ValidateString(pattern, this->stdDev)) {
-//       MS_PRINT_ERROR("invalid input standard deviation value: %s", this->stdDev.c_str());
-//       return RET_PARAM_INVALID;
-//     }
-//   } else {
-//     if (!this->mean.empty()) {
-//       MS_PRINT_INFO("useless mean value: %s", this->mean.c_str());
-//     }
-//     if (!this->stdDev.empty()) {
-//       MS_PRINT_INFO("useless stdDev value: %s", this->stdDev.c_str());
-//     }
-//   }
-//   return RET_OK;
-//}
-
-// int Flags::ValidateWeighQuantCLI() {
-//   if (!this->quantSize.empty()) {
-//     if (!ValidateString("^[0-9]*$", this->quantSize)) {
-//       MS_PRINT_ERROR("invalid input quantSize: %s, only support positive integer type!", this->quantSize.c_str());
-//       return RET_PARAM_INVALID;
-//     }
-//   }
-//   return RET_OK;
-//}
 }  // namespace converter
 }  // namespace lite
 }  // namespace mindspore
-
diff --git a/mindspore/lite/tools/converter/converter_flags.h b/mindspore/lite/tools/converter/converter_flags.h
index b97d777ae1..0594663a9b 100644
--- a/mindspore/lite/tools/converter/converter_flags.h
+++ b/mindspore/lite/tools/converter/converter_flags.h
@@ -50,10 +50,6 @@ class Flags : public virtual mindspore::lite::FlagParser {
  private:
   bool ValidateString(std::string pattern, std::string input);

-  // int ValidateAwareQuantizerCLI();
-  //
-  // int ValidateWeighQuantCLI();
-
  public:
   std::string modelFile;
   std::string outputFile;
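With this patch the converter accepts only the TFLITE, CAFFE, and MS front ends, and Flags::Init maps the string to an FmkType via the branch chain shown above. The same mapping could be expressed with a lookup table; this is a sketch of an alternative, not what the patch uses (ParseFmk is a hypothetical helper, and only the three FmkType values named in the hunk are kept):

    #include <map>
    #include <string>

    enum FmkType { FmkType_TFLITE, FmkType_CAFFE, FmkType_MS };

    // Returns true and sets *out when fmk names a supported framework,
    // mirroring the TFLITE|CAFFE|MS branch chain in Flags::Init.
    bool ParseFmk(const std::string &fmk, FmkType *out) {
      static const std::map<std::string, FmkType> kFmkMap = {
          {"TFLITE", FmkType_TFLITE}, {"CAFFE", FmkType_CAFFE}, {"MS", FmkType_MS}};
      auto it = kFmkMap.find(fmk);
      if (it == kFmkMap.end()) {
        return false;  // caller logs "fmk must be TFLITE|CAFFE|MS" and returns 1
      }
      *out = it->second;
      return true;
    }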