@@ -23,12 +23,17 @@
 #endif

 typedef struct TransposeParameter {
+  // primitive parameter
   OpParameter op_parameter_;
   int perm_[8];
   bool conjugate_;
-  int num_axes_;
+
+  // shape correlative
   int strides_[8];
   int out_strides_[8];
+
+  // other parameter
+  int num_axes_;
   int data_size_;
 } TransposeParameter;
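Every header hunk in this patch applies the same three-part grouping. A sketch of how I read the convention, using the struct above (the lifecycle notes in the comments are my interpretation of the group names, not text from the patch):

```cpp
#include "nnacl/op_base.h"

typedef struct TransposeParameter {
  // primitive parameter: copied straight from the model primitive at load time
  OpParameter op_parameter_;
  int perm_[8];
  bool conjugate_;

  // shape correlative: derived from tensor shapes, refreshed on every resize
  int strides_[8];
  int out_strides_[8];

  // other parameter: runtime bookkeeping filled in by the kernel itself
  int num_axes_;
  int data_size_;
} TransposeParameter;
```

Read this way, moving num_axes_ below the strides says it is treated as runtime state rather than a parsed attribute.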
@@ -19,10 +19,15 @@
 #include "nnacl/op_base.h"

 typedef struct ROIPoolingParameter {
+  // primitive parameter
   OpParameter op_parameter_;
+  int pooledW_;
+  int pooledH_;
+  float scale_;
+
+  // shape correlative
   int in_strides_[DIMENSION_4D];
   int out_strides_[DIMENSION_4D];
-  float scale_;
   int ndim_;
   int input_w_;
   int input_h_;
@@ -32,9 +37,9 @@ typedef struct ROIPoolingParameter {
   int output_h_;
   int output_n_;
   int output_c_;
+
+  // other parameter
   int thread_num_;
-  int pooledW_;
-  int pooledH_;
 } ROIPoolingParameter;

 #ifdef __cplusplus
@@ -19,11 +19,12 @@
 #include "nnacl/op_base.h"

-typedef struct {
+typedef struct SkipGramParameter {
+  // primitive parameter
   OpParameter op_parameter_;
+  int ngram_size;
+  int max_skip_size;
   bool include_all_ngrams;
-  int max_skip_size;
-  int ngram_size;
 } SkipGramParameter;

 #endif  // MINDSPORE_LITE_NNACL_FP32_SKIP_GRAM_H_
@@ -18,16 +18,21 @@
 #include "nnacl/op_base.h"

 typedef struct SpaceToBatchParameter {
+  // primitive parameter
   OpParameter op_parameter_;
-  bool need_paddings_;
-  int m_;
   int block_sizes_[4];
   int paddings_[4];
+
+  // shape correlative
   int input_shape_[4];
   int output_shape_[4];
   int in_stride_[4];
   int out_stride_[4];
   int padded_in_shape_[4];
+
+  // other parameter
+  bool need_paddings_;
+  int m_;
 } SpaceToBatchParameter;

 #ifdef __cplusplus
 extern "C" {
@@ -18,6 +18,7 @@
 #include "nnacl/op_base.h"

 typedef struct SpaceToDepthParameter {
+  // primitive parameter
   OpParameter op_parameter_;
   int32_t block_size_;
 } SpaceToDepthParameter;
@@ -20,14 +20,19 @@
 #include "nnacl/op_base.h"

 typedef struct TileParameter {
+  // primitive parameter
   OpParameter op_parameter_;
-  int in_dim_;
+  int multiples_[5];
+  int dims_[5];
+
+  // shape correlative
   int in_shape_[5];
   int out_shape_[5];
-  int dims_[5];
-  int multiples_[5];
   int in_strides_[5];
   int out_strides_[5];
+
+  // other parameter
+  int in_dim_;
 } TileParameter;

 #ifdef __cplusplus
@@ -25,11 +25,14 @@ typedef struct TopkNode {
 } TopkNode;

 typedef struct TopkParameter {
+  // primitive parameter
   OpParameter op_parameter_;
-  int last_dim_size_;
-  int loop_num_;
   int k_;
   bool sorted_;
+
+  // other parameter
+  int last_dim_size_;
+  int loop_num_;
   void *topk_node_list_;
 } TopkParameter;
@@ -20,6 +20,7 @@
 #include "nnacl/op_base.h"

 typedef struct UniqueParameter {
+  // primitive parameter
   OpParameter op_parameter_;
 } UniqueParameter;
@@ -22,8 +22,11 @@
 #define UNSQUEEZE_DIMS_MAX_SIZE 4

 typedef struct UnsqueezeParameter {
+  // primitive parameter
   OpParameter op_parameter_;
   int dims_[UNSQUEEZE_DIMS_MAX_SIZE];
+
+  // other parameter
   int num_dim_;
 } UnsqueezeParameter;
@@ -25,11 +25,16 @@ extern "C" {
 #endif

 typedef struct SoftmaxCrossEntropyParameter {
+  // primitive parameter
   OpParameter op_parameter_;
-  int32_t batch_size_;
-  unsigned int number_of_classes_;
   int n_dim_;
+
+  // shape correlative
   int input_shape_[5];
+
+  // other parameter
+  int32_t batch_size_;
+  unsigned int number_of_classes_;
   int is_grad;
 } SoftmaxCrossEntropyParameter;
@@ -20,13 +20,16 @@
 #define REDUCE_MAX_AXES_NUM 8

 typedef struct ReduceParameter {
+  // primitive parameter
   OpParameter op_parameter_;
+  int axes_[REDUCE_MAX_AXES_NUM];
   bool keep_dims_;
+  int mode_;
   bool reduce_to_end_;
   float coeff;
-  int axes_[REDUCE_MAX_AXES_NUM];
+
+  // other parameter
   int num_axes_;
-  int mode_;
 } ReduceParameter;

 #endif  // MINDSPORE_LITE_NNACL_REDUCE_PARAMETER_H_
@@ -21,7 +21,10 @@
 #include "nnacl/quantization/quantize.h"

 typedef struct ReshapeParameter {
+  // primitive parameter
   OpParameter op_parameter_;
+
+  // other parameter
   ReshapeQuantArg quant_para_;
   int thread_count_;
 } ReshapeParameter;
@@ -18,6 +18,7 @@
 #include "nnacl/op_base.h"

 typedef struct ResizeParameter {
+  // primitive parameter
   OpParameter op_parameter_;
   int method_;
   int64_t new_height_;
@@ -20,14 +20,19 @@
 #include "nnacl/op_base.h"

 typedef struct ReverseSequenceParameter {
+  // primitive parameter
   OpParameter op_parameter_;
-  int ndim_;
+  int seq_axis_;
+  int batch_axis_;
+
+  // shape correlative
   int input_shape0_[5];
   int output_shape_[5];
   int input_stride_[5];
   int output_stride_[5];
-  int seq_axis_;
-  int batch_axis_;
+
+  // other parameter
+  int ndim_;
   int outer_count_;
   int outer_stride_;
   int inner_count_;
@@ -20,11 +20,17 @@
 #include <mindspore/lite/nnacl/quantization/quantize.h>
 #include "nnacl/op_base.h"
 typedef struct ScaleParameter {
+  // primitive parameter
   OpParameter op_parameter_;
+  int axis_;
+  int activation_type_;
+
+  // shape correlative
   int outer_size_;
   int axis_size_;
   int inner_size_;
-  int axis_;
+
+  // other parameter
   bool const_scale_;
   bool const_offset_;
   QuantMulArg scale_mul_arg_;
@@ -33,7 +39,6 @@ typedef struct ScaleParameter {
   int scale_zp_;
   int offset_zp_;
   int output_zp_;
-  int activation_type_;
   int output_activation_min_;
   int output_activation_max_;
 } ScaleParameter;
@@ -20,6 +20,7 @@
 #include "nnacl/op_base.h"

 typedef struct ScatterNDParameter {
+  // primitive parameter
   OpParameter op_parameter_;
 } ScatterNDParameter;
@@ -20,6 +20,7 @@
 #include "nnacl/op_base.h"

 typedef struct ShapeParameter {
+  // primitive parameter
   OpParameter op_parameter_;
 } ShapeParameter;
@@ -21,15 +21,20 @@
 #define SIGMOID_OFFSET_MAX_SIZE 4

 typedef struct SigmoidParameter {
+  // primitive parameter
   OpParameter op_parameter_;
+
+  // shape correlative
+  const int *in_shape_;
+  const int *out_shape_;
+
+  // other parameter
   SigmoidQuantArg quant_arg;
   double alpha_;
   int thread_count_;
   int64_t offset_[PRELU_OFFSET_MAX_SIZE];
   int64_t in_offset_[PRELU_OFFSET_MAX_SIZE];
   int64_t axis_;
-  const int *in_shape_;
-  const int *out_shape_;
   int input_dim_;
   int element_num;
 } SigmoidParameter;
@@ -23,12 +23,17 @@
 #define SLICE_SHAPE_MAX_SIZE 4

 typedef struct SliceParameter {
+  // primitive parameter
   OpParameter op_parameter_;
-  SliceQuantArg quant_arg_;
+
+  // shape correlative
+  int32_t shape_[SLICE_SHAPE_MAX_SIZE];
   int32_t begin_[SLICE_SHAPE_MAX_SIZE];
   int32_t end_[SLICE_SHAPE_MAX_SIZE];
   int32_t size_[SLICE_SHAPE_MAX_SIZE];
-  int32_t shape_[SLICE_SHAPE_MAX_SIZE];
+
+  // other parameter
+  SliceQuantArg quant_arg_;
   int32_t param_length_;
 } SliceParameter;
@@ -20,11 +20,16 @@
 #include "nnacl/op_base.h"

 typedef struct SoftmaxParameter {
+  // primitive parameter
   OpParameter op_parameter_;
   int32_t axis_;
+
+  // shape correlative
+  int input_shape_[5];
+
+  // other parameter
   int element_size_;
   int n_dim_;
-  int input_shape_[5];
 } SoftmaxParameter;

 #endif  // MINDSPORE_LITE_NNACL_SOFTMAX_PARAMETER_H_
@@ -20,8 +20,11 @@
 #include "nnacl/op_base.h"

 typedef struct SparseToDenseParameter {
+  // primitive parameter
   OpParameter op_parameter_;
   bool validate_indices_;
+
+  // other parameter
   int thread_num_;
 } SparseToDenseParameter;
@@ -21,12 +21,17 @@
 #include "nnacl/quantization/quantize.h"
 #define SPLIT_STRIDES_SIZE 32
 typedef struct SplitParameter {
+  // primitive parameter
   OpParameter op_parameter_;
-  SplitQuantArg quant_arg_;
   int num_split_;
   int *split_sizes_;
-  int strides_[SPLIT_STRIDES_SIZE];
   int split_dim_;
+
+  // shape correlative
+  int strides_[SPLIT_STRIDES_SIZE];
+
+  // other parameter
+  SplitQuantArg quant_arg_;
   int n_dims_;
   int split_count_;
 } SplitParameter;
@@ -20,6 +20,7 @@
 #include "nnacl/op_base.h"

 typedef struct SqueezeParameter {
+  // primitive parameter
   OpParameter op_parameter_;
   int axes_[8];
 } SqueezeParameter;
@@ -22,17 +22,22 @@
 #define SQUEEZE_OFFSET_MAX_SIZE 4

 typedef struct SqueezeParameter {
+  // primitive parameter
   OpParameter op_parameter_;
-  SqueezeQuantArg quant_arg;
-  int thread_count_;
-  int thread_id_;
-  int offset_size_;
-  int64_t offset_[SQUEEZE_OFFSET_MAX_SIZE];
-  int64_t in_offset_[SQUEEZE_OFFSET_MAX_SIZE];
   int64_t axis_;
+
+  // shape correlative
   const int *in_shape_;
   const int *out_shape_;
+  int offset_size_;
+  int64_t offset_[SQUEEZE_OFFSET_MAX_SIZE];
+  int64_t in_offset_[SQUEEZE_OFFSET_MAX_SIZE];
   int input_dim_;
+
+  // other parameter
+  SqueezeQuantArg quant_arg;
+  int thread_count_;
+  int thread_id_;
 } SqueezeParameter;

 #endif  // MINDSPORE_LITE_NNACL_SQUEEZE_PARAMETER_H_
@@ -19,6 +19,7 @@
 #include "nnacl/op_base.h"

 typedef struct StackParameter {
+  // primitive parameter
   OpParameter op_parameter_;
   int32_t axis_;
 } StackParameter;
@@ -19,14 +19,19 @@
 #include "nnacl/op_base.h"

 typedef struct StridedSliceParameter {
+  // primitive parameter
   OpParameter op_parameter_;
   int begins_[8];
   int ends_[8];
   int strides_[8];
   int isScale;
-  int num_axes_;
+
+  // shape correlative
   int in_shape_length_;
   int in_shape_[8];
+
+  // other parameter
+  int num_axes_;
   LiteDataType data_type;
 } StridedSliceParameter;
@@ -21,9 +21,12 @@
 #include "ir/dtype/type_id.h"

 typedef struct TensorListParameter {
+  // primitive parameter
   OpParameter op_parameter_;
   mindspore::TypeId shape_type_;
   mindspore::TypeId element_dtype_;
+
+  // other parameter
   int num_element_;
 } TensorListParameter;
@@ -22,12 +22,17 @@
 #define MAX_TRANSPOSE_DIM_SIZE 5

 typedef struct TransposeParameter {
+  // primitive parameter
   OpParameter op_parameter_;
   int perm_[8];
   bool conjugate_;
-  int num_axes_;
+
+  // shape correlative
   int strides_[8];
   int out_strides_[8];
+
+  // other parameter
+  int num_axes_;
   int data_size_;
 } TransposeParameter;
@@ -22,17 +22,22 @@
 #define UNSQUEEZE_OFFSET_MAX_SIZE 4

 typedef struct UnSqueezeParameter {
+  // primitive parameter
   OpParameter op_parameter_;
-  UnSqueezeQuantArg quant_arg;
-  int thread_count_;
-  int thread_id_;
-  int offset_size_;
-  int64_t offset_[UNSQUEEZE_OFFSET_MAX_SIZE];
-  int64_t in_offset_[UNSQUEEZE_OFFSET_MAX_SIZE];
   int64_t axis_;
+
+  // shape correlative
   const int *in_shape_;
   const int *out_shape_;
   int input_dim_;
+  int64_t offset_[UNSQUEEZE_OFFSET_MAX_SIZE];
+  int64_t in_offset_[UNSQUEEZE_OFFSET_MAX_SIZE];
+
+  // other parameter
+  UnSqueezeQuantArg quant_arg;
+  int thread_count_;
+  int thread_id_;
+  int offset_size_;
 } UnSqueezeParameter;

 #endif  // MINDSPORE_LITE_NNACL_UNSQUEEZE_PARAMETER_H_
@@ -20,9 +20,12 @@
 #include "nnacl/op_base.h"

 typedef struct UnstackParameter {
+  // primitive parameter
   OpParameter op_parameter_;
   int num_;
   int axis_;
+
+  // other parameter
   int pre_dims_;
   int axis_dim_;
   int after_dims_;
@@ -19,7 +19,10 @@
 #include "nnacl/op_base.h"

 typedef struct {
+  // primitive parameter
   OpParameter op_parameter_;
+
+  // other parameter
   int method_;  // 0 for bilinear; 1 for nearest
 } UpsampleParameter;
@@ -19,7 +19,10 @@
 #include "nnacl/op_base.h"

 typedef struct WhereParameter {
+  // primitive parameter
   OpParameter op_parameter_;
+
+  // other parameter
   int num_;
   int num1_;
   int num2_;
@@ -41,8 +41,8 @@ OpParameter *PopulateSliceParameter(const mindspore::lite::PrimitiveC *primitive
   }
   slice_param->param_length_ = static_cast<int32_t>(param_begin.size());
   for (int32_t i = 0; i < slice_param->param_length_; ++i) {
-    slice_param->begin_[i] = param_begin[i];
-    slice_param->size_[i] = param_size[i];
+    slice_param->begin_[i] = param_begin.at(i);
+    slice_param->size_[i] = param_size.at(i);
   }
   return reinterpret_cast<OpParameter *>(slice_param);
 }
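This hunk and most of the remaining ones make the same mechanical change: indexing with operator[] becomes .at(). A minimal, self-contained demonstration of the difference (plain std::vector, none of the lite types needed):

```cpp
#include <iostream>
#include <stdexcept>
#include <vector>

int main() {
  std::vector<int> param_begin = {4, 8, 15};
  // operator[] with an out-of-range index is undefined behavior;
  // at() bounds-checks and throws std::out_of_range instead.
  std::cout << param_begin.at(2) << "\n";  // in range: prints 15
  try {
    (void)param_begin.at(7);  // out of range: throws instead of reading garbage
  } catch (const std::out_of_range &e) {
    std::cout << "caught: " << e.what() << "\n";
  }
  return 0;
}
```

In a build with exceptions disabled, a failed at() check typically terminates the process, which is still a deterministic failure rather than silent memory corruption.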
@@ -32,8 +32,16 @@ OpParameter *PopulateSpaceToBatchNDParameter(const mindspore::lite::PrimitiveC *
   space_batch_param_nd->op_parameter_.type_ = primitive->Type();
   auto block_sizes = ((mindspore::lite::SpaceToBatchND *)primitive)->GetBlockShape();
   space_batch_param_nd->m_ = block_sizes.size();
+  if (block_sizes.size() > std::numeric_limits<size_t>::max() / sizeof(int)) {
+    MS_LOG(ERROR) << "The value of block_sizes.size() is too big";
+    return nullptr;
+  }
   memcpy(space_batch_param_nd->block_sizes_, (block_sizes.data()), block_sizes.size() * sizeof(int));
   auto paddings = ((mindspore::lite::SpaceToBatchND *)primitive)->GetPaddings();
+  if (paddings.size() > std::numeric_limits<size_t>::max() / sizeof(int)) {
+    MS_LOG(ERROR) << "The value of paddings.size() is too big";
+    return nullptr;
+  }
   memcpy(space_batch_param_nd->paddings_, (paddings.data()), paddings.size() * sizeof(int));
   return reinterpret_cast<OpParameter *>(space_batch_param_nd);
 }
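The guard inserted before each memcpy here recurs throughout the populate functions below. A condensed sketch of the pattern (SafeCopyInts is a hypothetical helper name of mine, not part of the patch):

```cpp
#include <cstring>
#include <limits>
#include <vector>

// Refuse the copy when count * sizeof(int) would wrap around size_t,
// mirroring the check the patch adds before each memcpy.
bool SafeCopyInts(int *dst, const std::vector<int> &src) {
  if (src.size() > std::numeric_limits<size_t>::max() / sizeof(int)) {
    return false;  // byte count would overflow; caller logs and bails out
  }
  if (!src.empty()) {
    std::memcpy(dst, src.data(), src.size() * sizeof(int));
  }
  return true;
}
```

Two caveats worth noting: the destinations are fixed-size arrays (block_sizes_[4], paddings_[4]), so a fuller check would also cap src.size() against the destination length, and an early `return nullptr` taken after the parameter struct has been malloc'd needs a matching free to avoid a leak.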
@@ -34,8 +34,16 @@ OpParameter *PopulateSpaceToBatchParameter(const mindspore::lite::PrimitiveC *pr
   space_batch_param->op_parameter_.type_ = primitive->Type();
   auto block_sizes = ((mindspore::lite::SpaceToBatch *)primitive)->BlockSizes();
   space_batch_param->m_ = block_sizes.size();
+  if (block_sizes.size() > std::numeric_limits<size_t>::max() / sizeof(int)) {
+    MS_LOG(ERROR) << "The value of block_sizes.size() is too big";
+    return nullptr;
+  }
   memcpy(space_batch_param->block_sizes_, (block_sizes.data()), block_sizes.size() * sizeof(int));
   auto paddings = ((mindspore::lite::SpaceToBatch *)primitive)->Paddings();
+  if (paddings.size() > std::numeric_limits<size_t>::max() / sizeof(int)) {
+    MS_LOG(ERROR) << "The value of paddings.size() is too big";
+    return nullptr;
+  }
   memcpy(space_batch_param->paddings_, (paddings.data()), paddings.size() * sizeof(int));
   return reinterpret_cast<OpParameter *>(space_batch_param);
 }
@@ -32,6 +32,10 @@ OpParameter *PopulateSplitParameter(const mindspore::lite::PrimitiveC *primitive
   auto param = reinterpret_cast<mindspore::lite::Split *>(const_cast<mindspore::lite::PrimitiveC *>(primitive));
   split_param->op_parameter_.type_ = primitive->Type();
   split_param->num_split_ = param->GetNumberSplit();
+  if (split_param->num_split_ > std::numeric_limits<int>::max() / static_cast<int>(sizeof(int))) {
+    MS_LOG(ERROR) << "The value of split_param->num_split_ is too big";
+    return nullptr;
+  }
   int *split_sizes = reinterpret_cast<int *>(malloc(split_param->num_split_ * sizeof(int)));
   if (split_sizes == nullptr) {
     MS_LOG(ERROR) << "malloc split size of SplitParameter failed.";
@@ -35,12 +35,28 @@ OpParameter *PopulateStridedSliceParameter(const mindspore::lite::PrimitiveC *pr
   auto n_dims = ((lite::StridedSlice *)primitive)->NDims();
   strided_slice_param->num_axes_ = n_dims;
   auto begin = ((lite::StridedSlice *)primitive)->GetBegins();
+  if (begin.size() > std::numeric_limits<size_t>::max() / sizeof(int)) {
+    MS_LOG(ERROR) << "The value of begin.size() is too big";
+    return nullptr;
+  }
   memcpy(strided_slice_param->begins_, (begin.data()), begin.size() * sizeof(int));
   auto end = ((lite::StridedSlice *)primitive)->GetEnds();
+  if (end.size() > std::numeric_limits<size_t>::max() / sizeof(int)) {
+    MS_LOG(ERROR) << "The value of end.size() is too big";
+    return nullptr;
+  }
   memcpy(strided_slice_param->ends_, (end.data()), end.size() * sizeof(int));
   auto stride = ((lite::StridedSlice *)primitive)->GetStrides();
+  if (stride.size() > std::numeric_limits<size_t>::max() / sizeof(int)) {
+    MS_LOG(ERROR) << "The value of stride.size() is too big";
+    return nullptr;
+  }
   memcpy(strided_slice_param->strides_, (stride.data()), stride.size() * sizeof(int));
   auto in_shape = ((lite::StridedSlice *)primitive)->GetInShape();
+  if (in_shape.size() > std::numeric_limits<size_t>::max() / sizeof(int)) {
+    MS_LOG(ERROR) << "The value of in_shape.size() is too big";
+    return nullptr;
+  }
   memcpy(strided_slice_param->in_shape_, (in_shape.data()), in_shape.size() * sizeof(int));
   strided_slice_param->in_shape_length_ = static_cast<int>(in_shape.size());
   return reinterpret_cast<OpParameter *>(strided_slice_param);
@@ -44,7 +44,7 @@ OpParameter *PopulateTileParameter(const mindspore::lite::PrimitiveC *primitive)
     tile_param->multiples_[i] = 1;
   }
   for (size_t i = 0; i < dims.size(); ++i) {
-    tile_param->multiples_[dims[i]] = multiples[i];
+    tile_param->multiples_[dims.at(i)] = multiples.at(i);
   }
 #endif
   return reinterpret_cast<OpParameter *>(tile_param);
@@ -76,7 +76,7 @@ int Reduce::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inp
     attr->keepDims = GetValue<bool>(prim.GetAttr("keep_dims"));
     if (inputs.size() == kAnfPopulaterInputNumTwo) {
-      auto inputNode = inputs[kAnfPopulaterInputNumOne];
+      auto inputNode = inputs.at(kAnfPopulaterInputNumOne);
       MS_ASSERT(inputNode != nullptr);
       if (inputNode->isa<ValueNode>()) {
         auto valueNode = inputNode->cast<ValueNodePtr>();
@@ -178,7 +178,7 @@ int Reduce::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outp
     }
     int begin_axis;
-    begin_axis = axes[0] < 0 ? axes[0] + rank : axes[0];
+    begin_axis = axes.at(0) < 0 ? axes.at(0) + rank : axes.at(0);
     for (auto i = begin_axis + 1; i < rank; ++i) {
       actual_axes.emplace_back(i);
     }
@@ -200,7 +200,8 @@ int Reduce::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outp
   for (size_t i = 0; i < in_shape.size(); i++) {
     bool reduce_axis = false;
     for (size_t idx = 0; idx < num_axes; ++idx) {
-      if (static_cast<size_t>(actual_axes[idx]) == i || static_cast<size_t>(actual_axes[idx] + in_shape.size()) == i) {
+      if (static_cast<size_t>(actual_axes.at(idx)) == i ||
+          static_cast<size_t>(actual_axes.at(idx) + in_shape.size()) == i) {
         reduce_axis = true;
         break;
       }
@@ -210,7 +211,7 @@ int Reduce::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outp
         out_shape.push_back(1);
       }
     } else {
-      out_shape.push_back(in_shape[i]);
+      out_shape.push_back(in_shape.at(i));
     }
   }
   output->set_shape(out_shape);
@@ -48,7 +48,7 @@ int Reshape::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &in
   if (this->primitive_->value.value == nullptr) {
     auto attr = new (std::nothrow) schema::ReshapeT();
     MS_ASSERT(inputs.size() == kAnfPopulaterInputNumThree - 1);
-    auto inputNode = inputs[kAnfPopulaterInputNumTwo - 1];
+    auto inputNode = inputs.at(kAnfPopulaterInputNumTwo - 1);
     if (inputNode->isa<ValueNode>()) {
       auto valueNode = inputNode->cast<ValueNodePtr>();
       MS_ASSERT(valueNode != nullptr);
@@ -58,7 +58,7 @@ int Reshape::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &in
         auto tuple = val->cast<ValueTuplePtr>();
         MS_ASSERT(tuple != nullptr);
         for (size_t i = 0; i < tuple->size(); ++i) {
-          auto elem = tuple->value()[i];
+          auto elem = tuple->value().at(i);
           MS_ASSERT(elem != nullptr);
           attr->shape.emplace_back(CastToInt(elem).front());
         }
@@ -114,7 +114,7 @@ Registry ReshapeRegistry(schema::PrimitiveType_Reshape, ReshapeCreator);
 int Reshape::CalNewShape(const Tensor *in_tensor, std::vector<int> *out_shape) const {
   size_t in_shape_size = 1;
   for (size_t i = 0; i < in_tensor->shape().size(); i++) {
-    in_shape_size *= in_tensor->shape()[i];
+    in_shape_size *= in_tensor->shape().at(i);
   }
   int64_t inferIndex = -1;
   size_t out_shapeSize = 1;
@@ -154,14 +154,14 @@ void CalShape(const T *data, const std::vector<Tensor *> &inputs, std::vector<in
     if (static_cast<int>(data[i]) == -1) {
       index = i;
     } else if (static_cast<int>(data[i]) == 0) {
-      size *= inputs[0]->shape()[i];
+      size *= inputs[0]->shape().at(i);
     } else {
       size *= data[i];
     }
     out_shape->push_back(data[i]);
   }
   if (static_cast<int>(data[index]) == -1) {
-    (*out_shape)[index] = input_count / size;
+    (*out_shape).at(index) = input_count / size;
   }
 }
 int Reshape::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
@@ -219,7 +219,7 @@ int Reshape::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> out
     }
   } else if (inputs_.size() == kSingleNum) {
     for (size_t i = 0; i < GetShape().size(); ++i) {
-      out_shape.push_back(GetShape()[i]);
+      out_shape.push_back(GetShape().at(i));
     }
   } else {
     MS_LOG(ERROR) << "inputs tensor size invalid.";
@@ -68,8 +68,8 @@ int Resize::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inp
       return RET_ERROR;
     }
     std::vector<int> targetSize = CastToInt(prim.GetAttr("size"));
-    attr->newHeight = targetSize[0];
-    attr->newWidth = targetSize[1];
+    attr->newHeight = targetSize.at(0);
+    attr->newWidth = targetSize.at(1);
     attr->alignCorners = GetValue<bool>(prim.GetAttr("align_corners"));

     this->primitive_->value.value = attr;
@@ -57,7 +57,7 @@ int Rfft::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> output
     return RET_OK;
   }
   auto input_shape = input->shape();
-  input_shape[input_shape.size() - 1] = GetFftLength() / 2 + 1;
+  input_shape.at(input_shape.size() - 1) = GetFftLength() / 2 + 1;
   input_shape.push_back(2);
   outputs_.front()->set_shape(input_shape);
   return RET_OK;
@@ -86,16 +86,17 @@ int Sgd::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor
     return RET_ERROR;
   }
-  if (inputs[0]->ElementsNum() != inputs[1]->ElementsNum() || inputs[0]->ElementsNum() != inputs[3]->ElementsNum() ||
-      inputs[2]->ElementsNum() != 1 || inputs[4]->ElementsNum() != 1) {
+  if (inputs.at(0)->ElementsNum() != inputs.at(1)->ElementsNum() ||
+      inputs.at(0)->ElementsNum() != inputs.at(3)->ElementsNum() || inputs.at(2)->ElementsNum() != 1 ||
+      inputs.at(4)->ElementsNum() != 1) {
     MS_LOG(ERROR) << "error input data size!";
     return RET_ERROR;
   }
   if (!outputs.empty()) {
     auto *out = outputs.front();
     MS_ASSERT(out != nullptr);
-    out->set_data_type(inputs[0]->data_type());
-    out->set_format(inputs[0]->format());
+    out->set_data_type(inputs.at(0)->data_type());
+    out->set_format(inputs.at(0)->format());
     out->set_shape({1});
   }
@@ -77,7 +77,7 @@ int Slice::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inpu
         }
       }
     }
-    auto sizeNode = inputs[kAnfPopulaterInputNumTwo];
+    auto sizeNode = inputs.at(kAnfPopulaterInputNumTwo);
     MS_ASSERT(sizeNode != nullptr);
     if (sizeNode->isa<ValueNode>()) {
       auto valueNode = sizeNode->cast<ValueNodePtr>();
@@ -172,8 +172,8 @@ int Slice::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tens
     return RET_PARAM_INVALID;
   }
   auto input = inputs.at(0);
-  outputs[0]->set_data_type(input->data_type());
-  outputs[0]->set_format(input->format());
+  outputs.at(0)->set_data_type(input->data_type());
+  outputs.at(0)->set_format(input->format());
   if (!infer_flag()) {
     return RET_OK;
   }
@@ -191,7 +191,7 @@ int Slice::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tens
   if (slice_size.empty() && inputs.at(2)->data_c() != nullptr) {
     for (int i = 0; i < inputs.at(2)->ElementsNum(); i++) {
       auto end = static_cast<int *>(inputs.at(2)->data_c())[i];
-      auto size = end < 0 ? end : (end == INT32_MAX ? -1 : end - slice_begin[i]);
+      auto size = end < 0 ? end : (end == INT32_MAX ? -1 : end - slice_begin.at(i));
       slice_size.emplace_back(size);
     }
   }
@@ -208,32 +208,32 @@ int Slice::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tens
   begin.assign(input_shape.size(), 0);
   size.assign(input_shape.size(), -1);
   for (size_t i = 0; i < slice_axes.size(); ++i) {
-    begin[slice_axes[i]] = slice_begin[i];
-    size[slice_axes[i]] = slice_size[i];
+    begin.at(slice_axes.at(i)) = slice_begin.at(i);
+    size.at(slice_axes.at(i)) = slice_size.at(i);
   }
   for (size_t i = 0; i < input_shape.size(); ++i) {
-    if (size[i] < 0 && size[i] != -1) {
-      MS_LOG(ERROR) << "Invalid size input!size[" << i << "]=" << size[i];
+    if (size.at(i) < 0 && size.at(i) != -1) {
+      MS_LOG(ERROR) << "Invalid size input!size[" << i << "]=" << size.at(i);
      return RET_PARAM_INVALID;
     }
-    if (begin[i] < 0) {
-      MS_LOG(ERROR) << "Invalid begin input " << begin[i] << " which should be >= 0";
+    if (begin.at(i) < 0) {
+      MS_LOG(ERROR) << "Invalid begin input " << begin.at(i) << " which should be >= 0";
      return RET_PARAM_INVALID;
     }
-    if (input_shape[i] <= begin[i]) {
-      MS_LOG(ERROR) << "Invalid begin input!begin[" << i << "]=" << begin[i]
-                    << " which should be <= " << input_shape[i];
+    if (input_shape.at(i) <= begin.at(i)) {
+      MS_LOG(ERROR) << "Invalid begin input!begin[" << i << "]=" << begin.at(i)
+                    << " which should be <= " << input_shape.at(i);
      return RET_PARAM_INVALID;
     }
-    if (size[i] > (input_shape[i] - begin[i])) {
-      MS_LOG(ERROR) << "Invalid size input " << size[i] << " which should be <= " << input_shape[i] - begin[i];
+    if (size.at(i) > (input_shape.at(i) - begin.at(i))) {
+      MS_LOG(ERROR) << "Invalid size input " << size.at(i) << " which should be <= " << input_shape.at(i) - begin.at(i);
      return RET_PARAM_INVALID;
     }
-    output_shape[i] = size[i] < 0 ? input_shape[i] - begin[i] : size[i];
+    output_shape.at(i) = size.at(i) < 0 ? input_shape.at(i) - begin.at(i) : size.at(i);
   }
-  outputs[0]->set_shape(output_shape);
+  outputs.at(0)->set_shape(output_shape);
   return RET_OK;
 }
 }  // namespace lite
@@ -96,8 +96,8 @@ int SpaceToBatchND::InferShape(std::vector<lite::Tensor *> inputs, std::vector<l
     MS_LOG(ERROR) << "space_to_batch_nd only support NHWC now!";
     return RET_ERROR;
   }
-  outputs[0]->set_data_type(input->data_type());
-  outputs[0]->set_format(input->format());
+  outputs.at(0)->set_data_type(input->data_type());
+  outputs.at(0)->set_format(input->format());
   if (!infer_flag()) {
     return RET_OK;
   }
@@ -112,16 +112,28 @@ int SpaceToBatchND::InferShape(std::vector<lite::Tensor *> inputs, std::vector<l
   int padding_right = 0;
   int block_w = 1;
   if (block_shape.size() == 2) {
-    padding_left = padding[2];
-    padding_right = padding[3];
-    block_w = block_shape[1];
+    padding_left = padding.at(2);
+    padding_right = padding.at(3);
+    block_w = block_shape.at(1);
   }
   std::vector<int32_t> output_shape(input_shape.size());
-  output_shape[NHWC_N] = input_shape[NHWC_N] * block_shape[0] * block_w;
-  output_shape[NHWC_H] = (input_shape[NHWC_H] + padding[0] + padding[1]) / block_shape[0];
-  output_shape[NHWC_W] = (input_shape[NHWC_W] + padding_left + padding_right) / block_w;
-  output_shape[NHWC_C] = input_shape[NHWC_C];
-  outputs[0]->set_shape(output_shape);
+  if (block_shape.at(0) * block_w > std::numeric_limits<int>::max() / input_shape.at(NHWC_N)) {
+    MS_LOG(ERROR) << "The value of block_shape.at(0) * block_w is too big";
+    return RET_ERROR;
+  }
+  output_shape.at(NHWC_N) = input_shape.at(NHWC_N) * block_shape.at(0) * block_w;
+  if (padding.at(0) + padding.at(1) > std::numeric_limits<int>::max() - input_shape.at(NHWC_H)) {
+    MS_LOG(ERROR) << "The value of padding.at(0) + padding.at(1) is too big";
+    return RET_ERROR;
+  }
+  output_shape.at(NHWC_H) = (input_shape.at(NHWC_H) + padding.at(0) + padding.at(1)) / block_shape.at(0);
+  if (padding_left + padding_right > std::numeric_limits<int>::max() - input_shape.at(NHWC_W)) {
+    MS_LOG(ERROR) << "The value of padding_left + padding_right is too big";
+    return RET_ERROR;
+  }
+  output_shape.at(NHWC_W) = (input_shape.at(NHWC_W) + padding_left + padding_right) / block_w;
+  output_shape.at(NHWC_C) = input_shape.at(NHWC_C);
+  outputs.at(0)->set_shape(output_shape);
   return RET_OK;
 }
 }  // namespace lite
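The three new checks above guard one multiplication and two additions against int overflow before the output shape is written. Factored out, the idiom is just this (helper names are mine; both assume non-negative operands, which holds for shapes and paddings here):

```cpp
#include <limits>

// True when a * b stays within int; assumes a >= 1 and b >= 0.
bool MulFitsInt(int a, int b) { return b <= std::numeric_limits<int>::max() / a; }

// True when a + b stays within int; assumes a >= 0 and b >= 0.
bool AddFitsInt(int a, int b) { return b <= std::numeric_limits<int>::max() - a; }
```

Note the first guard divides by input_shape.at(NHWC_N) while computing block_shape.at(0) * block_w unchecked; the patch implicitly assumes the block sizes themselves are small.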
@@ -71,8 +71,8 @@ int SpaceToDepth::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lit
     MS_LOG(ERROR) << "space_to_depth only support NHWC now!";
     return 1;
   }
-  outputs[0]->set_format(input->format());
-  outputs[0]->set_data_type(input->data_type());
+  outputs.at(0)->set_format(input->format());
+  outputs.at(0)->set_data_type(input->data_type());
   if (!infer_flag()) {
     return RET_OK;
   }
@@ -83,17 +83,21 @@ int SpaceToDepth::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lit
   }

   int32_t block_size = GetBlockSize();
-  if (input_shape[NHWC_H] % block_size != 0 || input_shape[NHWC_H] == 0 || input_shape[NHWC_W] % block_size != 0 ||
-      input_shape[NHWC_W] == 0) {
+  if (input_shape.at(NHWC_H) % block_size != 0 || input_shape.at(NHWC_H) == 0 ||
+      input_shape.at(NHWC_W) % block_size != 0 || input_shape.at(NHWC_W) == 0) {
     MS_LOG(ERROR) << "input dimension h or w size error!";
     return 1;
   }
   std::vector<int32_t> output_shape(input_shape.size());
-  output_shape[NHWC_N] = input_shape[NHWC_N];
-  output_shape[NHWC_H] = input_shape[NHWC_H] / block_size;
-  output_shape[NHWC_W] = input_shape[NHWC_W] / block_size;
-  output_shape[NHWC_C] = input_shape[NHWC_C] * (block_size * block_size);
-  outputs[0]->set_shape(output_shape);
+  output_shape.at(NHWC_N) = input_shape.at(NHWC_N);
+  output_shape.at(NHWC_H) = input_shape.at(NHWC_H) / block_size;
+  output_shape.at(NHWC_W) = input_shape.at(NHWC_W) / block_size;
+  if (block_size * block_size > std::numeric_limits<int32_t>::max() / input_shape.at(NHWC_C)) {
+    MS_LOG(ERROR) << "The value of block_size * block_size is too big";
+    return RET_ERROR;
+  }
+  output_shape.at(NHWC_C) = input_shape.at(NHWC_C) * (block_size * block_size);
+  outputs.at(0)->set_shape(output_shape);
   return RET_OK;
 }
 }  // namespace lite
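SpaceToDepth gets the same treatment for its channel dimension. A compact standalone version of the shape math with the new guard (InferSpaceToDepthShape is a hypothetical free function, not the method itself; it assumes NHWC layout and a block size small enough that block * block fits in int):

```cpp
#include <limits>

bool InferSpaceToDepthShape(const int in[4], int block, int out[4]) {
  // H and W must be non-zero and divisible by the block size.
  if (block <= 0 || in[1] == 0 || in[2] == 0 || in[1] % block != 0 || in[2] % block != 0) {
    return false;
  }
  // Guard C * block^2 against int overflow, as the patch now does.
  if (in[3] > 0 && block * block > std::numeric_limits<int>::max() / in[3]) {
    return false;
  }
  out[0] = in[0];                  // N unchanged
  out[1] = in[1] / block;          // H shrinks by block
  out[2] = in[2] / block;          // W shrinks by block
  out[3] = in[3] * block * block;  // C grows by block^2
  return true;
}
```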
| @@ -51,8 +51,8 @@ int SparseToDense::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor | |||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| auto input2 = inputs_.at(2); | auto input2 = inputs_.at(2); | ||||
| outputs_[0]->set_data_type(input2->data_type()); | |||||
| outputs_[0]->set_format(input2->format()); | |||||
| outputs_.at(0)->set_data_type(input2->data_type()); | |||||
| outputs_.at(0)->set_format(input2->format()); | |||||
| if (!infer_flag()) { | if (!infer_flag()) { | ||||
| return RET_OK; | return RET_OK; | ||||
| @@ -67,7 +67,7 @@ int SparseToDense::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor | |||||
| for (int i = 0; i < input1->ElementsNum(); i++) { | for (int i = 0; i < input1->ElementsNum(); i++) { | ||||
| output_shape.push_back(input1_data[i]); | output_shape.push_back(input1_data[i]); | ||||
| } | } | ||||
| outputs_[0]->set_shape(output_shape); | |||||
| outputs_.at(0)->set_shape(output_shape); | |||||
| return RET_OK; | return RET_OK; | ||||
| } | } | ||||
| } // namespace lite | } // namespace lite | ||||
| @@ -119,8 +119,8 @@ int Split::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outpu | |||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| for (int i = 0; i < number_split; ++i) { | for (int i = 0; i < number_split; ++i) { | ||||
| outputs_[i]->set_data_type(input->data_type()); | |||||
| outputs_[i]->set_format(input->format()); | |||||
| outputs_.at(i)->set_data_type(input->data_type()); | |||||
| outputs_.at(i)->set_format(input->format()); | |||||
| } | } | ||||
| if (!infer_flag()) { | if (!infer_flag()) { | ||||
| return RET_OK; | return RET_OK; | ||||
| @@ -129,26 +129,26 @@ int Split::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outpu | |||||
| std::vector<int> input_shape = input->shape(); | std::vector<int> input_shape = input->shape(); | ||||
| std::vector<int> size_split; | std::vector<int> size_split; | ||||
| for (size_t i = 0; i < GetSizeSplits().size(); ++i) { | for (size_t i = 0; i < GetSizeSplits().size(); ++i) { | ||||
| size_split.push_back(GetSizeSplits()[i]); | |||||
| size_split.push_back(GetSizeSplits().at(i)); | |||||
| } | } | ||||
| for (int i = 0; i < number_split; ++i) { | for (int i = 0; i < number_split; ++i) { | ||||
| std::vector<int> output_shape; | std::vector<int> output_shape; | ||||
| output_shape.insert(output_shape.begin(), input_shape.begin(), input_shape.end()); | output_shape.insert(output_shape.begin(), input_shape.begin(), input_shape.end()); | ||||
| int split_dim_i = input_shape[split_dim]; | |||||
| int split_dim_i = input_shape.at(split_dim); | |||||
| // support split size is -1 in the end. | // support split size is -1 in the end. | ||||
| if (size_split.empty()) { | if (size_split.empty()) { | ||||
| split_dim_i = input_shape[split_dim] / number_split; | |||||
| } else if (i == number_split - 1 && size_split[i] == -1) { | |||||
| split_dim_i = input_shape.at(split_dim) / number_split; | |||||
| } else if (i == number_split - 1 && size_split.at(i) == -1) { | |||||
| for (size_t j = 0; j < size_split.size() - 1; ++j) { | for (size_t j = 0; j < size_split.size() - 1; ++j) { | ||||
| split_dim_i -= size_split[j]; | |||||
| split_dim_i -= size_split.at(j); | |||||
| } | } | ||||
| } else { | } else { | ||||
| split_dim_i = size_split[i]; | |||||
| split_dim_i = size_split.at(i); | |||||
| } | } | ||||
| output_shape[split_dim] = split_dim_i; | |||||
| outputs_[i]->set_shape(output_shape); | |||||
| outputs_[i]->set_data_type(input->data_type()); | |||||
| outputs_[i]->set_format(input->format()); | |||||
| output_shape.at(split_dim) = split_dim_i; | |||||
| outputs_.at(i)->set_shape(output_shape); | |||||
| outputs_.at(i)->set_data_type(input->data_type()); | |||||
| outputs_.at(i)->set_format(input->format()); | |||||
| } | } | ||||
| return RET_OK; | return RET_OK; | ||||
| } | } | ||||
| @@ -117,19 +117,19 @@ int Squeeze::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> out | |||||
| } | } | ||||
| if (axes_.size() == 0) { | if (axes_.size() == 0) { | ||||
| for (size_t i = 0; i < in_shape.size(); i++) { | for (size_t i = 0; i < in_shape.size(); i++) { | ||||
| if (in_shape[i] != 1) { | |||||
| out_shape.push_back(in_shape[i]); | |||||
| if (in_shape.at(i) != 1) { | |||||
| out_shape.push_back(in_shape.at(i)); | |||||
| } | } | ||||
| } | } | ||||
| } else { | } else { | ||||
| size_t axisIdx = 0; | size_t axisIdx = 0; | ||||
| for (size_t i = 0; i < in_shape.size(); i++) { | for (size_t i = 0; i < in_shape.size(); i++) { | ||||
| if (axisIdx < axes_.size() && axes_[axisIdx] == static_cast<int>(i)) { | |||||
| MS_ASSERT(in_shape[i] == 1); | |||||
| if (axisIdx < axes_.size() && axes_.at(axisIdx) == static_cast<int>(i)) { | |||||
| MS_ASSERT(in_shape.at(i) == 1); | |||||
| axisIdx++; | axisIdx++; | ||||
| continue; | continue; | ||||
| } else { | } else { | ||||
| out_shape.push_back(in_shape[i]); | |||||
| out_shape.push_back(in_shape.at(i)); | |||||
| } | } | ||||
| } | } | ||||
| } | } | ||||
| @@ -80,8 +80,8 @@ int Stack::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> output | |||||
| } | } | ||||
| auto input = inputs.at(0); | auto input = inputs.at(0); | ||||
| auto input0_data_type = input->data_type(); | auto input0_data_type = input->data_type(); | ||||
| outputs[0]->set_data_type(input0_data_type); | |||||
| outputs[0]->set_format(input->format()); | |||||
| outputs.at(0)->set_data_type(input0_data_type); | |||||
| outputs.at(0)->set_format(input->format()); | |||||
| if (!infer_flag()) { | if (!infer_flag()) { | ||||
| return RET_OK; | return RET_OK; | ||||
| } | } | ||||
| @@ -95,25 +95,25 @@ int Stack::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> output | |||||
| } | } | ||||
| for (size_t i = 1; i < inputs.size(); ++i) { | for (size_t i = 1; i < inputs.size(); ++i) { | ||||
| auto input_shape_tmp = inputs[i]->shape(); | |||||
| auto input_shape_tmp = inputs.at(i)->shape(); | |||||
| if (input_shape_tmp.size() != input_shape.size()) { | if (input_shape_tmp.size() != input_shape.size()) { | ||||
| MS_LOG(ERROR) << "All input shape size should be the same!"; | MS_LOG(ERROR) << "All input shape size should be the same!"; | ||||
| return RET_PARAM_INVALID; | return RET_PARAM_INVALID; | ||||
| } | } | ||||
| for (size_t j = 0; j < input_shape.size(); ++j) { | for (size_t j = 0; j < input_shape.size(); ++j) { | ||||
| if (input_shape_tmp[j] != input_shape[j]) { | |||||
| if (input_shape_tmp.at(j) != input_shape.at(j)) { | |||||
| MS_LOG(ERROR) << "All input shape should be the same!"; | MS_LOG(ERROR) << "All input shape should be the same!"; | ||||
| return RET_PARAM_INVALID; | return RET_PARAM_INVALID; | ||||
| } | } | ||||
| } | } | ||||
| if (inputs[i]->data_type() != input0_data_type) { | |||||
| if (inputs.at(i)->data_type() != input0_data_type) { | |||||
| MS_LOG(ERROR) << "All input shuld have the same data type!input[" << i | MS_LOG(ERROR) << "All input shuld have the same data type!input[" << i | ||||
| << "] data type = " << inputs[i]->data_type(); | |||||
| << "] data type = " << inputs.at(i)->data_type(); | |||||
| return RET_PARAM_INVALID; | return RET_PARAM_INVALID; | ||||
| } | } | ||||
| } | } | ||||
| output_shape.insert(output_shape.begin() + axis, inputs.size()); | output_shape.insert(output_shape.begin() + axis, inputs.size()); | ||||
| outputs[0]->set_shape(output_shape); | |||||
| outputs.at(0)->set_shape(output_shape); | |||||
| return RET_OK; | return RET_OK; | ||||
| } | } | ||||
| } // namespace lite | } // namespace lite | ||||
| @@ -179,7 +179,7 @@ constexpr size_t kStridedSliceMultiInputNumMax = 5; | |||||
| } // namespace | } // namespace | ||||
| bool StridedSlice::CheckInputs(std::vector<lite::Tensor *> inputs_) { | bool StridedSlice::CheckInputs(std::vector<lite::Tensor *> inputs_) { | ||||
| for (size_t i = 1; i < inputs_.size(); ++i) { | for (size_t i = 1; i < inputs_.size(); ++i) { | ||||
| if (inputs_[i]->data_c() == nullptr) { | |||||
| if (inputs_.at(i)->data_c() == nullptr) { | |||||
| MS_LOG(DEBUG) << "strided_slice has input from other node, which only can be obtained when running."; | MS_LOG(DEBUG) << "strided_slice has input from other node, which only can be obtained when running."; | ||||
| return false; | return false; | ||||
| } | } | ||||
| @@ -309,8 +309,8 @@ int StridedSlice::HandleAxesInputExist(const std::vector<lite::Tensor *> &inputs | |||||
| } else { | } else { | ||||
| axes.assign(axes_data, axes_data + begin_ndim); | axes.assign(axes_data, axes_data + begin_ndim); | ||||
| for (int i = 0; i < begin_ndim; ++i) { | for (int i = 0; i < begin_ndim; ++i) { | ||||
| if (axes[i] < 0) { | |||||
| axes[i] += ndim_; | |||||
| if (axes.at(i) < 0) { | |||||
| axes.at(i) += ndim_; | |||||
| } | } | ||||
| } | } | ||||
| } | } | ||||
| @@ -321,20 +321,20 @@ int StridedSlice::HandleAxesInputExist(const std::vector<lite::Tensor *> &inputs | |||||
| strides_.assign(ndim_, 0); | strides_.assign(ndim_, 0); | ||||
| auto input_shape = input_tensor->shape(); | auto input_shape = input_tensor->shape(); | ||||
| for (int i = 0; i < ndim_; ++i) { | for (int i = 0; i < ndim_; ++i) { | ||||
| in_shape_[i] = input_shape.at(i); | |||||
| in_shape_.at(i) = input_shape.at(i); | |||||
| } | } | ||||
| for (int i = 0; i < ndim_; ++i) { | for (int i = 0; i < ndim_; ++i) { | ||||
| auto axes_it = std::find(axes.begin(), axes.end(), i); | auto axes_it = std::find(axes.begin(), axes.end(), i); | ||||
| if (axes_it != axes.end()) { | if (axes_it != axes.end()) { | ||||
| auto axis = axes_it - axes.begin(); | auto axis = axes_it - axes.begin(); | ||||
| // begins or ends exceed limit will be set to limit | // begins or ends exceed limit will be set to limit | ||||
| begins_[i] = std::max(std::min(begin_data[axis], input_shape[i] - 1), -input_shape[i]); | |||||
| ends_[i] = std::max(std::min(end_data[axis], input_shape[i]), -input_shape[i] - 1); | |||||
| strides_[i] = stride_data[axis]; | |||||
| begins_.at(i) = std::max(std::min(begin_data[axis], input_shape.at(i) - 1), -input_shape.at(i)); | |||||
| ends_.at(i) = std::max(std::min(end_data[axis], input_shape.at(i)), -input_shape.at(i) - 1); | |||||
| strides_.at(i) = stride_data[axis]; | |||||
| } else { | } else { | ||||
| begins_[i] = 0; | |||||
| ends_[i] = input_shape[i]; | |||||
| strides_[i] = 1; | |||||
| begins_.at(i) = 0; | |||||
| ends_.at(i) = input_shape.at(i); | |||||
| strides_.at(i) = 1; | |||||
| } | } | ||||
| } | } | ||||
| return RET_OK; | return RET_OK; | ||||
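Note: the clamping above confines arbitrary begin/end values to the valid slice range. A worked example with assumed values (input_shape.at(i) == 5):

// begins_: max(min( 100, 5 - 1), -5) ->  4   (clamped to the last valid index)
// begins_: max(min(-100, 5 - 1), -5) -> -5   (negative indices bottom out at -shape)
// ends_:   max(min( 100, 5), -5 - 1) ->  5   (one past the end is permitted)
// ends_:   max(min(-100, 5), -5 - 1) -> -6   (one before the first element, for reversed slices)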
| @@ -353,7 +353,7 @@ int StridedSlice::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lit | |||||
| } | } | ||||
| auto input = inputs.at(0); | auto input = inputs.at(0); | ||||
| outputs.front()->set_data_type(input->data_type()); | outputs.front()->set_data_type(input->data_type()); | ||||
| outputs[0]->set_format(input->format()); | |||||
| outputs.at(0)->set_format(input->format()); | |||||
| MS_ASSERT(input != nullptr); | MS_ASSERT(input != nullptr); | ||||
| auto input_shape = input->shape(); | auto input_shape = input->shape(); | ||||
| auto inferflag = infer_flag(); | auto inferflag = infer_flag(); | ||||
| @@ -369,9 +369,9 @@ int StridedSlice::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lit | |||||
| if (inferflag) { | if (inferflag) { | ||||
| in_shape_.emplace_back(input_shape.at(i)); | in_shape_.emplace_back(input_shape.at(i)); | ||||
| } | } | ||||
| begins_.emplace_back((GetBegin())[i]); | |||||
| ends_.emplace_back((GetEnd())[i]); | |||||
| strides_.emplace_back((GetStride())[i]); | |||||
| begins_.emplace_back((GetBegin()).at(i)); | |||||
| ends_.emplace_back((GetEnd()).at(i)); | |||||
| strides_.emplace_back((GetStride()).at(i)); | |||||
| } | } | ||||
| } | } | ||||
| if (!CheckInputs(inputs)) { | if (!CheckInputs(inputs)) { | ||||
| @@ -31,10 +31,10 @@ int TensorListFromTensor::InferShape(std::vector<lite::Tensor *> inputs_, std::v | |||||
| // outputs0: vector<tensor>.size() dtype | // outputs0: vector<tensor>.size() dtype | ||||
| // outputs1: element_shape | // outputs1: element_shape | ||||
| // outputs2-n: vector<tensor> | // outputs2-n: vector<tensor> | ||||
| auto input = inputs_[0]; | |||||
| auto input = inputs_.at(0); | |||||
| MS_ASSERT(input != nullptr); | MS_ASSERT(input != nullptr); | ||||
| std::vector<int> in_shape = input->shape(); | std::vector<int> in_shape = input->shape(); | ||||
| int dim0 = in_shape[0]; | |||||
| int dim0 = in_shape.at(0); | |||||
| if (dim0 <= 0) { | if (dim0 <= 0) { | ||||
| MS_LOG(ERROR) << "inputs_[0] dim0:" << dim0 << " must greater than 0"; | MS_LOG(ERROR) << "inputs_[0] dim0:" << dim0 << " must greater than 0"; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| @@ -46,29 +46,29 @@ int TensorListFromTensor::InferShape(std::vector<lite::Tensor *> inputs_, std::v | |||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| for (int i = 0; i < dim0; ++i) { | for (int i = 0; i < dim0; ++i) { | ||||
| auto output = outputs_[i + 2]; | |||||
| auto output = outputs_.at(i + 2); | |||||
| MS_ASSERT(output != nullptr); | MS_ASSERT(output != nullptr); | ||||
| output->set_data_type(input->data_type()); | output->set_data_type(input->data_type()); | ||||
| output->set_shape(out_shape); | output->set_shape(out_shape); | ||||
| } | } | ||||
| auto output = outputs_[0]; // vector<tensor>.size(), tensorlist.dtype | |||||
| auto output = outputs_.at(0); // vector<tensor>.size(), tensorlist.dtype | |||||
| MS_ASSERT(output != nullptr); | MS_ASSERT(output != nullptr); | ||||
| output->set_data_type(kNumberTypeInt); | output->set_data_type(kNumberTypeInt); | ||||
| output->set_shape(std::vector<int>(1, 2)); // one element.value = 2 | output->set_shape(std::vector<int>(1, 2)); // one element.value = 2 | ||||
| output = outputs_[1]; // element_shape tensor | |||||
| output = outputs_.at(1); // element_shape tensor | |||||
| MS_ASSERT(output != nullptr); | MS_ASSERT(output != nullptr); | ||||
| output->set_data_type(inputs_[1]->data_type()); | |||||
| output->set_format(inputs_[1]->format()); | |||||
| output->set_shape(inputs_[1]->shape()); | |||||
| output->set_data_type(inputs_.at(1)->data_type()); | |||||
| output->set_format(inputs_.at(1)->format()); | |||||
| output->set_shape(inputs_.at(1)->shape()); | |||||
| return RET_OK; | return RET_OK; | ||||
| } | } | ||||
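Note: TensorListFromTensor (and TensorListReserve below) flatten a tensor list into a plain vector of tensors. The output convention, as read from this code: outputs[0] holds the pair {num_elements, element_dtype} as two int32 values, outputs[1] holds the element_shape tensor, and outputs[2 .. 2 + n - 1] hold the n element tensors. `TensorListHeader` below is a hypothetical name used only for illustration:

// tensors[0]              : two int32 values {num_elements, element_dtype}
// tensors[1]              : the element_shape tensor
// tensors[2 .. 2 + n - 1] : the n element tensors themselves
struct TensorListHeader {  // hypothetical helper, not part of this patch
  int num_elements;
  int element_dtype;
};

inline TensorListHeader ReadTensorListHeader(const int *tensor0_data) {
  return TensorListHeader{tensor0_data[0], tensor0_data[1]};
}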
| bool TensorListGetItem::IsFullyDefined(const std::vector<int> &shape) const { | bool TensorListGetItem::IsFullyDefined(const std::vector<int> &shape) const { | ||||
| for (size_t i = 0; i < shape.size(); ++i) { | for (size_t i = 0; i < shape.size(); ++i) { | ||||
| if (shape[i] < 0) { | |||||
| if (shape.at(i) < 0) { | |||||
| return false; | return false; | ||||
| } | } | ||||
| } | } | ||||
| @@ -77,21 +77,21 @@ bool TensorListGetItem::IsFullyDefined(const std::vector<int> &shape) const { | |||||
| int TensorListGetItem::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) { | int TensorListGetItem::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) { | ||||
| int in_vec_size = inputs_.size(); | int in_vec_size = inputs_.size(); | ||||
| auto input0 = inputs_[0]; | |||||
| auto input0 = inputs_.at(0); | |||||
| MS_ASSERT(input0 != nullptr); | MS_ASSERT(input0 != nullptr); | ||||
| auto in0_ptr = reinterpret_cast<int *>(input0->data_c()); | auto in0_ptr = reinterpret_cast<int *>(input0->data_c()); | ||||
| if (in_vec_size != in0_ptr[0] + 4) { | if (in_vec_size != in0_ptr[0] + 4) { | ||||
| MS_LOG(ERROR) << "inputs_.size():" << in_vec_size << " must be equal to:" << in0_ptr[0] + 4; | MS_LOG(ERROR) << "inputs_.size():" << in_vec_size << " must be equal to:" << in0_ptr[0] + 4; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| auto get_index = inputs_[in0_ptr[0] + 2]; | |||||
| auto get_index = inputs_.at(in0_ptr[0] + 2); | |||||
| MS_ASSERT(get_index != nullptr); | MS_ASSERT(get_index != nullptr); | ||||
| index_ = reinterpret_cast<int *>(get_index->data_c())[0]; | index_ = reinterpret_cast<int *>(get_index->data_c())[0]; | ||||
| if (index_ < 0 || index_ > in0_ptr[0]) { | if (index_ < 0 || index_ > in0_ptr[0]) { | ||||
| MS_LOG(ERROR) << "index_:" << index_ << "must in [0, " << in0_ptr[0] << "]"; | MS_LOG(ERROR) << "index_:" << index_ << "must in [0, " << in0_ptr[0] << "]"; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| auto input_index = inputs_[index_ + 2]; | |||||
| auto input_index = inputs_.at(index_ + 2); | |||||
| MS_ASSERT(input_index != nullptr); | MS_ASSERT(input_index != nullptr); | ||||
| auto output = outputs_.front(); | auto output = outputs_.front(); | ||||
| MS_ASSERT(output != nullptr); | MS_ASSERT(output != nullptr); | ||||
| @@ -100,7 +100,7 @@ int TensorListGetItem::InferShape(std::vector<lite::Tensor *> inputs_, std::vect | |||||
| output->set_data_type(input_index->data_type()); | output->set_data_type(input_index->data_type()); | ||||
| output->set_shape(input_index->shape()); | output->set_shape(input_index->shape()); | ||||
| } else { | } else { | ||||
| auto ele_shape_tensor = inputs_[in0_ptr[0] + 3]; | |||||
| auto ele_shape_tensor = inputs_.at(in0_ptr[0] + 3); | |||||
| MS_ASSERT(ele_shape_tensor != nullptr); | MS_ASSERT(ele_shape_tensor != nullptr); | ||||
| auto ele_shape_type = ele_shape_tensor->data_type(); | auto ele_shape_type = ele_shape_tensor->data_type(); | ||||
| if (ele_shape_type != kNumberTypeInt) { | if (ele_shape_type != kNumberTypeInt) { | ||||
| @@ -114,11 +114,11 @@ int TensorListGetItem::InferShape(std::vector<lite::Tensor *> inputs_, std::vect | |||||
| } | } | ||||
| if (!IsFullyDefined(element_shape_)) { | if (!IsFullyDefined(element_shape_)) { | ||||
| for (int i = 0; i < in0_ptr[0]; ++i) { | for (int i = 0; i < in0_ptr[0]; ++i) { | ||||
| auto input = inputs_[i + 2]; | |||||
| auto input = inputs_.at(i + 2); | |||||
| if (input->data_type() != kTypeUnknown) { | if (input->data_type() != kTypeUnknown) { | ||||
| std::vector<int> tmp = input->shape(); | std::vector<int> tmp = input->shape(); | ||||
| for (size_t j = 0; j < tmp.size(); ++j) { | for (size_t j = 0; j < tmp.size(); ++j) { | ||||
| element_shape_[j] = element_shape_[j] >= 0 ? element_shape_[j] : tmp[j]; | |||||
| element_shape_.at(j) = element_shape_.at(j) >= 0 ? element_shape_.at(j) : tmp.at(j); | |||||
| } | } | ||||
| } | } | ||||
| } | } | ||||
| @@ -189,7 +189,7 @@ int TensorListReserve::InferShape(std::vector<lite::Tensor *> inputs_, std::vect | |||||
| << " must be \"kNumberTypeInt\":" << kNumberTypeInt; | << " must be \"kNumberTypeInt\":" << kNumberTypeInt; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| auto input1 = inputs_[1]; | |||||
| auto input1 = inputs_.at(1); | |||||
| MS_ASSERT(input1 != nullptr); | MS_ASSERT(input1 != nullptr); | ||||
| auto num_ele_type = input1->data_type(); | auto num_ele_type = input1->data_type(); | ||||
| if (num_ele_type != kNumberTypeInt) { | if (num_ele_type != kNumberTypeInt) { | ||||
| @@ -204,18 +204,18 @@ int TensorListReserve::InferShape(std::vector<lite::Tensor *> inputs_, std::vect | |||||
| } | } | ||||
| for (int i = 0; i < num_elements; ++i) { | for (int i = 0; i < num_elements; ++i) { | ||||
| auto output = outputs_[i + 2]; | |||||
| auto output = outputs_.at(i + 2); | |||||
| MS_ASSERT(output != nullptr); | MS_ASSERT(output != nullptr); | ||||
| output->set_data_type(kTypeUnknown); | output->set_data_type(kTypeUnknown); | ||||
| output->set_shape(std::vector<int>(1, 0)); // shape = [0] | output->set_shape(std::vector<int>(1, 0)); // shape = [0] | ||||
| } | } | ||||
| auto output = outputs_[0]; // vector<tensor>.size(), tensorlist.dtype | |||||
| auto output = outputs_.at(0); // vector<tensor>.size(), tensorlist.dtype | |||||
| MS_ASSERT(output != nullptr); | MS_ASSERT(output != nullptr); | ||||
| output->set_data_type(kNumberTypeInt); | output->set_data_type(kNumberTypeInt); | ||||
| output->set_shape(std::vector<int>(1, 2)); // one element.value = 2 | output->set_shape(std::vector<int>(1, 2)); // one element.value = 2 | ||||
| output = outputs_[1]; // element_shape tensor | |||||
| output = outputs_.at(1); // element_shape tensor | |||||
| MS_ASSERT(output != nullptr); | MS_ASSERT(output != nullptr); | ||||
| output->set_data_type(input0->data_type()); | output->set_data_type(input0->data_type()); | ||||
| output->set_format(input0->format()); | output->set_format(input0->format()); | ||||
| @@ -225,7 +225,7 @@ int TensorListReserve::InferShape(std::vector<lite::Tensor *> inputs_, std::vect | |||||
| bool TensorListStack::IsFullyDefined(const std::vector<int> &shape) const { | bool TensorListStack::IsFullyDefined(const std::vector<int> &shape) const { | ||||
| for (size_t i = 0; i < shape.size(); ++i) { | for (size_t i = 0; i < shape.size(); ++i) { | ||||
| if (shape[i] < 0) { | |||||
| if (shape.at(i) < 0) { | |||||
| return false; | return false; | ||||
| } | } | ||||
| } | } | ||||
| @@ -243,16 +243,16 @@ int TensorListStack::InferShape(std::vector<lite::Tensor *> inputs_, std::vector | |||||
| MS_LOG(ERROR) << "inputs_.size():" << vec_in_size << " must be equal:" << input0_ptr[0] + 3; | MS_LOG(ERROR) << "inputs_.size():" << vec_in_size << " must be equal:" << input0_ptr[0] + 3; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| auto ele_shape = inputs_[input0_ptr[0] + 2]; // element shape | |||||
| auto ele_shape = inputs_.at(input0_ptr[0] + 2); // element shape | |||||
| MS_ASSERT(ele_shape != nullptr); | MS_ASSERT(ele_shape != nullptr); | ||||
| auto ele_shape_ptr = reinterpret_cast<int *>(ele_shape->data_c()); | auto ele_shape_ptr = reinterpret_cast<int *>(ele_shape->data_c()); | ||||
| for (int i = 0; i < ele_shape->ElementsNum(); ++i) { | for (int i = 0; i < ele_shape->ElementsNum(); ++i) { | ||||
| output_shape_.push_back(ele_shape_ptr[i]); | output_shape_.push_back(ele_shape_ptr[i]); | ||||
| } | } | ||||
| std::vector<int> tensorlist_shape; | std::vector<int> tensorlist_shape; | ||||
| MS_ASSERT(inputs_[1] != nullptr); | |||||
| auto input1_ptr = reinterpret_cast<int *>(inputs_[1]->data_c()); | |||||
| for (int i = 0; i < inputs_[1]->ElementsNum(); ++i) { | |||||
| MS_ASSERT(inputs_.at(1) != nullptr); | |||||
| auto input1_ptr = reinterpret_cast<int *>(inputs_.at(1)->data_c()); | |||||
| for (int i = 0; i < inputs_.at(1)->ElementsNum(); ++i) { | |||||
| tensorlist_shape.push_back(input1_ptr[i]); | tensorlist_shape.push_back(input1_ptr[i]); | ||||
| } | } | ||||
| auto status = MergeShape(tensorlist_shape); | auto status = MergeShape(tensorlist_shape); | ||||
| @@ -266,7 +266,7 @@ int TensorListStack::InferShape(std::vector<lite::Tensor *> inputs_, std::vector | |||||
| } | } | ||||
| if (!IsFullyDefined(tensorlist_shape)) { | if (!IsFullyDefined(tensorlist_shape)) { | ||||
| for (int i = 0; i < input0_ptr[0]; ++i) { // get tensorlist every tensor | for (int i = 0; i < input0_ptr[0]; ++i) { // get tensorlist every tensor | ||||
| auto tensor_tmp = inputs_[i + 2]; | |||||
| auto tensor_tmp = inputs_.at(i + 2); | |||||
| MS_ASSERT(tensor_tmp != nullptr); | MS_ASSERT(tensor_tmp != nullptr); | ||||
| if (tensor_tmp->data_type() != kTypeUnknown) { | if (tensor_tmp->data_type() != kTypeUnknown) { | ||||
| status = MergeShape(tensor_tmp->shape()); | status = MergeShape(tensor_tmp->shape()); | ||||
| @@ -298,15 +298,15 @@ int TensorListStack::MergeShape(const std::vector<int> &shape) { | |||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| for (size_t i = 0; i < dim0; ++i) { | for (size_t i = 0; i < dim0; ++i) { | ||||
| int dim0_size = shape[i]; | |||||
| int dim1_size = output_shape_[i]; | |||||
| int dim0_size = shape.at(i); | |||||
| int dim1_size = output_shape_.at(i); | |||||
| if (dim0_size >= 0 && dim1_size >= 0 && dim0_size != dim1_size) { | if (dim0_size >= 0 && dim1_size >= 0 && dim0_size != dim1_size) { | ||||
| MS_LOG(ERROR) << "shape[" << i << "]:" << dim0_size << " is incompatible with output_shape_[" << i | MS_LOG(ERROR) << "shape[" << i << "]:" << dim0_size << " is incompatible with output_shape_[" << i | ||||
| << "]:" << dim1_size; | << "]:" << dim1_size; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| int tmp_size = dim1_size >= 0 ? dim1_size : dim0_size; | int tmp_size = dim1_size >= 0 ? dim1_size : dim0_size; | ||||
| output_shape_[i] = tmp_size; | |||||
| output_shape_.at(i) = tmp_size; | |||||
| } | } | ||||
| return RET_OK; | return RET_OK; | ||||
| } | } | ||||
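Note: MergeShape treats a negative dimension as unknown, fills it from the other shape, and fails only when two known dimensions disagree. A self-contained sketch of that rule, assuming a negative value marks an unknown dimension:

#include <vector>

// Merge `in` into `out`; a negative dim means "unknown".
// Returns false when two known dims conflict.
bool MergeDims(std::vector<int> *out, const std::vector<int> &in) {
  if (out->size() != in.size()) {
    return false;
  }
  for (size_t i = 0; i < in.size(); ++i) {
    int a = out->at(i);
    int b = in.at(i);
    if (a >= 0 && b >= 0 && a != b) {
      return false;  // incompatible known sizes
    }
    out->at(i) = a >= 0 ? a : b;  // keep the known size, if any
  }
  return true;
}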
| @@ -160,10 +160,14 @@ int Tile::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> output | |||||
| MS_ASSERT(multiples.size() == dims.size()); | MS_ASSERT(multiples.size() == dims.size()); | ||||
| for (size_t i = 0; i < in_dims; ++i) { | for (size_t i = 0; i < in_dims; ++i) { | ||||
| out_shape.push_back(input->shape()[i]); | |||||
| out_shape.push_back(input->shape().at(i)); | |||||
| } | } | ||||
| for (size_t i = 0; i < dims.size(); ++i) { | for (size_t i = 0; i < dims.size(); ++i) { | ||||
| out_shape[dims[i]] = input->shape()[dims[i]] * (multiples[i]); | |||||
| if (multiples.at(i) > std::numeric_limits<int>::max() / input->shape().at(dims.at(i))) { | |||||
| MS_LOG(ERROR) << "The value of multiples[" << i << "] is too big"; | |||||
| return RET_ERROR; | |||||
| } | |||||
| out_shape.at(dims.at(i)) = input->shape().at(dims.at(i)) * (multiples.at(i)); | |||||
| } | } | ||||
| #endif | #endif | ||||
| output->set_shape(out_shape); | output->set_shape(out_shape); | ||||
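Note: the new guard in Tile::InferShape rejects `dim * multiple` before the multiplication can overflow, by testing `multiple > INT_MAX / dim`. A standalone sketch of the idiom (assuming both operands are positive, as tensor dims and multiples are here):

#include <limits>

// True when a * b would exceed int range; assumes a > 0 and b > 0.
bool MulWouldOverflow(int a, int b) {
  return b > std::numeric_limits<int>::max() / a;
}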
| @@ -72,9 +72,9 @@ int TopK::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> output | |||||
| return RET_OK; | return RET_OK; | ||||
| } | } | ||||
| auto out_shape = input->shape(); | auto out_shape = input->shape(); | ||||
| out_shape[out_shape.size() - 1] = GetK(); | |||||
| out_shape.at(out_shape.size() - 1) = GetK(); | |||||
| if (inputs_.size() == kDoubleNum && inputs_.at(1)->data_c() != nullptr) { | if (inputs_.size() == kDoubleNum && inputs_.at(1)->data_c() != nullptr) { | ||||
| out_shape[out_shape.size() - 1] = reinterpret_cast<int *>(inputs_.at(1)->data_c())[0]; | |||||
| out_shape.at(out_shape.size() - 1) = reinterpret_cast<int *>(inputs_.at(1)->data_c())[0]; | |||||
| } | } | ||||
| output0->set_shape(out_shape); | output0->set_shape(out_shape); | ||||
| output1->set_shape(out_shape); | output1->set_shape(out_shape); | ||||
| @@ -62,7 +62,7 @@ int Transpose::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> & | |||||
| auto tuple = val->cast<ValueTuplePtr>(); | auto tuple = val->cast<ValueTuplePtr>(); | ||||
| MS_ASSERT(tuple != nullptr); | MS_ASSERT(tuple != nullptr); | ||||
| for (size_t i = 0; i < tuple->size(); i++) { | for (size_t i = 0; i < tuple->size(); i++) { | ||||
| auto elem = tuple->value()[i]; | |||||
| auto elem = tuple->value().at(i); | |||||
| MS_ASSERT(elem != nullptr); | MS_ASSERT(elem != nullptr); | ||||
| attr->perm.emplace_back(CastToInt(elem).front()); | attr->perm.emplace_back(CastToInt(elem).front()); | ||||
| } | } | ||||
| @@ -134,13 +134,13 @@ int Transpose::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> o | |||||
| } | } | ||||
| std::vector<int> perm; | std::vector<int> perm; | ||||
| for (size_t i = 0; i < GetPerm().size(); i++) { | for (size_t i = 0; i < GetPerm().size(); i++) { | ||||
| perm.push_back(GetPerm()[i]); | |||||
| perm.push_back(GetPerm().at(i)); | |||||
| } | } | ||||
| std::vector<int> in_shape = input->shape(); | std::vector<int> in_shape = input->shape(); | ||||
| std::vector<int> out_shape; | std::vector<int> out_shape; | ||||
| out_shape.resize(perm.size()); | out_shape.resize(perm.size()); | ||||
| for (size_t i = 0; i < perm.size(); ++i) { | for (size_t i = 0; i < perm.size(); ++i) { | ||||
| out_shape[i] = in_shape[perm[i]]; | |||||
| out_shape.at(i) = in_shape.at(perm.at(i)); | |||||
| } | } | ||||
| output->set_shape(out_shape); | output->set_shape(out_shape); | ||||
| return RET_OK; | return RET_OK; | ||||
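Note: Transpose::InferShape derives the output shape by indexing the input shape through perm, i.e. out_shape[i] = in_shape[perm[i]]. A sketch with assumed NHWC-to-NCHW values:

#include <vector>

std::vector<int> PermuteShape(const std::vector<int> &in, const std::vector<int> &perm) {
  std::vector<int> out(perm.size());
  for (size_t i = 0; i < perm.size(); ++i) {
    out.at(i) = in.at(perm.at(i));
  }
  return out;
}
// e.g. in = {1, 224, 224, 3}, perm = {0, 3, 1, 2}  ->  out = {1, 3, 224, 224}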
| @@ -46,8 +46,8 @@ int UnsortedSegmentSum::UnPackAttr(const Primitive &prim, const std::vector<AnfN | |||||
| } | } | ||||
| if (this->primitive_->value.value == nullptr) { | if (this->primitive_->value.value == nullptr) { | ||||
| std::unique_ptr<schema::UnsortedSegmentSumT> attr = std::make_unique<schema::UnsortedSegmentSumT>(); | std::unique_ptr<schema::UnsortedSegmentSumT> attr = std::make_unique<schema::UnsortedSegmentSumT>(); | ||||
| if (inputs[2]->isa<ValueNode>()) { | |||||
| ValuePtr value = inputs[2]->cast<ValueNodePtr>()->value(); | |||||
| if (inputs.at(2)->isa<ValueNode>()) { | |||||
| ValuePtr value = inputs.at(2)->cast<ValueNodePtr>()->value(); | |||||
| attr->numSegments = CastToInt(value).front(); | attr->numSegments = CastToInt(value).front(); | ||||
| this->primitive_->value.value = attr.release(); | this->primitive_->value.value = attr.release(); | ||||
| } | } | ||||
| @@ -92,14 +92,14 @@ int UnsortedSegmentSum::InferShape(std::vector<Tensor *> inputs_, std::vector<Te | |||||
| } | } | ||||
| Tensor *out = outputs_.front(); | Tensor *out = outputs_.front(); | ||||
| Tensor *x = inputs_.front(); | Tensor *x = inputs_.front(); | ||||
| Tensor *segment_id = inputs_[1]; | |||||
| Tensor *segment_id = inputs_.at(1); | |||||
| std::vector<int> x_shape = x->shape(); | std::vector<int> x_shape = x->shape(); | ||||
| std::vector<int> segment_id_shape = segment_id->shape(); | std::vector<int> segment_id_shape = segment_id->shape(); | ||||
| int num_segments = GetNumSegments(); | int num_segments = GetNumSegments(); | ||||
| std::vector<int> output_shape; | std::vector<int> output_shape; | ||||
| output_shape.push_back(num_segments); | output_shape.push_back(num_segments); | ||||
| for (int index = segment_id_shape.size(); index < static_cast<int>(x_shape.size()); index++) { | for (int index = segment_id_shape.size(); index < static_cast<int>(x_shape.size()); index++) { | ||||
| output_shape.push_back(x_shape[index]); | |||||
| output_shape.push_back(x_shape.at(index)); | |||||
| } | } | ||||
| out->set_shape(output_shape); | out->set_shape(output_shape); | ||||
| out->set_format(x->format()); | out->set_format(x->format()); | ||||
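Note: the output shape built above is num_segments followed by the x dims not covered by segment_id. A worked example with assumed values:

// x_shape          = {8, 4, 16}
// segment_id_shape = {8}          // covers the first dim of x
// num_segments     = 5
// output_shape     = {5, 4, 16}   // {num_segments} + x_shape[rank(segment_id):]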
| @@ -97,14 +97,14 @@ int Unsqueeze::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> o | |||||
| size_t in_itr = 0; | size_t in_itr = 0; | ||||
| size_t ax_itr = 0; | size_t ax_itr = 0; | ||||
| for (size_t i = 0; i < sz; i++) { | for (size_t i = 0; i < sz; i++) { | ||||
| if (ax_itr < dim_rank && dims[ax_itr] == static_cast<int>(i)) { | |||||
| if (ax_itr < dim_rank && dims.at(ax_itr) == static_cast<int>(i)) { | |||||
| out_shape.emplace_back(1); | out_shape.emplace_back(1); | ||||
| ax_itr++; | ax_itr++; | ||||
| } else if (ax_itr < dim_rank && dims[ax_itr] + sz == i) { | |||||
| } else if (ax_itr < dim_rank && dims.at(ax_itr) + sz == i) { | |||||
| out_shape.emplace_back(1); | out_shape.emplace_back(1); | ||||
| ax_itr++; | ax_itr++; | ||||
| } else { | } else { | ||||
| out_shape.emplace_back(in_shape[in_itr]); | |||||
| out_shape.emplace_back(in_shape.at(in_itr)); | |||||
| in_itr++; | in_itr++; | ||||
| } | } | ||||
| } | } | ||||
| @@ -73,7 +73,7 @@ int Upsample::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite:: | |||||
| auto scale_tensor = inputs_.at(1); | auto scale_tensor = inputs_.at(1); | ||||
| MS_ASSERT(scale_tensor); | MS_ASSERT(scale_tensor); | ||||
| auto scale_shape = scale_tensor->shape(); | auto scale_shape = scale_tensor->shape(); | ||||
| if (scale_shape.size() != 1 && scale_shape[0] != 4) { | |||||
| if (scale_shape.size() != 1 || scale_shape.at(0) != 4) { | |||||
| MS_LOG(ERROR) << "Upsample scale tensor shape should be 4"; | MS_LOG(ERROR) << "Upsample scale tensor shape should be 4"; | ||||
| return RET_INFER_ERR; | return RET_INFER_ERR; | ||||
| } | } | ||||
| @@ -84,12 +84,12 @@ int Upsample::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite:: | |||||
| } | } | ||||
| std::vector<int> out_shape = input_shape; // n, h, w, c; n, c not changed, h = floor(input_h * scale_h). | std::vector<int> out_shape = input_shape; // n, h, w, c; n, c not changed, h = floor(input_h * scale_h). | ||||
| int new_height = static_cast<int>(floor(input_shape[1] * scale[1])); | |||||
| int new_height = static_cast<int>(floor(input_shape.at(1) * scale[1])); | |||||
| MS_ASSERT(new_height > 0); | MS_ASSERT(new_height > 0); | ||||
| int new_width = static_cast<int>(floor(input_shape[2] * scale[2])); | |||||
| int new_width = static_cast<int>(floor(input_shape.at(2) * scale[2])); | |||||
| MS_ASSERT(new_width > 0); | MS_ASSERT(new_width > 0); | ||||
| out_shape[1] = new_height; | |||||
| out_shape[2] = new_width; | |||||
| out_shape.at(1) = new_height; | |||||
| out_shape.at(2) = new_width; | |||||
| auto out_tensor = outputs_.at(0); | auto out_tensor = outputs_.at(0); | ||||
| MS_ASSERT(out_tensor); | MS_ASSERT(out_tensor); | ||||
| @@ -93,28 +93,28 @@ int Where::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outpu | |||||
| int axisout = 0; | int axisout = 0; | ||||
| size_t temp = 0; | size_t temp = 0; | ||||
| for (size_t j = 0; j < shape_tmp.size(); j++) { | for (size_t j = 0; j < shape_tmp.size(); j++) { | ||||
| if (shape_tmp[j] == shape_tmp1[j] && shape_tmp[j] != shape_tmp2[j]) { | |||||
| if (shape_tmp.at(j) == shape_tmp1.at(j) && shape_tmp.at(j) != shape_tmp2.at(j)) { | |||||
| axisout = j; | axisout = j; | ||||
| break; | break; | ||||
| } | } | ||||
| if (shape_tmp[j] == shape_tmp2[j] && shape_tmp[j] != shape_tmp1[j]) { | |||||
| if (shape_tmp.at(j) == shape_tmp2.at(j) && shape_tmp.at(j) != shape_tmp1.at(j)) { | |||||
| axisout = j; | axisout = j; | ||||
| break; | break; | ||||
| } | } | ||||
| if (shape_tmp1[j] == shape_tmp2[j] && shape_tmp[j] != shape_tmp1[j]) { | |||||
| if (shape_tmp1.at(j) == shape_tmp2.at(j) && shape_tmp.at(j) != shape_tmp1.at(j)) { | |||||
| axisout = j; | axisout = j; | ||||
| break; | break; | ||||
| } | } | ||||
| temp += 1; | temp += 1; | ||||
| if (temp == shape_tmp.size()) { | if (temp == shape_tmp.size()) { | ||||
| outputs_[0]->set_shape(shape_tmp); | |||||
| outputs_.at(0)->set_shape(shape_tmp); | |||||
| output->set_data_type(input->data_type()); | output->set_data_type(input->data_type()); | ||||
| return RET_OK; | return RET_OK; | ||||
| } | } | ||||
| } | } | ||||
| auto output_shape = shape_tmp; | auto output_shape = shape_tmp; | ||||
| output_shape[axisout] = nummax; | |||||
| outputs_[0]->set_shape(output_shape); | |||||
| output_shape.at(axisout) = nummax; | |||||
| outputs_.at(0)->set_shape(output_shape); | |||||
| return RET_OK; | return RET_OK; | ||||
| } | } | ||||
| } // namespace lite | } // namespace lite | ||||
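Note: the loop above scans for the single axis on which one of the three input shapes disagrees; that axis is then widened to nummax. A worked example with assumed values:

// shape_tmp  (condition) = {2, 1, 4}
// shape_tmp1 (x)         = {2, 3, 4}
// shape_tmp2 (y)         = {2, 3, 4}
// j = 1 is the first axis where the shapes differ, so axisout = 1 and the
// output shape becomes {2, nummax, 4}; if all three shapes agree everywhere,
// shape_tmp is used unchanged.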
| @@ -96,9 +96,9 @@ int While::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outpu | |||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| for (size_t i = 0; i < inputs_.size(); i++) { | for (size_t i = 0; i < inputs_.size(); i++) { | ||||
| outputs_[i]->set_data_type(inputs_[i]->data_type()); | |||||
| outputs_[i]->set_format(inputs_[i]->format()); | |||||
| outputs_[i]->set_shape(inputs_[i]->shape()); | |||||
| outputs_.at(i)->set_data_type(inputs_.at(i)->data_type()); | |||||
| outputs_.at(i)->set_format(inputs_.at(i)->format()); | |||||
| outputs_.at(i)->set_shape(inputs_.at(i)->shape()); | |||||
| } | } | ||||
| return RET_OK; | return RET_OK; | ||||
| @@ -131,16 +131,16 @@ void ReduceBaseCPUKernel::CalculateInnerOuterSize() { | |||||
| int axis = axes_[i]; | int axis = axes_[i]; | ||||
| auto outer_size = 1; | auto outer_size = 1; | ||||
| for (int j = 0; j < axis; j++) { | for (int j = 0; j < axis; j++) { | ||||
| outer_size *= tmp_shape[j]; | |||||
| outer_size *= tmp_shape.at(j); | |||||
| } | } | ||||
| outer_sizes_.emplace_back(outer_size); | outer_sizes_.emplace_back(outer_size); | ||||
| auto inner_size = 1; | auto inner_size = 1; | ||||
| for (int k = axis + 1; k < static_cast<int>(tmp_shape.size()); k++) { | for (int k = axis + 1; k < static_cast<int>(tmp_shape.size()); k++) { | ||||
| inner_size *= tmp_shape[k]; | |||||
| inner_size *= tmp_shape.at(k); | |||||
| } | } | ||||
| inner_sizes_.emplace_back(inner_size); | inner_sizes_.emplace_back(inner_size); | ||||
| axis_sizes_.emplace_back(tmp_shape[axis]); | |||||
| tmp_shape[axis] = 1; | |||||
| axis_sizes_.emplace_back(tmp_shape.at(axis)); | |||||
| tmp_shape.at(axis) = 1; | |||||
| } | } | ||||
| } | } | ||||
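Note: for each reduced axis k the kernel views the tensor as [outer, axis, inner], where outer is the product of the dims before k and inner the product of the dims after it; the reduced axis is then set to 1 so chained axes reuse the same scheme. A worked example with assumed values:

// tmp_shape = {2, 3, 4, 5}, axis k = 2
// outer_size = 2 * 3 = 6
// axis_size  = 4
// inner_size = 5
// afterwards tmp_shape = {2, 3, 1, 5} for the next axis in axes_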
| @@ -152,12 +152,12 @@ void ReduceBaseCPUKernel::CalculateTmpBufferSize() { | |||||
| size_t size = 1; | size_t size = 1; | ||||
| for (size_t j = 0; j < input_shape.size(); j++) { | for (size_t j = 0; j < input_shape.size(); j++) { | ||||
| if (axis != static_cast<int>(j)) { | if (axis != static_cast<int>(j)) { | ||||
| size *= input_shape[j]; | |||||
| size *= input_shape.at(j); | |||||
| } | } | ||||
| } | } | ||||
| MS_ASSERT(context_->allocator != nullptr); | MS_ASSERT(context_->allocator != nullptr); | ||||
| buffer_sizes_.emplace_back(size); | buffer_sizes_.emplace_back(size); | ||||
| input_shape[axis] = 1; | |||||
| input_shape.at(axis) = 1; | |||||
| } | } | ||||
| } | } | ||||
| @@ -57,7 +57,7 @@ int ResizeBaseCPUKernel::CheckParameters() { | |||||
| return RET_INVALID_OP_ATTR; | return RET_INVALID_OP_ATTR; | ||||
| } | } | ||||
| } else if (this->in_tensors_.size() == lite::kDoubleNum) { | } else if (this->in_tensors_.size() == lite::kDoubleNum) { | ||||
| auto out_shape = this->in_tensors_[1]->data_c(); | |||||
| auto out_shape = this->in_tensors_.at(1)->data_c(); | |||||
| if (out_shape == nullptr) { | if (out_shape == nullptr) { | ||||
| MS_LOG(INFO) << "Out shape is not assigned"; | MS_LOG(INFO) << "Out shape is not assigned"; | ||||
| const_shape_ = false; | const_shape_ = false; | ||||
| @@ -48,8 +48,8 @@ int SoftmaxBaseCPUKernel::ReSize() { | |||||
| softmax_param_->axis_ += in_dims; | softmax_param_->axis_ += in_dims; | ||||
| } | } | ||||
| for (size_t i = 0; i < in_dims; i++) { | for (size_t i = 0; i < in_dims; i++) { | ||||
| softmax_param_->input_shape_[i] = in_shape[i]; | |||||
| ele_size *= in_shape[i]; | |||||
| softmax_param_->input_shape_[i] = in_shape.at(i); | |||||
| ele_size *= in_shape.at(i); | |||||
| } | } | ||||
| softmax_param_->element_size_ = ele_size; | softmax_param_->element_size_ = ele_size; | ||||
| return RET_OK; | return RET_OK; | ||||
| @@ -41,21 +41,21 @@ int SplitBaseCPUKernel::ReSize() { | |||||
| MS_ASSERT(input_shape.size() >= 2 && input_shape.size() <= SPLIT_STRIDES_SIZE); | MS_ASSERT(input_shape.size() >= 2 && input_shape.size() <= SPLIT_STRIDES_SIZE); | ||||
| param->strides_[input_shape.size() - 1] = 1; | param->strides_[input_shape.size() - 1] = 1; | ||||
| for (int i = input_shape.size() - 2; i >= 0; i--) { | for (int i = input_shape.size() - 2; i >= 0; i--) { | ||||
| param->strides_[i] = param->strides_[i + 1] * input_shape[i + 1]; | |||||
| param->strides_[i] = param->strides_[i + 1] * input_shape.at(i + 1); | |||||
| } | } | ||||
| MS_ASSERT(static_cast<size_t>(param->split_dim_) < input_shape.size()); | MS_ASSERT(static_cast<size_t>(param->split_dim_) < input_shape.size()); | ||||
| param->split_count_ = | param->split_count_ = | ||||
| param->strides_[0] * input_shape[0] / (input_shape[param->split_dim_] * param->strides_[param->split_dim_]); | |||||
| param->strides_[0] * input_shape.at(0) / (input_shape.at(param->split_dim_) * param->strides_[param->split_dim_]); | |||||
| param->n_dims_ = input_shape.size(); | param->n_dims_ = input_shape.size(); | ||||
| if (param->split_sizes_[0] == 0) { | if (param->split_sizes_[0] == 0) { | ||||
| MS_ASSERT(param->num_split_ > 0 && static_cast<int>(param->num_split_) < input_shape.size()); | MS_ASSERT(param->num_split_ > 0 && static_cast<int>(param->num_split_) < input_shape.size()); | ||||
| if (input_shape[param->split_dim_] % param->num_split_ != 0) { | |||||
| if (input_shape.at(param->split_dim_) % param->num_split_ != 0) { | |||||
| MS_LOG(ERROR) << "Default split size is not usable."; | MS_LOG(ERROR) << "Default split size is not usable."; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| int split_size = input_shape[param->split_dim_] / param->num_split_; | |||||
| int split_size = input_shape.at(param->split_dim_) / param->num_split_; | |||||
| for (int i = 0; i < param->num_split_; i++) { | for (int i = 0; i < param->num_split_; i++) { | ||||
| param->split_sizes_[i] = split_size; | param->split_sizes_[i] = split_size; | ||||
| } | } | ||||
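Note: the stride computation above (and the Transpose/ROIPooling variants later in this patch) is the usual row-major recurrence: the innermost stride is 1 and each outer stride is the next stride times the next dimension. A self-contained sketch:

#include <vector>

std::vector<int> RowMajorStrides(const std::vector<int> &shape) {
  std::vector<int> strides(shape.size(), 1);
  for (int i = static_cast<int>(shape.size()) - 2; i >= 0; --i) {
    strides.at(i) = strides.at(i + 1) * shape.at(i + 1);
  }
  return strides;
}
// e.g. shape {2, 3, 4} -> strides {12, 4, 1}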
| @@ -63,7 +63,7 @@ int SplitBaseCPUKernel::ReSize() { | |||||
| MS_ASSERT(param->num_split_ >= 1 && param->num_split_ <= SPLIT_STRIDES_SIZE); | MS_ASSERT(param->num_split_ >= 1 && param->num_split_ <= SPLIT_STRIDES_SIZE); | ||||
| if (param->split_sizes_[param->num_split_ - 1] == -1) { | if (param->split_sizes_[param->num_split_ - 1] == -1) { | ||||
| int split_shape_end = input_shape[param->split_dim_]; | |||||
| int split_shape_end = input_shape.at(param->split_dim_); | |||||
| for (int i = 0; i < param->num_split_ - 1; i++) { | for (int i = 0; i < param->num_split_ - 1; i++) { | ||||
| split_shape_end -= param->split_sizes_[i]; | split_shape_end -= param->split_sizes_[i]; | ||||
| } | } | ||||
| @@ -92,10 +92,10 @@ int ReduceFp16CPUKernel::Run() { | |||||
| fp16_src_data_ = fp16_input_; | fp16_src_data_ = fp16_input_; | ||||
| for (size_t i = 0; i < data_buffers_.size(); ++i) { | for (size_t i = 0; i < data_buffers_.size(); ++i) { | ||||
| fp16_dst_data_ = data_buffers_[i]; | |||||
| outer_size_ = outer_sizes_[i]; | |||||
| inner_size_ = inner_sizes_[i]; | |||||
| axis_size_ = axis_sizes_[i]; | |||||
| fp16_dst_data_ = data_buffers_.at(i); | |||||
| outer_size_ = outer_sizes_.at(i); | |||||
| inner_size_ = inner_sizes_.at(i); | |||||
| axis_size_ = axis_sizes_.at(i); | |||||
| auto error_code = ParallelLaunch(this->context_->thread_pool_, ReduceFp16Impl, this, context_->thread_num_); | auto error_code = ParallelLaunch(this->context_->thread_pool_, ReduceFp16Impl, this, context_->thread_num_); | ||||
| if (error_code != RET_OK) { | if (error_code != RET_OK) { | ||||
| FreeTmpBuffer(); | FreeTmpBuffer(); | ||||
| @@ -39,7 +39,7 @@ int SplitFp16CPUKernel::Init() { | |||||
| } | } | ||||
| output_ptr_.resize(param->num_split_); | output_ptr_.resize(param->num_split_); | ||||
| for (size_t i = 0; i < output_ptr_.size(); i++) { | for (size_t i = 0; i < output_ptr_.size(); i++) { | ||||
| output_ptr_[i] = nullptr; | |||||
| output_ptr_.at(i) = nullptr; | |||||
| } | } | ||||
| if (!InferShapeDone()) { | if (!InferShapeDone()) { | ||||
| return RET_OK; | return RET_OK; | ||||
| @@ -82,8 +82,8 @@ int SplitFp16CPUKernel::Run() { | |||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| for (int i = 0; i < param->num_split_; i++) { | for (int i = 0; i < param->num_split_; i++) { | ||||
| output_ptr_[i] = MallocOutputFp16(out_tensors_.at(i), context_); | |||||
| if (output_ptr_[i] == nullptr) { | |||||
| output_ptr_.at(i) = MallocOutputFp16(out_tensors_.at(i), context_); | |||||
| if (output_ptr_.at(i) == nullptr) { | |||||
| FreeInputAndOutput(); | FreeInputAndOutput(); | ||||
| MS_LOG(ERROR) << "input or output is nullptr"; | MS_LOG(ERROR) << "input or output is nullptr"; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| @@ -92,7 +92,7 @@ int SplitFp16CPUKernel::Run() { | |||||
| auto ret = ParallelLaunch(this->context_->thread_pool_, SplitFp16Run, this, thread_n_num_); | auto ret = ParallelLaunch(this->context_->thread_pool_, SplitFp16Run, this, thread_n_num_); | ||||
| for (int i = 0; i < param->num_split_; i++) { | for (int i = 0; i < param->num_split_; i++) { | ||||
| if (out_tensors_.at(i)->data_type() == kNumberTypeFloat32) { | if (out_tensors_.at(i)->data_type() == kNumberTypeFloat32) { | ||||
| Float16ToFloat32(output_ptr_[i], reinterpret_cast<float *>(out_tensors_.at(i)->MutableData()), | |||||
| Float16ToFloat32(output_ptr_.at(i), reinterpret_cast<float *>(out_tensors_.at(i)->MutableData()), | |||||
| out_tensors_.at(i)->ElementsNum()); | out_tensors_.at(i)->ElementsNum()); | ||||
| } | } | ||||
| } | } | ||||
| @@ -110,8 +110,8 @@ void SplitFp16CPUKernel::FreeInputAndOutput() { | |||||
| } | } | ||||
| for (int i = 0; i < param->num_split_; i++) { | for (int i = 0; i < param->num_split_; i++) { | ||||
| if (out_tensors_.at(i)->data_type() == kNumberTypeFloat32) { | if (out_tensors_.at(i)->data_type() == kNumberTypeFloat32) { | ||||
| context_->allocator->Free(output_ptr_[i]); | |||||
| output_ptr_[i] = nullptr; | |||||
| context_->allocator->Free(output_ptr_.at(i)); | |||||
| output_ptr_.at(i) = nullptr; | |||||
| } | } | ||||
| } | } | ||||
| } | } | ||||
| @@ -40,22 +40,22 @@ int StackFp16CPUKernel::Init() { | |||||
| void StackFp16CPUKernel::InitMallocFlags() { | void StackFp16CPUKernel::InitMallocFlags() { | ||||
| malloc_buffers_.resize(in_tensors_.size()); | malloc_buffers_.resize(in_tensors_.size()); | ||||
| for (size_t i = 0; i < in_tensors_.size(); ++i) { | for (size_t i = 0; i < in_tensors_.size(); ++i) { | ||||
| malloc_buffers_[i] = in_tensors_[i]->data_type() == kNumberTypeFloat32; | |||||
| malloc_buffers_.at(i) = in_tensors_.at(i)->data_type() == kNumberTypeFloat32; | |||||
| } | } | ||||
| malloc_out = out_tensors_[0]->data_type() == kNumberTypeFloat32; | |||||
| malloc_out = out_tensors_.at(0)->data_type() == kNumberTypeFloat32; | |||||
| } | } | ||||
| int StackFp16CPUKernel::MallocAssignBuffer() { | int StackFp16CPUKernel::MallocAssignBuffer() { | ||||
| buffers_.resize(in_tensors_.size(), nullptr); | buffers_.resize(in_tensors_.size(), nullptr); | ||||
| for (size_t i = 0; i < in_tensors_.size(); ++i) { | for (size_t i = 0; i < in_tensors_.size(); ++i) { | ||||
| buffers_[i] = ConvertInputFp32toFp16(in_tensors_[i], context_); | |||||
| if (buffers_[i] == nullptr) { | |||||
| buffers_.at(i) = ConvertInputFp32toFp16(in_tensors_.at(i), context_); | |||||
| if (buffers_.at(i) == nullptr) { | |||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| } | } | ||||
| out_buffer_ = nullptr; | out_buffer_ = nullptr; | ||||
| out_buffer_ = MallocOutputFp16(out_tensors_[0], context_); | |||||
| out_buffer_ = MallocOutputFp16(out_tensors_.at(0), context_); | |||||
| if (out_buffer_ == nullptr) { | if (out_buffer_ == nullptr) { | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| @@ -64,9 +64,9 @@ int StackFp16CPUKernel::MallocAssignBuffer() { | |||||
| void StackFp16CPUKernel::FreeBuffer() { | void StackFp16CPUKernel::FreeBuffer() { | ||||
| for (size_t i = 0; i < buffers_.size(); ++i) { | for (size_t i = 0; i < buffers_.size(); ++i) { | ||||
| if (malloc_buffers_[i] && buffers_[i] != nullptr) { | |||||
| context_->allocator->Free(buffers_[i]); | |||||
| buffers_[i] = nullptr; | |||||
| if (malloc_buffers_.at(i) && buffers_.at(i) != nullptr) { | |||||
| context_->allocator->Free(buffers_.at(i)); | |||||
| buffers_.at(i) = nullptr; | |||||
| } | } | ||||
| } | } | ||||
| if (malloc_out && out_buffer_ != nullptr) { | if (malloc_out && out_buffer_ != nullptr) { | ||||
| @@ -77,9 +77,9 @@ void StackFp16CPUKernel::FreeBuffer() { | |||||
| int StackFp16CPUKernel::Run() { | int StackFp16CPUKernel::Run() { | ||||
| size_t inputs_num = in_tensors_.size(); | size_t inputs_num = in_tensors_.size(); | ||||
| auto input0 = in_tensors_[0]; | |||||
| auto input0 = in_tensors_.at(0); | |||||
| if (inputs_num == 1) { | if (inputs_num == 1) { | ||||
| memcpy(out_tensors_[0]->MutableData(), input0->MutableData(), input0->Size()); | |||||
| memcpy(out_tensors_.at(0)->MutableData(), input0->MutableData(), input0->Size()); | |||||
| return RET_OK; | return RET_OK; | ||||
| } | } | ||||
| InitMallocFlags(); | InitMallocFlags(); | ||||
| @@ -39,7 +39,7 @@ int TransposeFp16CPUKernel::Init() { | |||||
| int TransposeFp16CPUKernel::ReSize() { | int TransposeFp16CPUKernel::ReSize() { | ||||
| TransposeParameter *param = reinterpret_cast<TransposeParameter *>(this->op_parameter_); | TransposeParameter *param = reinterpret_cast<TransposeParameter *>(this->op_parameter_); | ||||
| num_unit_ = static_cast<int>(in_tensors_[kInputIndex]->shape().at(param->perm_[kNHWC_H])); | |||||
| num_unit_ = static_cast<int>(in_tensors_.at(kInputIndex)->shape().at(param->perm_[kNHWC_H])); | |||||
| thread_h_num_ = MSMIN(thread_num_, num_unit_); | thread_h_num_ = MSMIN(thread_num_, num_unit_); | ||||
| thread_h_stride_ = UP_DIV(num_unit_, thread_h_num_); | thread_h_stride_ = UP_DIV(num_unit_, thread_h_num_); | ||||
| auto &in_tensor = in_tensors_.front(); | auto &in_tensor = in_tensors_.front(); | ||||
| @@ -50,8 +50,8 @@ int TransposeFp16CPUKernel::ReSize() { | |||||
| param->out_strides_[param->num_axes_ - 1] = 1; | param->out_strides_[param->num_axes_ - 1] = 1; | ||||
| param->data_size_ = in_tensor->Size(); | param->data_size_ = in_tensor->Size(); | ||||
| for (int i = param->num_axes_ - 2; i >= 0; i--) { | for (int i = param->num_axes_ - 2; i >= 0; i--) { | ||||
| param->strides_[i] = in_shape[i + 1] * param->strides_[i + 1]; | |||||
| param->out_strides_[i] = out_shape[i + 1] * param->out_strides_[i + 1]; | |||||
| param->strides_[i] = in_shape.at(i + 1) * param->strides_[i + 1]; | |||||
| param->out_strides_[i] = out_shape.at(i + 1) * param->out_strides_[i + 1]; | |||||
| } | } | ||||
| return RET_OK; | return RET_OK; | ||||
| @@ -41,7 +41,7 @@ bool TensorListFromTensorCPUKernel::IsCompatibleShape() { | |||||
| } | } | ||||
| int *elements_shape = reinterpret_cast<int *>(input1_->data_c()); // element shape in tensor data | int *elements_shape = reinterpret_cast<int *>(input1_->data_c()); // element shape in tensor data | ||||
| for (int i = 0; i < in1_ele_num; ++i) { | for (int i = 0; i < in1_ele_num; ++i) { | ||||
| const int dim0 = tensor_shape[i + 1]; | |||||
| const int dim0 = tensor_shape.at(i + 1); | |||||
| const int dim1 = *(elements_shape + i); | const int dim1 = *(elements_shape + i); | ||||
| if (dim0 >= 0 && dim1 >= 0 && dim0 != dim1) { | if (dim0 >= 0 && dim1 >= 0 && dim0 != dim1) { | ||||
| MS_LOG(ERROR) << "input0_->shape()[" << i + 1 << "]:" << dim0 << " is not equal input1_->data_c()[" << i | MS_LOG(ERROR) << "input0_->shape()[" << i + 1 << "]:" << dim0 << " is not equal input1_->data_c()[" << i | ||||
| @@ -53,17 +53,17 @@ bool TensorListFromTensorCPUKernel::IsCompatibleShape() { | |||||
| } | } | ||||
| int TensorListFromTensorCPUKernel::Init() { | int TensorListFromTensorCPUKernel::Init() { | ||||
| input0_ = in_tensors_[0]; // row tensor | |||||
| input1_ = in_tensors_[1]; // element_shape tensor | |||||
| output0_ = out_tensors_[0]; | |||||
| output1_ = out_tensors_[1]; | |||||
| input0_ = in_tensors_.at(0); // row tensor | |||||
| input1_ = in_tensors_.at(1); // element_shape tensor | |||||
| output0_ = out_tensors_.at(0); | |||||
| output1_ = out_tensors_.at(1); | |||||
| return IsCompatibleShape(); | return IsCompatibleShape(); | ||||
| } | } | ||||
| int TensorListFromTensorCPUKernel::ReSize() { return RET_OK; } | int TensorListFromTensorCPUKernel::ReSize() { return RET_OK; } | ||||
| int TensorListFromTensorCPUKernel::Run() { | int TensorListFromTensorCPUKernel::Run() { | ||||
| int dim0 = input0_->shape()[0]; | |||||
| int dim0 = input0_->shape().at(0); | |||||
| size_t devision_dim0 = input0_->ElementsNum() / dim0; | size_t devision_dim0 = input0_->ElementsNum() / dim0; | ||||
| auto out0_ptr = reinterpret_cast<int *>(output0_->MutableData()); | auto out0_ptr = reinterpret_cast<int *>(output0_->MutableData()); | ||||
| *out0_ptr = dim0; | *out0_ptr = dim0; | ||||
| @@ -81,7 +81,7 @@ int TensorListFromTensorCPUKernel::Run() { | |||||
| auto in_ptr = reinterpret_cast<float *>(input0_->data_c()); | auto in_ptr = reinterpret_cast<float *>(input0_->data_c()); | ||||
| size_t index = 0; | size_t index = 0; | ||||
| for (int i = 0; i < dim0; ++i) { | for (int i = 0; i < dim0; ++i) { | ||||
| auto out_ptr = reinterpret_cast<float *>(out_tensors_[i + 2]->MutableData()); | |||||
| auto out_ptr = reinterpret_cast<float *>(out_tensors_.at(i + 2)->MutableData()); | |||||
| memcpy(out_ptr, in_ptr + index, devision_dim0 * sizeof(float)); | memcpy(out_ptr, in_ptr + index, devision_dim0 * sizeof(float)); | ||||
| index += devision_dim0; | index += devision_dim0; | ||||
| } | } | ||||
| @@ -29,14 +29,14 @@ using mindspore::schema::PrimitiveType_TensorListGetItem; | |||||
| namespace mindspore::kernel { | namespace mindspore::kernel { | ||||
| int TensorListGetItemCPUKernel::Init() { | int TensorListGetItemCPUKernel::Init() { | ||||
| auto input0 = reinterpret_cast<int *>(in_tensors_[0]->data_c()); | |||||
| auto input0 = reinterpret_cast<int *>(in_tensors_.at(0)->data_c()); | |||||
| size_t dim0 = *input0; | size_t dim0 = *input0; | ||||
| int in_dtype = *(input0 + 1); | int in_dtype = *(input0 + 1); | ||||
| if (dtype_ != in_dtype) { | if (dtype_ != in_dtype) { | ||||
| MS_LOG(ERROR) << "op dtype:" << dtype_ << " is not equal in_tensors dtype:" << in_dtype; | MS_LOG(ERROR) << "op dtype:" << dtype_ << " is not equal in_tensors dtype:" << in_dtype; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| index_ = *(reinterpret_cast<int *>(in_tensors_[dim0 + 2]->data_c())); | |||||
| index_ = *(reinterpret_cast<int *>(in_tensors_.at(dim0 + 2)->data_c())); | |||||
| if (index_ < 0) { | if (index_ < 0) { | ||||
| MS_LOG(ERROR) << "index tensor:[" << index_ << "] must be greater than or equal to 0"; | MS_LOG(ERROR) << "index tensor:[" << index_ << "] must be greater than or equal to 0"; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| @@ -50,16 +50,16 @@ int TensorListGetItemCPUKernel::Init() { | |||||
| } | } | ||||
| int TensorListGetItemCPUKernel::Run() { | int TensorListGetItemCPUKernel::Run() { | ||||
| if (in_tensors_[index_]->data_type() != kTypeUnknown) { | |||||
| auto status = out_tensors_[0]->CopyTensorData(*in_tensors_[index_]); // tensorlist shape | |||||
| if (in_tensors_.at(index_)->data_type() != kTypeUnknown) { | |||||
| auto status = out_tensors_.at(0)->CopyTensorData(*in_tensors_.at(index_)); // tensorlist shape | |||||
| if (status == RET_ERROR) { | if (status == RET_ERROR) { | ||||
| MS_LOG(ERROR) << "copy tensor data failed!"; | MS_LOG(ERROR) << "copy tensor data failed!"; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| } else { | } else { | ||||
| // reset 0 and dtype = dtype_ | // reset 0 and dtype = dtype_ | ||||
| auto out_ptr = reinterpret_cast<char *>(out_tensors_[0]->MutableData()); | |||||
| memset(out_ptr, 0, lite::DataTypeSize(dtype_) * out_tensors_[0]->ElementsNum()); | |||||
| auto out_ptr = reinterpret_cast<char *>(out_tensors_.at(0)->MutableData()); | |||||
| memset(out_ptr, 0, lite::DataTypeSize(dtype_) * out_tensors_.at(0)->ElementsNum()); | |||||
| } | } | ||||
| return RET_OK; | return RET_OK; | ||||
| } | } | ||||
| @@ -30,10 +30,10 @@ namespace mindspore::kernel { | |||||
| int TensorListReserveCPUKernel::Init() { return RET_OK; } | int TensorListReserveCPUKernel::Init() { return RET_OK; } | ||||
| int TensorListReserveCPUKernel::Run() { | int TensorListReserveCPUKernel::Run() { | ||||
| auto out0_ptr = reinterpret_cast<int *>(out_tensors_[0]->MutableData()); // tensorlist size() and dtype | |||||
| out0_ptr[0] = reinterpret_cast<int *>(in_tensors_[0]->data_c())[0]; // num_elements | |||||
| auto out0_ptr = reinterpret_cast<int *>(out_tensors_.at(0)->MutableData()); // tensorlist size() and dtype | |||||
| out0_ptr[0] = reinterpret_cast<int *>(in_tensors_.at(0)->data_c())[0]; // num_elements | |||||
| out0_ptr[1] = element_dtype_; | out0_ptr[1] = element_dtype_; | ||||
| auto status = out_tensors_[1]->CopyTensorData(*in_tensors_[1]); // elements_shape | |||||
| auto status = out_tensors_.at(1)->CopyTensorData(*in_tensors_.at(1)); // elements_shape | |||||
| if (status == RET_ERROR) { | if (status == RET_ERROR) { | ||||
| MS_LOG(ERROR) << "copy tensor data failed!"; | MS_LOG(ERROR) << "copy tensor data failed!"; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| @@ -29,12 +29,12 @@ using mindspore::schema::PrimitiveType_TensorListStack; | |||||
| namespace mindspore::kernel { | namespace mindspore::kernel { | ||||
| int TensorListStackCPUKernel::CheckParam() { | int TensorListStackCPUKernel::CheckParam() { | ||||
| auto in0_dtype = in_tensors_[0]->data_type(); | |||||
| auto in0_dtype = in_tensors_.at(0)->data_type(); | |||||
| if (in0_dtype != kNumberTypeInt) { | if (in0_dtype != kNumberTypeInt) { | ||||
| MS_LOG(ERROR) << "in_tensors_[0]->data_type():" << in0_dtype | MS_LOG(ERROR) << "in_tensors_[0]->data_type():" << in0_dtype | ||||
| << " must be equal \"kNumberTypeInt\":" << kNumberTypeInt; | << " must be equal \"kNumberTypeInt\":" << kNumberTypeInt; | ||||
| } | } | ||||
| auto in0_ptr = reinterpret_cast<int *>(in_tensors_[0]->data_c()); | |||||
| auto in0_ptr = reinterpret_cast<int *>(in_tensors_.at(0)->data_c()); | |||||
| if (in0_ptr[1] != dtype_) { | if (in0_ptr[1] != dtype_) { | ||||
| MS_LOG(ERROR) << "in_tensors_[0].data_type:[" << in0_ptr[1] << "] must be equal " | MS_LOG(ERROR) << "in_tensors_[0].data_type:[" << in0_ptr[1] << "] must be equal " | ||||
| << "param.data_type:[" << dtype_ << "]"; | << "param.data_type:[" << dtype_ << "]"; | ||||
| @@ -50,12 +50,12 @@ int TensorListStackCPUKernel::CheckParam() { | |||||
| } | } | ||||
| int TensorListStackCPUKernel::Init() { | int TensorListStackCPUKernel::Init() { | ||||
| output0_ = out_tensors_[0]; | |||||
| output0_ = out_tensors_.at(0); | |||||
| if (output0_->format() != schema::Format_NC) { // shape().size() = 2 | if (output0_->format() != schema::Format_NC) { // shape().size() = 2 | ||||
| MS_LOG(ERROR) << "out_tensor_[0] format must be \"Format:NC\", but now is:" << output0_->format(); | MS_LOG(ERROR) << "out_tensor_[0] format must be \"Format:NC\", but now is:" << output0_->format(); | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| int dim0 = output0_->shape()[0]; | |||||
| int dim0 = output0_->shape().at(0); | |||||
| if (dim0 != 1) { // dim0 must be 1 | if (dim0 != 1) { // dim0 must be 1 | ||||
| MS_LOG(ERROR) << "out_tensor_[0] dim0 must be 1, but now is:" << dim0; | MS_LOG(ERROR) << "out_tensor_[0] dim0 must be 1, but now is:" << dim0; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| @@ -66,19 +66,19 @@ int TensorListStackCPUKernel::Init() { | |||||
| int TensorListStackCPUKernel::Run() { | int TensorListStackCPUKernel::Run() { | ||||
| size_t in_ele_num = 0; | size_t in_ele_num = 0; | ||||
| for (int i = 0; i < num_element_; ++i) { | for (int i = 0; i < num_element_; ++i) { | ||||
| in_ele_num += in_tensors_[i + 2]->ElementsNum(); | |||||
| in_ele_num += in_tensors_.at(i + 2)->ElementsNum(); | |||||
| } | } | ||||
| size_t out_ele_num = out_tensors_[0]->ElementsNum(); | |||||
| size_t out_ele_num = out_tensors_.at(0)->ElementsNum(); | |||||
| if (in_ele_num > out_ele_num) { | if (in_ele_num > out_ele_num) { | ||||
| MS_LOG(ERROR) << "out_tensors_[0]->ElementsNum():" << out_ele_num << "must greater than or equal to in_ele_num" | MS_LOG(ERROR) << "out_tensors_[0]->ElementsNum():" << out_ele_num << "must greater than or equal to in_ele_num" | ||||
| << in_ele_num; | << in_ele_num; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| size_t index = 0; | size_t index = 0; | ||||
| auto out_ptr = reinterpret_cast<float *>(out_tensors_[0]->MutableData()); | |||||
| auto out_ptr = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData()); | |||||
| for (int i = 0; i < num_element_; ++i) { | for (int i = 0; i < num_element_; ++i) { | ||||
| auto in_ptr = reinterpret_cast<float *>(in_tensors_[i + 2]->data_c()); | |||||
| size_t in_size = in_tensors_[i + 2]->ElementsNum(); | |||||
| auto in_ptr = reinterpret_cast<float *>(in_tensors_.at(i + 2)->data_c()); | |||||
| size_t in_size = in_tensors_.at(i + 2)->ElementsNum(); | |||||
| memcpy(out_ptr + index, in_ptr, in_size * sizeof(float)); | memcpy(out_ptr + index, in_ptr, in_size * sizeof(float)); | ||||
| index += in_size; | index += in_size; | ||||
| } | } | ||||
| @@ -140,13 +140,13 @@ int ReduceCPUKernel::Run() { | |||||
| HandleASumAndSumSquare(); | HandleASumAndSumSquare(); | ||||
| for (size_t i = 0; i < static_cast<size_t>(num_axes_); ++i) { | for (size_t i = 0; i < static_cast<size_t>(num_axes_); ++i) { | ||||
| if (i != static_cast<size_t>(num_axes_ - 1)) { | if (i != static_cast<size_t>(num_axes_ - 1)) { | ||||
| dst_data_ = data_buffers_[i]; | |||||
| dst_data_ = data_buffers_.at(i); | |||||
| } else { | } else { | ||||
| dst_data_ = out_tensors_.at(0)->MutableData(); | dst_data_ = out_tensors_.at(0)->MutableData(); | ||||
| } | } | ||||
| outer_size_ = outer_sizes_[i]; | |||||
| inner_size_ = inner_sizes_[i]; | |||||
| axis_size_ = axis_sizes_[i]; | |||||
| outer_size_ = outer_sizes_.at(i); | |||||
| inner_size_ = inner_sizes_.at(i); | |||||
| axis_size_ = axis_sizes_.at(i); | |||||
| auto error_code = ParallelLaunch(this->context_->thread_pool_, ReduceImpl, this, context_->thread_num_); | auto error_code = ParallelLaunch(this->context_->thread_pool_, ReduceImpl, this, context_->thread_num_); | ||||
| if (error_code != RET_OK) { | if (error_code != RET_OK) { | ||||
| MS_LOG(ERROR) << "Reduce run error, error_code[" << error_code << "]"; | MS_LOG(ERROR) << "Reduce run error, error_code[" << error_code << "]"; | ||||
| @@ -51,7 +51,7 @@ int ResizeCPUKernel::ReSize() { | |||||
| auto input = in_tensors_.at(0); | auto input = in_tensors_.at(0); | ||||
| auto input_shape = input->shape(); | auto input_shape = input->shape(); | ||||
| ret = PrepareResizeBilinear(input_shape.data(), out_tensors_[0]->shape().data(), align_corners_, y_bottoms_, | |||||
| ret = PrepareResizeBilinear(input_shape.data(), out_tensors_.at(0)->shape().data(), align_corners_, y_bottoms_, | |||||
| y_tops_, x_lefts_, x_rights_, y_bottom_weights_, x_left_weights_); | y_tops_, x_lefts_, x_rights_, y_bottom_weights_, x_left_weights_); | ||||
| if (ret != RET_OK) { | if (ret != RET_OK) { | ||||
| FreeTmpBuffer(); | FreeTmpBuffer(); | ||||
| @@ -164,15 +164,15 @@ int ResizeCPUKernel::RunImpl(int task_id) { | |||||
| switch (method_) { | switch (method_) { | ||||
| case static_cast<int>(schema::ResizeMethod_LINEAR): { | case static_cast<int>(schema::ResizeMethod_LINEAR): { | ||||
| int n_h_begin, n_h_end; | int n_h_begin, n_h_end; | ||||
| int n = out_tensors_.at(0)->shape()[0]; | |||||
| int n = out_tensors_.at(0)->shape().at(0); | |||||
| int h = new_height_; | int h = new_height_; | ||||
| int unit = UP_DIV(n * h, context_->thread_num_); | int unit = UP_DIV(n * h, context_->thread_num_); | ||||
| n_h_begin = unit * task_id; | n_h_begin = unit * task_id; | ||||
| n_h_end = std::min(n_h_begin + unit, n * h); | n_h_end = std::min(n_h_begin + unit, n * h); | ||||
| int c = in_tensors_.at(0)->shape()[3]; | |||||
| int c = in_tensors_.at(0)->shape().at(3); | |||||
| float *line0 = line_buffer_ + new_width_ * c * 2 * task_id; | float *line0 = line_buffer_ + new_width_ * c * 2 * task_id; | ||||
| float *line1 = line0 + new_width_ * c; | float *line1 = line0 + new_width_ * c; | ||||
| ret = ResizeBilinear2(input_data, output_data, input_shape.data(), out_tensors_[0]->shape().data(), y_bottoms_, | |||||
| ret = ResizeBilinear2(input_data, output_data, input_shape.data(), out_tensors_.at(0)->shape().data(), y_bottoms_, | |||||
| y_tops_, x_lefts_, x_rights_, y_bottom_weights_, x_left_weights_, line0, line1, n_h_begin, | y_tops_, x_lefts_, x_rights_, y_bottom_weights_, x_left_weights_, line0, line1, n_h_begin, | ||||
| n_h_end); | n_h_end); | ||||
| @@ -186,8 +186,8 @@ int ResizeCPUKernel::RunImpl(int task_id) { | |||||
| MS_LOG(ERROR) << "The out shape data is nullptr."; | MS_LOG(ERROR) << "The out shape data is nullptr."; | ||||
| return RET_NULL_PTR; | return RET_NULL_PTR; | ||||
| } else { | } else { | ||||
| out_tensors_[0]->shape()[1] = static_cast<int64_t>(data[0]); | |||||
| out_tensors_[0]->shape()[2] = static_cast<int64_t>(data[1]); | |||||
| out_tensors_.at(0)->shape().at(1) = static_cast<int64_t>(data[0]); | |||||
| out_tensors_.at(0)->shape().at(2) = static_cast<int64_t>(data[1]); | |||||
| } | } | ||||
| } | } | ||||
| ret = ResizeNearestNeighbor(input_data, output_data, input_shape.data(), out_tensors_[0]->shape().data(), | ret = ResizeNearestNeighbor(input_data, output_data, input_shape.data(), out_tensors_[0]->shape().data(), | ||||
| @@ -31,8 +31,8 @@ namespace mindspore::kernel { | |||||
| int ReverseCPUKernel::Stride(int index) { | int ReverseCPUKernel::Stride(int index) { | ||||
| int stride = 1; | int stride = 1; | ||||
| for (size_t i = index + 1; i < in_tensors_[0]->shape().size(); ++i) { | |||||
| stride *= in_tensors_[0]->shape()[i]; | |||||
| for (size_t i = index + 1; i < in_tensors_.at(0)->shape().size(); ++i) { | |||||
| stride *= in_tensors_.at(0)->shape().at(i); | |||||
| } | } | ||||
| return stride; | return stride; | ||||
| } | } | ||||
| @@ -43,7 +43,7 @@ int ReverseCPUKernel::ReSize() { | |||||
| thread_sz_stride_ = UP_DIV(data_size_, thread_sz_count_); | thread_sz_stride_ = UP_DIV(data_size_, thread_sz_count_); | ||||
| auto *param = reinterpret_cast<ReverseParameter *>(op_parameter_); | auto *param = reinterpret_cast<ReverseParameter *>(op_parameter_); | ||||
| auto input_shape = in_tensors_[0]->shape(); | |||||
| auto input_shape = in_tensors_.at(0)->shape(); | |||||
| if (param->num_axis_ > static_cast<int>(input_shape.size())) { | if (param->num_axis_ > static_cast<int>(input_shape.size())) { | ||||
| MS_LOG(ERROR) << "Reverse dims : " << param->num_axis_ | MS_LOG(ERROR) << "Reverse dims : " << param->num_axis_ | ||||
| << "is greater than input shape size :" << input_shape.size(); | << "is greater than input shape size :" << input_shape.size(); | ||||
| @@ -72,7 +72,7 @@ int ReverseCPUKernel::ReSize() { | |||||
| inCount_[i] = input_shape[axis]; | inCount_[i] = input_shape[axis]; | ||||
| outCount_[i] = 1; | outCount_[i] = 1; | ||||
| for (int j = 0; j < axis; j++) { | for (int j = 0; j < axis; j++) { | ||||
| outCount_[i] *= input_shape[j]; | |||||
| outCount_[i] *= input_shape.at(j); | |||||
| } | } | ||||
| } | } | ||||
| @@ -39,14 +39,14 @@ void ReverseSequenceCPUKernel::ConvertAxisToPositive(const std::vector<int> shap | |||||
| int ReverseSequenceCPUKernel::CalcCountPreAxis(const std::vector<int> shape, int axis) { | int ReverseSequenceCPUKernel::CalcCountPreAxis(const std::vector<int> shape, int axis) { | ||||
| int count = 1; | int count = 1; | ||||
| for (int i = 0; i < axis; ++i) { | for (int i = 0; i < axis; ++i) { | ||||
| count *= shape[i]; | |||||
| count *= shape.at(i); | |||||
| } | } | ||||
| return count; | return count; | ||||
| } | } | ||||
| int ReverseSequenceCPUKernel::CalcCountAfterAxis(const std::vector<int> shape, int axis) { | int ReverseSequenceCPUKernel::CalcCountAfterAxis(const std::vector<int> shape, int axis) { | ||||
| int count = 1; | int count = 1; | ||||
| for (size_t i = axis + 1; i < shape.size(); ++i) { | for (size_t i = axis + 1; i < shape.size(); ++i) { | ||||
| count *= shape[i]; | |||||
| count *= shape.at(i); | |||||
| } | } | ||||
| return count; | return count; | ||||
| } | } | ||||
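CalcCountPreAxis and CalcCountAfterAxis collapse the dimensions on either side of the reversal axis into single counts, so the kernel can walk the tensor as a pre x axis x after block. A small worked instance with a hypothetical shape:

#include <cstdio>
#include <vector>

int main() {
  const std::vector<int> shape = {2, 3, 4, 5};
  const int axis = 2;
  int count_pre = 1, count_after = 1;
  for (int i = 0; i < axis; ++i) count_pre *= shape.at(i);                      // dims before axis
  for (size_t i = axis + 1; i < shape.size(); ++i) count_after *= shape.at(i);  // dims after axis
  std::printf("pre=%d axis=%d after=%d\n", count_pre, shape.at(axis), count_after);  // 6 4 5
  return 0;
}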
| @@ -50,21 +50,21 @@ int ROIPoolingCPUKernel::ReSize() { | |||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| param_->ndim_ = ndims; | param_->ndim_ = ndims; | ||||
| param_->input_n_ = in_shape[0]; | |||||
| param_->input_h_ = in_shape[1]; | |||||
| param_->input_w_ = in_shape[2]; | |||||
| param_->input_c_ = in_shape[3]; | |||||
| param_->output_n_ = out_shape[0]; | |||||
| param_->output_h_ = out_shape[1]; | |||||
| param_->output_w_ = out_shape[2]; | |||||
| param_->output_c_ = out_shape[3]; | |||||
| param_->input_n_ = in_shape.at(0); | |||||
| param_->input_h_ = in_shape.at(1); | |||||
| param_->input_w_ = in_shape.at(2); | |||||
| param_->input_c_ = in_shape.at(3); | |||||
| param_->output_n_ = out_shape.at(0); | |||||
| param_->output_h_ = out_shape.at(1); | |||||
| param_->output_w_ = out_shape.at(2); | |||||
| param_->output_c_ = out_shape.at(3); | |||||
| param_->in_strides_[ndims - 1] = 1; | param_->in_strides_[ndims - 1] = 1; | ||||
| param_->out_strides_[ndims - 1] = 1; | param_->out_strides_[ndims - 1] = 1; | ||||
| for (int i = ndims - 2; i >= 0; --i) { | for (int i = ndims - 2; i >= 0; --i) { | ||||
| param_->in_strides_[i] = in_shape[i + 1] * param_->in_strides_[i + 1]; | |||||
| param_->out_strides_[i] = out_shape[i + 1] * param_->out_strides_[i + 1]; | |||||
| param_->in_strides_[i] = in_shape.at(i + 1) * param_->in_strides_[i + 1]; | |||||
| param_->out_strides_[i] = out_shape.at(i + 1) * param_->out_strides_[i + 1]; | |||||
| } | } | ||||
| param_->thread_num_ = MSMIN(param_->op_parameter_.thread_num_, out_shape[0]); | |||||
| param_->thread_num_ = MSMIN(param_->op_parameter_.thread_num_, out_shape.at(0)); | |||||
| max_c_ = reinterpret_cast<float *>(malloc(param_->input_c_ * sizeof(float))); | max_c_ = reinterpret_cast<float *>(malloc(param_->input_c_ * sizeof(float))); | ||||
| if (max_c_ == nullptr) { | if (max_c_ == nullptr) { | ||||
| MS_LOG(ERROR) << "malloc max_c failed."; | MS_LOG(ERROR) << "malloc max_c failed."; | ||||
| @@ -101,17 +101,17 @@ int ScaleCPUKernel::CalculateParameter() { | |||||
| scale_param_->axis_size_ = 1; | scale_param_->axis_size_ = 1; | ||||
| scale_param_->inner_size_ = 1; | scale_param_->inner_size_ = 1; | ||||
| for (int i = 0; i < scale_param_->axis_; i++) { | for (int i = 0; i < scale_param_->axis_; i++) { | ||||
| scale_param_->outer_size_ *= in_shape[i]; | |||||
| scale_param_->outer_size_ *= in_shape.at(i); | |||||
| } | } | ||||
| for (size_t i = 0; i < scale_shape.size(); i++) { | for (size_t i = 0; i < scale_shape.size(); i++) { | ||||
| if (in_shape[i + scale_param_->axis_] != scale_shape[i]) { | |||||
| if (in_shape.at(i + scale_param_->axis_) != scale_shape.at(i)) { | |||||
| MS_LOG(ERROR) << "Scale tensor shape is incorrect."; | MS_LOG(ERROR) << "Scale tensor shape is incorrect."; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| scale_param_->axis_size_ *= in_shape[i + scale_param_->axis_]; | |||||
| scale_param_->axis_size_ *= in_shape.at(i + scale_param_->axis_); | |||||
| } | } | ||||
| for (size_t i = scale_param_->axis_ + scale_shape.size(); i < in_shape.size(); i++) { | for (size_t i = scale_param_->axis_ + scale_shape.size(); i < in_shape.size(); i++) { | ||||
| scale_param_->inner_size_ *= in_shape[i]; | |||||
| scale_param_->inner_size_ *= in_shape.at(i); | |||||
| } | } | ||||
| scale_param_->op_parameter_.thread_num_ = MSMIN(scale_param_->op_parameter_.thread_num_, scale_param_->outer_size_); | scale_param_->op_parameter_.thread_num_ = MSMIN(scale_param_->op_parameter_.thread_num_, scale_param_->outer_size_); | ||||
| return RET_OK; | return RET_OK; | ||||
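CalculateParameter above factors the input shape around the scale axis into outer * axis * inner element counts, the usual way to linearize a broadcast along a contiguous run of dimensions. A hedged sketch of that factoring with hypothetical shapes:

#include <cstdio>
#include <vector>

int main() {
  // Hypothetical input [2, 3, 4, 5] scaled by a [3, 4] tensor starting at axis 1.
  const std::vector<int> in_shape = {2, 3, 4, 5};
  const std::vector<int> scale_shape = {3, 4};
  const int axis = 1;

  int outer = 1, axis_size = 1, inner = 1;
  for (int i = 0; i < axis; ++i) outer *= in_shape.at(i);
  for (size_t i = 0; i < scale_shape.size(); ++i) axis_size *= in_shape.at(i + axis);
  for (size_t i = axis + scale_shape.size(); i < in_shape.size(); ++i) inner *= in_shape.at(i);

  std::printf("outer=%d axis=%d inner=%d\n", outer, axis_size, inner);  // 2 12 5, product 120
  return 0;
}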
| @@ -177,7 +177,7 @@ int ScaleCPUKernel::Run() { | |||||
| auto in_tensor = in_tensors_.front(); | auto in_tensor = in_tensors_.front(); | ||||
| input_ptr_ = reinterpret_cast<float *>(in_tensor->data_c()); | input_ptr_ = reinterpret_cast<float *>(in_tensor->data_c()); | ||||
| if (!scale_param_->const_scale_) { | if (!scale_param_->const_scale_) { | ||||
| auto scale_tensor = in_tensors_[1]; | |||||
| auto scale_tensor = in_tensors_.at(1); | |||||
| scale_ = reinterpret_cast<float *>(scale_tensor->data_c()); | scale_ = reinterpret_cast<float *>(scale_tensor->data_c()); | ||||
| } | } | ||||
| if (!scale_param_->const_offset_) { | if (!scale_param_->const_offset_) { | ||||
| @@ -73,13 +73,13 @@ int ScatterNDCPUKernel::ReSize() { | |||||
| // check update shape | // check update shape | ||||
| auto update_shape = update->shape(); | auto update_shape = update->shape(); | ||||
| for (size_t i = 0; i < indices_shape.size() - 1; i++) { | for (size_t i = 0; i < indices_shape.size() - 1; i++) { | ||||
| if (update_shape[i] != indices_shape[i]) { | |||||
| if (update_shape.at(i) != indices_shape.at(i)) { | |||||
| MS_LOG(ERROR) << "Value of " << i << " th dimension of indices is not equal to that of update."; | MS_LOG(ERROR) << "Value of " << i << " th dimension of indices is not equal to that of update."; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| } | } | ||||
| for (size_t i = 0; i < shape->ElementsNum() - (indices_shape.size() - 1); i++) { | for (size_t i = 0; i < shape->ElementsNum() - (indices_shape.size() - 1); i++) { | ||||
| if (update_shape[i + indices_shape.size() - 1] != shape_data[i + indices_shape.size() - 1]) { | |||||
| if (update_shape.at(i + indices_shape.size() - 1) != shape_data[i + indices_shape.size() - 1]) { | |||||
| MS_LOG(ERROR) << "Value of " << i + indices_shape.size() - 1 | MS_LOG(ERROR) << "Value of " << i + indices_shape.size() - 1 | ||||
| << " th dimension of indices is not equal to the corresbonding dimension of shape."; | << " th dimension of indices is not equal to the corresbonding dimension of shape."; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| @@ -90,7 +90,7 @@ int ScatterNDCPUKernel::ReSize() { | |||||
| // calculate unit_size_ | // calculate unit_size_ | ||||
| unit_size_ = 1; | unit_size_ = 1; | ||||
| for (int i = indices_shape.size() - 1; i < update_rank; i++) { | for (int i = indices_shape.size() - 1; i < update_rank; i++) { | ||||
| unit_size_ *= update_shape[i]; | |||||
| unit_size_ *= update_shape.at(i); | |||||
| } | } | ||||
| // calculate offsets | // calculate offsets | ||||
| @@ -102,9 +102,9 @@ int ScatterNDCPUKernel::ReSize() { | |||||
| } | } | ||||
| num_unit_ = 1; | num_unit_ = 1; | ||||
| num_unit_ *= update_shape[indices_shape.size() - 2]; | |||||
| num_unit_ *= update_shape.at(indices_shape.size() - 2); | |||||
| for (int i = indices_shape.size() - 3; i >= 0; i--) { | for (int i = indices_shape.size() - 3; i >= 0; i--) { | ||||
| num_unit_ *= update_shape[i]; | |||||
| num_unit_ *= update_shape.at(i); | |||||
| } | } | ||||
| int *indices_ptr = reinterpret_cast<int *>(indices->MutableData()); | int *indices_ptr = reinterpret_cast<int *>(indices->MutableData()); | ||||
| @@ -112,7 +112,7 @@ int ScatterNDCPUKernel::ReSize() { | |||||
| for (int i = 0; i < num_unit_; i++) { | for (int i = 0; i < num_unit_; i++) { | ||||
| int tmp_stride = 0; | int tmp_stride = 0; | ||||
| for (int j = 0; j < indice_unit_rank; j++) { | for (int j = 0; j < indice_unit_rank; j++) { | ||||
| tmp_stride += indices_ptr[i * indice_unit_rank + j] * out_strides_[j] * unit_size_; | |||||
| tmp_stride += indices_ptr[i * indice_unit_rank + j] * out_strides_.at(j) * unit_size_; | |||||
| } | } | ||||
| output_unit_offsets_.push_back(tmp_stride); | output_unit_offsets_.push_back(tmp_stride); | ||||
| } | } | ||||
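The ScatterND hunk above turns each multi-dimensional index row into a flat element offset by dotting it with the output strides and scaling by the update unit size. A standalone sketch under the same convention, with a made-up index tensor:

#include <cstdio>
#include <vector>

int main() {
  // Hypothetical: three (row, col) pairs scatter [5]-element units into a [4, 4, 5] output.
  const int indice_unit_rank = 2, num_unit = 3, unit_size = 5;
  const std::vector<int> indices = {0, 0, 1, 3, 2, 1};  // three index pairs, flattened
  const std::vector<int> out_strides = {4, 1};          // strides over the indexed dims

  std::vector<int> output_unit_offsets;
  for (int i = 0; i < num_unit; ++i) {
    int tmp_stride = 0;
    for (int j = 0; j < indice_unit_rank; ++j) {
      tmp_stride += indices.at(i * indice_unit_rank + j) * out_strides.at(j) * unit_size;
    }
    output_unit_offsets.push_back(tmp_stride);  // flat element offset of unit i
  }
  for (int off : output_unit_offsets) std::printf("%d\n", off);  // 0, 35, 45
  return 0;
}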
| @@ -43,7 +43,7 @@ int ShapeCPUKernel::Run() { | |||||
| } | } | ||||
| for (size_t i = 0; i < in_tensor->shape().size(); i++) { | for (size_t i = 0; i < in_tensor->shape().size(); i++) { | ||||
| reinterpret_cast<int *>(out_tensor->MutableData())[i] = in_tensor->shape()[i]; | |||||
| reinterpret_cast<int *>(out_tensor->MutableData())[i] = in_tensor->shape().at(i); | |||||
| } | } | ||||
| return RET_OK; | return RET_OK; | ||||
| @@ -68,7 +68,7 @@ int SkipGramCPUKernel::Run() { | |||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| StringPack sentence = mindspore::lite::ParseTensorBuffer(in_tensors_[0]).at(0); | |||||
| StringPack sentence = mindspore::lite::ParseTensorBuffer(in_tensors_.at(0)).at(0); | |||||
| std::vector<StringPack> words; | std::vector<StringPack> words; | ||||
| ParseSentenceToWords(sentence, &words); | ParseSentenceToWords(sentence, &words); | ||||
| @@ -78,12 +78,12 @@ int SkipGramCPUKernel::Run() { | |||||
| int index = 1; | int index = 1; | ||||
| int size = words.size(); | int size = words.size(); | ||||
| while (index >= 0) { | while (index >= 0) { | ||||
| if (index < skip_gram_parameter_->ngram_size && stack[index] + 1 < size && | |||||
| (index == 0 || stack[index] - stack[index - 1] <= skip_gram_parameter_->max_skip_size)) { | |||||
| stack[index]++; | |||||
| if (index < skip_gram_parameter_->ngram_size && stack.at(index) + 1 < size && | |||||
| (index == 0 || stack.at(index) - stack.at(index - 1) <= skip_gram_parameter_->max_skip_size)) { | |||||
| stack.at(index)++; | |||||
| index++; | index++; | ||||
| if (index < skip_gram_parameter_->ngram_size) { | if (index < skip_gram_parameter_->ngram_size) { | ||||
| stack[index] = stack[index - 1]; | |||||
| stack.at(index) = stack.at(index - 1); | |||||
| } | } | ||||
| } else { | } else { | ||||
| if (index > 0 && ((skip_gram_parameter_->include_all_ngrams && index <= skip_gram_parameter_->ngram_size) || | if (index > 0 && ((skip_gram_parameter_->include_all_ngrams && index <= skip_gram_parameter_->ngram_size) || | ||||
| @@ -92,16 +92,16 @@ int SkipGramCPUKernel::Run() { | |||||
| char blank[1] = {' '}; | char blank[1] = {' '}; | ||||
| StringPack blank_str = {1, blank}; | StringPack blank_str = {1, blank}; | ||||
| for (int i = 0; i < 2 * index - 2; i += 2) { | for (int i = 0; i < 2 * index - 2; i += 2) { | ||||
| gram[i] = words[stack[i / 2]]; | |||||
| gram[i + 1] = blank_str; | |||||
| gram.at(i) = words.at(stack.at(i / 2)); | |||||
| gram.at(i + 1) = blank_str; | |||||
| } | } | ||||
| gram[2 * index - 2] = words[stack[index - 1]]; | |||||
| gram.at(2 * index - 2) = words.at(stack.at(index - 1)); | |||||
| result.push_back(gram); | result.push_back(gram); | ||||
| } | } | ||||
| index--; | index--; | ||||
| } | } | ||||
| } | } | ||||
| auto ret = mindspore::lite::WriteSeperatedStringsToTensor(out_tensors_[0], result); | |||||
| auto ret = mindspore::lite::WriteSeperatedStringsToTensor(out_tensors_.at(0), result); | |||||
| return ret; | return ret; | ||||
| } | } | ||||
| @@ -45,8 +45,8 @@ int SliceCPUKernel::ReSize() { | |||||
| } | } | ||||
| for (int i = 0; i < param_->param_length_; ++i) { | for (int i = 0; i < param_->param_length_; ++i) { | ||||
| param_->shape_[i] = in_tensors_.at(0)->DimensionSize(i); | param_->shape_[i] = in_tensors_.at(0)->DimensionSize(i); | ||||
| param_->begin_[i] = begin[i]; | |||||
| param_->size_[i] = size[i] < 0 ? param_->shape_[i] - param_->begin_[i] : size[i]; | |||||
| param_->begin_[i] = begin.at(i); | |||||
| param_->size_[i] = size.at(i) < 0 ? param_->shape_[i] - param_->begin_[i] : size.at(i); | |||||
| param_->end_[i] = param_->begin_[i] + param_->size_[i]; | param_->end_[i] = param_->begin_[i] + param_->size_[i]; | ||||
| } | } | ||||
| if (param_->param_length_ < DIMENSION_4D) { | if (param_->param_length_ < DIMENSION_4D) { | ||||
| @@ -63,8 +63,8 @@ int SliceCPUKernel::Init() { | |||||
| } | } | ||||
| int SliceCPUKernel::SliceParallelRun(int thread_id) { | int SliceCPUKernel::SliceParallelRun(int thread_id) { | ||||
| const float *input_data = reinterpret_cast<const float *>(in_tensors_[0]->MutableData()); | |||||
| float *output_data = reinterpret_cast<float *>(out_tensors_[0]->MutableData()); | |||||
| const float *input_data = reinterpret_cast<const float *>(in_tensors_.at(0)->MutableData()); | |||||
| float *output_data = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData()); | |||||
| MS_ASSERT(input_data); | MS_ASSERT(input_data); | ||||
| MS_ASSERT(output_data); | MS_ASSERT(output_data); | ||||
| DoSlice(input_data, output_data, param_, thread_id); | DoSlice(input_data, output_data, param_, thread_id); | ||||
| @@ -77,8 +77,8 @@ int SliceCPUKernel::Run() { | |||||
| MS_LOG(ERROR) << "PreProcess fail!ret: " << ret; | MS_LOG(ERROR) << "PreProcess fail!ret: " << ret; | ||||
| return ret; | return ret; | ||||
| } | } | ||||
| const float *input_data = reinterpret_cast<const float *>(in_tensors_[0]->MutableData()); | |||||
| float *output_data = reinterpret_cast<float *>(out_tensors_[0]->MutableData()); | |||||
| const float *input_data = reinterpret_cast<const float *>(in_tensors_.at(0)->MutableData()); | |||||
| float *output_data = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData()); | |||||
| if (param_->size_[1] < op_parameter_->thread_num_) { | if (param_->size_[1] < op_parameter_->thread_num_) { | ||||
| DoSliceNoParallel(input_data, output_data, param_); | DoSliceNoParallel(input_data, output_data, param_); | ||||
| return RET_OK; | return RET_OK; | ||||
| @@ -51,11 +51,11 @@ int SoftmaxCPUKernel::ReSize() { | |||||
| auto in_shape = in_tensors_.front()->shape(); | auto in_shape = in_tensors_.front()->shape(); | ||||
| int out_plane_size = 1; | int out_plane_size = 1; | ||||
| for (int i = 0; i < axis; ++i) { | for (int i = 0; i < axis; ++i) { | ||||
| out_plane_size *= in_shape[i]; | |||||
| out_plane_size *= in_shape.at(i); | |||||
| } | } | ||||
| int in_plane_size = 1; | int in_plane_size = 1; | ||||
| for (int i = axis + 1; i < n_dim; i++) { | for (int i = axis + 1; i < n_dim; i++) { | ||||
| in_plane_size *= in_shape[i]; | |||||
| in_plane_size *= in_shape.at(i); | |||||
| } | } | ||||
| in_plane_size_ = in_plane_size; | in_plane_size_ = in_plane_size; | ||||
| out_plane_size_ = out_plane_size; | out_plane_size_ = out_plane_size; | ||||
| @@ -45,12 +45,12 @@ int SpaceToDepthCPUKernel::Init() { | |||||
| } | } | ||||
| int SpaceToDepthCPUKernel::ReSize() { | int SpaceToDepthCPUKernel::ReSize() { | ||||
| if (in_tensors_[0]->format() != schema::Format::Format_NHWC) { | |||||
| if (in_tensors_.at(0)->format() != schema::Format::Format_NHWC) { | |||||
| MS_LOG(ERROR) << "space_to_depth only support NHWC now!"; | MS_LOG(ERROR) << "space_to_depth only support NHWC now!"; | ||||
| return RET_FORMAT_ERR; | return RET_FORMAT_ERR; | ||||
| } | } | ||||
| num_unit_ = static_cast<int>(out_tensors_[0]->shape().at(kNHWC_H)); | |||||
| num_unit_ = static_cast<int>(out_tensors_.at(0)->shape().at(kNHWC_H)); | |||||
| thread_h_num_ = MSMIN(op_parameter_->thread_num_, num_unit_); | thread_h_num_ = MSMIN(op_parameter_->thread_num_, num_unit_); | ||||
| thread_h_stride_ = UP_DIV(num_unit_, thread_h_num_); | thread_h_stride_ = UP_DIV(num_unit_, thread_h_num_); | ||||
| return RET_OK; | return RET_OK; | ||||
| @@ -62,8 +62,8 @@ int SpaceToDepthCPUKernel::SpaceToDepth(int task_id) { | |||||
| return RET_OK; | return RET_OK; | ||||
| } | } | ||||
| int thread_offset = task_id * thread_h_stride_; | int thread_offset = task_id * thread_h_stride_; | ||||
| auto in_shape = in_tensors_[0]->shape(); | |||||
| auto out_shape = out_tensors_[0]->shape(); | |||||
| auto in_shape = in_tensors_.at(0)->shape(); | |||||
| auto out_shape = out_tensors_.at(0)->shape(); | |||||
| SpaceToDepthParameter *param = reinterpret_cast<SpaceToDepthParameter *>(op_parameter_); | SpaceToDepthParameter *param = reinterpret_cast<SpaceToDepthParameter *>(op_parameter_); | ||||
| MS_ASSERT(param); | MS_ASSERT(param); | ||||
| MS_ASSERT(input_ptr_); | MS_ASSERT(input_ptr_); | ||||
| @@ -88,9 +88,9 @@ int SpaceToDepthRun(void *cdata, int task_id) { | |||||
| } | } | ||||
| int SpaceToDepthCPUKernel::Run() { | int SpaceToDepthCPUKernel::Run() { | ||||
| input_ptr_ = reinterpret_cast<float *>(in_tensors_[0]->MutableData()); | |||||
| output_ptr_ = reinterpret_cast<float *>(out_tensors_[0]->MutableData()); | |||||
| if (in_tensors_[0]->format() == schema::Format::Format_NHWC) { | |||||
| input_ptr_ = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData()); | |||||
| output_ptr_ = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData()); | |||||
| if (in_tensors_.at(0)->format() == schema::Format::Format_NHWC) { | |||||
| auto ret = ParallelLaunch(this->context_->thread_pool_, SpaceToDepthRun, this, thread_h_num_); | auto ret = ParallelLaunch(this->context_->thread_pool_, SpaceToDepthRun, this, thread_h_num_); | ||||
| if (ret != RET_OK) { | if (ret != RET_OK) { | ||||
| MS_LOG(ERROR) << "SpaceToDepth error error_code[" << ret << "]"; | MS_LOG(ERROR) << "SpaceToDepth error error_code[" << ret << "]"; | ||||
| @@ -91,7 +91,7 @@ int SparseToDenseRun(void *cdata, int task_id) { | |||||
| int SparseToDenseCPUKernel::GenerateIndices() { | int SparseToDenseCPUKernel::GenerateIndices() { | ||||
| auto input0 = in_tensors_.at(0); | auto input0 = in_tensors_.at(0); | ||||
| index_num = input0->shape()[0]; | |||||
| index_num = input0->shape().at(0); | |||||
| if (index_num >= std::numeric_limits<int>::max() / static_cast<int>(sizeof(int *))) { | if (index_num >= std::numeric_limits<int>::max() / static_cast<int>(sizeof(int *))) { | ||||
| MS_LOG(ERROR) << "Input dim is invalid, dim: " << index_num; | MS_LOG(ERROR) << "Input dim is invalid, dim: " << index_num; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| @@ -120,7 +120,7 @@ int SparseToDenseCPUKernel::GenerateIndices() { | |||||
| break; | break; | ||||
| } | } | ||||
| case 2: { | case 2: { | ||||
| int true_dims = input0->shape()[1]; | |||||
| int true_dims = input0->shape().at(1); | |||||
| MS_ASSERT(true_dims <= DIMENSION_4D); | MS_ASSERT(true_dims <= DIMENSION_4D); | ||||
| for (int i = 0; i < index_num; i++) { | for (int i = 0; i < index_num; i++) { | ||||
| sparse_indices_vect[i] = new int[DIMENSION_4D]; | sparse_indices_vect[i] = new int[DIMENSION_4D]; | ||||
| @@ -77,7 +77,7 @@ int SplitCPUKernel::Run() { | |||||
| auto in_tensor = in_tensors_.front(); | auto in_tensor = in_tensors_.front(); | ||||
| input_ptr_ = reinterpret_cast<float *>(in_tensor->MutableData()); | input_ptr_ = reinterpret_cast<float *>(in_tensor->MutableData()); | ||||
| for (int i = 0; i < param->num_split_; i++) { | for (int i = 0; i < param->num_split_; i++) { | ||||
| output_ptr_[i] = reinterpret_cast<float *>(out_tensors_.at(i)->MutableData()); | |||||
| output_ptr_.at(i) = reinterpret_cast<float *>(out_tensors_.at(i)->MutableData()); | |||||
| } | } | ||||
| auto ret = ParallelLaunch(this->context_->thread_pool_, SplitRun, this, thread_n_num_); | auto ret = ParallelLaunch(this->context_->thread_pool_, SplitRun, this, thread_n_num_); | ||||
| if (ret != RET_OK) { | if (ret != RET_OK) { | ||||
| @@ -29,7 +29,7 @@ using mindspore::schema::PrimitiveType_Stack; | |||||
| namespace mindspore::kernel { | namespace mindspore::kernel { | ||||
| int StackCPUKernel::ReSize() { | int StackCPUKernel::ReSize() { | ||||
| StackParameter *param = reinterpret_cast<StackParameter *>(op_parameter_); | StackParameter *param = reinterpret_cast<StackParameter *>(op_parameter_); | ||||
| auto input0_shape = in_tensors_[0]->shape(); | |||||
| auto input0_shape = in_tensors_.at(0)->shape(); | |||||
| axis_ = param->axis_ < 0 ? param->axis_ + input0_shape.size() + 1 : param->axis_; | axis_ = param->axis_ < 0 ? param->axis_ + input0_shape.size() + 1 : param->axis_; | ||||
| return RET_OK; | return RET_OK; | ||||
| } | } | ||||
| @@ -44,31 +44,31 @@ int StackCPUKernel::Init() { | |||||
| int StackCPUKernel::Run() { | int StackCPUKernel::Run() { | ||||
| size_t inputs_num = in_tensors_.size(); | size_t inputs_num = in_tensors_.size(); | ||||
| auto input0 = in_tensors_[0]; | |||||
| auto input0 = in_tensors_.at(0); | |||||
| if (inputs_num == 1) { | if (inputs_num == 1) { | ||||
| auto *output_data = reinterpret_cast<int8_t *>(out_tensors_[0]->MutableData()); | |||||
| auto *output_data = reinterpret_cast<int8_t *>(out_tensors_.at(0)->MutableData()); | |||||
| MS_ASSERT(output_data); | MS_ASSERT(output_data); | ||||
| auto *input_data = reinterpret_cast<const int8_t *>(input0->MutableData()); | auto *input_data = reinterpret_cast<const int8_t *>(input0->MutableData()); | ||||
| MS_ASSERT(input_data); | MS_ASSERT(input_data); | ||||
| DoStackOneInput(input_data, output_data, input0->Size()); | DoStackOneInput(input_data, output_data, input0->Size()); | ||||
| return RET_OK; | return RET_OK; | ||||
| } | } | ||||
| auto input0_shape = in_tensors_[0]->shape(); | |||||
| if (in_tensors_[0]->data_type() == kNumberTypeFloat32 || in_tensors_[0]->data_type() == kNumberTypeFloat) { | |||||
| auto *output_data = reinterpret_cast<float *>(out_tensors_[0]->MutableData()); | |||||
| auto input0_shape = in_tensors_.at(0)->shape(); | |||||
| if (in_tensors_.at(0)->data_type() == kNumberTypeFloat32 || in_tensors_.at(0)->data_type() == kNumberTypeFloat) { | |||||
| auto *output_data = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData()); | |||||
| MS_ASSERT(output_data); | MS_ASSERT(output_data); | ||||
| float *inputs[inputs_num]; | float *inputs[inputs_num]; | ||||
| for (size_t i = 0; i < inputs_num; ++i) { | for (size_t i = 0; i < inputs_num; ++i) { | ||||
| inputs[i] = reinterpret_cast<float *>(in_tensors_[i]->MutableData()); | |||||
| inputs[i] = reinterpret_cast<float *>(in_tensors_.at(i)->MutableData()); | |||||
| MS_ASSERT(inputs[i]); | MS_ASSERT(inputs[i]); | ||||
| } | } | ||||
| DoStack(inputs, inputs_num, input0_shape.data(), input0_shape.size(), axis_, output_data); | DoStack(inputs, inputs_num, input0_shape.data(), input0_shape.size(), axis_, output_data); | ||||
| } else { | } else { | ||||
| auto *output_data = reinterpret_cast<int32_t *>(out_tensors_[0]->MutableData()); | |||||
| auto *output_data = reinterpret_cast<int32_t *>(out_tensors_.at(0)->MutableData()); | |||||
| MS_ASSERT(output_data); | MS_ASSERT(output_data); | ||||
| int32_t *inputs[inputs_num]; | int32_t *inputs[inputs_num]; | ||||
| for (size_t i = 0; i < inputs_num; ++i) { | for (size_t i = 0; i < inputs_num; ++i) { | ||||
| inputs[i] = reinterpret_cast<int32_t *>(in_tensors_[i]->MutableData()); | |||||
| inputs[i] = reinterpret_cast<int32_t *>(in_tensors_.at(i)->MutableData()); | |||||
| MS_ASSERT(inputs[i]); | MS_ASSERT(inputs[i]); | ||||
| } | } | ||||
| DoStackInt32(inputs, inputs_num, input0_shape.data(), input0_shape.size(), axis_, output_data); | DoStackInt32(inputs, inputs_num, input0_shape.data(), input0_shape.size(), axis_, output_data); | ||||
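One detail in this hunk worth flagging: float *inputs[inputs_num] with a runtime inputs_num is a variable-length array, a GCC/Clang extension rather than standard C++. A portable sketch of the same pointer-gathering step using std::vector (the buffers stand in for MutableData() results):

#include <cstdio>
#include <vector>

int main() {
  // Hypothetical per-input buffers standing in for in_tensors_[i]->MutableData().
  std::vector<std::vector<float>> tensors = {{1.f, 2.f}, {3.f, 4.f}, {5.f, 6.f}};

  std::vector<float *> inputs(tensors.size());  // portable: no VLA required
  for (size_t i = 0; i < tensors.size(); ++i) {
    inputs.at(i) = tensors.at(i).data();
  }
  // inputs.data() yields the pointer array a DoStack-style C routine expects.
  std::printf("input 1, element 0: %f\n", inputs.at(1)[0]);  // 3.000000
  return 0;
}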
| @@ -42,10 +42,10 @@ void TileCPUKernel::ComputeStrides(const int *shape, int *strides, int ndim) { | |||||
| int TileCPUKernel::ReSize() { | int TileCPUKernel::ReSize() { | ||||
| auto tile_parameter_ = reinterpret_cast<TileParameter *>(op_parameter_); | auto tile_parameter_ = reinterpret_cast<TileParameter *>(op_parameter_); | ||||
| MS_ASSERT(tile_parameter_); | MS_ASSERT(tile_parameter_); | ||||
| tile_parameter_->in_dim_ = in_tensors_[0]->shape().size(); | |||||
| tile_parameter_->in_dim_ = in_tensors_.at(0)->shape().size(); | |||||
| for (int i = 0; i < tile_parameter_->in_dim_; ++i) { | for (int i = 0; i < tile_parameter_->in_dim_; ++i) { | ||||
| tile_parameter_->in_shape_[i] = in_tensors_[0]->shape()[i]; | |||||
| tile_parameter_->out_shape_[i] = out_tensors_[0]->shape()[i]; | |||||
| tile_parameter_->in_shape_[i] = in_tensors_.at(0)->shape().at(i); | |||||
| tile_parameter_->out_shape_[i] = out_tensors_.at(0)->shape().at(i); | |||||
| } | } | ||||
| ComputeStrides(tile_parameter_->in_shape_, tile_parameter_->in_strides_, tile_parameter_->in_dim_); | ComputeStrides(tile_parameter_->in_shape_, tile_parameter_->in_strides_, tile_parameter_->in_dim_); | ||||
| ComputeStrides(tile_parameter_->out_shape_, tile_parameter_->out_strides_, tile_parameter_->in_dim_); | ComputeStrides(tile_parameter_->out_shape_, tile_parameter_->out_strides_, tile_parameter_->in_dim_); | ||||
| @@ -37,10 +37,10 @@ int TopKCPUKernel::Init() { | |||||
| int TopKCPUKernel::ReSize() { | int TopKCPUKernel::ReSize() { | ||||
| lite::Tensor *input = in_tensors_.at(0); | lite::Tensor *input = in_tensors_.at(0); | ||||
| TopkParameter *parameter = reinterpret_cast<TopkParameter *>(op_parameter_); | TopkParameter *parameter = reinterpret_cast<TopkParameter *>(op_parameter_); | ||||
| parameter->last_dim_size_ = input->shape()[input->shape().size() - 1]; | |||||
| parameter->last_dim_size_ = input->shape().at(input->shape().size() - 1); | |||||
| parameter->loop_num_ = 1; | parameter->loop_num_ = 1; | ||||
| for (size_t i = 0; i < input->shape().size() - 1; ++i) { | for (size_t i = 0; i < input->shape().size() - 1; ++i) { | ||||
| parameter->loop_num_ *= input->shape()[i]; | |||||
| parameter->loop_num_ *= input->shape().at(i); | |||||
| } | } | ||||
| return RET_OK; | return RET_OK; | ||||
| } | } | ||||
| @@ -36,7 +36,7 @@ int TransposeCPUKernel::Init() { | |||||
| int TransposeCPUKernel::ReSize() { | int TransposeCPUKernel::ReSize() { | ||||
| TransposeParameter *param = reinterpret_cast<TransposeParameter *>(op_parameter_); | TransposeParameter *param = reinterpret_cast<TransposeParameter *>(op_parameter_); | ||||
| num_unit_ = static_cast<int>(in_tensors_[kInputIndex]->shape().at(param->perm_[kNHWC_H])); | |||||
| num_unit_ = static_cast<int>(in_tensors_.at(kInputIndex)->shape().at(param->perm_[kNHWC_H])); | |||||
| thread_h_num_ = MSMIN(thread_num_, num_unit_); | thread_h_num_ = MSMIN(thread_num_, num_unit_); | ||||
| thread_h_stride_ = UP_DIV(num_unit_, thread_h_num_); | thread_h_stride_ = UP_DIV(num_unit_, thread_h_num_); | ||||
| @@ -48,8 +48,8 @@ int TransposeCPUKernel::ReSize() { | |||||
| param->out_strides_[param->num_axes_ - 1] = 1; | param->out_strides_[param->num_axes_ - 1] = 1; | ||||
| param->data_size_ = inTensor->Size(); | param->data_size_ = inTensor->Size(); | ||||
| for (int i = param->num_axes_ - 2; i >= 0; i--) { | for (int i = param->num_axes_ - 2; i >= 0; i--) { | ||||
| param->strides_[i] = in_shape[i + 1] * param->strides_[i + 1]; | |||||
| param->out_strides_[i] = out_shape[i + 1] * param->out_strides_[i + 1]; | |||||
| param->strides_[i] = in_shape.at(i + 1) * param->strides_[i + 1]; | |||||
| param->out_strides_[i] = out_shape.at(i + 1) * param->out_strides_[i + 1]; | |||||
| } | } | ||||
| if (this->in_shape_ != nullptr) { | if (this->in_shape_ != nullptr) { | ||||
| free(this->in_shape_); | free(this->in_shape_); | ||||
| @@ -39,7 +39,7 @@ int UniqueCPUKernel::Run() { | |||||
| Unique(input, in_tensors_.at(0)->ElementsNum(), output0, &output0_len, output1); | Unique(input, in_tensors_.at(0)->ElementsNum(), output0, &output0_len, output1); | ||||
| std::vector<int> out_shape = out_tensors_.at(0)->shape(); | std::vector<int> out_shape = out_tensors_.at(0)->shape(); | ||||
| out_shape[out_shape.size() - 1] = output0_len; | |||||
| out_shape.at(out_shape.size() - 1) = output0_len; | |||||
| out_tensors_.at(0)->set_shape(out_shape); | out_tensors_.at(0)->set_shape(out_shape); | ||||
| return RET_OK; | return RET_OK; | ||||
| } | } | ||||
| @@ -44,8 +44,8 @@ int UpsampleCPUKernel::ReSize() { | |||||
| MS_LOG(ERROR) << "Upsample out tensor dim should be 4"; | MS_LOG(ERROR) << "Upsample out tensor dim should be 4"; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| new_height_ = out_shape[1]; | |||||
| new_width_ = out_shape[2]; | |||||
| new_height_ = out_shape.at(1); | |||||
| new_width_ = out_shape.at(2); | |||||
| if (param_->method_ == 0) { // bilinear | if (param_->method_ == 0) { // bilinear | ||||
| FreeTmpBuffer(); | FreeTmpBuffer(); | ||||
| @@ -96,12 +96,12 @@ int UpsampleCPUKernel::RunImpl(int task_id) { | |||||
| switch (param_->method_) { | switch (param_->method_) { | ||||
| case static_cast<int>(schema::ResizeMethod_LINEAR): { | case static_cast<int>(schema::ResizeMethod_LINEAR): { | ||||
| int n_h_begin, n_h_end; | int n_h_begin, n_h_end; | ||||
| int n = out_tensor->shape()[0]; | |||||
| int n = out_tensor->shape().at(0); | |||||
| int h = new_height_; | int h = new_height_; | ||||
| int unit = UP_DIV(n * h, context_->thread_num_); | int unit = UP_DIV(n * h, context_->thread_num_); | ||||
| n_h_begin = unit * task_id; | n_h_begin = unit * task_id; | ||||
| n_h_end = std::min(n_h_begin + unit, n * h); | n_h_end = std::min(n_h_begin + unit, n * h); | ||||
| int c = in_tensors_.at(0)->shape()[3]; | |||||
| int c = in_tensors_.at(0)->shape().at(3); | |||||
| float *line0 = line_buffer_ + new_width_ * c * 2 * task_id; | float *line0 = line_buffer_ + new_width_ * c * 2 * task_id; | ||||
| float *line1 = line0 + new_width_ * c; | float *line1 = line0 + new_width_ * c; | ||||
| ret = | ret = | ||||
| @@ -33,13 +33,13 @@ namespace mindspore::kernel { | |||||
| int SgdCPUKernel::ReSize() { return RET_OK; } | int SgdCPUKernel::ReSize() { return RET_OK; } | ||||
| int SgdCPUKernel::Execute(int task_id) { | int SgdCPUKernel::Execute(int task_id) { | ||||
| auto weight = reinterpret_cast<float *>(in_tensors_[0]->MutableData()); | |||||
| auto accumulate = reinterpret_cast<float *>(in_tensors_[3]->MutableData()); | |||||
| float learning_rate = reinterpret_cast<float *>(in_tensors_[2]->MutableData())[0]; | |||||
| auto gradient = reinterpret_cast<float *>(in_tensors_[1]->MutableData()); | |||||
| float moment = reinterpret_cast<float *>(in_tensors_[4]->MutableData())[0]; | |||||
| size_t elem_num = in_tensors_[0]->ElementsNum(); | |||||
| auto stat = reinterpret_cast<float *>(in_tensors_[5]->MutableData()); | |||||
| auto weight = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData()); | |||||
| auto accumulate = reinterpret_cast<float *>(in_tensors_.at(3)->MutableData()); | |||||
| float learning_rate = reinterpret_cast<float *>(in_tensors_.at(2)->MutableData())[0]; | |||||
| auto gradient = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData()); | |||||
| float moment = reinterpret_cast<float *>(in_tensors_.at(4)->MutableData())[0]; | |||||
| size_t elem_num = in_tensors_.at(0)->ElementsNum(); | |||||
| auto stat = reinterpret_cast<float *>(in_tensors_.at(5)->MutableData()); | |||||
| if (stat[0] > 0) { | if (stat[0] > 0) { | ||||
| stat[0] = 0; | stat[0] = 0; | ||||
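The SGD hunk above only shows the tensor plumbing (weight, gradient, accumulate, learning rate, momentum, stat flag), not the update rule itself. For orientation, a textbook SGD-with-momentum step is sketched below; whether the kernel uses exactly this variant (as opposed to, say, Nesterov or a dampened form) is an assumption, not shown in the diff:

#include <cstdio>

int main() {
  // Assumed classic momentum update: v <- m * v + g;  w <- w - lr * v.
  const int elem_num = 3;
  float weight[elem_num] = {1.0f, 2.0f, 3.0f};
  float accumulate[elem_num] = {0.0f, 0.0f, 0.0f};
  const float gradient[elem_num] = {0.1f, 0.1f, 0.1f};
  const float learning_rate = 0.01f, moment = 0.9f;

  for (int i = 0; i < elem_num; ++i) {
    accumulate[i] = moment * accumulate[i] + gradient[i];
    weight[i] -= learning_rate * accumulate[i];
  }
  std::printf("w[0] = %f\n", weight[0]);  // 1.0 - 0.01 * 0.1 = 0.999000
  return 0;
}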
| @@ -96,8 +96,8 @@ int SgdCPUKernel::Run() { | |||||
| int SgdCPUKernel::Init() { | int SgdCPUKernel::Init() { | ||||
| // Only for test with uninitialized Data | // Only for test with uninitialized Data | ||||
| size_t elem_num = in_tensors_[0]->ElementsNum(); | |||||
| auto accumulate = reinterpret_cast<float *>(in_tensors_[3]->MutableData()); | |||||
| size_t elem_num = in_tensors_.at(0)->ElementsNum(); | |||||
| auto accumulate = reinterpret_cast<float *>(in_tensors_.at(3)->MutableData()); | |||||
| for (size_t i = 0; i < elem_num; i++) { | for (size_t i = 0; i < elem_num; i++) { | ||||
| accumulate[i] = 0.0; | accumulate[i] = 0.0; | ||||
| } | } | ||||
| @@ -101,11 +101,11 @@ int SoftmaxCrossEntropyWithLogitsCPUKernel::Run() { | |||||
| } | } | ||||
| int SoftmaxCrossEntropyWithLogitsCPUKernel::Init() { | int SoftmaxCrossEntropyWithLogitsCPUKernel::Init() { | ||||
| auto dims = in_tensors_[0]->shape(); | |||||
| auto dims = in_tensors_.at(0)->shape(); | |||||
| param_->n_dim_ = 2; | param_->n_dim_ = 2; | ||||
| param_->number_of_classes_ = dims[1]; | |||||
| param_->batch_size_ = dims[0]; | |||||
| for (unsigned int i = 0; i < dims.size(); i++) param_->input_shape_[i] = dims[i]; | |||||
| param_->number_of_classes_ = dims.at(1); | |||||
| param_->batch_size_ = dims.at(0); | |||||
| for (unsigned int i = 0; i < dims.size(); i++) param_->input_shape_[i] = dims.at(i); | |||||
| if (this->in_tensors_.size() != 2) { | if (this->in_tensors_.size() != 2) { | ||||
| MS_LOG(ERROR) << "softmax entropy loss should have two inputs"; | MS_LOG(ERROR) << "softmax entropy loss should have two inputs"; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| @@ -117,11 +117,11 @@ int SoftmaxCrossEntropyWithLogitsCPUKernel::Init() { | |||||
| } | } | ||||
| size_t data_size = in_tensors_.at(0)->ElementsNum(); | size_t data_size = in_tensors_.at(0)->ElementsNum(); | ||||
| set_workspace_size((data_size + dims[0]) * sizeof(float)); | |||||
| set_workspace_size((data_size + dims.at(0)) * sizeof(float)); | |||||
| sm_params_.n_dim_ = 2; | sm_params_.n_dim_ = 2; | ||||
| sm_params_.element_size_ = data_size; | sm_params_.element_size_ = data_size; | ||||
| sm_params_.axis_ = 1; | sm_params_.axis_ = 1; | ||||
| for (size_t i = 0; i < dims.size(); i++) sm_params_.input_shape_[i] = dims[i]; | |||||
| for (size_t i = 0; i < dims.size(); i++) sm_params_.input_shape_[i] = dims.at(i); | |||||
| return RET_OK; | return RET_OK; | ||||
| } | } | ||||
| @@ -35,8 +35,8 @@ int SoftmaxGradCPUKernel::Init() { | |||||
| int ele_size = 1; | int ele_size = 1; | ||||
| param->n_dim_ = in_dims; | param->n_dim_ = in_dims; | ||||
| for (size_t i = 0; i < in_dims; i++) { | for (size_t i = 0; i < in_dims; i++) { | ||||
| param->input_shape_[i] = in_shape[i]; | |||||
| ele_size *= in_shape[i]; | |||||
| param->input_shape_[i] = in_shape.at(i); | |||||
| ele_size *= in_shape.at(i); | |||||
| } | } | ||||
| param->element_size_ = ele_size; | param->element_size_ = ele_size; | ||||
| @@ -50,9 +50,9 @@ int SoftmaxGradCPUKernel::Init() { | |||||
| inner_size_ = 1; | inner_size_ = 1; | ||||
| for (size_t i = axis + 1; i < in_dims; i++) { | for (size_t i = axis + 1; i < in_dims; i++) { | ||||
| inner_size_ *= in_shape[i]; | |||||
| inner_size_ *= in_shape.at(i); | |||||
| } | } | ||||
| set_workspace_size(inner_size_ * (1 + in_shape[axis]) * sizeof(float)); | |||||
| set_workspace_size(inner_size_ * (1 + in_shape.at(axis)) * sizeof(float)); | |||||
| return RET_OK; | return RET_OK; | ||||
| } | } | ||||
| @@ -121,11 +121,11 @@ int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::Run() { | |||||
| } | } | ||||
| int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::Init() { | int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::Init() { | ||||
| auto dims = in_tensors_[0]->shape(); | |||||
| auto dims = in_tensors_.at(0)->shape(); | |||||
| param->n_dim_ = 2; | param->n_dim_ = 2; | ||||
| param->number_of_classes_ = dims[1]; | |||||
| param->batch_size_ = dims[0]; | |||||
| for (unsigned int i = 0; i < dims.size(); i++) param->input_shape_[i] = dims[i]; | |||||
| param->number_of_classes_ = dims.at(1); | |||||
| param->batch_size_ = dims.at(0); | |||||
| for (unsigned int i = 0; i < dims.size(); i++) param->input_shape_[i] = dims.at(i); | |||||
| if (2 != this->in_tensors_.size()) { | if (2 != this->in_tensors_.size()) { | ||||
| MS_LOG(ERROR) << "sparse softmax entropy loss should have two inputs"; | MS_LOG(ERROR) << "sparse softmax entropy loss should have two inputs"; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| @@ -136,11 +136,11 @@ int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::Init() { | |||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| size_t data_size = in_tensors_.at(0)->ElementsNum(); | size_t data_size = in_tensors_.at(0)->ElementsNum(); | ||||
| set_workspace_size((data_size + dims[0]) * sizeof(float)); | |||||
| set_workspace_size((data_size + dims.at(0)) * sizeof(float)); | |||||
| sm_params_.n_dim_ = 2; | sm_params_.n_dim_ = 2; | ||||
| sm_params_.element_size_ = data_size; | sm_params_.element_size_ = data_size; | ||||
| sm_params_.axis_ = 1; | sm_params_.axis_ = 1; | ||||
| for (size_t i = 0; i < dims.size(); i++) sm_params_.input_shape_[i] = dims[i]; | |||||
| for (size_t i = 0; i < dims.size(); i++) sm_params_.input_shape_[i] = dims.at(i); | |||||
| return RET_OK; | return RET_OK; | ||||
| } | } | ||||
| @@ -96,32 +96,32 @@ int ResizeInt8CPUKernel::Init() { | |||||
| int ResizeInt8CPUKernel::InitResizeQuantArg() { | int ResizeInt8CPUKernel::InitResizeQuantArg() { | ||||
| auto out_shape = out_tensors_.front()->shape(); | auto out_shape = out_tensors_.front()->shape(); | ||||
| resize_quant_arg_.x_axis_index_ = reinterpret_cast<int32_t *>(malloc(out_shape[2] * sizeof(int32_t))); | |||||
| resize_quant_arg_.x_axis_index_ = reinterpret_cast<int32_t *>(malloc(out_shape.at(2) * sizeof(int32_t))); | |||||
| if (resize_quant_arg_.x_axis_index_ == nullptr) { | if (resize_quant_arg_.x_axis_index_ == nullptr) { | ||||
| MS_LOG(ERROR) << "malloc x axis index array failed."; | MS_LOG(ERROR) << "malloc x axis index array failed."; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| resize_quant_arg_.x_axis_lower_ = reinterpret_cast<int32_t *>(malloc(out_shape[2] * sizeof(int32_t))); | |||||
| resize_quant_arg_.x_axis_lower_ = reinterpret_cast<int32_t *>(malloc(out_shape.at(2) * sizeof(int32_t))); | |||||
| if (resize_quant_arg_.x_axis_lower_ == nullptr) { | if (resize_quant_arg_.x_axis_lower_ == nullptr) { | ||||
| MS_LOG(ERROR) << "malloc x_axis_lower_ array failed."; | MS_LOG(ERROR) << "malloc x_axis_lower_ array failed."; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| resize_quant_arg_.x_axis_upper_ = reinterpret_cast<int32_t *>(malloc(out_shape[2] * sizeof(int32_t))); | |||||
| resize_quant_arg_.x_axis_upper_ = reinterpret_cast<int32_t *>(malloc(out_shape.at(2) * sizeof(int32_t))); | |||||
| if (resize_quant_arg_.x_axis_upper_ == nullptr) { | if (resize_quant_arg_.x_axis_upper_ == nullptr) { | ||||
| MS_LOG(ERROR) << "malloc x_axis_upper_ array failed."; | MS_LOG(ERROR) << "malloc x_axis_upper_ array failed."; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| resize_quant_arg_.y_axis_index_ = reinterpret_cast<int32_t *>(malloc(out_shape[1] * sizeof(int32_t))); | |||||
| resize_quant_arg_.y_axis_index_ = reinterpret_cast<int32_t *>(malloc(out_shape.at(1) * sizeof(int32_t))); | |||||
| if (resize_quant_arg_.y_axis_index_ == nullptr) { | if (resize_quant_arg_.y_axis_index_ == nullptr) { | ||||
| MS_LOG(ERROR) << "malloc y_axis_index_ array failed."; | MS_LOG(ERROR) << "malloc y_axis_index_ array failed."; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| resize_quant_arg_.y_axis_lower_ = reinterpret_cast<int32_t *>(malloc(out_shape[1] * sizeof(int32_t))); | |||||
| resize_quant_arg_.y_axis_lower_ = reinterpret_cast<int32_t *>(malloc(out_shape.at(1) * sizeof(int32_t))); | |||||
| if (resize_quant_arg_.y_axis_lower_ == nullptr) { | if (resize_quant_arg_.y_axis_lower_ == nullptr) { | ||||
| MS_LOG(ERROR) << "malloc y_axis_lower_ array failed."; | MS_LOG(ERROR) << "malloc y_axis_lower_ array failed."; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| } | } | ||||
| resize_quant_arg_.y_axis_upper_ = reinterpret_cast<int32_t *>(malloc(out_shape[1] * sizeof(int32_t))); | |||||
| resize_quant_arg_.y_axis_upper_ = reinterpret_cast<int32_t *>(malloc(out_shape.at(1) * sizeof(int32_t))); | |||||
| if (resize_quant_arg_.y_axis_upper_ == nullptr) { | if (resize_quant_arg_.y_axis_upper_ == nullptr) { | ||||
| MS_LOG(ERROR) << "malloc y_axis_upper_ array failed."; | MS_LOG(ERROR) << "malloc y_axis_upper_ array failed."; | ||||
| return RET_ERROR; | return RET_ERROR; | ||||
| @@ -141,10 +141,10 @@ int ScaleInt8CPUKernel::InitParameter() { | |||||
| second_in_shape_.resize(len); | second_in_shape_.resize(len); | ||||
| size_t i = 0; | size_t i = 0; | ||||
| for (; i < input1_size; ++i) { | for (; i < input1_size; ++i) { | ||||
| second_in_shape_[i] = input1_shape[i]; | |||||
| second_in_shape_.at(i) = input1_shape.at(i); | |||||
| } | } | ||||
| for (; i < len; ++i) { | for (; i < len; ++i) { | ||||
| second_in_shape_[i] = 1; | |||||
| second_in_shape_.at(i) = 1; | |||||
| } | } | ||||
| input1_size = len; | input1_size = len; | ||||
| } | } | ||||
| @@ -164,7 +164,7 @@ int ScaleInt8CPUKernel::InitParameter() { | |||||
| if (i < fill_dim_num) { | if (i < fill_dim_num) { | ||||
| tile_para->in_shape1_[i] = 1; | tile_para->in_shape1_[i] = 1; | ||||
| } else { | } else { | ||||
| tile_para->in_shape1_[i] = second_in_shape_[j++]; | |||||
| tile_para->in_shape1_[i] = second_in_shape_.at(j++); | |||||
| } | } | ||||
| tile_para->out_shape_[i] = out_tensors_.at(0)->DimensionSize(i); | tile_para->out_shape_[i] = out_tensors_.at(0)->DimensionSize(i); | ||||
| } | } | ||||
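These two ScaleInt8 hunks pad the second input's shape up to the output rank: trailing dimensions are filled with 1 first, then leading dimensions are filled with 1 when aligning against the tiled output shape. A compact sketch of that kind of rank alignment (illustrative helper, not the kernel's exact policy):

#include <cstdio>
#include <vector>

// Pad `shape` with ones to `rank` dims, placing the real dims after `fill_dim_num` leading ones.
static std::vector<int> AlignRank(const std::vector<int> &shape, size_t rank, size_t fill_dim_num) {
  std::vector<int> aligned(rank, 1);
  for (size_t i = fill_dim_num, j = 0; i < rank && j < shape.size(); ++i, ++j) {
    aligned.at(i) = shape.at(j);
  }
  return aligned;
}

int main() {
  const std::vector<int> scale_shape = {3, 1};
  for (int d : AlignRank(scale_shape, 4, 1)) std::printf("%d ", d);  // 1 3 1 1
  std::printf("\n");
  return 0;
}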
| @@ -51,9 +51,9 @@ int SliceInt8CPUKernel::Init() { | |||||
| } | } | ||||
| int SliceInt8CPUKernel::DoSlice(int task_id) { | int SliceInt8CPUKernel::DoSlice(int task_id) { | ||||
| const int8_t *input_data = reinterpret_cast<const int8_t *>(in_tensors_[0]->MutableData()); | |||||
| const int8_t *input_data = reinterpret_cast<const int8_t *>(in_tensors_.at(0)->MutableData()); | |||||
| MS_ASSERT(input_data); | MS_ASSERT(input_data); | ||||
| int8_t *output_data = reinterpret_cast<int8_t *>(out_tensors_[0]->MutableData()); | |||||
| int8_t *output_data = reinterpret_cast<int8_t *>(out_tensors_.at(0)->MutableData()); | |||||
| MS_ASSERT(output_data); | MS_ASSERT(output_data); | ||||
| auto ret = SliceInt8(input_data, output_data, param_, task_id); | auto ret = SliceInt8(input_data, output_data, param_, task_id); | ||||
| @@ -73,9 +73,9 @@ int SliceInt8Run(void *cdata, int task_id) { | |||||
| } | } | ||||
| int SliceInt8CPUKernel::Run() { | int SliceInt8CPUKernel::Run() { | ||||
| const int8_t *input_data = reinterpret_cast<const int8_t *>(in_tensors_[0]->MutableData()); | |||||
| const int8_t *input_data = reinterpret_cast<const int8_t *>(in_tensors_.at(0)->MutableData()); | |||||
| MS_ASSERT(input_data); | MS_ASSERT(input_data); | ||||
| int8_t *output_data = reinterpret_cast<int8_t *>(out_tensors_[0]->MutableData()); | |||||
| int8_t *output_data = reinterpret_cast<int8_t *>(out_tensors_.at(0)->MutableData()); | |||||
| MS_ASSERT(output_data); | MS_ASSERT(output_data); | ||||
| mindspore::lite::STATUS ret = RET_ERROR; | mindspore::lite::STATUS ret = RET_ERROR; | ||||
| if (param_->size_[1] < param_->op_parameter_.thread_num_) { | if (param_->size_[1] < param_->op_parameter_.thread_num_) { | ||||
| @@ -36,10 +36,10 @@ int TopKInt8CPUKernel::ReSize() { | |||||
| MS_ASSERT(parameter); | MS_ASSERT(parameter); | ||||
| lite::Tensor *input = in_tensors_.at(0); | lite::Tensor *input = in_tensors_.at(0); | ||||
| MS_ASSERT(input); | MS_ASSERT(input); | ||||
| parameter->last_dim_size_ = input->shape()[input->shape().size() - 1]; | |||||
| parameter->last_dim_size_ = input->shape().at(input->shape().size() - 1); | |||||
| parameter->loop_num_ = 1; | parameter->loop_num_ = 1; | ||||
| for (size_t i = 0; i < input->shape().size() - 1; ++i) { | for (size_t i = 0; i < input->shape().size() - 1; ++i) { | ||||
| parameter->loop_num_ *= input->shape()[i]; | |||||
| parameter->loop_num_ *= input->shape().at(i); | |||||
| } | } | ||||
| return RET_OK; | return RET_OK; | ||||
| } | } | ||||
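A closing design note on the recurring loops like parameter->loop_num_ *= input->shape().at(i): if the tensor's shape() accessor returns its vector by value (assumed here, not confirmed by the diff), calling it in both the loop condition and body re-copies the vector every iteration. A sketch of hoisting the copy out once:

#include <cstdio>
#include <vector>

// Hypothetical stand-in for a tensor whose shape() accessor returns by value.
struct Tensor {
  std::vector<int> dims;
  std::vector<int> shape() const { return dims; }  // copies on every call
};

int main() {
  Tensor input{{2, 3, 4, 5}};
  const std::vector<int> shape = input.shape();  // copy once, reuse below
  int loop_num = 1;
  for (size_t i = 0; i + 1 < shape.size(); ++i) loop_num *= shape.at(i);
  std::printf("loop_num=%d last_dim=%d\n", loop_num, shape.at(shape.size() - 1));  // 24 5
  return 0;
}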