From 3fae397b702038df9f37eb016448be48c4b8aaa9 Mon Sep 17 00:00:00 2001 From: yeyunpeng Date: Tue, 18 Aug 2020 14:29:49 +0800 Subject: [PATCH] change ops --- mindspore/lite/c_ops/CMakeLists.txt | 3 - mindspore/lite/c_ops/addn.cc | 59 -- mindspore/lite/c_ops/argmax.cc | 75 -- mindspore/lite/c_ops/argmin.cc | 74 -- mindspore/lite/c_ops/arithmetic.cc | 99 -- mindspore/lite/c_ops/arithmetic_self.cc | 34 - mindspore/lite/c_ops/batch_to_space.cc | 114 --- mindspore/lite/c_ops/broadcast_to.cc | 79 -- mindspore/lite/c_ops/cast.cc | 64 -- mindspore/lite/c_ops/concat.cc | 93 -- mindspore/lite/c_ops/crop.cc | 55 -- mindspore/lite/c_ops/depth_to_space.cc | 75 -- mindspore/lite/c_ops/embedding_lookup.cc | 72 -- mindspore/lite/c_ops/expand_dims.cc | 60 -- mindspore/lite/c_ops/fill.cc | 56 -- mindspore/lite/c_ops/flatten.cc | 47 - mindspore/lite/c_ops/gather.cc | 87 -- mindspore/lite/c_ops/gather_nd.cc | 74 -- mindspore/lite/c_ops/lstm.cc | 77 -- mindspore/lite/c_ops/matmul.cc | 77 -- mindspore/lite/c_ops/mean.cc | 94 -- mindspore/lite/c_ops/nchw2nhwc.cc | 41 - mindspore/lite/c_ops/nhwc2nchw.cc | 41 - mindspore/lite/c_ops/one_hot.cc | 79 -- mindspore/lite/c_ops/pad.cc | 76 -- mindspore/lite/c_ops/pooling.cc | 139 --- mindspore/lite/c_ops/power.cc | 62 -- mindspore/lite/c_ops/prior_box.cc | 127 --- mindspore/lite/c_ops/quant_dtype_cast.cc | 49 - mindspore/lite/c_ops/range.cc | 59 -- mindspore/lite/c_ops/rank.cc | 33 - mindspore/lite/c_ops/reduce.cc | 99 -- mindspore/lite/c_ops/reshape.cc | 153 --- mindspore/lite/c_ops/resize.cc | 82 -- mindspore/lite/c_ops/reverse_sequence.cc | 60 -- mindspore/lite/c_ops/roi_pooling.cc | 75 -- mindspore/lite/c_ops/scatter_nd.cc | 61 -- mindspore/lite/c_ops/shape.cc | 52 - mindspore/lite/c_ops/slice.cc | 90 -- mindspore/lite/c_ops/softmax.cc | 43 - mindspore/lite/c_ops/space_to_batch.cc | 110 --- mindspore/lite/c_ops/space_to_depth.cc | 73 -- mindspore/lite/c_ops/split.cc | 83 -- mindspore/lite/c_ops/squeeze.cc | 84 -- mindspore/lite/c_ops/stack.cc 
| 93 -- mindspore/lite/c_ops/strided_slice.cc | 221 ----- mindspore/lite/c_ops/tile.cc | 55 -- mindspore/lite/c_ops/topk.cc | 62 -- mindspore/lite/c_ops/transpose.cc | 69 -- mindspore/lite/c_ops/unique.cc | 52 - mindspore/lite/c_ops/unsqueeze.cc | 80 -- mindspore/lite/c_ops/unstack.cc | 59 -- mindspore/lite/c_ops/where.cc | 93 -- mindspore/lite/include/model.h | 7 +- mindspore/lite/src/CMakeLists.txt | 80 +- mindspore/lite/src/executor.cc | 8 +- mindspore/lite/src/ir/primitive_value.cc | 19 - mindspore/lite/src/ir/primitive_value.h | 47 - mindspore/lite/src/kernel_registry.cc | 4 +- mindspore/lite/src/kernel_registry.h | 2 +- mindspore/lite/src/lite_kernel.h | 15 +- mindspore/lite/src/model.cc | 276 ++++-- mindspore/lite/src/ops/CMakeLists.txt | 4 +- mindspore/lite/{c_ops => src/ops}/abs.h | 15 +- .../lite/{c_ops => src/ops}/activation.cc | 4 +- .../lite/{c_ops => src/ops}/activation.h | 10 +- .../{c_ops => src/ops}/activation_grad.cc | 4 +- .../lite/{c_ops => src/ops}/activation_grad.h | 12 +- mindspore/lite/{c_ops => src/ops}/add.cc | 4 +- mindspore/lite/{c_ops => src/ops}/add.h | 11 +- mindspore/lite/src/ops/addn.cc | 24 +- mindspore/lite/{c_ops => src/ops}/addn.h | 10 +- mindspore/lite/src/ops/argmax.cc | 42 +- mindspore/lite/{c_ops => src/ops}/argmax.h | 11 +- mindspore/lite/src/ops/argmin.cc | 40 +- mindspore/lite/{c_ops => src/ops}/argmin.h | 11 +- mindspore/lite/src/ops/arithmetic.cc | 11 +- .../lite/{c_ops => src/ops}/arithmetic.h | 11 +- mindspore/lite/src/ops/arithmetic_self.cc | 13 +- .../lite/{c_ops => src/ops}/arithmetic_self.h | 11 +- .../lite/{c_ops => src/ops}/batch_norm.cc | 4 +- .../lite/{c_ops => src/ops}/batch_norm.h | 11 +- mindspore/lite/src/ops/batch_to_space.cc | 70 +- .../lite/{c_ops => src/ops}/batch_to_space.h | 11 +- mindspore/lite/{c_ops => src/ops}/bias_add.cc | 4 +- mindspore/lite/{c_ops => src/ops}/bias_add.h | 11 +- .../lite/{c_ops => src/ops}/bias_grad.cc | 4 +- mindspore/lite/{c_ops => src/ops}/bias_grad.h | 11 +- .../lite/{c_ops 
=> src/ops}/bn_grad_input.cc | 4 +- .../lite/{c_ops => src/ops}/bn_grad_input.h | 11 +- mindspore/lite/src/ops/broadcast_to.cc | 39 +- .../lite/{c_ops => src/ops}/broadcast_to.h | 11 +- .../lite/{c_ops => src/ops}/caffe_p_relu.cc | 4 +- .../lite/{c_ops => src/ops}/caffe_p_relu.h | 13 +- mindspore/lite/src/ops/cast.cc | 27 +- mindspore/lite/{c_ops => src/ops}/cast.h | 11 +- mindspore/lite/src/ops/ceil.h | 40 + mindspore/lite/{c_ops => src/ops}/clip.cc | 4 +- mindspore/lite/{c_ops => src/ops}/clip.h | 11 +- mindspore/lite/src/ops/concat.cc | 25 +- mindspore/lite/{c_ops => src/ops}/concat.h | 11 +- ...stant_of_shape.cc => constant_of_shape.cc} | 13 +- mindspore/lite/src/ops/constant_of_shape.h | 41 + mindspore/lite/src/ops/conv.cc | 90 -- mindspore/lite/{c_ops => src/ops}/conv2d.cc | 47 +- mindspore/lite/{c_ops => src/ops}/conv2d.h | 10 +- .../{c_ops => src/ops}/conv2d_grad_filter.cc | 4 +- .../{c_ops => src/ops}/conv2d_grad_filter.h | 11 +- .../{c_ops => src/ops}/conv2d_grad_input.cc | 4 +- .../{c_ops => src/ops}/conv2d_grad_input.h | 11 +- .../lite/src/ops/convolution_depthwise.cc | 85 -- mindspore/lite/{c_ops => src/ops}/cos.h | 14 +- mindspore/lite/src/ops/crop.cc | 31 +- mindspore/lite/{c_ops => src/ops}/crop.h | 11 +- mindspore/lite/{c_ops => src/ops}/deconv2d.cc | 4 +- mindspore/lite/{c_ops => src/ops}/deconv2d.h | 11 +- mindspore/lite/src/ops/deconvolution.cc | 72 -- .../lite/src/ops/deconvolution_depthwise.cc | 74 -- .../{c_ops => src/ops}/dedepthwise_conv2d.cc | 4 +- .../{c_ops => src/ops}/dedepthwise_conv2d.h | 11 +- mindspore/lite/src/ops/depth_to_space.cc | 54 +- .../lite/{c_ops => src/ops}/depth_to_space.h | 11 +- .../{c_ops => src/ops}/depthwise_conv2d.cc | 4 +- .../{c_ops => src/ops}/depthwise_conv2d.h | 11 +- .../ops}/detection_post_process.cc | 4 +- .../ops}/detection_post_process.h | 11 +- mindspore/lite/{c_ops => src/ops}/div.cc | 4 +- mindspore/lite/{c_ops => src/ops}/div.h | 12 +- mindspore/lite/{c_ops => src/ops}/dropout.cc | 4 +- 
mindspore/lite/{c_ops => src/ops}/dropout.h | 11 +- mindspore/lite/{c_ops => src/ops}/eltwise.cc | 4 +- mindspore/lite/{c_ops => src/ops}/eltwise.h | 11 +- mindspore/lite/{c_ops => src/ops}/elu.cc | 4 +- mindspore/lite/{c_ops => src/ops}/elu.h | 11 +- mindspore/lite/src/ops/embedding_lookup.cc | 29 +- .../{c_ops => src/ops}/embedding_lookup.h | 11 +- .../ops}/embedding_lookup_sparse.cc | 4 +- .../ops}/embedding_lookup_sparse.h | 11 +- mindspore/lite/{c_ops => src/ops}/equal.h | 10 +- mindspore/lite/{c_ops => src/ops}/exp.h | 14 +- mindspore/lite/src/ops/expand_dims.cc | 24 +- .../lite/{c_ops => src/ops}/expand_dims.h | 11 +- .../ops}/fake_quant_with_min_max_vars.cc | 4 +- .../ops}/fake_quant_with_min_max_vars.h | 11 +- mindspore/lite/src/ops/fill.cc | 28 +- mindspore/lite/{c_ops => src/ops}/fill.h | 11 +- mindspore/lite/src/ops/flatten.cc | 17 +- mindspore/lite/{c_ops => src/ops}/flatten.h | 15 +- mindspore/lite/{c_ops => src/ops}/floor.h | 14 +- mindspore/lite/{c_ops => src/ops}/floor_div.h | 14 +- mindspore/lite/{c_ops => src/ops}/floor_mod.h | 14 +- .../{c_ops => src/ops}/full_connection.cc | 11 +- .../lite/{c_ops => src/ops}/full_connection.h | 13 +- mindspore/lite/src/ops/fullconnection.cc | 81 -- .../{c_ops => src/ops}/fused_batchnorm.cc | 4 +- .../lite/{c_ops => src/ops}/fused_batchnorm.h | 11 +- mindspore/lite/src/ops/gather.cc | 29 +- mindspore/lite/{c_ops => src/ops}/gather.h | 11 +- mindspore/lite/src/ops/gather_nd.cc | 29 +- mindspore/lite/{c_ops => src/ops}/gather_nd.h | 11 +- mindspore/lite/{c_ops => src/ops}/greater.h | 14 +- .../lite/{c_ops => src/ops}/greater_equal.h | 14 +- mindspore/lite/{c_ops => src/ops}/l2_norm.cc | 4 +- mindspore/lite/{c_ops => src/ops}/l2_norm.h | 11 +- .../lite/{c_ops => src/ops}/leaky_relu.cc | 4 +- .../lite/{c_ops => src/ops}/leaky_relu.h | 11 +- mindspore/lite/{c_ops => src/ops}/less.h | 14 +- .../lite/{c_ops => src/ops}/less_equal.h | 14 +- .../ops}/local_response_normalization.cc | 4 +- 
.../ops}/local_response_normalization.h | 11 +- mindspore/lite/{c_ops => src/ops}/log.h | 14 +- .../lite/{c_ops => src/ops}/logical_and.h | 14 +- .../lite/{c_ops => src/ops}/logical_not.h | 14 +- .../lite/{c_ops => src/ops}/logical_or.h | 14 +- mindspore/lite/{c_ops => src/ops}/lrn.cc | 4 +- mindspore/lite/{c_ops => src/ops}/lrn.h | 11 +- mindspore/lite/src/ops/lstm.cc | 30 +- mindspore/lite/{c_ops => src/ops}/lstm.h | 11 +- mindspore/lite/src/ops/matmul.cc | 32 +- mindspore/lite/{c_ops => src/ops}/matmul.h | 11 +- .../lite/{c_ops => src/ops}/matrix_diag.cc | 4 +- .../lite/{c_ops => src/ops}/matrix_diag.h | 11 +- mindspore/lite/{c_ops => src/ops}/maximum.h | 14 +- mindspore/lite/src/ops/mean.cc | 31 +- mindspore/lite/{c_ops => src/ops}/mean.h | 11 +- mindspore/lite/{c_ops => src/ops}/minimum.h | 14 +- mindspore/lite/{c_ops => src/ops}/mul.cc | 4 +- mindspore/lite/{c_ops => src/ops}/mul.h | 13 +- mindspore/lite/src/ops/nchw2nhwc.cc | 16 +- mindspore/lite/{c_ops => src/ops}/nchw2nhwc.h | 15 +- mindspore/lite/src/ops/nhwc2nchw.cc | 15 +- mindspore/lite/{c_ops => src/ops}/nhwc2nchw.h | 15 +- mindspore/lite/{c_ops => src/ops}/not_equal.h | 14 +- mindspore/lite/src/ops/one_hot.cc | 28 +- mindspore/lite/{c_ops => src/ops}/one_hot.h | 11 +- mindspore/lite/src/ops/ops.cc | 170 ---- mindspore/lite/src/ops/ops.h | 819 ---------------- mindspore/lite/src/ops/pad.cc | 35 +- mindspore/lite/{c_ops => src/ops}/pad.h | 11 +- mindspore/lite/{c_ops => src/ops}/permute.cc | 4 +- mindspore/lite/{c_ops => src/ops}/permute.h | 11 +- mindspore/lite/src/ops/pooling.cc | 86 +- mindspore/lite/{c_ops => src/ops}/pooling.h | 19 +- .../lite/{c_ops => src/ops}/pooling_grad.cc | 4 +- .../lite/{c_ops => src/ops}/pooling_grad.h | 11 +- mindspore/lite/src/ops/power.cc | 34 +- mindspore/lite/{c_ops => src/ops}/power.h | 11 +- .../lite/{c_ops => src/ops}/power_grad.cc | 4 +- .../lite/{c_ops => src/ops}/power_grad.h | 11 +- mindspore/lite/{c_ops => src/ops}/prelu.cc | 4 +- mindspore/lite/{c_ops => 
src/ops}/prelu.h | 15 +- mindspore/lite/src/ops/primitive_c.cc | 273 ++++++ .../lite/{c_ops => src/ops}/primitive_c.h | 49 +- mindspore/lite/src/ops/prior_box.cc | 93 +- mindspore/lite/{c_ops => src/ops}/prior_box.h | 11 +- mindspore/lite/src/ops/quant_dtype_cast.cc | 27 +- .../{c_ops => src/ops}/quant_dtype_cast.h | 11 +- mindspore/lite/src/ops/range.cc | 37 +- mindspore/lite/{c_ops => src/ops}/range.h | 11 +- mindspore/lite/src/ops/rank.cc | 15 +- mindspore/lite/{c_ops => src/ops}/rank.h | 15 +- mindspore/lite/src/ops/reduce.cc | 36 +- mindspore/lite/{c_ops => src/ops}/reduce.h | 11 +- mindspore/lite/src/ops/reshape.cc | 36 +- mindspore/lite/{c_ops => src/ops}/reshape.h | 11 +- mindspore/lite/src/ops/resize.cc | 58 +- mindspore/lite/{c_ops => src/ops}/resize.h | 11 +- mindspore/lite/{c_ops => src/ops}/reverse.cc | 4 +- mindspore/lite/{c_ops => src/ops}/reverse.h | 11 +- mindspore/lite/src/ops/reverse_sequence.cc | 44 +- .../{c_ops => src/ops}/reverse_sequence.h | 11 +- mindspore/lite/src/ops/roi_pooling.cc | 36 +- .../lite/{c_ops => src/ops}/roi_pooling.h | 11 +- mindspore/lite/src/ops/round.h | 40 + mindspore/lite/{c_ops => src/ops}/rsqrt.h | 14 +- mindspore/lite/{c_ops => src/ops}/scale.cc | 4 +- mindspore/lite/{c_ops => src/ops}/scale.h | 11 +- mindspore/lite/src/ops/scatter_nd.cc | 16 +- .../lite/{c_ops => src/ops}/scatter_nd.h | 15 +- mindspore/lite/src/ops/shape.cc | 15 +- mindspore/lite/{c_ops => src/ops}/shape.h | 15 +- mindspore/lite/{c_ops => src/ops}/sin.h | 14 +- mindspore/lite/src/ops/slice.cc | 40 +- mindspore/lite/{c_ops => src/ops}/slice.h | 12 +- mindspore/lite/src/ops/softmax.cc | 24 +- mindspore/lite/{c_ops => src/ops}/softmax.h | 11 +- .../ops}/softmax_cross_entropy.cc | 4 +- .../ops}/softmax_cross_entropy.h | 11 +- mindspore/lite/src/ops/space_to_batch.cc | 78 +- .../lite/{c_ops => src/ops}/space_to_batch.h | 11 +- .../{c_ops => src/ops}/space_to_batch_nd.cc | 4 +- .../{c_ops => src/ops}/space_to_batch_nd.h | 11 +- 
mindspore/lite/src/ops/space_to_depth.cc | 56 +- .../lite/{c_ops => src/ops}/space_to_depth.h | 11 +- .../{c_ops => src/ops}/sparse_to_dense.cc | 4 +- .../lite/{c_ops => src/ops}/sparse_to_dense.h | 11 +- mindspore/lite/src/ops/split.cc | 37 +- mindspore/lite/{c_ops => src/ops}/split.h | 11 +- mindspore/lite/{c_ops => src/ops}/sqrt.h | 14 +- mindspore/lite/{c_ops => src/ops}/square.h | 14 +- .../{c_ops => src/ops}/squared_difference.h | 14 +- mindspore/lite/src/ops/squeeze.cc | 30 +- mindspore/lite/{c_ops => src/ops}/squeeze.h | 11 +- mindspore/lite/src/ops/stack.cc | 38 +- mindspore/lite/{c_ops => src/ops}/stack.h | 11 +- mindspore/lite/src/ops/strided_slice.cc | 101 +- .../lite/{c_ops => src/ops}/strided_slice.h | 11 +- mindspore/lite/{c_ops => src/ops}/sub.cc | 4 +- mindspore/lite/{c_ops => src/ops}/sub.h | 13 +- mindspore/lite/src/ops/tile.cc | 29 +- mindspore/lite/{c_ops => src/ops}/tile.h | 11 +- mindspore/lite/src/ops/topk.cc | 31 +- mindspore/lite/{c_ops => src/ops}/topk.h | 11 +- mindspore/lite/src/ops/transpose.cc | 31 +- mindspore/lite/{c_ops => src/ops}/transpose.h | 11 +- mindspore/lite/src/ops/unique.cc | 26 +- mindspore/lite/{c_ops => src/ops}/unique.h | 11 +- mindspore/lite/src/ops/unsqueeze.cc | 25 +- mindspore/lite/{c_ops => src/ops}/unsqueeze.h | 11 +- mindspore/lite/src/ops/unstack.cc | 30 +- mindspore/lite/{c_ops => src/ops}/unstack.h | 11 +- mindspore/lite/{c_ops => src/ops}/upsample.cc | 12 +- mindspore/lite/{c_ops => src/ops}/upsample.h | 15 +- mindspore/lite/src/ops/where.cc | 31 +- mindspore/lite/{c_ops => src/ops}/where.h | 11 +- .../lite/{c_ops => src/ops}/zeros_like.cc | 12 +- .../lite/{c_ops => src/ops}/zeros_like.h | 15 +- mindspore/lite/src/ops/zeroslike.cc | 40 - mindspore/lite/src/param_value_lite.h | 2 +- mindspore/lite/src/populate_parameter.cc | 889 ++++++++++-------- mindspore/lite/src/populate_parameter.h | 9 +- .../kernel/arm/base/arg_min_max_base.cc | 6 +- .../kernel/arm/base/arg_min_max_base.h | 2 +- 
.../kernel/arm/base/batch_to_space_base.cc | 6 +- .../kernel/arm/base/batch_to_space_base.h | 2 +- .../kernel/arm/base/caffeprelu_base.cc | 3 +- .../runtime/kernel/arm/base/caffeprelu_base.h | 2 +- .../runtime/kernel/arm/base/concat_base.cc | 10 +- .../src/runtime/kernel/arm/base/concat_base.h | 2 +- .../kernel/arm/base/convolution_base.h | 2 +- .../src/runtime/kernel/arm/base/crop_base.cc | 9 +- .../src/runtime/kernel/arm/base/crop_base.h | 2 +- .../kernel/arm/base/depth_to_space_base.cc | 6 +- .../kernel/arm/base/depth_to_space_base.h | 2 +- .../kernel/arm/base/fullconnection_base.cc | 4 +- .../kernel/arm/base/fullconnection_base.h | 2 +- .../runtime/kernel/arm/base/matmul_base.cc | 2 +- .../src/runtime/kernel/arm/base/matmul_base.h | 2 +- .../lite/src/runtime/kernel/arm/base/pad.cc | 6 +- .../runtime/kernel/arm/base/pooling_base.cc | 10 +- .../runtime/kernel/arm/base/pooling_base.h | 2 +- .../src/runtime/kernel/arm/base/prelu_base.cc | 3 +- .../src/runtime/kernel/arm/base/prelu_base.h | 2 +- .../src/runtime/kernel/arm/base/prior_box.cc | 3 +- .../src/runtime/kernel/arm/base/prior_box.h | 2 +- .../kernel/arm/base/quant_dtype_cast.cc | 2 +- .../kernel/arm/base/quant_dtype_cast.h | 2 +- .../runtime/kernel/arm/base/reduce_base.cc | 9 +- .../src/runtime/kernel/arm/base/reduce_base.h | 2 +- .../runtime/kernel/arm/base/reshape_base.cc | 9 +- .../runtime/kernel/arm/base/reshape_base.h | 2 +- .../runtime/kernel/arm/base/resize_base.cc | 6 +- .../src/runtime/kernel/arm/base/resize_base.h | 2 +- .../src/runtime/kernel/arm/base/slice_base.cc | 6 +- .../src/runtime/kernel/arm/base/slice_base.h | 2 +- .../runtime/kernel/arm/base/softmax_base.cc | 6 +- .../runtime/kernel/arm/base/softmax_base.h | 2 +- .../src/runtime/kernel/arm/base/split_base.cc | 9 +- .../src/runtime/kernel/arm/base/split_base.h | 2 +- .../runtime/kernel/arm/base/squeeze_base.cc | 3 +- .../runtime/kernel/arm/base/squeeze_base.h | 2 +- .../runtime/kernel/arm/base/strided_slice.cc | 3 +- 
.../runtime/kernel/arm/base/strided_slice.h | 2 +- .../kernel/arm/fp16/arithmetic_fp16.cc | 3 +- .../runtime/kernel/arm/fp16/arithmetic_fp16.h | 2 +- .../src/runtime/kernel/arm/fp16/cast_fp16.cc | 3 +- .../src/runtime/kernel/arm/fp16/cast_fp16.h | 2 +- .../runtime/kernel/arm/fp16/concat_fp16.cc | 3 +- .../src/runtime/kernel/arm/fp16/concat_fp16.h | 2 +- .../kernel/arm/fp16/convolution_1x1_fp16.h | 2 +- .../kernel/arm/fp16/convolution_3x3_fp16.h | 2 +- .../kernel/arm/fp16/convolution_base_fp16.h | 2 +- .../arm/fp16/convolution_depthwise_fp16.cc | 3 +- .../arm/fp16/convolution_depthwise_fp16.h | 2 +- .../kernel/arm/fp16/convolution_fp16.cc | 3 +- .../kernel/arm/fp16/convolution_fp16.h | 2 +- .../kernel/arm/fp16/convolution_sw_fp16.h | 2 +- .../arm/fp16/convolution_winograd_fp16.h | 2 +- .../arm/fp16/deconvolution_depthwise_fp16.cc | 3 +- .../arm/fp16/deconvolution_depthwise_fp16.h | 2 +- .../kernel/arm/fp16/deconvolution_fp16.cc | 3 +- .../kernel/arm/fp16/deconvolution_fp16.h | 2 +- .../runtime/kernel/arm/fp16/pooling_fp16.cc | 3 +- .../runtime/kernel/arm/fp16/pooling_fp16.h | 2 +- .../runtime/kernel/arm/fp16/reduce_fp16.cc | 6 +- .../src/runtime/kernel/arm/fp16/reduce_fp16.h | 2 +- .../src/runtime/kernel/arm/fp16/split_fp16.cc | 3 +- .../src/runtime/kernel/arm/fp16/split_fp16.h | 2 +- .../runtime/kernel/arm/fp16/transpose_fp16.cc | 3 +- .../runtime/kernel/arm/fp16/transpose_fp16.h | 2 +- .../src/runtime/kernel/arm/fp32/activation.cc | 4 +- .../src/runtime/kernel/arm/fp32/activation.h | 2 +- .../lite/src/runtime/kernel/arm/fp32/addn.cc | 3 +- .../lite/src/runtime/kernel/arm/fp32/addn.h | 2 +- .../src/runtime/kernel/arm/fp32/argminmax.h | 2 +- .../src/runtime/kernel/arm/fp32/arithmetic.cc | 3 +- .../src/runtime/kernel/arm/fp32/arithmetic.h | 2 +- .../kernel/arm/fp32/arithmetic_self.cc | 2 +- .../runtime/kernel/arm/fp32/arithmetic_self.h | 2 +- .../runtime/kernel/arm/fp32/batch_to_space.h | 2 +- .../src/runtime/kernel/arm/fp32/batchnorm.cc | 3 +- 
.../src/runtime/kernel/arm/fp32/batchnorm.h | 2 +- .../lite/src/runtime/kernel/arm/fp32/bias.cc | 2 +- .../lite/src/runtime/kernel/arm/fp32/bias.h | 2 +- .../runtime/kernel/arm/fp32/broadcast_to.cc | 3 +- .../runtime/kernel/arm/fp32/broadcast_to.h | 2 +- .../src/runtime/kernel/arm/fp32/caffeprelu.cc | 3 +- .../src/runtime/kernel/arm/fp32/caffeprelu.h | 2 +- .../lite/src/runtime/kernel/arm/fp32/cast.cc | 5 +- .../lite/src/runtime/kernel/arm/fp32/cast.h | 2 +- .../lite/src/runtime/kernel/arm/fp32/concat.h | 2 +- .../kernel/arm/fp32/constant_of_shape.cc | 2 +- .../kernel/arm/fp32/constant_of_shape.h | 2 +- .../runtime/kernel/arm/fp32/convolution.cc | 3 +- .../src/runtime/kernel/arm/fp32/convolution.h | 2 +- .../runtime/kernel/arm/fp32/convolution_1x1.h | 2 +- .../kernel/arm/fp32/convolution_3x3.cc | 2 +- .../runtime/kernel/arm/fp32/convolution_3x3.h | 2 +- .../kernel/arm/fp32/convolution_depthwise.cc | 3 +- .../kernel/arm/fp32/convolution_depthwise.h | 2 +- .../arm/fp32/convolution_depthwise_3x3.cc | 2 +- .../arm/fp32/convolution_depthwise_3x3.h | 2 +- .../kernel/arm/fp32/convolution_slidewindow.h | 2 +- .../kernel/arm/fp32/convolution_winograd.cc | 18 +- .../kernel/arm/fp32/convolution_winograd.h | 2 +- .../lite/src/runtime/kernel/arm/fp32/crop.h | 2 +- .../runtime/kernel/arm/fp32/deconvolution.cc | 3 +- .../runtime/kernel/arm/fp32/deconvolution.h | 2 +- .../arm/fp32/deconvolution_depthwise.cc | 3 +- .../kernel/arm/fp32/deconvolution_depthwise.h | 2 +- .../runtime/kernel/arm/fp32/depth_to_space.h | 2 +- .../lite/src/runtime/kernel/arm/fp32/elu.cc | 2 +- .../lite/src/runtime/kernel/arm/fp32/elu.h | 2 +- .../kernel/arm/fp32/embedding_lookup.cc | 3 +- .../kernel/arm/fp32/embedding_lookup.h | 2 +- .../src/runtime/kernel/arm/fp32/expandDims.cc | 3 +- .../src/runtime/kernel/arm/fp32/expandDims.h | 2 +- .../lite/src/runtime/kernel/arm/fp32/fill.cc | 3 +- .../lite/src/runtime/kernel/arm/fp32/fill.h | 2 +- .../src/runtime/kernel/arm/fp32/flatten.cc | 3 +- 
.../src/runtime/kernel/arm/fp32/flatten.h | 2 +- .../runtime/kernel/arm/fp32/fullconnection.h | 2 +- .../kernel/arm/fp32/fused_batchnorm.cc | 3 +- .../runtime/kernel/arm/fp32/fused_batchnorm.h | 2 +- .../src/runtime/kernel/arm/fp32/gather.cc | 13 +- .../lite/src/runtime/kernel/arm/fp32/gather.h | 2 +- .../src/runtime/kernel/arm/fp32/gatherNd.cc | 3 +- .../src/runtime/kernel/arm/fp32/gatherNd.h | 2 +- .../kernel/arm/fp32/local_response_norm.cc | 2 +- .../kernel/arm/fp32/local_response_norm.h | 2 +- .../lite/src/runtime/kernel/arm/fp32/lstm.cc | 6 +- .../lite/src/runtime/kernel/arm/fp32/lstm.h | 2 +- .../lite/src/runtime/kernel/arm/fp32/matmul.h | 2 +- .../src/runtime/kernel/arm/fp32/nchw2nhwc.cc | 3 +- .../src/runtime/kernel/arm/fp32/nchw2nhwc.h | 2 +- .../src/runtime/kernel/arm/fp32/nhwc2nchw.cc | 3 +- .../src/runtime/kernel/arm/fp32/nhwc2nchw.h | 2 +- .../src/runtime/kernel/arm/fp32/one_hot.cc | 3 +- .../src/runtime/kernel/arm/fp32/one_hot.h | 2 +- .../lite/src/runtime/kernel/arm/fp32/pad.h | 2 +- .../src/runtime/kernel/arm/fp32/pooling.h | 2 +- .../lite/src/runtime/kernel/arm/fp32/power.cc | 3 +- .../lite/src/runtime/kernel/arm/fp32/power.h | 2 +- .../lite/src/runtime/kernel/arm/fp32/prelu.cc | 3 +- .../lite/src/runtime/kernel/arm/fp32/prelu.h | 2 +- .../lite/src/runtime/kernel/arm/fp32/range.cc | 3 +- .../lite/src/runtime/kernel/arm/fp32/range.h | 2 +- .../lite/src/runtime/kernel/arm/fp32/rank.cc | 3 +- .../lite/src/runtime/kernel/arm/fp32/rank.h | 2 +- .../lite/src/runtime/kernel/arm/fp32/reduce.h | 2 +- .../src/runtime/kernel/arm/fp32/reshape.h | 2 +- .../lite/src/runtime/kernel/arm/fp32/resize.h | 2 +- .../src/runtime/kernel/arm/fp32/reverse.cc | 3 +- .../src/runtime/kernel/arm/fp32/reverse.h | 2 +- .../kernel/arm/fp32/reverse_sequence.cc | 3 +- .../kernel/arm/fp32/reverse_sequence.h | 2 +- .../runtime/kernel/arm/fp32/roi_pooling.cc | 3 +- .../src/runtime/kernel/arm/fp32/roi_pooling.h | 2 +- .../lite/src/runtime/kernel/arm/fp32/scale.cc | 3 +- 
.../lite/src/runtime/kernel/arm/fp32/scale.h | 2 +- .../src/runtime/kernel/arm/fp32/scatter_nd.cc | 3 +- .../src/runtime/kernel/arm/fp32/scatter_nd.h | 2 +- .../lite/src/runtime/kernel/arm/fp32/shape.cc | 3 +- .../lite/src/runtime/kernel/arm/fp32/shape.h | 2 +- .../lite/src/runtime/kernel/arm/fp32/slice.h | 2 +- .../src/runtime/kernel/arm/fp32/softmax.h | 2 +- .../runtime/kernel/arm/fp32/space_to_batch.cc | 3 +- .../runtime/kernel/arm/fp32/space_to_batch.h | 2 +- .../runtime/kernel/arm/fp32/space_to_depth.cc | 3 +- .../runtime/kernel/arm/fp32/space_to_depth.h | 2 +- .../kernel/arm/fp32/sparse_to_dense.cc | 3 +- .../runtime/kernel/arm/fp32/sparse_to_dense.h | 2 +- .../lite/src/runtime/kernel/arm/fp32/split.h | 2 +- .../src/runtime/kernel/arm/fp32/squeeze.cc | 7 +- .../src/runtime/kernel/arm/fp32/squeeze.h | 2 +- .../lite/src/runtime/kernel/arm/fp32/stack.cc | 3 +- .../lite/src/runtime/kernel/arm/fp32/stack.h | 2 +- .../lite/src/runtime/kernel/arm/fp32/tile.cc | 2 +- .../lite/src/runtime/kernel/arm/fp32/tile.h | 2 +- .../lite/src/runtime/kernel/arm/fp32/topk.cc | 2 +- .../lite/src/runtime/kernel/arm/fp32/topk.h | 2 +- .../src/runtime/kernel/arm/fp32/transpose.cc | 3 +- .../src/runtime/kernel/arm/fp32/transpose.h | 4 +- .../src/runtime/kernel/arm/fp32/unique.cc | 2 +- .../lite/src/runtime/kernel/arm/fp32/unique.h | 2 +- .../src/runtime/kernel/arm/fp32/unsqueeze.cc | 7 +- .../src/runtime/kernel/arm/fp32/unsqueeze.h | 2 +- .../src/runtime/kernel/arm/fp32/unstack.cc | 2 +- .../src/runtime/kernel/arm/fp32/unstack.h | 2 +- .../lite/src/runtime/kernel/arm/fp32/where.cc | 3 +- .../lite/src/runtime/kernel/arm/fp32/where.h | 2 +- .../src/runtime/kernel/arm/fp32/zeroslike.cc | 3 +- .../src/runtime/kernel/arm/fp32/zeroslike.h | 2 +- .../kernel/arm/fp32_grad/activation_grad.cc | 2 +- .../kernel/arm/fp32_grad/activation_grad.h | 2 +- .../kernel/arm/fp32_grad/arithmetic_grad.cc | 2 +- .../kernel/arm/fp32_grad/arithmetic_grad.h | 2 +- .../runtime/kernel/arm/fp32_grad/bias_grad.cc | 
3 +- .../runtime/kernel/arm/fp32_grad/bias_grad.h | 2 +- .../runtime/kernel/arm/fp32_grad/bn_grad.cc | 3 +- .../runtime/kernel/arm/fp32_grad/bn_grad.h | 2 +- .../arm/fp32_grad/convolution_grad_filter.cc | 2 +- .../arm/fp32_grad/convolution_grad_filter.h | 2 +- .../arm/fp32_grad/convolution_grad_input.cc | 3 +- .../arm/fp32_grad/convolution_grad_input.h | 2 +- .../kernel/arm/fp32_grad/opt_momentum.cc | 3 +- .../kernel/arm/fp32_grad/opt_momentum.h | 2 +- .../kernel/arm/fp32_grad/pooling_grad.cc | 3 +- .../kernel/arm/fp32_grad/pooling_grad.h | 2 +- .../kernel/arm/fp32_grad/power_grad.cc | 3 +- .../runtime/kernel/arm/fp32_grad/power_grad.h | 2 +- ...parse_softmax_cross_entropy_with_logits.cc | 2 +- ...sparse_softmax_cross_entropy_with_logits.h | 3 +- .../src/runtime/kernel/arm/int8/activation.cc | 3 +- .../src/runtime/kernel/arm/int8/add_int8.cc | 2 +- .../src/runtime/kernel/arm/int8/add_int8.h | 2 +- .../runtime/kernel/arm/int8/argminmax_int8.h | 2 +- .../kernel/arm/int8/arithmetic_int8.cc | 3 +- .../runtime/kernel/arm/int8/arithmetic_int8.h | 2 +- .../kernel/arm/int8/arithmetic_self_int8.cc | 2 +- .../kernel/arm/int8/arithmetic_self_int8.h | 2 +- .../kernel/arm/int8/batch_to_space_int8.h | 2 +- .../runtime/kernel/arm/int8/bias_add_int8.cc | 2 +- .../runtime/kernel/arm/int8/bias_add_int8.h | 2 +- .../runtime/kernel/arm/int8/concat_int8.cc | 5 +- .../src/runtime/kernel/arm/int8/concat_int8.h | 2 +- .../kernel/arm/int8/convolution_3x3_int8.h | 2 +- .../arm/int8/convolution_depthwise_int8.cc | 5 +- .../arm/int8/convolution_depthwise_int8.h | 2 +- .../kernel/arm/int8/convolution_int8.cc | 11 +- .../kernel/arm/int8/convolution_int8.h | 2 +- .../src/runtime/kernel/arm/int8/crop_int8.h | 2 +- .../arm/int8/deconvolution_depthwise_int8.cc | 3 +- .../arm/int8/deconvolution_depthwise_int8.h | 2 +- .../kernel/arm/int8/deconvolution_int8.cc | 3 +- .../kernel/arm/int8/deconvolution_int8.h | 2 +- .../kernel/arm/int8/depth_to_space_int8.h | 2 +- 
.../src/runtime/kernel/arm/int8/div_int8.cc | 2 +- .../src/runtime/kernel/arm/int8/div_int8.h | 2 +- .../kernel/arm/int8/fullconnection_int8.h | 2 +- .../src/runtime/kernel/arm/int8/hswish_int8.h | 2 +- .../src/runtime/kernel/arm/int8/matmul_int8.h | 2 +- .../src/runtime/kernel/arm/int8/mul_int8.cc | 2 +- .../src/runtime/kernel/arm/int8/mul_int8.h | 2 +- .../src/runtime/kernel/arm/int8/pad_int8.h | 2 +- .../runtime/kernel/arm/int8/pooling_int8.h | 2 +- .../src/runtime/kernel/arm/int8/prelu_int8.h | 2 +- .../src/runtime/kernel/arm/int8/reduce_int8.h | 2 +- .../src/runtime/kernel/arm/int8/relux_int8.h | 6 +- .../runtime/kernel/arm/int8/reshape_int8.h | 2 +- .../src/runtime/kernel/arm/int8/resize_int8.h | 2 +- .../runtime/kernel/arm/int8/sigmoid_int8.h | 2 +- .../src/runtime/kernel/arm/int8/slice_int8.h | 2 +- .../runtime/kernel/arm/int8/softmax_int8.h | 2 +- .../src/runtime/kernel/arm/int8/split_int8.h | 2 +- .../runtime/kernel/arm/int8/squeeze_int8.h | 2 +- .../src/runtime/kernel/arm/int8/sub_int8.cc | 2 +- .../src/runtime/kernel/arm/int8/sub_int8.h | 2 +- .../src/runtime/kernel/arm/int8/topk_int8.cc | 2 +- .../src/runtime/kernel/arm/int8/topk_int8.h | 2 +- .../runtime/kernel/arm/int8/unsqueeze_int8.cc | 3 +- .../runtime/kernel/arm/int8/unsqueeze_int8.h | 2 +- .../kernel/opencl/kernel/activation.cc | 5 +- .../kernel/opencl/kernel/arithmetic.cc | 3 +- .../runtime/kernel/opencl/kernel/batchnorm.cc | 3 +- .../kernel/opencl/kernel/caffe_prelu.cc | 3 +- .../runtime/kernel/opencl/kernel/concat.cc | 3 +- .../kernel/opencl/kernel/conv2d_transpose.cc | 2 +- .../kernel/opencl/kernel/convolution.cc | 3 +- .../kernel/opencl/kernel/depthwise_conv2d.cc | 2 +- .../runtime/kernel/opencl/kernel/matmul.cc | 3 +- .../runtime/kernel/opencl/kernel/pooling2d.cc | 3 +- .../runtime/kernel/opencl/kernel/reshape.cc | 3 +- .../runtime/kernel/opencl/kernel/softmax.cc | 3 +- .../runtime/kernel/opencl/kernel/to_format.cc | 3 +- .../runtime/kernel/opencl/kernel/transpose.cc | 3 +- 
mindspore/lite/src/scheduler.cc | 6 +- mindspore/lite/src/scheduler.h | 4 +- .../fp32/convolution_depthwise_fp32_tests.cc | 1 - .../arm/fp32/deconvolution_fp32_tests.cc | 1 - .../kernel/arm/fp32/lstm_fp32_tests.cc | 1 - .../lite/tools/anf_importer/anf_importer.cc | 8 - .../anf_importer/import_from_meta_graphT.cc | 2 +- .../fusion/constant_folding_fusion.cc | 19 +- 574 files changed, 3771 insertions(+), 7812 deletions(-) delete mode 100644 mindspore/lite/c_ops/CMakeLists.txt delete mode 100644 mindspore/lite/c_ops/addn.cc delete mode 100644 mindspore/lite/c_ops/argmax.cc delete mode 100644 mindspore/lite/c_ops/argmin.cc delete mode 100644 mindspore/lite/c_ops/arithmetic.cc delete mode 100644 mindspore/lite/c_ops/arithmetic_self.cc delete mode 100644 mindspore/lite/c_ops/batch_to_space.cc delete mode 100644 mindspore/lite/c_ops/broadcast_to.cc delete mode 100644 mindspore/lite/c_ops/cast.cc delete mode 100644 mindspore/lite/c_ops/concat.cc delete mode 100644 mindspore/lite/c_ops/crop.cc delete mode 100644 mindspore/lite/c_ops/depth_to_space.cc delete mode 100644 mindspore/lite/c_ops/embedding_lookup.cc delete mode 100644 mindspore/lite/c_ops/expand_dims.cc delete mode 100644 mindspore/lite/c_ops/fill.cc delete mode 100644 mindspore/lite/c_ops/flatten.cc delete mode 100644 mindspore/lite/c_ops/gather.cc delete mode 100644 mindspore/lite/c_ops/gather_nd.cc delete mode 100644 mindspore/lite/c_ops/lstm.cc delete mode 100644 mindspore/lite/c_ops/matmul.cc delete mode 100644 mindspore/lite/c_ops/mean.cc delete mode 100644 mindspore/lite/c_ops/nchw2nhwc.cc delete mode 100644 mindspore/lite/c_ops/nhwc2nchw.cc delete mode 100644 mindspore/lite/c_ops/one_hot.cc delete mode 100644 mindspore/lite/c_ops/pad.cc delete mode 100644 mindspore/lite/c_ops/pooling.cc delete mode 100644 mindspore/lite/c_ops/power.cc delete mode 100644 mindspore/lite/c_ops/prior_box.cc delete mode 100644 mindspore/lite/c_ops/quant_dtype_cast.cc delete mode 100644 mindspore/lite/c_ops/range.cc delete mode 
100644 mindspore/lite/c_ops/rank.cc delete mode 100644 mindspore/lite/c_ops/reduce.cc delete mode 100644 mindspore/lite/c_ops/reshape.cc delete mode 100644 mindspore/lite/c_ops/resize.cc delete mode 100644 mindspore/lite/c_ops/reverse_sequence.cc delete mode 100644 mindspore/lite/c_ops/roi_pooling.cc delete mode 100644 mindspore/lite/c_ops/scatter_nd.cc delete mode 100644 mindspore/lite/c_ops/shape.cc delete mode 100644 mindspore/lite/c_ops/slice.cc delete mode 100644 mindspore/lite/c_ops/softmax.cc delete mode 100644 mindspore/lite/c_ops/space_to_batch.cc delete mode 100644 mindspore/lite/c_ops/space_to_depth.cc delete mode 100644 mindspore/lite/c_ops/split.cc delete mode 100644 mindspore/lite/c_ops/squeeze.cc delete mode 100644 mindspore/lite/c_ops/stack.cc delete mode 100644 mindspore/lite/c_ops/strided_slice.cc delete mode 100644 mindspore/lite/c_ops/tile.cc delete mode 100644 mindspore/lite/c_ops/topk.cc delete mode 100644 mindspore/lite/c_ops/transpose.cc delete mode 100644 mindspore/lite/c_ops/unique.cc delete mode 100644 mindspore/lite/c_ops/unsqueeze.cc delete mode 100644 mindspore/lite/c_ops/unstack.cc delete mode 100644 mindspore/lite/c_ops/where.cc delete mode 100644 mindspore/lite/src/ir/primitive_value.cc delete mode 100644 mindspore/lite/src/ir/primitive_value.h rename mindspore/lite/{c_ops => src/ops}/abs.h (72%) rename mindspore/lite/{c_ops => src/ops}/activation.cc (95%) rename mindspore/lite/{c_ops => src/ops}/activation.h (82%) rename mindspore/lite/{c_ops => src/ops}/activation_grad.cc (93%) rename mindspore/lite/{c_ops => src/ops}/activation_grad.h (81%) rename mindspore/lite/{c_ops => src/ops}/add.cc (94%) rename mindspore/lite/{c_ops => src/ops}/add.h (83%) rename mindspore/lite/{c_ops => src/ops}/addn.h (83%) rename mindspore/lite/{c_ops => src/ops}/argmax.h (86%) rename mindspore/lite/{c_ops => src/ops}/argmin.h (86%) rename mindspore/lite/{c_ops => src/ops}/arithmetic.h (86%) rename mindspore/lite/{c_ops => src/ops}/arithmetic_self.h 
(82%) rename mindspore/lite/{c_ops => src/ops}/batch_norm.cc (93%) rename mindspore/lite/{c_ops => src/ops}/batch_norm.h (82%) rename mindspore/lite/{c_ops => src/ops}/batch_to_space.h (84%) rename mindspore/lite/{c_ops => src/ops}/bias_add.cc (94%) rename mindspore/lite/{c_ops => src/ops}/bias_add.h (82%) rename mindspore/lite/{c_ops => src/ops}/bias_grad.cc (94%) rename mindspore/lite/{c_ops => src/ops}/bias_grad.h (82%) rename mindspore/lite/{c_ops => src/ops}/bn_grad_input.cc (94%) rename mindspore/lite/{c_ops => src/ops}/bn_grad_input.h (82%) rename mindspore/lite/{c_ops => src/ops}/broadcast_to.h (83%) rename mindspore/lite/{c_ops => src/ops}/caffe_p_relu.cc (93%) rename mindspore/lite/{c_ops => src/ops}/caffe_p_relu.h (80%) rename mindspore/lite/{c_ops => src/ops}/cast.h (84%) create mode 100644 mindspore/lite/src/ops/ceil.h rename mindspore/lite/{c_ops => src/ops}/clip.cc (95%) rename mindspore/lite/{c_ops => src/ops}/clip.h (83%) rename mindspore/lite/{c_ops => src/ops}/concat.h (84%) rename mindspore/lite/src/ops/{canstant_of_shape.cc => constant_of_shape.cc} (81%) create mode 100644 mindspore/lite/src/ops/constant_of_shape.h delete mode 100644 mindspore/lite/src/ops/conv.cc rename mindspore/lite/{c_ops => src/ops}/conv2d.cc (90%) rename mindspore/lite/{c_ops => src/ops}/conv2d.h (91%) rename mindspore/lite/{c_ops => src/ops}/conv2d_grad_filter.cc (98%) rename mindspore/lite/{c_ops => src/ops}/conv2d_grad_filter.h (89%) rename mindspore/lite/{c_ops => src/ops}/conv2d_grad_input.cc (98%) rename mindspore/lite/{c_ops => src/ops}/conv2d_grad_input.h (89%) delete mode 100644 mindspore/lite/src/ops/convolution_depthwise.cc rename mindspore/lite/{c_ops => src/ops}/cos.h (72%) rename mindspore/lite/{c_ops => src/ops}/crop.h (84%) rename mindspore/lite/{c_ops => src/ops}/deconv2d.cc (99%) rename mindspore/lite/{c_ops => src/ops}/deconv2d.h (91%) delete mode 100644 mindspore/lite/src/ops/deconvolution.cc delete mode 100644 
mindspore/lite/src/ops/deconvolution_depthwise.cc rename mindspore/lite/{c_ops => src/ops}/dedepthwise_conv2d.cc (99%) rename mindspore/lite/{c_ops => src/ops}/dedepthwise_conv2d.h (90%) rename mindspore/lite/{c_ops => src/ops}/depth_to_space.h (84%) rename mindspore/lite/{c_ops => src/ops}/depthwise_conv2d.cc (99%) rename mindspore/lite/{c_ops => src/ops}/depthwise_conv2d.h (90%) rename mindspore/lite/{c_ops => src/ops}/detection_post_process.cc (98%) rename mindspore/lite/{c_ops => src/ops}/detection_post_process.h (88%) rename mindspore/lite/{c_ops => src/ops}/div.cc (94%) rename mindspore/lite/{c_ops => src/ops}/div.h (77%) rename mindspore/lite/{c_ops => src/ops}/dropout.cc (93%) rename mindspore/lite/{c_ops => src/ops}/dropout.h (82%) rename mindspore/lite/{c_ops => src/ops}/eltwise.cc (93%) rename mindspore/lite/{c_ops => src/ops}/eltwise.h (82%) rename mindspore/lite/{c_ops => src/ops}/elu.cc (93%) rename mindspore/lite/{c_ops => src/ops}/elu.h (82%) rename mindspore/lite/{c_ops => src/ops}/embedding_lookup.h (83%) rename mindspore/lite/{c_ops => src/ops}/embedding_lookup_sparse.cc (96%) rename mindspore/lite/{c_ops => src/ops}/embedding_lookup_sparse.h (83%) rename mindspore/lite/{c_ops => src/ops}/equal.h (78%) rename mindspore/lite/{c_ops => src/ops}/exp.h (72%) rename mindspore/lite/{c_ops => src/ops}/expand_dims.h (83%) rename mindspore/lite/{c_ops => src/ops}/fake_quant_with_min_max_vars.cc (94%) rename mindspore/lite/{c_ops => src/ops}/fake_quant_with_min_max_vars.h (82%) rename mindspore/lite/{c_ops => src/ops}/fill.h (84%) rename mindspore/lite/{c_ops => src/ops}/flatten.h (74%) rename mindspore/lite/{c_ops => src/ops}/floor.h (72%) rename mindspore/lite/{c_ops => src/ops}/floor_div.h (73%) rename mindspore/lite/{c_ops => src/ops}/floor_mod.h (73%) rename mindspore/lite/{c_ops => src/ops}/full_connection.cc (85%) rename mindspore/lite/{c_ops => src/ops}/full_connection.h (84%) delete mode 100644 mindspore/lite/src/ops/fullconnection.cc rename 
mindspore/lite/{c_ops => src/ops}/fused_batchnorm.cc (96%) rename mindspore/lite/{c_ops => src/ops}/fused_batchnorm.h (83%) rename mindspore/lite/{c_ops => src/ops}/gather.h (84%) rename mindspore/lite/{c_ops => src/ops}/gather_nd.h (83%) rename mindspore/lite/{c_ops => src/ops}/greater.h (73%) rename mindspore/lite/{c_ops => src/ops}/greater_equal.h (73%) rename mindspore/lite/{c_ops => src/ops}/l2_norm.cc (95%) rename mindspore/lite/{c_ops => src/ops}/l2_norm.h (83%) rename mindspore/lite/{c_ops => src/ops}/leaky_relu.cc (94%) rename mindspore/lite/{c_ops => src/ops}/leaky_relu.h (82%) rename mindspore/lite/{c_ops => src/ops}/less.h (73%) rename mindspore/lite/{c_ops => src/ops}/less_equal.h (73%) rename mindspore/lite/{c_ops => src/ops}/local_response_normalization.cc (96%) rename mindspore/lite/{c_ops => src/ops}/local_response_normalization.h (83%) rename mindspore/lite/{c_ops => src/ops}/log.h (72%) rename mindspore/lite/{c_ops => src/ops}/logical_and.h (73%) rename mindspore/lite/{c_ops => src/ops}/logical_not.h (72%) rename mindspore/lite/{c_ops => src/ops}/logical_or.h (73%) rename mindspore/lite/{c_ops => src/ops}/lrn.cc (96%) rename mindspore/lite/{c_ops => src/ops}/lrn.h (84%) rename mindspore/lite/{c_ops => src/ops}/lstm.h (84%) rename mindspore/lite/{c_ops => src/ops}/matmul.h (84%) rename mindspore/lite/{c_ops => src/ops}/matrix_diag.cc (96%) rename mindspore/lite/{c_ops => src/ops}/matrix_diag.h (84%) rename mindspore/lite/{c_ops => src/ops}/maximum.h (73%) rename mindspore/lite/{c_ops => src/ops}/mean.h (84%) rename mindspore/lite/{c_ops => src/ops}/minimum.h (73%) rename mindspore/lite/{c_ops => src/ops}/mul.cc (94%) rename mindspore/lite/{c_ops => src/ops}/mul.h (81%) rename mindspore/lite/{c_ops => src/ops}/nchw2nhwc.h (74%) rename mindspore/lite/{c_ops => src/ops}/nhwc2nchw.h (74%) rename mindspore/lite/{c_ops => src/ops}/not_equal.h (73%) rename mindspore/lite/{c_ops => src/ops}/one_hot.h (83%) delete mode 100644 mindspore/lite/src/ops/ops.cc 
delete mode 100644 mindspore/lite/src/ops/ops.h rename mindspore/lite/{c_ops => src/ops}/pad.h (85%) rename mindspore/lite/{c_ops => src/ops}/permute.cc (94%) rename mindspore/lite/{c_ops => src/ops}/permute.h (82%) rename mindspore/lite/{c_ops => src/ops}/pooling.h (82%) rename mindspore/lite/{c_ops => src/ops}/pooling_grad.cc (98%) rename mindspore/lite/{c_ops => src/ops}/pooling_grad.h (88%) rename mindspore/lite/{c_ops => src/ops}/power.h (84%) rename mindspore/lite/{c_ops => src/ops}/power_grad.cc (95%) rename mindspore/lite/{c_ops => src/ops}/power_grad.h (83%) rename mindspore/lite/{c_ops => src/ops}/prelu.cc (94%) rename mindspore/lite/{c_ops => src/ops}/prelu.h (81%) create mode 100644 mindspore/lite/src/ops/primitive_c.cc rename mindspore/lite/{c_ops => src/ops}/primitive_c.h (53%) rename mindspore/lite/{c_ops => src/ops}/prior_box.h (89%) rename mindspore/lite/{c_ops => src/ops}/quant_dtype_cast.h (83%) rename mindspore/lite/{c_ops => src/ops}/range.h (85%) rename mindspore/lite/{c_ops => src/ops}/rank.h (74%) rename mindspore/lite/{c_ops => src/ops}/reduce.h (85%) rename mindspore/lite/{c_ops => src/ops}/reshape.h (85%) rename mindspore/lite/{c_ops => src/ops}/resize.h (86%) rename mindspore/lite/{c_ops => src/ops}/reverse.cc (94%) rename mindspore/lite/{c_ops => src/ops}/reverse.h (82%) rename mindspore/lite/{c_ops => src/ops}/reverse_sequence.h (84%) rename mindspore/lite/{c_ops => src/ops}/roi_pooling.h (84%) create mode 100644 mindspore/lite/src/ops/round.h rename mindspore/lite/{c_ops => src/ops}/rsqrt.h (72%) rename mindspore/lite/{c_ops => src/ops}/scale.cc (93%) rename mindspore/lite/{c_ops => src/ops}/scale.h (82%) rename mindspore/lite/{c_ops => src/ops}/scatter_nd.h (74%) rename mindspore/lite/{c_ops => src/ops}/shape.h (74%) rename mindspore/lite/{c_ops => src/ops}/sin.h (72%) rename mindspore/lite/{c_ops => src/ops}/slice.h (85%) rename mindspore/lite/{c_ops => src/ops}/softmax.h (83%) rename mindspore/lite/{c_ops => 
src/ops}/softmax_cross_entropy.cc (93%) rename mindspore/lite/{c_ops => src/ops}/softmax_cross_entropy.h (81%) rename mindspore/lite/{c_ops => src/ops}/space_to_batch.h (87%) rename mindspore/lite/{c_ops => src/ops}/space_to_batch_nd.cc (95%) rename mindspore/lite/{c_ops => src/ops}/space_to_batch_nd.h (83%) rename mindspore/lite/{c_ops => src/ops}/space_to_depth.h (84%) rename mindspore/lite/{c_ops => src/ops}/sparse_to_dense.cc (97%) rename mindspore/lite/{c_ops => src/ops}/sparse_to_dense.h (85%) rename mindspore/lite/{c_ops => src/ops}/split.h (85%) rename mindspore/lite/{c_ops => src/ops}/sqrt.h (72%) rename mindspore/lite/{c_ops => src/ops}/square.h (72%) rename mindspore/lite/{c_ops => src/ops}/squared_difference.h (73%) rename mindspore/lite/{c_ops => src/ops}/squeeze.h (83%) rename mindspore/lite/{c_ops => src/ops}/stack.h (85%) rename mindspore/lite/{c_ops => src/ops}/strided_slice.h (91%) rename mindspore/lite/{c_ops => src/ops}/sub.cc (94%) rename mindspore/lite/{c_ops => src/ops}/sub.h (81%) rename mindspore/lite/{c_ops => src/ops}/tile.h (84%) rename mindspore/lite/{c_ops => src/ops}/topk.h (84%) rename mindspore/lite/{c_ops => src/ops}/transpose.h (84%) rename mindspore/lite/{c_ops => src/ops}/unique.h (83%) rename mindspore/lite/{c_ops => src/ops}/unsqueeze.h (83%) rename mindspore/lite/{c_ops => src/ops}/unstack.h (84%) rename mindspore/lite/{c_ops => src/ops}/upsample.cc (74%) rename mindspore/lite/{c_ops => src/ops}/upsample.h (79%) rename mindspore/lite/{c_ops => src/ops}/where.h (84%) rename mindspore/lite/{c_ops => src/ops}/zeros_like.cc (83%) rename mindspore/lite/{c_ops => src/ops}/zeros_like.h (74%) delete mode 100644 mindspore/lite/src/ops/zeroslike.cc diff --git a/mindspore/lite/c_ops/CMakeLists.txt b/mindspore/lite/c_ops/CMakeLists.txt deleted file mode 100644 index 06ad3db3f3..0000000000 --- a/mindspore/lite/c_ops/CMakeLists.txt +++ /dev/null @@ -1,3 +0,0 @@ -file(GLOB_RECURSE C_OPS_SRC ${CMAKE_CURRENT_SOURCE_DIR}/*.cc) - 
-add_library(c_ops_mid OBJECT ${C_OPS_SRC}) \ No newline at end of file diff --git a/mindspore/lite/c_ops/addn.cc b/mindspore/lite/c_ops/addn.cc deleted file mode 100644 index b868ed8aaf..0000000000 --- a/mindspore/lite/c_ops/addn.cc +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/addn.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -int AddN::GetN() const { return this->primitive->value.AsAddN()->N; } - -void AddN::SetN(int n) { this->primitive->value.AsAddN()->N = n; } - -#else - -int AddN::GetN() const { return this->primitive->value_as_AddN()->N(); } - -void AddN::SetN(int n) {} -#endif -namespace { -constexpr int kLeastInputNum = 2; -} -int AddN::InferShape(std::vector inputs, std::vector outputs) { - MS_ASSERT(this->primitive != nullptr); - auto input = inputs.front(); - MS_ASSERT(input != nullptr); - auto output = outputs.front(); - MS_ASSERT(output != nullptr); - if (inputs.size() < kLeastInputNum) { - MS_LOG(ERROR) << "input size" << inputs.size() << " is error!"; - return 1; - } - for (int i = 1; i < inputs.size(); ++i) { - if (inputs.at(i)->shape() != inputs.at(0)->shape()) { - MS_LOG(ERROR) << "AddN inputs shape is not equal!"; - return 1; - } - if (inputs.at(i)->data_type() != inputs.at(0)->data_type()) { - MS_LOG(ERROR) << "AddN all input data type should be the same!"; - return 1; - } - } - 
output->SetFormat(input->GetFormat()); - output->set_shape(input->shape()); - output->set_data_type(input->data_type()); - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/argmax.cc b/mindspore/lite/c_ops/argmax.cc deleted file mode 100644 index fd9e24a871..0000000000 --- a/mindspore/lite/c_ops/argmax.cc +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/argmax.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -int ArgMax::GetAxis() const { return this->primitive->value.AsArgMax()->axis; } -bool ArgMax::GetOutMaxValue() const { return this->primitive->value.AsArgMax()->outMaxValue; } -int ArgMax::GetTopK() const { return this->primitive->value.AsArgMax()->topK; } -bool ArgMax::GetKeepDims() const { return this->primitive->value.AsArgMax()->keepDims; } -int ArgMax::GetAxisType() const { return this->primitive->value.AsArgMax()->axisType; } - -void ArgMax::SetAxis(int axis) { this->primitive->value.AsArgMax()->axis = axis; } -void ArgMax::SetOutMaxValue(bool out_max_value) { this->primitive->value.AsArgMax()->outMaxValue = out_max_value; } -void ArgMax::SetTopK(int top_k) { this->primitive->value.AsArgMax()->topK = top_k; } -void ArgMax::SetKeepDims(bool keep_dims) { this->primitive->value.AsArgMax()->keepDims = keep_dims; } -void ArgMax::SetAxisType(int axis_type) { this->primitive->value.AsArgMax()->axisType = 
axis_type; } - -#else - -int ArgMax::GetAxis() const { return this->primitive->value_as_ArgMax()->axis(); } -bool ArgMax::GetOutMaxValue() const { return this->primitive->value_as_ArgMax()->outMaxValue(); } -int ArgMax::GetTopK() const { return this->primitive->value_as_ArgMax()->topK(); } -bool ArgMax::GetKeepDims() const { return this->primitive->value_as_ArgMax()->keepDims(); } -int ArgMax::GetAxisType() const { return this->primitive->value_as_ArgMax()->axisType(); } - -void ArgMax::SetAxis(int axis) {} -void ArgMax::SetOutMaxValue(bool out_max_value) {} -void ArgMax::SetTopK(int top_k) {} -void ArgMax::SetKeepDims(bool keep_dims) {} -void ArgMax::SetAxisType(int axis_type) {} -#endif -int ArgMax::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - if (inputs_.size() != kSingleNum || outputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "tensor number is error."; - } - - std::vector output_shape(input->shape()); - auto input_shape_size = input->shape().size(); - int axis = GetAxis() < 0 ? 
GetAxis() + input_shape_size : GetAxis(); - if (axis >= input_shape_size || axis < 0) { - MS_LOG(ERROR) << "Invalid axis " << GetAxis() << ", input shape size: " << input_shape_size; - return 1; - } - if (GetTopK() == 1 && !GetKeepDims()) { - output_shape.erase(output_shape.begin() + axis); - } else { - output_shape[axis] = GetTopK(); - } - - output->SetFormat(input->GetFormat()); - output->set_shape(output_shape); - output->set_data_type(input->data_type()); - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/argmin.cc b/mindspore/lite/c_ops/argmin.cc deleted file mode 100644 index 6a2274af4f..0000000000 --- a/mindspore/lite/c_ops/argmin.cc +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/argmin.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -int ArgMin::GetAxis() const { return this->primitive->value.AsArgMin()->axis; } -bool ArgMin::GetOutMaxValue() const { return this->primitive->value.AsArgMin()->outMaxValue; } -int ArgMin::GetTopK() const { return this->primitive->value.AsArgMin()->topK; } -bool ArgMin::GetKeepDims() const { return this->primitive->value.AsArgMin()->keepDims; } -int ArgMin::GetAxisType() const { return this->primitive->value.AsArgMin()->axisType; } - -void ArgMin::SetAxis(int axis) { this->primitive->value.AsArgMin()->axis = axis; } -void ArgMin::SetOutMaxValue(bool out_max_value) { this->primitive->value.AsArgMin()->outMaxValue = out_max_value; } -void ArgMin::SetTopK(int top_k) { this->primitive->value.AsArgMin()->topK = top_k; } -void ArgMin::SetKeepDims(bool keep_dims) { this->primitive->value.AsArgMin()->keepDims = keep_dims; } -void ArgMin::SetAxisType(int axis_type) { this->primitive->value.AsArgMin()->axisType = axis_type; } - -#else - -int ArgMin::GetAxis() const { return this->primitive->value_as_ArgMin()->axis(); } -bool ArgMin::GetOutMaxValue() const { return this->primitive->value_as_ArgMin()->outMaxValue(); } -int ArgMin::GetTopK() const { return this->primitive->value_as_ArgMin()->topK(); } -bool ArgMin::GetKeepDims() const { return this->primitive->value_as_ArgMin()->keepDims(); } -int ArgMin::GetAxisType() const { return this->primitive->value_as_ArgMin()->axisType(); } - -void ArgMin::SetAxis(int axis) {} -void ArgMin::SetOutMaxValue(bool out_max_value) {} -void ArgMin::SetTopK(int top_k) {} -void ArgMin::SetKeepDims(bool keep_dims) {} -void ArgMin::SetAxisType(int axis_type) {} -#endif -int ArgMin::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - if (inputs_.size() != kSingleNum || outputs_.size() != 
kSingleNum) { - MS_LOG(ERROR) << "tensor number is error."; - } - auto input_shape_size = input->shape().size(); - int axis = GetAxis() < 0 ? GetAxis() + input_shape_size : GetAxis(); - if (axis >= input_shape_size || axis < 0) { - MS_LOG(ERROR) << "Invalid axis " << GetAxis() << ", input shape size: " << input_shape_size; - return 1; - } - std::vector output_shape(input->shape()); - if (GetTopK() == 1 && !GetKeepDims()) { - output_shape.erase(output_shape.begin() + axis); - } else { - output_shape[axis] = GetTopK(); - } - - output->SetFormat(input->GetFormat()); - output->set_shape(output_shape); - output->set_data_type(input->data_type()); - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/arithmetic.cc b/mindspore/lite/c_ops/arithmetic.cc deleted file mode 100644 index 4baefd9777..0000000000 --- a/mindspore/lite/c_ops/arithmetic.cc +++ /dev/null @@ -1,99 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the License); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/arithmetic.h" - -namespace mindspore { -int Arithmetic::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - if (inputs_.size() != kDoubleNum) { - MS_LOG(ERROR) << "The number of input must be " << kDoubleNum; - return 1; - } - if (outputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "The number of output must be " << kSingleNum; - return 1; - } - auto input0 = inputs_[0]; - MS_ASSERT(input0 != nullptr); - auto input1 = inputs_[1]; - MS_ASSERT(input1 != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - - auto input_shape0 = input0->shape(); - auto input_shape1 = input1->shape(); - auto format = input0->GetFormat(); - in_shape0_.resize(5); - in_shape1_.resize(5); - out_shape_.resize(5); - - ndim_ = input_shape0.size(); - if (input_shape0.size() < input_shape1.size()) { - ndim_ = input_shape1.size(); - auto fill_dim_num = input_shape1.size() - input_shape0.size(); - int j = 0; - for (int i = 0; i < input_shape1.size(); i++) { - if (i < fill_dim_num) { - in_shape0_[i] = 1; - } else { - in_shape0_[i] = input_shape0[j++]; - } - in_shape1_[i] = input_shape1[i]; - } - format = input0->GetFormat(); - } else if (input_shape0.size() > input_shape1.size()) { - ndim_ = input_shape0.size(); - auto fill_dim_num = input_shape0.size() - input_shape1.size(); - int j = 0; - for (int i = 0; i < input_shape0.size(); i++) { - if (i < fill_dim_num) { - in_shape1_[i] = 1; - } else { - in_shape1_[i] = input_shape1[j++]; - } - in_shape0_[i] = input_shape0[i]; - } - } else { - for (int i = 0; i < input_shape0.size(); i++) { - in_shape1_[i] = input_shape1[i]; - in_shape0_[i] = input_shape0[i]; - } - } - - std::vector output_shape; - for (size_t i = 0; i < ndim_; i++) { - if (in_shape0_[i] != in_shape1_[i]) { - if (in_shape0_[i] == 1) { - out_shape_[i] = in_shape1_[i]; - } else if (in_shape1_[i] == 1) { - out_shape_[i] = in_shape0_[i]; - } else { - MS_LOG(ERROR) << "shapes of input tensors can not 
be broadCasted"; - return -1; - } - broadcasting_ = true; - } else { - out_shape_[i] = in_shape0_[i]; - } - output_shape.push_back(out_shape_[i]); - } - output->SetFormat(format); - output->set_shape(output_shape); - output->set_data_type(input0->data_type()); - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/arithmetic_self.cc b/mindspore/lite/c_ops/arithmetic_self.cc deleted file mode 100644 index 4032b69d6f..0000000000 --- a/mindspore/lite/c_ops/arithmetic_self.cc +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the License); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/arithmetic_self.h" - -namespace mindspore { -int ArithmeticSelf::InferShape(std::vector inputs_, - std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - - output->SetFormat(input->GetFormat()); - output->set_shape(input->shape()); - output->set_data_type(input->data_type()); - - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/batch_to_space.cc b/mindspore/lite/c_ops/batch_to_space.cc deleted file mode 100644 index 5019b07286..0000000000 --- a/mindspore/lite/c_ops/batch_to_space.cc +++ /dev/null @@ -1,114 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/batch_to_space.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -std::vector BatchToSpace::GetBlockShape() const { return this->primitive->value.AsBatchToSpace()->blockShape; } -std::vector BatchToSpace::GetCrops() const { return this->primitive->value.AsBatchToSpace()->crops; } - -void BatchToSpace::SetBlockShape(const std::vector &block_shape) { - this->primitive->value.AsBatchToSpace()->blockShape = block_shape; -} -void BatchToSpace::SetCrops(const std::vector &crops) { this->primitive->value.AsBatchToSpace()->crops = crops; } - -#else - -std::vector BatchToSpace::GetBlockShape() const { - auto fb_vector = this->primitive->value_as_BatchToSpace()->blockShape(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} -std::vector BatchToSpace::GetCrops() const { - auto fb_vector = this->primitive->value_as_BatchToSpace()->crops(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} - -void BatchToSpace::SetBlockShape(const std::vector &block_shape) {} -void BatchToSpace::SetCrops(const std::vector &crops) {} -#endif -namespace { -constexpr int kBatchToSpaceOutputNum = 1; -constexpr int kBatchToSpaceInputNum = 1; -constexpr int kBlockShapeSize = 2; -constexpr int kCropsSize = 4; -} // namespace - -int BatchToSpace::InferShape(std::vector inputs, std::vector outputs) { - MS_ASSERT(this->primitive != nullptr); - if (outputs.size() != kBatchToSpaceOutputNum || inputs.size() != kBatchToSpaceInputNum) { - MS_LOG(ERROR) << "Invalid output/input size! 
output size: " << outputs.size() << ",input size: " << inputs.size(); - return 1; - } - - auto input = inputs.at(0); - if (input->GetFormat() != schema::Format_NHWC) { - MS_LOG(ERROR) << "batch_to_space only support NHWC now!"; - return 1; - } - auto input_shape = input->shape(); - if (input_shape.size() != kDimension_4d) { - MS_LOG(ERROR) << "input shape dimension size should == " << kDimension_4d; - return 1; - } - - auto block_shape = GetBlockShape(); - if (block_shape.size() != kBlockShapeSize) { - MS_LOG(ERROR) << "Block shape size should be " << kBlockShapeSize; - return 1; - } - auto crops = GetCrops(); - if (crops.size() != kCropsSize) { - MS_LOG(ERROR) << "Crops size should be " << kCropsSize; - return 1; - } - size_t mul_block_shape = 1; - - for (size_t i = 0; i < kBlockShapeSize; ++i) { - if (block_shape[i] <= 0) { - MS_LOG(ERROR) << "Input block_shape should > 0!"; - return 1; - } - if (input_shape[NHWC_N] % block_shape[i]) { - MS_LOG(ERROR) << "Dimension n " << input_shape[NHWC_N] << " can not divide block_shape[" << i << "] " - << block_shape[i]; - return 1; - } - mul_block_shape *= block_shape[i]; - } - - if (input_shape[NHWC_N] < mul_block_shape) { - MS_LOG(ERROR) << "Dimension n " << input_shape[NHWC_N] << " < product of block shape!"; - return 1; - } - for (size_t i = 0; i < kCropsSize; ++i) { - if (crops[i] < 0) { - MS_LOG(ERROR) << "Input crops should >= 0"; - return 1; - } - } - std::vector output_shape(input_shape.size()); - output_shape[NHWC_N] = input_shape[NHWC_N] / mul_block_shape; - output_shape[NHWC_H] = input_shape[NHWC_H] * block_shape[0] - crops[0] - crops[1]; - output_shape[NHWC_W] = input_shape[NHWC_W] * block_shape[1] - crops[2] - crops[3]; - output_shape[NHWC_C] = input_shape[NHWC_C]; - - outputs[0]->SetFormat(input->GetFormat()); - outputs[0]->set_shape(output_shape); - outputs[0]->set_data_type(input->data_type()); - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/broadcast_to.cc 
b/mindspore/lite/c_ops/broadcast_to.cc deleted file mode 100644 index 071f3e1fdd..0000000000 --- a/mindspore/lite/c_ops/broadcast_to.cc +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/broadcast_to.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -std::vector BroadcastTo::GetDstShape() const { return this->primitive->value.AsBroadcastTo()->dst_shape; } - -void BroadcastTo::SetDstShape(const std::vector &dst_shape) { - this->primitive->value.AsBroadcastTo()->dst_shape = dst_shape; -} - -#else - -std::vector BroadcastTo::GetDstShape() const { - auto fb_vector = this->primitive->value_as_BroadcastTo()->dst_shape(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} - -void BroadcastTo::SetDstShape(const std::vector &dst_shape) {} -#endif -namespace { -constexpr int kBroadcastToInputNum = 1; -constexpr int kBroadcastToOutputNum = 1; -} // namespace - -int BroadcastTo::InferShape(std::vector inputs, std::vector outputs) { - MS_ASSERT(this->primitive != nullptr); - if (inputs.size() != kBroadcastToInputNum || outputs.size() != kBroadcastToOutputNum) { - MS_LOG(ERROR) << "input size:" << inputs.size() << ", output size:" << outputs.size(); - return 1; - } - auto input = inputs.at(0); - std::vector dst_shape(this->primitive->value_as_BroadcastTo()->dst_shape()->begin(), - this->primitive->value_as_BroadcastTo()->dst_shape()->end()); - 
auto input_shape = input->shape(); - std::vector shape(dst_shape.size()); - int input_shape_index = input_shape.size() - 1; - if (input_shape.size() > dst_shape.size()) { - MS_LOG(ERROR) << "input shape size " << input_shape.size() << " should <= broadcast to shape size " - << dst_shape.size() << "!"; - return 1; - } - - for (int i = dst_shape.size() - 1; i >= 0; --i) { - if (dst_shape[i] < 0) { - MS_LOG(ERROR) << "shape[" << i << "] = " << dst_shape[i] << " ] should be > 0!"; - return 1; - } - if (input_shape_index >= 0) { - auto dim = input_shape[input_shape_index]; - if (dim != dst_shape[i] && dim != 1) { - MS_LOG(ERROR) << "Invalid broadcast shape!"; - return 1; - } - } - shape[i] = dst_shape[i]; - --input_shape_index; - } - outputs[0]->SetFormat(input->GetFormat()); - outputs[0]->set_shape(shape); - outputs[0]->set_data_type(input->data_type()); - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/cast.cc b/mindspore/lite/c_ops/cast.cc deleted file mode 100644 index feebe75a53..0000000000 --- a/mindspore/lite/c_ops/cast.cc +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/cast.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -int Cast::GetSrcT() const { return this->primitive->value.AsCast()->srcT; } -int Cast::GetDstT() const { return this->primitive->value.AsCast()->dstT; } - -void Cast::SetSrcT(int src_t) { this->primitive->value.AsCast()->srcT = src_t; } -void Cast::SetDstT(int dst_t) { this->primitive->value.AsCast()->dstT = dst_t; } - -#else - -int Cast::GetSrcT() const { return this->primitive->value_as_Cast()->srcT(); } -int Cast::GetDstT() const { return this->primitive->value_as_Cast()->dstT(); } - -void Cast::SetSrcT(int src_t) {} -void Cast::SetDstT(int dst_t) {} -#endif -int Cast::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - if (inputs_.size() != kSingleNum || outputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "tensor number is error."; - return 1; - } - - MS_ASSERT(cast_prim != nullptr); - if (input->data_type() != GetSrcT()) { - MS_LOG(ERROR) << "input dataType is error"; - return 1; - } - if (kSupportDataType.find(input->data_type()) == kSupportDataType.end()) { - MS_LOG(ERROR) << "Unsupported input data type " << input->data_type(); - return 1; - } - if (GetDstT() != kNumberTypeFloat && GetDstT() != kNumberTypeFloat32) { - MS_LOG(ERROR) << "Invalid output datatype " << GetDstT(); - return 1; - } - output->SetFormat(input->GetFormat()); - output->set_shape(input->shape()); - output->set_data_type(input->data_type()); - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/concat.cc b/mindspore/lite/c_ops/concat.cc deleted file mode 100644 index 2de77c2f72..0000000000 --- a/mindspore/lite/c_ops/concat.cc +++ /dev/null @@ -1,93 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this 
file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/concat.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -int Concat::GetAxis() const { return this->primitive->value.AsConcat()->axis; } -int Concat::GetN() const { return this->primitive->value.AsConcat()->n; } - -void Concat::SetAxis(int axis) { this->primitive->value.AsConcat()->axis = axis; } -void Concat::SetN(int n) { this->primitive->value.AsConcat()->n = n; } - -#else - -int Concat::GetAxis() const { return this->primitive->value_as_Concat()->axis(); } -int Concat::GetN() const { return this->primitive->value_as_Concat()->n(); } - -void Concat::SetAxis(int axis) {} -void Concat::SetN(int n) {} -#endif -namespace { -constexpr int kConcatOutputNum = 1; -} -int Concat::InferShape(std::vector inputs_, std::vector outputs_) { - if (this->primitive == nullptr) { - MS_LOG(ERROR) << "primitive is nullptr!"; - return 1; - } - auto input0 = inputs_.front(); - auto output = outputs_.front(); - if (outputs_.size() != kConcatOutputNum) { - MS_LOG(ERROR) << "output size is error"; - return 1; - } - MS_ASSERT(concat_prim != nullptr); - auto input0_shape = inputs_.at(0)->shape(); - int axis = GetAxis() < 0 ? 
GetAxis() + input0_shape.size() : GetAxis(); - if (axis < 0 || axis >= input0_shape.size()) { - MS_LOG(ERROR) << "Invalid axis: " << axis; - return 1; - } - - auto input0_shape_without_axis = input0_shape; - input0_shape_without_axis.erase(input0_shape_without_axis.begin() + axis); - auto input0_data_type = inputs_.at(0)->data_type(); - schema::Format input0_format = inputs_[0]->GetFormat(); - int output_axis_dim = input0_shape.at(axis); - for (size_t i = 1; i < inputs_.size(); ++i) { - if (inputs_.at(i)->data_type() != input0_data_type) { - MS_LOG(ERROR) << "All inputs should have the same data type!"; - return 1; - } - - if (inputs_.at(i)->GetFormat() != input0_format) { - MS_LOG(ERROR) << "All input format should be the same!"; - return 1; - } - auto shape_tmp = inputs_.at(i)->shape(); - if (shape_tmp.size() != input0_shape.size()) { - MS_LOG(ERROR) << "All inputs should have the same dim num!"; - return 1; - } - auto axis_tmp = shape_tmp[axis]; - shape_tmp.erase(shape_tmp.begin() + axis); - if (input0_shape_without_axis != shape_tmp) { - MS_LOG(ERROR) << "Inputs should have the same dim except axis!"; - return 1; - } - output_axis_dim += axis_tmp; - } - auto output_shape = input0_shape; - output_shape[axis] = output_axis_dim; - outputs_[0]->set_shape(output_shape); - output->set_data_type(input0->data_type()); - output->SetFormat(input0->GetFormat()); - - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/crop.cc b/mindspore/lite/c_ops/crop.cc deleted file mode 100644 index 1332190ec3..0000000000 --- a/mindspore/lite/c_ops/crop.cc +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/crop.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -long Crop::GetAxis() const { return this->primitive->value.AsCrop()->axis; } -std::vector Crop::GetOffsets() const { return this->primitive->value.AsCrop()->offsets; } - -void Crop::SetAxis(long axis) { this->primitive->value.AsCrop()->axis = axis; } -void Crop::SetOffsets(const std::vector &offsets) { this->primitive->value.AsCrop()->offsets = offsets; } - -#else - -long Crop::GetAxis() const { return this->primitive->value_as_Crop()->axis(); } -std::vector Crop::GetOffsets() const { - auto fb_vector = this->primitive->value_as_Crop()->offsets(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} - -void Crop::SetAxis(long axis) {} -void Crop::SetOffsets(const std::vector &offsets) {} -#endif -namespace { -constexpr int kCropOutputNum = 1; -constexpr int kCropInputNum = 2; -} // namespace - -int Crop::InferShape(std::vector inputs, std::vector outputs) { - MS_ASSERT(this->primitive != nullptr); - if (outputs.size() != kCropOutputNum || inputs.size() != kCropInputNum) { - MS_LOG(ERROR) << "Invalid output/input size! 
output size: " << outputs.size() << ",input size: " << inputs.size(); - return 1; - } - outputs[0]->set_shape(inputs[1]->shape()); - outputs[0]->SetFormat(inputs[0]->GetFormat()); - outputs[0]->set_data_type(inputs[0]->data_type()); - - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/depth_to_space.cc b/mindspore/lite/c_ops/depth_to_space.cc deleted file mode 100644 index 52a7794ad0..0000000000 --- a/mindspore/lite/c_ops/depth_to_space.cc +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/depth_to_space.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -int DepthToSpace::GetBlockSize() const { return this->primitive->value.AsDepthToSpace()->blockSize; } -int DepthToSpace::GetFormat() const { return this->primitive->value.AsDepthToSpace()->format; } - -void DepthToSpace::SetBlockSize(int block_size) { this->primitive->value.AsDepthToSpace()->blockSize = block_size; } -void DepthToSpace::SetFormat(int format) { this->primitive->value.AsDepthToSpace()->format = format; } - -#else - -int DepthToSpace::GetBlockSize() const { return this->primitive->value_as_DepthToSpace()->blockSize(); } -int DepthToSpace::GetFormat() const { return this->primitive->value_as_DepthToSpace()->format(); } - -void DepthToSpace::SetBlockSize(int block_size) {} -void DepthToSpace::SetFormat(int format) {} -#endif -namespace { -constexpr int kDepthToSpaceOutputNum = 1; -constexpr int kDepthToSpaceInputNum = 1; -} // namespace - -int DepthToSpace::InferShape(std::vector inputs, std::vector outputs) { - MS_ASSERT(this->primitive != nullptr); - if (outputs.size() != kDepthToSpaceOutputNum || inputs.size() != kDepthToSpaceInputNum) { - MS_LOG(ERROR) << "Invalid output/input size! 
output size: " << outputs.size() << ",input size: " << inputs.size(); - return 1; - } - - auto input = inputs.at(0); - if (input->GetFormat() != schema::Format_NHWC) { - MS_LOG(ERROR) << "depth_to_space only support NHWC now!"; - return 1; - } - auto input_shape = input->shape(); - if (input_shape.size() != kDimension_4d) { - MS_LOG(ERROR) << "input shape dimension size should == " << kDimension_4d; - return 1; - } - - int32_t block_size = GetBlockSize(); - if (input_shape[NHWC_C] % (block_size * block_size) != 0 || input_shape[NHWC_C] == 0) { - MS_LOG(ERROR) << "input dimension c size " << input_shape[NHWC_C] << " should be mulitple of block_size(" - << block_size << ") * block_size)!"; - return 1; - } - std::vector output_shape(input_shape.size()); - output_shape[NHWC_N] = input_shape[NHWC_N]; - output_shape[NHWC_H] = input_shape[NHWC_H] * block_size; - output_shape[NHWC_W] = input_shape[NHWC_W] * block_size; - output_shape[NHWC_C] = input_shape[NHWC_C] / (block_size * block_size); - outputs[0]->set_shape(output_shape); - outputs[0]->set_data_type(input->data_type()); - outputs[0]->SetFormat(input->GetFormat()); - - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/embedding_lookup.cc b/mindspore/lite/c_ops/embedding_lookup.cc deleted file mode 100644 index e609560d18..0000000000 --- a/mindspore/lite/c_ops/embedding_lookup.cc +++ /dev/null @@ -1,72 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/embedding_lookup.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -float EmbeddingLookup::GetMaxNorm() const { return this->primitive->value.AsEmbeddingLookup()->maxNorm; } - -void EmbeddingLookup::SetMaxNorm(float max_norm) { this->primitive->value.AsEmbeddingLookup()->maxNorm = max_norm; } - -#else - -float EmbeddingLookup::GetMaxNorm() const { return this->primitive->value_as_EmbeddingLookup()->maxNorm(); } - -void EmbeddingLookup::SetMaxNorm(float max_norm) {} -#endif -int EmbeddingLookup::InferShape(std::vector inputs_, - std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - if (inputs_.size() < kDoubleNum) { - MS_LOG(ERROR) << "Embedding Lookup should have at least two inputs"; - return 1; - } - - if (outputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "Embedding Lookup should have one outputs"; - return 1; - } - - auto params_ = inputs_.front(); - MS_ASSERT(params_ != nullptr); - auto ids = inputs_.back(); - MS_ASSERT(ids != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - - auto embedding_shape = params_->shape(); - embedding_shape.erase(embedding_shape.begin()); - - std::vector output_shape(ids->shape()); - for (size_t i = 0; i < embedding_shape.size(); ++i) { - output_shape.push_back(embedding_shape.at(i)); - } - - for (int i = 1; i < inputs_.size() - 1; ++i) { - auto embedding_shape_t = inputs_.at(i)->shape(); - embedding_shape_t.erase(embedding_shape_t.begin()); - if (embedding_shape_t != embedding_shape) { - MS_LOG(ERROR) << "The embedded layers should have the same shape"; - return 1; - } - } - - output->set_shape(output_shape); - output->set_data_type(params_->data_type()); - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/expand_dims.cc b/mindspore/lite/c_ops/expand_dims.cc deleted file mode 100644 index 47d8a97bfe..0000000000 --- 
a/mindspore/lite/c_ops/expand_dims.cc +++ /dev/null @@ -1,60 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/expand_dims.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -int ExpandDims::GetDim() const { return this->primitive->value.AsExpandDims()->dim; } - -void ExpandDims::SetDim(int dim) { this->primitive->value.AsExpandDims()->dim = dim; } - -#else - -int ExpandDims::GetDim() const { return this->primitive->value_as_ExpandDims()->dim(); } - -void ExpandDims::SetDim(int dim) {} -#endif -int ExpandDims::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - if (inputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "input size is invalid"; - } - if (outputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "output size is invalid"; - } - - int dim = GetDim(); - if (dim < 0) { - dim += input->shape().size() + 1; - } - if (dim > input->shape().size()) { - MS_LOG(ERROR) << "attribute dim out of range"; - return 1; - } - auto out_shape = input->shape(); - out_shape.insert(out_shape.begin() + dim, 1, 1); - output->set_shape(out_shape); - output->set_data_type(input->data_type()); - output->SetFormat(input->GetFormat()); - - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/fill.cc 
b/mindspore/lite/c_ops/fill.cc deleted file mode 100644 index 7183bf622e..0000000000 --- a/mindspore/lite/c_ops/fill.cc +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/fill.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -std::vector Fill::GetDims() const { return this->primitive->value.AsFill()->dims; } - -void Fill::SetDims(const std::vector &dims) { this->primitive->value.AsFill()->dims = dims; } - -#else - -std::vector Fill::GetDims() const { - auto fb_vector = this->primitive->value_as_Fill()->dims(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} - -void Fill::SetDims(const std::vector &dims) {} -#endif -int Fill::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - auto input = inputs_.front(); - auto output = outputs_.front(); - if (input == nullptr || output == nullptr) { - MS_LOG(ERROR) << "Fill input or output is null!"; - return 1; - } - - if (inputs_.size() != kSingleNum || outputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "input size: " << inputs_.size() << ", output size: " << outputs_.size(); - return 1; - } - - std::vector output_shape; - (void)output_shape.insert(output_shape.begin(), GetDims().begin(), GetDims().end()); - output->set_shape(output_shape); - output->set_data_type(input->data_type()); - output->SetFormat(input->GetFormat()); - - return 0; -} 
-} // namespace mindspore diff --git a/mindspore/lite/c_ops/flatten.cc b/mindspore/lite/c_ops/flatten.cc deleted file mode 100644 index ce24d2ef55..0000000000 --- a/mindspore/lite/c_ops/flatten.cc +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the License); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/flatten.h" - -namespace mindspore { -int Flatten::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - auto input = inputs_.front(); - auto output = outputs_.front(); - if (input == nullptr || output == nullptr) { - MS_LOG(ERROR) << "Flatten input or output is null!"; - return 1; - } - - if (inputs_.size() != kSingleNum || outputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "input size: " << inputs_.size() << ", output size: " << outputs_.size(); - return 1; - } - - auto input_shape = input->shape(); - std::vector output_shape(2); - output_shape[0] = input_shape[0]; - output_shape[1] = 1; - for (int i = 1; i < input_shape.size(); i++) { - output_shape[1] *= input_shape[i]; - } - output->set_shape(output_shape); - output->set_data_type(input->data_type()); - output->SetFormat(input->GetFormat()); - - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/gather.cc b/mindspore/lite/c_ops/gather.cc deleted file mode 100644 index 2e23c9646b..0000000000 --- a/mindspore/lite/c_ops/gather.cc +++ /dev/null @@ -1,87 +0,0 @@ -/** - * Copyright 2019-2020 
Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/gather.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -int Gather::GetAxis() const { return this->primitive->value.AsGather()->axis; } -int Gather::GetBatchDims() const { return this->primitive->value.AsGather()->batchDims; } - -void Gather::SetAxis(int axis) { this->primitive->value.AsGather()->axis = axis; } -void Gather::SetBatchDims(int batch_dims) { this->primitive->value.AsGather()->batchDims = batch_dims; } - -#else - -int Gather::GetAxis() const { return this->primitive->value_as_Gather()->axis(); } -int Gather::GetBatchDims() const { return this->primitive->value_as_Gather()->batchDims(); } - -void Gather::SetAxis(int axis) {} -void Gather::SetBatchDims(int batch_dims) {} -#endif -int Gather::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - if (inputs_.size() != kDoubleNum) { - MS_LOG(ERROR) << "Gather should have two inputs"; - return 1; - } - if (outputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "Gather should have one outputs"; - return 1; - } - - auto input = inputs_.at(0); - MS_ASSERT(input != nullptr); - auto indices = inputs_.at(1); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(input != nullptr); - - int axis = GetAxis(); - int batch_dims = GetBatchDims(); - if (axis < 0) { - axis += input->shape().size(); - } - auto indices_shape = indices->shape(); - 
int indices_rank = indices_shape.size(); - if (indices_rank < batch_dims + 1) { - MS_LOG(ERROR) << "input[1]'s rank is less than batchDim + 1"; - return 1; - } - if (batch_dims != 0) { - MS_LOG(ERROR) << "batchDims " << batch_dims << " != 0, which is not support"; - return 1; - } - auto in_shape = input->shape(); - int in_rank = in_shape.size(); - if (in_rank < axis + 1) { - MS_LOG(ERROR) << "input[0]'s rank is less than axis + 1"; - return 1; - } - - std::vector out_shape{in_shape}; - out_shape.erase(out_shape.begin() + axis); - for (size_t i = 0; i < indices_rank; i++) { - out_shape.insert(out_shape.begin() + axis, indices_shape[i]); - } - - output->set_shape(out_shape); - output->set_data_type(input->data_type()); - output->SetFormat(input->GetFormat()); - - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/gather_nd.cc b/mindspore/lite/c_ops/gather_nd.cc deleted file mode 100644 index 476b88170d..0000000000 --- a/mindspore/lite/c_ops/gather_nd.cc +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/gather_nd.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -int GatherNd::GetBatchDims() const { return this->primitive->value.AsGatherNd()->batchDims; } - -void GatherNd::SetBatchDims(int batch_dims) { this->primitive->value.AsGatherNd()->batchDims = batch_dims; } - -#else - -int GatherNd::GetBatchDims() const { return this->primitive->value_as_GatherNd()->batchDims(); } - -void GatherNd::SetBatchDims(int batch_dims) {} -#endif -int GatherNd::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - if (inputs_.size() != kDoubleNum) { - MS_LOG(ERROR) << "GatherNd should have two inputs"; - return 1; - } - if (outputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "GatherNd should have one outputs"; - return 1; - } - - auto input = inputs_.at(0); - MS_ASSERT(input != nullptr); - auto indices = inputs_.at(1); - MS_ASSERT(indices != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - - auto in_shape = input->shape(); - int in_rank = in_shape.size(); - auto indices_shape = indices->shape(); - int indices_rank = indices_shape.size(); - - if (indices_shape[indices_rank - 1] > in_rank) { - MS_LOG(ERROR) << "Input of indices data is error!"; - return 1; - } - - std::vector out_shape; - int i = 0; - for (i = 0; i < indices_rank - 1; ++i) { - out_shape.emplace_back(indices_shape[i]); - } - for (i = indices_shape[indices_rank - 1]; i < in_rank; ++i) { - out_shape.emplace_back(in_shape[i]); - } - - output->set_shape(out_shape); - output->set_data_type(input->data_type()); - output->SetFormat(input->GetFormat()); - - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/lstm.cc b/mindspore/lite/c_ops/lstm.cc deleted file mode 100644 index e9e08a1b88..0000000000 --- a/mindspore/lite/c_ops/lstm.cc +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may 
not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/lstm.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -bool Lstm::GetBidirection() const { return this->primitive->value.AsLstm()->bidirection; } - -void Lstm::SetBidirection(bool bidirection) { this->primitive->value.AsLstm()->bidirection = bidirection; } - -#else - -bool Lstm::GetBidirection() const { return this->primitive->value_as_Lstm()->bidirection(); } - -void Lstm::SetBidirection(bool bidirection) {} -#endif - -const int kLstmInputNum = 6; -const int kLstmOutputNum = 3; -int Lstm::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - if (inputs_.size() != kLstmInputNum || outputs_.size() != kLstmOutputNum) { - MS_LOG(ERROR) << "OpLstm inputs or outputs size error."; - return 1; - } - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto weight_i = inputs_.front(); - MS_ASSERT(input0 != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - - std::vector in_shape = input->shape(); - std::vector w_shape = weight_i->shape(); // layer, hidden_size * 4, input_size - if (in_shape.size() != 3 || w_shape.size() != 3) { - MS_LOG(ERROR) << "OpLstm input dims should be 3."; - return 1; - } - - int hidden_size = w_shape[1] / 4; - - // set output - std::vector out_shape(in_shape); - out_shape[2] = hidden_size; - if (GetBidirection()) { - out_shape.insert(out_shape.begin() + 1, 2); - } - output->set_shape(out_shape); - - // set hidden state, cell state - std::vector 
state_shape(in_shape); - state_shape[0] = GetBidirection() ? 2 : 1; - state_shape[2] = hidden_size; - outputs_[1]->set_shape(state_shape); - outputs_[2]->set_shape(state_shape); - - for (int i = 0; i < kLstmOutputNum; i++) { - outputs_[i]->set_data_type(input->data_type()); - outputs_[i]->SetFormat(input->GetFormat()); - } - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/matmul.cc b/mindspore/lite/c_ops/matmul.cc deleted file mode 100644 index 3265315f40..0000000000 --- a/mindspore/lite/c_ops/matmul.cc +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/matmul.h" -#include - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -bool MatMul::GetTransposeA() const { return this->primitive->value.AsMatMul()->transposeA; } -bool MatMul::GetTransposeB() const { return this->primitive->value.AsMatMul()->transposeB; } - -void MatMul::SetTransposeA(bool transpose_a) { this->primitive->value.AsMatMul()->transposeA = transpose_a; } -void MatMul::SetTransposeB(bool transpose_b) { this->primitive->value.AsMatMul()->transposeB = transpose_b; } - -#else - -bool MatMul::GetTransposeA() const { return this->primitive->value_as_MatMul()->transposeA(); } -bool MatMul::GetTransposeB() const { return this->primitive->value_as_MatMul()->transposeB(); } - -void MatMul::SetTransposeA(bool transpose_a) {} -void MatMul::SetTransposeB(bool transpose_b) {} -#endif -int MatMul::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - if (inputs_.size() != kDoubleNum) { - MS_LOG(ERROR) << "OpMatMul inputs size: " << inputs_.size(); - return 1; - } - auto input0 = inputs_.front(); - MS_ASSERT(input0 != nullptr); - auto input1 = inputs_.at(1); - MS_ASSERT(input1 != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - - std::vector a_shape = input0->shape(); - std::vector b_shape = input1->shape(); - if (a_shape.size() < 2 || b_shape.size() < 2) { - MS_LOG(ERROR) << "inputs shape is invalid"; - return 1; - } - - for (int i = 0; i < a_shape.size() - 2; ++i) { - if (a_shape[i] != b_shape[i]) { - MS_LOG(ERROR) << "Op MatMul's dimensions must be equal"; - return 1; - } - } - - if (GetTransposeA()) { - std::swap(a_shape[a_shape.size() - 1], a_shape[a_shape.size() - 2]); - } - if (GetTransposeB()) { - std::swap(b_shape[b_shape.size() - 1], b_shape[b_shape.size() - 2]); - } - std::vector c_shape(a_shape); - c_shape[c_shape.size() - 1] = b_shape[b_shape.size() - 1]; - output->set_shape(c_shape); - output->set_data_type(input0->data_type()); - 
output->SetFormat(input0->GetFormat()); - - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/mean.cc b/mindspore/lite/c_ops/mean.cc deleted file mode 100644 index ada5739a56..0000000000 --- a/mindspore/lite/c_ops/mean.cc +++ /dev/null @@ -1,94 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/mean.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -std::vector Mean::GetAxis() const { return this->primitive->value.AsMean()->axis; } -bool Mean::GetKeepDims() const { return this->primitive->value.AsMean()->keepDims; } - -void Mean::SetAxis(const std::vector &axis) { this->primitive->value.AsMean()->axis = axis; } -void Mean::SetKeepDims(bool keep_dims) { this->primitive->value.AsMean()->keepDims = keep_dims; } - -#else - -std::vector Mean::GetAxis() const { - auto fb_vector = this->primitive->value_as_Mean()->axis(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} -bool Mean::GetKeepDims() const { return this->primitive->value_as_Mean()->keepDims(); } - -void Mean::SetAxis(const std::vector &axis) {} -void Mean::SetKeepDims(bool keep_dims) {} -#endif -namespace { -constexpr size_t kInputSize = 1; -constexpr size_t kOutputSize = 1; -} // namespace -int Mean::InferShape(std::vector inputs_, std::vector outputs_) { - if (inputs_.size() != kInputSize || outputs_.size() != kOutputSize) { - return 1; - } - auto input = inputs_.front(); 
- auto output = outputs_.front(); - if (input == nullptr || output == nullptr) { - return 1; - } - if (this->primitive == nullptr) { - return 1; - } - - bool keep_dims = static_cast(GetKeepDims()); - std::vector in_shape = input->shape(); - std::vector out_shape; - const auto &axes = GetAxis(); - auto num_axes = axes.size(); - // reduce on all axes - if (num_axes == 0) { - if (keep_dims) { - for (auto i = 0; i < in_shape.size(); i++) { - out_shape.push_back(1); - } - } - output->set_shape(out_shape); - output->set_data_type(input->data_type()); - return 0; - } - - // reduce on selected axes - for (size_t i = 0; i < in_shape.size(); i++) { - bool reduce_axis = false; - for (int idx = 0; idx < num_axes; ++idx) { - if (static_cast(axes[idx]) == i) { - reduce_axis = true; - break; - } - } - if (reduce_axis) { - if (keep_dims) { - out_shape.push_back(1); - } - } else { - out_shape.push_back(in_shape[i]); - } - } - output->set_shape(out_shape); - output->set_data_type(input->data_type()); - output->SetFormat(input->GetFormat()); - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/nchw2nhwc.cc b/mindspore/lite/c_ops/nchw2nhwc.cc deleted file mode 100644 index 8732e76523..0000000000 --- a/mindspore/lite/c_ops/nchw2nhwc.cc +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the License); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/nchw2nhwc.h" - -namespace mindspore { -int Nchw2Nhwc::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - std::vector nchw_shape = input->shape(); - if (nchw_shape.size() != 4) { - output->set_shape(nchw_shape); - } else { - std::vector nhwc_shape{nchw_shape}; - nhwc_shape[NHWC_N] = nchw_shape[NCHW_N]; - nhwc_shape[NHWC_H] = nchw_shape[NCHW_H]; - nhwc_shape[NHWC_W] = nchw_shape[NCHW_W]; - nhwc_shape[NHWC_C] = nchw_shape[NCHW_C]; - output->set_shape(nhwc_shape); - } - output->SetFormat(schema::Format_NHWC); - output->set_data_type(input->data_type()); - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/nhwc2nchw.cc b/mindspore/lite/c_ops/nhwc2nchw.cc deleted file mode 100644 index 009e161b81..0000000000 --- a/mindspore/lite/c_ops/nhwc2nchw.cc +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the License); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/nhwc2nchw.h" - -namespace mindspore { -int Nhwc2Nchw::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - std::vector nhwc_shape = input->shape(); - if (nhwc_shape.size() != 4) { - output->set_shape(nhwc_shape); - } else { - std::vector nchw_shape{nhwc_shape}; - nchw_shape[NCHW_N] = nhwc_shape[NHWC_N]; - nchw_shape[NCHW_C] = nhwc_shape[NHWC_C]; - nchw_shape[NCHW_H] = nhwc_shape[NHWC_H]; - nchw_shape[NCHW_W] = nhwc_shape[NHWC_W]; - output->set_shape(nchw_shape); - } - output->SetFormat(schema::Format_NCHW); - output->set_data_type(input->data_type()); - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/one_hot.cc b/mindspore/lite/c_ops/one_hot.cc deleted file mode 100644 index 2df75ac1d7..0000000000 --- a/mindspore/lite/c_ops/one_hot.cc +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/one_hot.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -int OneHot::GetAxis() const { return this->primitive->value.AsOneHot()->axis; } - -void OneHot::SetAxis(int axis) { this->primitive->value.AsOneHot()->axis = axis; } - -#else - -int OneHot::GetAxis() const { return this->primitive->value_as_OneHot()->axis(); } - -void OneHot::SetAxis(int axis) {} -#endif -namespace { -constexpr size_t kOneHotInputNum = 4; -} -int OneHot::InferShape(std::vector inputs, std::vector outputs) { - if (this->primitive == nullptr) { - return 1; - } - - int axis = GetAxis(); - - // indices, depth, on_value, off_value - if (inputs.size() != kOneHotInputNum) { - MS_LOG(ERROR) << "OneHot got inputs num " << inputs.size() << ", should be " << kOneHotInputNum; - return 1; - } - auto depth_tensor = inputs.at(1); - if (depth_tensor == nullptr) { - return 1; - } - const int *depth = static_cast(depth_tensor->Data()); - - auto input = inputs.front(); - if (input == nullptr) { - return 1; - } - const auto input_shape = input->shape(); - int input_rank = static_cast(input_shape.size()); - if (axis < 0) { - axis += input_rank + 1; - } - std::vector output_shape(input_shape); - output_shape.insert(output_shape.cbegin() + axis, *depth); - - auto output = outputs.front(); - if (output == nullptr) { - return 1; - } - output->set_shape(output_shape); - - auto on_value = inputs.at(2); - if (on_value == nullptr) { - return 1; - } - output->set_data_type(on_value->data_type()); - output->SetFormat(on_value->GetFormat()); - - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/pad.cc b/mindspore/lite/c_ops/pad.cc deleted file mode 100644 index 240480e730..0000000000 --- a/mindspore/lite/c_ops/pad.cc +++ /dev/null @@ -1,76 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/pad.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -std::vector Pad::GetPaddings() const { return this->primitive->value.AsPad()->paddings; } -int Pad::GetPaddingMode() const { return this->primitive->value.AsPad()->paddingMode; } -float Pad::GetConstantValue() const { return this->primitive->value.AsPad()->constantValue; } - -void Pad::SetPaddings(const std::vector &paddings) { this->primitive->value.AsPad()->paddings = paddings; } -void Pad::SetPaddingMode(int padding_mode) { this->primitive->value.AsPad()->paddingMode = padding_mode; } -void Pad::SetConstantValue(float constant_value) { this->primitive->value.AsPad()->constantValue = constant_value; } - -#else - -std::vector Pad::GetPaddings() const { - auto fb_vector = this->primitive->value_as_Pad()->paddings(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} -int Pad::GetPaddingMode() const { return this->primitive->value_as_Pad()->paddingMode(); } -float Pad::GetConstantValue() const { return this->primitive->value_as_Pad()->constantValue(); } - -void Pad::SetPaddings(const std::vector &paddings) {} -void Pad::SetPaddingMode(int padding_mode) {} -void Pad::SetConstantValue(float constant_value) {} -#endif -namespace { -const size_t kPaddingsSize = 8; -const size_t kInputRank = 4; -} // namespace -int Pad::InferShape(std::vector inputs, std::vector outputs) { - MS_ASSERT(this->primitive != nullptr); - if (this->primitive == nullptr) { - return 1; - } - - auto paddings = GetPaddings(); - - auto input = inputs.front(); - if (input == nullptr) { - 
return 1; - } - auto input_shape = input->shape(); - std::vector output_shape; - MS_ASSERT(input->shape().size() <= kInputRank); - for (size_t i = 0; i < input_shape.size(); i++) { - auto paddings_index = i + kInputRank - input_shape.size(); - auto shape = input_shape[i] + (paddings)[2 * paddings_index] + (paddings)[2 * paddings_index + 1]; - output_shape.push_back(shape); - } - - auto output = outputs.front(); - if (output == nullptr) { - return 1; - } - output->SetFormat(input->GetFormat()); - output->set_shape(output_shape); - output->set_data_type(input->data_type()); - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/pooling.cc b/mindspore/lite/c_ops/pooling.cc deleted file mode 100644 index 48cb4afaf9..0000000000 --- a/mindspore/lite/c_ops/pooling.cc +++ /dev/null @@ -1,139 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/pooling.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -int Pooling::GetFormat() const { return this->primitive->value.AsPooling()->format; } -int Pooling::GetPoolingMode() const { return this->primitive->value.AsPooling()->poolingMode; } -bool Pooling::GetGlobal() const { return this->primitive->value.AsPooling()->global; } -int Pooling::GetWindowW() const { return this->primitive->value.AsPooling()->windowW; } -int Pooling::GetWindowH() const { return this->primitive->value.AsPooling()->windowH; } -int Pooling::GetStrideW() const { return this->primitive->value.AsPooling()->strideW; } -int Pooling::GetStrideH() const { return this->primitive->value.AsPooling()->strideH; } -int Pooling::GetPadMode() const { return this->primitive->value.AsPooling()->padMode; } -int Pooling::GetPadUp() const { return this->primitive->value.AsPooling()->padUp; } -int Pooling::GetPadDown() const { return this->primitive->value.AsPooling()->padDown; } -int Pooling::GetPadLeft() const { return this->primitive->value.AsPooling()->padLeft; } -int Pooling::GetPadRight() const { return this->primitive->value.AsPooling()->padRight; } -int Pooling::GetRoundMode() const { return this->primitive->value.AsPooling()->roundMode; } - -void Pooling::SetFormat(int format) { this->primitive->value.AsPooling()->format = (schema::Format)format; } -void Pooling::SetPoolingMode(int pooling_mode) { - this->primitive->value.AsPooling()->poolingMode = (schema::PoolMode)pooling_mode; -} -void Pooling::SetGlobal(bool global) { this->primitive->value.AsPooling()->global = global; } -void Pooling::SetWindowW(int window_w) { this->primitive->value.AsPooling()->windowW = window_w; } -void Pooling::SetWindowH(int window_h) { this->primitive->value.AsPooling()->windowH = window_h; } -void Pooling::SetStrideW(int stride_w) { this->primitive->value.AsPooling()->strideW = stride_w; } -void Pooling::SetStrideH(int stride_h) { this->primitive->value.AsPooling()->strideH = stride_h; } 
-void Pooling::SetPadMode(int pad_mode) { this->primitive->value.AsPooling()->padMode = (schema::PadMode)pad_mode; } -void Pooling::SetPadUp(int pad_up) { this->primitive->value.AsPooling()->padUp = pad_up; } -void Pooling::SetPadDown(int pad_down) { this->primitive->value.AsPooling()->padDown = pad_down; } -void Pooling::SetPadLeft(int pad_left) { this->primitive->value.AsPooling()->padLeft = pad_left; } -void Pooling::SetPadRight(int pad_right) { this->primitive->value.AsPooling()->padRight = pad_right; } -void Pooling::SetRoundMode(int round_mode) { - this->primitive->value.AsPooling()->roundMode = (schema::RoundMode)round_mode; -} - -#else - -int Pooling::GetFormat() const { return this->primitive->value_as_Pooling()->format(); } -int Pooling::GetPoolingMode() const { return this->primitive->value_as_Pooling()->poolingMode(); } -bool Pooling::GetGlobal() const { return this->primitive->value_as_Pooling()->global(); } -int Pooling::GetWindowW() const { return this->primitive->value_as_Pooling()->windowW(); } -int Pooling::GetWindowH() const { return this->primitive->value_as_Pooling()->windowH(); } -int Pooling::GetStrideW() const { return this->primitive->value_as_Pooling()->strideW(); } -int Pooling::GetStrideH() const { return this->primitive->value_as_Pooling()->strideH(); } -int Pooling::GetPadMode() const { return this->primitive->value_as_Pooling()->padMode(); } -int Pooling::GetPadUp() const { return this->primitive->value_as_Pooling()->padUp(); } -int Pooling::GetPadDown() const { return this->primitive->value_as_Pooling()->padDown(); } -int Pooling::GetPadLeft() const { return this->primitive->value_as_Pooling()->padLeft(); } -int Pooling::GetPadRight() const { return this->primitive->value_as_Pooling()->padRight(); } -int Pooling::GetRoundMode() const { return this->primitive->value_as_Pooling()->roundMode(); } - -void Pooling::SetFormat(int format) {} -void Pooling::SetPoolingMode(int pooling_mode) {} -void Pooling::SetGlobal(bool global) {} -void 
Pooling::SetWindowW(int window_w) {} -void Pooling::SetWindowH(int window_h) {} -void Pooling::SetStrideW(int stride_w) {} -void Pooling::SetStrideH(int stride_h) {} -void Pooling::SetPadMode(int pad_mode) {} -void Pooling::SetPadUp(int pad_up) {} -void Pooling::SetPadDown(int pad_down) {} -void Pooling::SetPadLeft(int pad_left) {} -void Pooling::SetPadRight(int pad_right) {} -void Pooling::SetRoundMode(int round_mode) {} -#endif -int Pooling::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - int input_h = input->shape().at(1); - int input_w = input->shape().at(2); - - MS_ASSERT(pooling_prim != nullptr); - auto window_h = GetWindowH(); - auto window_w = GetWindowH(); - if (GetGlobal()) { - window_h = input_h; - window_w = input_w; - } - - int output_h = 0; - int output_w = 0; - pad_l_ = GetPadLeft(); - pad_u_ = GetPadUp(); - pad_d_ = GetPadDown(); - pad_r_ = GetPadRight(); - if ((schema::PadMode)GetPadMode() == schema::PadMode_SAME) { - output_w = std::ceil(static_cast(input_w) / static_cast(GetStrideW())); - output_h = std::ceil(static_cast(input_h) / static_cast(GetStrideH())); - auto pad_h_all = ((output_h - 1) * GetStrideH() + (window_h - 1) + 1 - input_h); - auto pad_w_all = ((output_w - 1) * GetStrideW() + (window_w - 1) + 1 - input_w); - pad_u_ = pad_h_all / 2; - pad_d_ = pad_h_all - pad_u_; - pad_l_ = pad_w_all / 2; - pad_r_ = pad_w_all - pad_l_; - } else { - auto round_mode = GetRoundMode(); - if (round_mode == schema::RoundMode_FLOOR) { - output_h = std::floor(static_cast(input_h + pad_u_ + pad_d_ - window_h) / GetStrideH()) + 1; - output_w = std::floor(static_cast(input_w + pad_l_ + pad_r_ - window_w) / GetStrideW()) + 1; - } else if (round_mode == schema::RoundMode_CEIL) { - output_h = std::ceil(static_cast(input_h + pad_u_ + pad_d_ - window_h) / GetStrideH()) + 1; - output_w = 
std::ceil(static_cast(input_w + pad_l_ + pad_r_ - window_w) / GetStrideW()) + 1; - } else { - MS_LOG(ERROR) << "unsupported round mode."; - } - } - - // todo: fmk type - auto input_shape = input->shape(); - input_shape.at(1) = output_h; - input_shape.at(2) = output_w; - output->set_shape(input_shape); - output->set_data_type(input->data_type()); - - // todo: temp fix - output->SetFormat(schema::Format_NHWC); - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/power.cc b/mindspore/lite/c_ops/power.cc deleted file mode 100644 index 34490936bc..0000000000 --- a/mindspore/lite/c_ops/power.cc +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/power.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -float Power::GetPower() const { return this->primitive->value.AsPower()->power; } -float Power::GetScale() const { return this->primitive->value.AsPower()->scale; } -float Power::GetShift() const { return this->primitive->value.AsPower()->shift; } - -void Power::SetPower(float power) { this->primitive->value.AsPower()->power = power; } -void Power::SetScale(float scale) { this->primitive->value.AsPower()->scale = scale; } -void Power::SetShift(float shift) { this->primitive->value.AsPower()->shift = shift; } - -#else - -float Power::GetPower() const { return this->primitive->value_as_Power()->power(); } -float Power::GetScale() const { return this->primitive->value_as_Power()->scale(); } -float Power::GetShift() const { return this->primitive->value_as_Power()->shift(); } - -void Power::SetPower(float power) {} -void Power::SetScale(float scale) {} -void Power::SetShift(float shift) {} -#endif -int Power::InferShape(std::vector inputs, std::vector outputs) { - MS_ASSERT(this->primitive != nullptr); - auto x_tensor = inputs[0]; - MS_ASSERT(x_tensor != nullptr); - lite::tensor::Tensor *exp_tensor = nullptr; - if (inputs.size() == 2) { - exp_tensor = inputs[1]; - MS_ASSERT(exp_tensor != nullptr); - } - auto output_tensor = outputs[0]; - MS_ASSERT(output_tensor != nullptr); - if (exp_tensor != nullptr) { - if (exp_tensor->shape() != x_tensor->shape() || exp_tensor->data_type() != x_tensor->data_type()) { - MS_LOG(ERROR) << "Power inputs shape or type is not equal!"; - return 1; - } - } - - output_tensor->SetFormat(x_tensor->GetFormat()); - output_tensor->set_shape(x_tensor->shape()); - output_tensor->set_data_type(x_tensor->data_type()); - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/prior_box.cc b/mindspore/lite/c_ops/prior_box.cc deleted file mode 100644 index 5de4f2b257..0000000000 --- a/mindspore/lite/c_ops/prior_box.cc +++ /dev/null @@ -1,127 +0,0 @@ 
-/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/prior_box.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -std::vector PriorBox::GetMinSizes() const { return this->primitive->value.AsPriorBox()->max_sizes; } -std::vector PriorBox::GetMaxSizes() const { return this->primitive->value.AsPriorBox()->max_sizes; } -std::vector PriorBox::GetAspectRatios() const { return this->primitive->value.AsPriorBox()->aspect_ratios; } -std::vector PriorBox::GetVariances() const { return this->primitive->value.AsPriorBox()->variances; } -int PriorBox::GetImageSizeW() const { return this->primitive->value.AsPriorBox()->image_size_w; } -int PriorBox::GetImageSizeH() const { return this->primitive->value.AsPriorBox()->image_size_h; } -float PriorBox::GetStepW() const { return this->primitive->value.AsPriorBox()->step_w; } -float PriorBox::GetStepH() const { return this->primitive->value.AsPriorBox()->step_h; } -bool PriorBox::GetClip() const { return this->primitive->value.AsPriorBox()->clip; } -bool PriorBox::GetFlip() const { return this->primitive->value.AsPriorBox()->flip; } -float PriorBox::GetOffset() const { return this->primitive->value.AsPriorBox()->offset; } - -void PriorBox::SetMinSizes(const std::vector &min_sizes) { - this->primitive->value.AsPriorBox()->min_sizes = min_sizes; -} -void PriorBox::SetMaxSizes(const std::vector &max_sizes) { - 
this->primitive->value.AsPriorBox()->max_sizes = max_sizes; -} -void PriorBox::SetAspectRatios(const std::vector &aspect_ratios) { - this->primitive->value.AsPriorBox()->aspect_ratios = aspect_ratios; -} -void PriorBox::SetVariances(const std::vector &variances) { - this->primitive->value.AsPriorBox()->variances = variances; -} -void PriorBox::SetImageSizeW(int image_size_w) { this->primitive->value.AsPriorBox()->image_size_w = image_size_w; } -void PriorBox::SetImageSizeH(int image_size_h) { this->primitive->value.AsPriorBox()->image_size_h = image_size_h; } -void PriorBox::SetStepW(float step_w) { this->primitive->value.AsPriorBox()->step_w = step_w; } -void PriorBox::SetStepH(float step_h) { this->primitive->value.AsPriorBox()->step_h = step_h; } -void PriorBox::SetClip(bool clip) { this->primitive->value.AsPriorBox()->clip = clip; } -void PriorBox::SetFlip(bool flip) { this->primitive->value.AsPriorBox()->flip = flip; } -void PriorBox::SetOffset(float offset) { this->primitive->value.AsPriorBox()->offset = offset; } - -#else - -std::vector PriorBox::GetMinSizes() const { - auto fb_vector = this->primitive->value_as_PriorBox()->min_sizes(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} -std::vector PriorBox::GetMaxSizes() const { - auto fb_vector = this->primitive->value_as_PriorBox()->max_sizes(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} -std::vector PriorBox::GetAspectRatios() const { - auto fb_vector = this->primitive->value_as_PriorBox()->aspect_ratios(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} -std::vector PriorBox::GetVariances() const { - auto fb_vector = this->primitive->value_as_PriorBox()->variances(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} -int PriorBox::GetImageSizeW() const { return this->primitive->value_as_PriorBox()->image_size_w(); } -int PriorBox::GetImageSizeH() const { return this->primitive->value_as_PriorBox()->image_size_h(); } -float PriorBox::GetStepW() 
const { return this->primitive->value_as_PriorBox()->step_w(); } -float PriorBox::GetStepH() const { return this->primitive->value_as_PriorBox()->step_h(); } -bool PriorBox::GetClip() const { return this->primitive->value_as_PriorBox()->clip(); } -bool PriorBox::GetFlip() const { return this->primitive->value_as_PriorBox()->flip(); } -float PriorBox::GetOffset() const { return this->primitive->value_as_PriorBox()->offset(); } - -void PriorBox::SetMinSizes(const std::vector &min_sizes) {} -void PriorBox::SetMaxSizes(const std::vector &max_sizes) {} -void PriorBox::SetAspectRatios(const std::vector &aspect_ratios) {} -void PriorBox::SetVariances(const std::vector &variances) {} -void PriorBox::SetImageSizeW(int image_size_w) {} -void PriorBox::SetImageSizeH(int image_size_h) {} -void PriorBox::SetStepW(float step_w) {} -void PriorBox::SetStepH(float step_h) {} -void PriorBox::SetClip(bool clip) {} -void PriorBox::SetFlip(bool flip) {} -void PriorBox::SetOffset(float offset) {} -#endif -namespace { -constexpr int kPriorBoxPoints = 4; -constexpr int kPriorBoxN = 1; -constexpr int kPriorBoxW = 1; -constexpr int kPriorBoxC = 2; -} // namespace - -int PriorBox::InferShape(std::vector inputs_, std::vector outputs_) { - std::vector different_aspect_ratios{1.0f}; - auto aspect_ratios = GetAspectRatios(); - MS_ASSERT(aspect_ratios != nullptr); - for (auto i = 0; i < aspect_ratios.size(); i++) { - float ratio = (aspect_ratios)[i]; - bool exist = std::any_of(different_aspect_ratios.begin(), different_aspect_ratios.end(), - [&](float v) { return abs(ratio - v) < 1e-6; }); - if (!exist) { - different_aspect_ratios.emplace_back(ratio); - if (GetFlip()) { - different_aspect_ratios.emplace_back(1.0f / ratio); - } - } - } - int32_t num_priors_box = GetMinSizes().size() * different_aspect_ratios.size() + GetMaxSizes().size(); - auto input = inputs_.at(0); - MS_ASSERT(input != nullptr); - int32_t h = input->Height() * input->Width() * num_priors_box * kPriorBoxPoints; - - std::vector 
output_shape{kPriorBoxN, h, kPriorBoxW, kPriorBoxC}; - auto output = outputs_.at(0); - MS_ASSERT(output != nullptr); - - output->set_shape(output_shape); - output->set_data_type(kNumberTypeFloat32); - output->SetFormat(input->GetFormat()); - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/quant_dtype_cast.cc b/mindspore/lite/c_ops/quant_dtype_cast.cc deleted file mode 100644 index 3e08e6f1c5..0000000000 --- a/mindspore/lite/c_ops/quant_dtype_cast.cc +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/quant_dtype_cast.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -int QuantDTypeCast::GetSrcT() const { return this->primitive->value.AsQuantDTypeCast()->srcT; } -int QuantDTypeCast::GetDstT() const { return this->primitive->value.AsQuantDTypeCast()->dstT; } - -void QuantDTypeCast::SetSrcT(int src_t) { this->primitive->value.AsQuantDTypeCast()->srcT = src_t; } -void QuantDTypeCast::SetDstT(int dst_t) { this->primitive->value.AsQuantDTypeCast()->dstT = dst_t; } - -#else - -int QuantDTypeCast::GetSrcT() const { return this->primitive->value_as_QuantDTypeCast()->srcT(); } -int QuantDTypeCast::GetDstT() const { return this->primitive->value_as_QuantDTypeCast()->dstT(); } - -void QuantDTypeCast::SetSrcT(int src_t) {} -void QuantDTypeCast::SetDstT(int dst_t) {} -#endif -int QuantDTypeCast::InferShape(std::vector inputs_, - std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - output->set_shape(input->shape()); - auto param = primitive->value_as_QuantDTypeCast(); - MS_ASSERT(input->data_type() == param->srcT); - output->set_data_type(static_cast(param->dstT())); - output->SetFormat(input->GetFormat()); - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/range.cc b/mindspore/lite/c_ops/range.cc deleted file mode 100644 index e436fedd91..0000000000 --- a/mindspore/lite/c_ops/range.cc +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/range.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -int Range::GetDType() const { return this->primitive->value.AsRange()->dType; } -int Range::GetStart() const { return this->primitive->value.AsRange()->start; } -int Range::GetLimit() const { return this->primitive->value.AsRange()->limit; } -int Range::GetDelta() const { return this->primitive->value.AsRange()->delta; } - -void Range::SetDType(int d_type) { this->primitive->value.AsRange()->dType = d_type; } -void Range::SetStart(int start) { this->primitive->value.AsRange()->start = start; } -void Range::SetLimit(int limit) { this->primitive->value.AsRange()->limit = limit; } -void Range::SetDelta(int delta) { this->primitive->value.AsRange()->delta = delta; } - -#else - -int Range::GetDType() const { return this->primitive->value_as_Range()->dType(); } -int Range::GetStart() const { return this->primitive->value_as_Range()->start(); } -int Range::GetLimit() const { return this->primitive->value_as_Range()->limit(); } -int Range::GetDelta() const { return this->primitive->value_as_Range()->delta(); } - -void Range::SetDType(int d_type) {} -void Range::SetStart(int start) {} -void Range::SetLimit(int limit) {} -void Range::SetDelta(int delta) {} -#endif -int Range::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - - int shape_size = std::ceil(static_cast(GetLimit() - GetStart()) / 
GetDelta()); - std::vector in_shape(1); - in_shape.push_back(shape_size); - output->set_shape(in_shape); - output->set_data_type(input->data_type()); - output->SetFormat(input->GetFormat()); - - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/rank.cc b/mindspore/lite/c_ops/rank.cc deleted file mode 100644 index dfe568aca0..0000000000 --- a/mindspore/lite/c_ops/rank.cc +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the License); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/rank.h" - -namespace mindspore { -int Rank::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - std::vector in_shape(1, 1); - output->set_shape(in_shape); - output->set_data_type(input->data_type()); - output->SetFormat(input->GetFormat()); - - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/reduce.cc b/mindspore/lite/c_ops/reduce.cc deleted file mode 100644 index 445a506de3..0000000000 --- a/mindspore/lite/c_ops/reduce.cc +++ /dev/null @@ -1,99 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/reduce.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -std::vector Reduce::GetAxes() const { return this->primitive->value.AsReduce()->axes; } -int Reduce::GetKeepDims() const { return this->primitive->value.AsReduce()->keepDims; } -int Reduce::GetMode() const { return this->primitive->value.AsReduce()->mode; } - -void Reduce::SetAxes(const std::vector &axes) { this->primitive->value.AsReduce()->axes = axes; } -void Reduce::SetKeepDims(int keep_dims) { this->primitive->value.AsReduce()->keepDims = keep_dims; } -void Reduce::SetMode(int mode) { this->primitive->value.AsReduce()->mode = (schema::ReduceMode)mode; } - -#else - -std::vector Reduce::GetAxes() const { - auto fb_vector = this->primitive->value_as_Reduce()->axes(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} -int Reduce::GetKeepDims() const { return this->primitive->value_as_Reduce()->keepDims(); } -int Reduce::GetMode() const { return this->primitive->value_as_Reduce()->mode(); } - -void Reduce::SetAxes(const std::vector &axes) {} -void Reduce::SetKeepDims(int keep_dims) {} -void Reduce::SetMode(int mode) {} -#endif -namespace { -constexpr size_t kInputSize = 1; -constexpr size_t kOutputSize = 1; -} // namespace -int Reduce::InferShape(std::vector inputs_, std::vector outputs_) { - if (inputs_.size() != kInputSize || outputs_.size() != kOutputSize) { - return 1; - } - auto input = inputs_.front(); - auto output = outputs_.front(); - if (input == nullptr || output == nullptr) { - return 1; - } - if (this->primitive == nullptr) { - return 
1; - } - - bool keep_dims = static_cast(GetKeepDims()); - std::vector in_shape = input->shape(); - std::vector out_shape; - const auto &axes = GetAxes(); - auto num_axes = axes.size(); - // reduce on all axes - if (num_axes == 0) { - if (keep_dims) { - for (auto i = 0; i < in_shape.size(); i++) { - out_shape.push_back(1); - } - } - output->set_shape(out_shape); - output->set_data_type(input->data_type()); - return 0; - } - - // reduce on selected axes - for (size_t i = 0; i < in_shape.size(); i++) { - bool reduce_axis = false; - for (int idx = 0; idx < num_axes; ++idx) { - if (static_cast((axes)[idx]) == i) { - reduce_axis = true; - break; - } - } - if (reduce_axis) { - if (keep_dims) { - out_shape.push_back(1); - } - } else { - out_shape.push_back(in_shape[i]); - } - } - output->set_shape(out_shape); - output->set_data_type(input->data_type()); - output->SetFormat(input->GetFormat()); - - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/reshape.cc b/mindspore/lite/c_ops/reshape.cc deleted file mode 100644 index 83789cede7..0000000000 --- a/mindspore/lite/c_ops/reshape.cc +++ /dev/null @@ -1,153 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/reshape.h" -#include - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -int Reshape::GetFormat() const { return this->primitive->value.AsReshape()->format; } -std::vector Reshape::GetShape() const { return this->primitive->value.AsReshape()->shape; } - -void Reshape::SetFormat(int format) { this->primitive->value.AsReshape()->format = format; } -void Reshape::SetShape(const std::vector &shape) { this->primitive->value.AsReshape()->shape = shape; } - -#else - -int Reshape::GetFormat() const { return this->primitive->value_as_Reshape()->format(); } -std::vector Reshape::GetShape() const { - auto fb_vector = this->primitive->value_as_Reshape()->shape(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} - -void Reshape::SetFormat(int format) {} -void Reshape::SetShape(const std::vector &shape) {} -#endif - -int Reshape::CalNewShape(const lite::tensor::Tensor *in_tensor, std::vector *out_shape) const { - size_t in_shape_size = 1; - for (size_t i = 0; i < in_tensor->shape().size(); i++) { - in_shape_size *= in_tensor->shape()[i]; - } - - int64_t inferIndex = -1; - size_t out_shapeSize = 1; - for (size_t i = 0; i < out_shape->size(); i++) { - if (out_shape->at(i) == -1) { - if (inferIndex == -1) { - inferIndex = i; - } else { - MS_LOG(ERROR) << "output shape should has no more than one dim which need infer"; - return 1; - } - } else if (out_shape->at(i) < 0) { - MS_LOG(ERROR) << "output shape dim should be non-negative"; - return 1; - } else if (out_shape->at(i) == 0) { - out_shape->at(i) = in_tensor->shape().at(i); - out_shapeSize *= out_shape->at(i); - } else { - out_shapeSize *= out_shape->at(i); - } - } - - if (inferIndex == -1 && out_shapeSize != in_shape_size) { - MS_LOG(ERROR) << "output shapeSize: " << out_shapeSize << " should be equal to input shapeSize: " << in_shape_size; - return 1; - } - if (inferIndex != -1) { - out_shape->at(inferIndex) = in_shape_size / out_shapeSize; - } - return 0; -} - -template -void 
CalShape(const T *data, const std::vector &inputs, std::vector *out_shape, - int shape_size) { - int input_count = inputs[0]->ElementsNum(); - - int index = 0; - int size = 1; - for (size_t i = 0; i < shape_size; i++) { - if (data[i] == -1) { - index = i; - } else { - size *= data[i]; - } - out_shape->push_back(data[i]); - } - if (data[index] == -1) { - (*out_shape)[index] = input_count / size; - } -} - -int Reshape::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - - std::vector out_shape; - if (inputs_.size() == kDoubleNum) { - auto shape_tensor = inputs_.at(1); - if (shape_tensor->Data() == nullptr) { - MS_LOG(INFO) << "Do infer shape in runtime."; - return 1; - } - size_t shape_size = shape_tensor->ElementsNum(); - switch (shape_tensor->data_type()) { - case kNumberTypeInt8: { - auto data = reinterpret_cast(shape_tensor->Data()); - CalShape(data, inputs_, &out_shape, shape_size); - } break; - case kNumberTypeInt32: { - auto data = reinterpret_cast(shape_tensor->Data()); - CalShape(data, inputs_, &out_shape, shape_size); - } break; - case kNumberTypeFloat: { - auto data = reinterpret_cast(shape_tensor->Data()); - CalShape(data, inputs_, &out_shape, shape_size); - } break; - case kNumberTypeUInt32: { - auto data = reinterpret_cast(shape_tensor->Data()); - CalShape(data, inputs_, &out_shape, shape_size); - } break; - default: { - MS_LOG(ERROR) << "Reshape weight tensor has unsupported dataType: " << shape_tensor->data_type(); - return 1; - } - } - } else if (inputs_.size() == kSingleNum) { - std::copy(GetShape().begin(), GetShape().end(), std::back_inserter(out_shape)); - } else { - MS_LOG(ERROR) << "inputs tensor size invalid."; - return 1; - } - - auto ret = CalNewShape(inputs_.front(), &out_shape); - if (ret != 0) { - MS_LOG(ERROR) << "CalNewShape error"; - return ret; - } - - 
output->set_shape(out_shape); - output->set_data_type(input->data_type()); - output->SetFormat(input->GetFormat()); - - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/resize.cc b/mindspore/lite/c_ops/resize.cc deleted file mode 100644 index 02860e8ed3..0000000000 --- a/mindspore/lite/c_ops/resize.cc +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/resize.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -int Resize::GetFormat() const { return this->primitive->value.AsResize()->format; } -int Resize::GetMethod() const { return this->primitive->value.AsResize()->method; } -long Resize::GetNewHeight() const { return this->primitive->value.AsResize()->newHeight; } -long Resize::GetNewWidth() const { return this->primitive->value.AsResize()->newWidth; } -bool Resize::GetAlignCorners() const { return this->primitive->value.AsResize()->alignCorners; } -bool Resize::GetPreserveAspectRatio() const { return this->primitive->value.AsResize()->preserveAspectRatio; } - -void Resize::SetFormat(int format) { this->primitive->value.AsResize()->format = (schema::Format)format; } -void Resize::SetMethod(int method) { this->primitive->value.AsResize()->method = (schema::ResizeMethod)method; } -void Resize::SetNewHeight(long new_height) { this->primitive->value.AsResize()->newHeight = new_height; } -void Resize::SetNewWidth(long 
new_width) { this->primitive->value.AsResize()->newWidth = new_width; } -void Resize::SetAlignCorners(bool align_corners) { this->primitive->value.AsResize()->alignCorners = align_corners; } -void Resize::SetPreserveAspectRatio(bool preserve_aspect_ratio) { - this->primitive->value.AsResize()->preserveAspectRatio = preserve_aspect_ratio; -} - -#else - -int Resize::GetFormat() const { return this->primitive->value_as_Resize()->format(); } -int Resize::GetMethod() const { return this->primitive->value_as_Resize()->method(); } -long Resize::GetNewHeight() const { return this->primitive->value_as_Resize()->newHeight(); } -long Resize::GetNewWidth() const { return this->primitive->value_as_Resize()->newWidth(); } -bool Resize::GetAlignCorners() const { return this->primitive->value_as_Resize()->alignCorners(); } -bool Resize::GetPreserveAspectRatio() const { return this->primitive->value_as_Resize()->preserveAspectRatio(); } - -void Resize::SetFormat(int format) {} -void Resize::SetMethod(int method) {} -void Resize::SetNewHeight(long new_height) {} -void Resize::SetNewWidth(long new_width) {} -void Resize::SetAlignCorners(bool align_corners) {} -void Resize::SetPreserveAspectRatio(bool preserve_aspect_ratio) {} -#endif -namespace { -constexpr int kInputRank = 4; -} // namespace -int Resize::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - auto input = inputs_.front(); - if (input == nullptr) { - return 1; - } - MS_ASSERT(input->shape().size() == kInputRank); - - auto output = outputs_.front(); - if (output == nullptr) { - return 1; - } - auto new_height = GetNewHeight(); - auto new_width = GetNewWidth(); - - std::vector output_shape; - output_shape.push_back(input->Batch()); - output_shape.push_back(new_height); - output_shape.push_back(new_width); - output_shape.push_back(input->Channel()); - output->set_shape(output_shape); - output->set_data_type(input->data_type()); - output->SetFormat(input->GetFormat()); - - 
return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/reverse_sequence.cc b/mindspore/lite/c_ops/reverse_sequence.cc deleted file mode 100644 index 36a01468f9..0000000000 --- a/mindspore/lite/c_ops/reverse_sequence.cc +++ /dev/null @@ -1,60 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/reverse_sequence.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -int ReverseSequence::GetSeqAxis() const { return this->primitive->value.AsReverseSequence()->seqAxis; } -int ReverseSequence::GetBatchAxis() const { return this->primitive->value.AsReverseSequence()->batchAxis; } -std::vector ReverseSequence::GetSeqLengths() const { - return this->primitive->value.AsReverseSequence()->seqLengths; -} - -void ReverseSequence::SetSeqAxis(int seq_axis) { this->primitive->value.AsReverseSequence()->seqAxis = seq_axis; } -void ReverseSequence::SetBatchAxis(int batch_axis) { - this->primitive->value.AsReverseSequence()->batchAxis = batch_axis; -} -void ReverseSequence::SetSeqLengths(const std::vector &seq_lengths) { - this->primitive->value.AsReverseSequence()->seqLengths = seq_lengths; -} - -#else - -int ReverseSequence::GetSeqAxis() const { return this->primitive->value_as_ReverseSequence()->seqAxis(); } -int ReverseSequence::GetBatchAxis() const { return this->primitive->value_as_ReverseSequence()->batchAxis(); } -std::vector ReverseSequence::GetSeqLengths() const 
{ - auto fb_vector = this->primitive->value_as_ReverseSequence()->seqLengths(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} - -void ReverseSequence::SetSeqAxis(int seq_axis) {} -void ReverseSequence::SetBatchAxis(int batch_axis) {} -void ReverseSequence::SetSeqLengths(const std::vector &seq_lengths) {} -#endif -int ReverseSequence::InferShape(std::vector inputs, - std::vector outputs) { - auto input = inputs.front(); - auto output = outputs.front(); - MS_ASSERT(input != nullptr); - MS_ASSERT(output != nullptr); - - output->set_shape(input->shape()); - output->set_data_type(input->data_type()); - output->SetFormat(input->GetFormat()); - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/roi_pooling.cc b/mindspore/lite/c_ops/roi_pooling.cc deleted file mode 100644 index 912faf34fd..0000000000 --- a/mindspore/lite/c_ops/roi_pooling.cc +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/roi_pooling.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -int ROIPooling::GetPooledH() const { return this->primitive->value.AsROIPooling()->pooledH; } -int ROIPooling::GetPooledW() const { return this->primitive->value.AsROIPooling()->pooledW; } -float ROIPooling::GetScale() const { return this->primitive->value.AsROIPooling()->scale; } - -void ROIPooling::SetPooledH(int pooled_h) { this->primitive->value.AsROIPooling()->pooledH = pooled_h; } -void ROIPooling::SetPooledW(int pooled_w) { this->primitive->value.AsROIPooling()->pooledW = pooled_w; } -void ROIPooling::SetScale(float scale) { this->primitive->value.AsROIPooling()->scale = scale; } - -#else - -int ROIPooling::GetPooledH() const { return this->primitive->value_as_ROIPooling()->pooledH(); } -int ROIPooling::GetPooledW() const { return this->primitive->value_as_ROIPooling()->pooledW(); } -float ROIPooling::GetScale() const { return this->primitive->value_as_ROIPooling()->scale(); } - -void ROIPooling::SetPooledH(int pooled_h) {} -void ROIPooling::SetPooledW(int pooled_w) {} -void ROIPooling::SetScale(float scale) {} -#endif - -int ROIPooling::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - if (inputs_.size() != kDoubleNum) { - MS_LOG(ERROR) << "inputs number is not equal to " << kDoubleNum; - return 1; - } - auto input = inputs_.front(); - if (input == nullptr) { - return 1; - } - auto roi = inputs_.at(1); - if (roi == nullptr) { - return 1; - } - auto output = outputs_.front(); - if (output == nullptr) { - return 1; - } - - auto new_h = GetPooledH(); - auto new_w = GetPooledW(); - - auto shape_data = roi->shape(); - - std::vector output_shape; - output_shape.push_back(shape_data[0]); - output_shape.push_back(new_h); - output_shape.push_back(new_w); - output_shape.push_back(input->Channel()); - output->set_shape(output_shape); - output->set_data_type(input->data_type()); - output->SetFormat(input->GetFormat()); - - return 
0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/scatter_nd.cc b/mindspore/lite/c_ops/scatter_nd.cc deleted file mode 100644 index e5f09fe496..0000000000 --- a/mindspore/lite/c_ops/scatter_nd.cc +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the License); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/scatter_nd.h" - -namespace mindspore { -namespace { -constexpr int kScatterNDInputNum = 3; -constexpr int kScatterNDOutputNum = 1; -constexpr int kScatterShapeIndex = 0; -constexpr int kScatterIndicesIndex = 1; -constexpr int kScatterUpdateIndex = 2; -} // namespace - -int ScatterND::InferShape(std::vector inputs_, std::vector outputs_) { - if (inputs_.size() != kScatterNDInputNum) { - MS_LOG(ERROR) << "inputs number is not equal to " << kScatterNDInputNum; - return 1; - } - if (outputs_.size() != kScatterNDOutputNum) { - MS_LOG(ERROR) << "outputs number is not equal to " << kScatterNDInputNum; - return 1; - } - auto shape = inputs_.at(kScatterShapeIndex); - if (shape == nullptr) { - MS_LOG(ERROR) << "shape null pointer dereferencing."; - return 1; - } - auto indices = inputs_.at(kScatterIndicesIndex); - if (indices == nullptr) { - MS_LOG(ERROR) << "indices null pointer dereferencing."; - return 1; - } - auto update = inputs_.at(kScatterUpdateIndex); - if (update == nullptr) { - MS_LOG(ERROR) << "update null pointer dereferencing."; - return 1; - } - auto output = outputs_.front(); - auto 
shape_data = reinterpret_cast(shape->Data()); - std::vector out_shape(shape_data, shape_data + shape->DataSize()); - output->set_shape(out_shape); - output->set_data_type(update->data_type()); - output->SetFormat(update->GetFormat()); - - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/shape.cc b/mindspore/lite/c_ops/shape.cc deleted file mode 100644 index 377a03ccf4..0000000000 --- a/mindspore/lite/c_ops/shape.cc +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/shape.h" - -namespace mindspore { -namespace { -constexpr int kShapeInputNum = 1; -constexpr int kShapeOutputNum = 1; -} // namespace -int Shape::InferShape(std::vector inputs_, std::vector outputs_) { - if (inputs_.size() != kShapeInputNum) { - MS_LOG(ERROR) << "inputs to Shape operator should be 1, but " << inputs_.size() << " is given."; - return 1; - } - if (outputs_.size() != kShapeOutputNum) { - MS_LOG(ERROR) << "outputs to Shape operator should be 1, but " << outputs_.size() << " is given."; - return 1; - } - - auto in_tensor = inputs_.front(); - auto out_tensor = outputs_.front(); - std::vector out_shape; - out_shape.push_back(static_cast(in_tensor->shape().size())); - - auto ret_shape = out_tensor->set_shape(out_shape); - if (ret_shape != 1 || size_t(out_tensor->shape()[0]) != in_tensor->shape().size()) { - MS_LOG(ERROR) << "Set shape fails."; - return 1; - } - auto ret_dtype = out_tensor->set_data_type(in_tensor->data_type()); - if (ret_dtype != in_tensor->data_type()) { - MS_LOG(ERROR) << "Set datatype fails."; - return 1; - } - - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/slice.cc b/mindspore/lite/c_ops/slice.cc deleted file mode 100644 index 855ae6f444..0000000000 --- a/mindspore/lite/c_ops/slice.cc +++ /dev/null @@ -1,90 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/slice.h" - -namespace mindspore { -namespace { -constexpr int kSliceInputNum = 1; -constexpr int kSliceOutputNum = 1; -} // namespace -#ifdef PRIMITIVE_WRITEABLE -int SliceOp::GetFormat() const { return this->primitive->value.AsSlice()->format; } -std::vector SliceOp::GetBegin() const { return this->primitive->value.AsSlice()->begin; } -std::vector SliceOp::GetSize() const { return this->primitive->value.AsSlice()->size; } - -void SliceOp::SetFormat(int format) { this->primitive->value.AsSlice()->format = format; } -void SliceOp::SetBegin(const std::vector &begin) { this->primitive->value.AsSlice()->begin = begin; } -void SliceOp::SetSize(const std::vector &size) { this->primitive->value.AsSlice()->size = size; } - -#else - -int SliceOp::GetFormat() const { return this->primitive->value_as_Slice()->format(); } -std::vector SliceOp::GetBegin() const { - auto fb_vector = this->primitive->value_as_Slice()->begin(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} -std::vector SliceOp::GetSize() const { - auto fb_vector = this->primitive->value_as_Slice()->size(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} - -void SliceOp::SetFormat(int format) {} -void SliceOp::SetBegin(const std::vector &begin) {} -void SliceOp::SetSize(const std::vector &size) {} -#endif - -int SliceOp::InferShape(std::vector inputs, std::vector outputs) { - MS_ASSERT(this->primitive != nullptr); - if (inputs.size() != kSliceInputNum || outputs.size() != kSliceOutputNum) { - MS_LOG(ERROR) << "input size:" << inputs.size() << ",output size:" << outputs.size(); - return 1; - } - auto input = inputs.at(0); - auto input_shape = input->shape(); - std::vector slice_begin(GetBegin().begin(), GetBegin().end()); - std::vector slice_size(GetSize().begin(), GetSize().end()); - std::vector output_shape(input_shape.size()); - for (int i = 0; i < input_shape.size(); ++i) { - if (slice_size[i] < 0 && slice_size[i] != -1) { - MS_LOG(ERROR) << "Invalid size 
input!size[" << i << "]=" << slice_size[i]; - return 1; - } - if (slice_begin[i] < 0) { - MS_LOG(ERROR) << "Invalid begin input " << slice_begin[i] << " which should be >= 0"; - return 1; - } - if (input_shape[i] <= slice_begin[i]) { - MS_LOG(ERROR) << "Invalid begin input!begin[" << i << "]=" << slice_begin[i] - << " which should be <= " << input_shape[i]; - return 1; - } - if (slice_size[i] > (input_shape[i] - slice_begin[i])) { - MS_LOG(ERROR) << "Invalid size input " << slice_size[i] - << " which should be <= " << input_shape[i] - slice_begin[i]; - return 1; - } - - output_shape[i] = slice_size[i] < 0 ? input_shape[i] - slice_begin[i] : slice_size[i]; - } - - outputs[0]->set_shape(output_shape); - outputs[0]->set_data_type(input->data_type()); - outputs[0]->SetFormat(input->GetFormat()); - - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/softmax.cc b/mindspore/lite/c_ops/softmax.cc deleted file mode 100644 index 68d3baaedf..0000000000 --- a/mindspore/lite/c_ops/softmax.cc +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/softmax.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -int SoftMax::GetAxis() const { return this->primitive->value.AsSoftMax()->axis; } - -void SoftMax::SetAxis(int axis) { this->primitive->value.AsSoftMax()->axis = axis; } - -#else - -int SoftMax::GetAxis() const { return this->primitive->value_as_SoftMax()->axis(); } - -void SoftMax::SetAxis(int axis) {} -#endif -int SoftMax::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - output->set_shape(input->shape()); - output->set_data_type(input->data_type()); - output->SetFormat(input->GetFormat()); - - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/space_to_batch.cc b/mindspore/lite/c_ops/space_to_batch.cc deleted file mode 100644 index d49b867f17..0000000000 --- a/mindspore/lite/c_ops/space_to_batch.cc +++ /dev/null @@ -1,110 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/space_to_batch.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -std::vector SpaceToBatch::GetBlockShape() const { return this->primitive->value.AsSpaceToBatch()->blockShape; } -std::vector SpaceToBatch::GetPaddings() const { return this->primitive->value.AsSpaceToBatch()->paddings; } - -void SpaceToBatch::SetBlockShape(const std::vector &block_shape) { - this->primitive->value.AsSpaceToBatch()->blockShape = block_shape; -} -void SpaceToBatch::SetPaddings(const std::vector &paddings) { - this->primitive->value.AsSpaceToBatch()->paddings = paddings; -} - -#else - -std::vector SpaceToBatch::GetBlockShape() const { - auto fb_vector = this->primitive->value_as_SpaceToBatch()->blockShape(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} -std::vector SpaceToBatch::GetPaddings() const { - auto fb_vector = this->primitive->value_as_SpaceToBatch()->paddings(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} - -void SpaceToBatch::SetBlockShape(const std::vector &block_shape) {} -void SpaceToBatch::SetPaddings(const std::vector &paddings) {} -#endif -namespace { -constexpr int kSpaceToBatchNDOutputNum = 1; -constexpr int kSpaceToBatchNDInputNum = 1; -constexpr int kBlockSizesSize = 2; -constexpr int kPaddingsSize = 4; -} // namespace - -int SpaceToBatch::InferShape(std::vector inputs, std::vector outputs) { - MS_ASSERT(this->primitive != nullptr); - if (outputs.size() != kSpaceToBatchNDOutputNum || inputs.size() != kSpaceToBatchNDInputNum) { - MS_LOG(ERROR) << "Invalid output/input size! 
output size: " << outputs.size() << ",input size: " << inputs.size(); - return 1; - } - - auto input = inputs.at(0); - if (input->GetFormat() != schema::Format_NHWC) { - MS_LOG(ERROR) << "space_to_batch only support NHWC now!"; - return 1; - } - auto input_shape = input->shape(); - if (input_shape.size() != kDimension_4d) { - MS_LOG(ERROR) << "input shape dimension size should == " << kDimension_4d; - return 1; - } - - if (GetBlockShape().size() != kBlockSizesSize) { - MS_LOG(ERROR) << "Block shape size should be " << kBlockSizesSize; - return 1; - } - if (GetPaddings().size() != kPaddingsSize) { - MS_LOG(ERROR) << "Crops size should be " << kPaddingsSize; - return 1; - } - - for (int &iter : GetBlockShape()) { - block_sizes_.emplace_back(iter); - } - - in_shape_.clear(); - padded_in_shape_.clear(); - paddings_.clear(); - in_shape_.emplace_back(input_shape.at(NHWC_N)); - padded_in_shape_.emplace_back(input_shape.at(NHWC_N)); - for (int i = 0; i < kBlockSizesSize; i++) { - in_shape_.emplace_back(input_shape.at(i + 1)); - padded_in_shape_.emplace_back(input_shape.at(i + 1) + (paddings_.at(2 * i) + paddings_.at(2 * i + 1))); - paddings_.emplace_back(paddings_.at(2 * i)); - paddings_.emplace_back(paddings_.at(2 * i + 1)); - if (paddings_.back() % block_sizes_.at(i)) { - MS_LOG(ERROR) << "Padded shape does not divide block size " << block_sizes_.at(i); - return 1; - } - } - in_shape_.emplace_back(input_shape.at(NHWC_C)); - padded_in_shape_.emplace_back(input_shape.at(NHWC_C)); - - std::vector output_shape(input_shape.size()); - output_shape[NHWC_N] = input_shape[NHWC_N] * (block_sizes_[NHWC_N] * block_sizes_[NHWC_H]); - output_shape[NHWC_H] = input_shape[NHWC_H] / block_sizes_[NHWC_N]; - output_shape[NHWC_W] = input_shape[NHWC_W] / block_sizes_[NHWC_H]; - output_shape[NHWC_C] = input_shape[NHWC_C]; - outputs[0]->set_shape(output_shape); - outputs[0]->set_data_type(input->data_type()); - return 0; -} -} // namespace mindspore diff --git 
a/mindspore/lite/c_ops/space_to_depth.cc b/mindspore/lite/c_ops/space_to_depth.cc deleted file mode 100644 index 7e65b0c569..0000000000 --- a/mindspore/lite/c_ops/space_to_depth.cc +++ /dev/null @@ -1,73 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/space_to_depth.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -int SpaceToDepth::GetBlockSize() const { return this->primitive->value.AsSpaceToDepth()->blockSize; } -int SpaceToDepth::GetFormat() const { return this->primitive->value.AsSpaceToDepth()->format; } - -void SpaceToDepth::SetBlockSize(int block_size) { this->primitive->value.AsSpaceToDepth()->blockSize = block_size; } -void SpaceToDepth::SetFormat(int format) { this->primitive->value.AsSpaceToDepth()->format = format; } - -#else - -int SpaceToDepth::GetBlockSize() const { return this->primitive->value_as_SpaceToDepth()->blockSize(); } -int SpaceToDepth::GetFormat() const { return this->primitive->value_as_SpaceToDepth()->format(); } - -void SpaceToDepth::SetBlockSize(int block_size) {} -void SpaceToDepth::SetFormat(int format) {} -#endif -namespace { -constexpr int kSpaceToDepthOutputNum = 1; -constexpr int kSpaceToDepthInputNum = 1; -} // namespace - -int SpaceToDepth::InferShape(std::vector inputs, std::vector outputs) { - MS_ASSERT(this->primitive != nullptr); - if (outputs.size() != kSpaceToDepthOutputNum || inputs.size() != kSpaceToDepthInputNum) 
{ - MS_LOG(ERROR) << "Invalid output/input size! output size: " << outputs.size() << ",input size: " << inputs.size(); - return 1; - } - - auto input = inputs.at(0); - if (input->GetFormat() != schema::Format_NHWC) { - MS_LOG(ERROR) << "space_to_depth only support NHWC now!"; - return 1; - } - auto input_shape = input->shape(); - if (input_shape.size() != kDimension_4d) { - MS_LOG(ERROR) << "input shape dimension size should == " << kDimension_4d; - return 1; - } - - int32_t block_size = GetBlockSize(); - if (input_shape[NHWC_C] % (block_size * block_size) != 0 || input_shape[NHWC_C] == 0) { - MS_LOG(ERROR) << "input dimension c size " << input_shape[NHWC_C] << " should be mulitple of block_size(" - << block_size << ") * block_size)!"; - return 1; - } - std::vector output_shape(input_shape.size()); - output_shape[NHWC_N] = input_shape[NHWC_N]; - output_shape[NHWC_H] = input_shape[NHWC_H] / block_size; - output_shape[NHWC_W] = input_shape[NHWC_W] / block_size; - output_shape[NHWC_C] = input_shape[NHWC_C] * (block_size * block_size); - outputs[0]->set_shape(output_shape); - outputs[0]->set_data_type(input->data_type()); - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/split.cc b/mindspore/lite/c_ops/split.cc deleted file mode 100644 index 5f59a80967..0000000000 --- a/mindspore/lite/c_ops/split.cc +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/split.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -int Split::GetNumberSplit() const { return this->primitive->value.AsSplit()->numberSplit; } -std::vector Split::GetSizeSplits() const { return this->primitive->value.AsSplit()->sizeSplits; } -int Split::GetSplitDim() const { return this->primitive->value.AsSplit()->splitDim; } - -void Split::SetNumberSplit(int number_split) { this->primitive->value.AsSplit()->numberSplit = number_split; } -void Split::SetSizeSplits(const std::vector &size_splits) { - this->primitive->value.AsSplit()->sizeSplits = size_splits; -} -void Split::SetSplitDim(int split_dim) { this->primitive->value.AsSplit()->splitDim = split_dim; } - -#else - -int Split::GetNumberSplit() const { return this->primitive->value_as_Split()->numberSplit(); } -std::vector Split::GetSizeSplits() const { - auto fb_vector = this->primitive->value_as_Split()->sizeSplits(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} -int Split::GetSplitDim() const { return this->primitive->value_as_Split()->splitDim(); } - -void Split::SetNumberSplit(int number_split) {} -void Split::SetSizeSplits(const std::vector &size_splits) {} -void Split::SetSplitDim(int split_dim) {} -#endif -namespace { -constexpr int kSplitInputNum = 1; -} // namespace -int Split::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - - MS_ASSERT(spilt_prim != nullptr); - if (inputs_.size() != kSplitInputNum) { - MS_LOG(ERROR) << "inputs number is not equal to " << kSplitInputNum; - return 1; - } - auto output = outputs_.front(); - if (output == nullptr) { - MS_LOG(ERROR) << "output null pointer dereferencing."; - return 1; - } - int number_split = GetNumberSplit(); - if (outputs_.size() != number_split) { - MS_LOG(ERROR) << "outputs number is not equal to " << number_split; - return 1; - } - int split_dim = GetSplitDim(); - std::vector 
input_shape = input->shape(); - std::vector size_split; - size_split.insert(size_split.begin(), GetSizeSplits().begin(), GetSizeSplits().end()); - - for (int i = 0; i < number_split; ++i) { - std::vector output_shape; - output_shape.insert(output_shape.begin(), input_shape.begin(), input_shape.end()); - auto split_dim_i = size_split.empty() ? input_shape[split_dim] / number_split : size_split[i]; - output_shape[split_dim] = split_dim_i; - outputs_[i]->set_shape(output_shape); - outputs_[i]->set_data_type(input->data_type()); - outputs_[i]->SetFormat(input->GetFormat()); - } - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/squeeze.cc b/mindspore/lite/c_ops/squeeze.cc deleted file mode 100644 index bced5e173c..0000000000 --- a/mindspore/lite/c_ops/squeeze.cc +++ /dev/null @@ -1,84 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/squeeze.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -std::vector Squeeze::GetAxis() const { return this->primitive->value.AsSqueeze()->axis; } - -void Squeeze::SetAxis(const std::vector &axis) { this->primitive->value.AsSqueeze()->axis = axis; } - -#else - -std::vector Squeeze::GetAxis() const { - auto fb_vector = this->primitive->value_as_Squeeze()->axis(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} - -void Squeeze::SetAxis(const std::vector &axis) {} -#endif -namespace { -constexpr int kSqueezeInputNum = 1; -constexpr int kSqueezeOutputNum = 1; -} // namespace -int Squeeze::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - if (kSqueezeInputNum != inputs_.size()) { - MS_LOG(ERROR) << "Add should has " << kSqueezeInputNum << " inputs"; - return -1; - } - if (kSqueezeOutputNum != outputs_.size()) { - MS_LOG(ERROR) << "Add should has " << kSqueezeOutputNum << " outputs"; - return -1; - } - auto *in_tensor = inputs_.front(); - auto in_shape = in_tensor->shape(); - std::vector out_shape; - - // todo: getAxis - auto axis = GetAxis(); - std::vector axes_; - for (auto iter = axis.begin(); iter != axis.end(); iter++) { - axes_.push_back(*iter); - } - - if (axes_.size() == 0) { - for (int i = 0; i < in_shape.size(); i++) { - if (in_shape[i] != 1) { - out_shape.push_back(in_shape[i]); - } - } - } else { - int axisIdx = 0; - for (int i = 0; i < in_shape.size(); i++) { - if (axisIdx < axes_.size() && axes_[axisIdx] == i) { - MS_ASSERT(in_shape[i] == 1); - axisIdx++; - continue; - } else { - out_shape.push_back(in_shape[i]); - } - } - } - - outputs_.front()->set_shape(out_shape); - outputs_.front()->set_data_type(in_tensor->data_type()); - outputs_.front()->SetFormat(in_tensor->GetFormat()); - - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/stack.cc b/mindspore/lite/c_ops/stack.cc deleted file mode 100644 index 417d368588..0000000000 --- 
a/mindspore/lite/c_ops/stack.cc +++ /dev/null @@ -1,93 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/stack.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -int Stack::GetAxis() const { return this->primitive->value.AsStack()->axis; } -int Stack::GetN() const { return this->primitive->value.AsStack()->n; } -std::vector Stack::GetIsScale() const { return this->primitive->value.AsStack()->isScale; } - -void Stack::SetAxis(int axis) { this->primitive->value.AsStack()->axis = axis; } -void Stack::SetN(int n) { this->primitive->value.AsStack()->n = n; } -void Stack::SetIsScale(const std::vector &is_scale) { this->primitive->value.AsStack()->isScale = is_scale; } - -#else - -int Stack::GetAxis() const { return this->primitive->value_as_Stack()->axis(); } -int Stack::GetN() const { return this->primitive->value_as_Stack()->n(); } -std::vector Stack::GetIsScale() const { - auto fb_vector = this->primitive->value_as_Stack()->isScale(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} - -void Stack::SetAxis(int axis) {} -void Stack::SetN(int n) {} -void Stack::SetIsScale(const std::vector &is_scale) {} -#endif -namespace { -constexpr int kStackOutputNum = 1; -constexpr int kStackMinInputNum = 2; -} // namespace - -int Stack::InferShape(std::vector inputs, std::vector outputs) { - MS_ASSERT(this->primitive != nullptr); - if (outputs.size() != 
kStackOutputNum) { - MS_LOG(ERROR) << "Invalid output size:" << outputs.size(); - return 1; - } - if (inputs.size() < kStackMinInputNum) { - MS_LOG(ERROR) << "Invalid input size " << inputs.size(); - return 1; - } - auto input = inputs.at(0); - auto input_shape = input->shape(); - - std::vector output_shape = input_shape; - int axis = GetAxis() < 0 ? GetAxis() + input_shape.size() : GetAxis(); - if (axis < 0 || axis > input_shape.size()) { - MS_LOG(ERROR) << "Invalid axis " << GetAxis(); - return 1; - } - schema::Format input0_format = input->GetFormat(); - for (size_t i = 1; i < inputs.size(); ++i) { - if (inputs[i]->GetFormat() != input0_format) { - MS_LOG(ERROR) << "All inputs should have the same format!"; - return 1; - } - - auto input_shape_tmp = inputs[i]->shape(); - if (input_shape_tmp.size() != input_shape.size()) { - MS_LOG(ERROR) << "All input shape size should be the same!"; - return 1; - } - for (size_t j = 0; j < input_shape.size(); ++j) { - if (input_shape_tmp[j] != input_shape[j]) { - MS_LOG(ERROR) << "All input shape should be the same!"; - return 1; - } - } - } - - output_shape.insert(output_shape.begin() + axis, inputs.size()); - outputs[0]->set_shape(output_shape); - outputs[0]->set_data_type(input->data_type()); - outputs[0]->SetFormat(input->GetFormat()); - - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/strided_slice.cc b/mindspore/lite/c_ops/strided_slice.cc deleted file mode 100644 index 112f037782..0000000000 --- a/mindspore/lite/c_ops/strided_slice.cc +++ /dev/null @@ -1,221 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/strided_slice.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -int StridedSlice::GetBeginMask() const { return this->primitive->value.AsStridedSlice()->beginMask; } -int StridedSlice::GetEndMask() const { return this->primitive->value.AsStridedSlice()->endMask; } -int StridedSlice::GetEllipsisMask() const { return this->primitive->value.AsStridedSlice()->ellipsisMask; } -int StridedSlice::GetNewAxisMask() const { return this->primitive->value.AsStridedSlice()->newAxisMask; } -int StridedSlice::GetShrinkAxisMask() const { return this->primitive->value.AsStridedSlice()->shrinkAxisMask; } -std::vector StridedSlice::GetBegin() const { return this->primitive->value.AsStridedSlice()->begin; } -std::vector StridedSlice::GetEnd() const { return this->primitive->value.AsStridedSlice()->end; } -std::vector StridedSlice::GetStride() const { return this->primitive->value.AsStridedSlice()->stride; } -std::vector StridedSlice::GetIsScale() const { return this->primitive->value.AsStridedSlice()->isScale; } - -void StridedSlice::SetBeginMask(int begin_mask) { this->primitive->value.AsStridedSlice()->beginMask = begin_mask; } -void StridedSlice::SetEndMask(int end_mask) { this->primitive->value.AsStridedSlice()->endMask = end_mask; } -void StridedSlice::SetEllipsisMask(int ellipsis_mask) { - this->primitive->value.AsStridedSlice()->ellipsisMask = ellipsis_mask; -} -void StridedSlice::SetNewAxisMask(int new_axis_mask) { - this->primitive->value.AsStridedSlice()->newAxisMask = new_axis_mask; -} -void StridedSlice::SetShrinkAxisMask(int 
shrink_axis_mask) { - this->primitive->value.AsStridedSlice()->shrinkAxisMask = shrink_axis_mask; -} -void StridedSlice::SetBegin(const std::vector &begin) { this->primitive->value.AsStridedSlice()->begin = begin; } -void StridedSlice::SetEnd(const std::vector &end) { this->primitive->value.AsStridedSlice()->end = end; } -void StridedSlice::SetStride(const std::vector &stride) { - this->primitive->value.AsStridedSlice()->stride = stride; -} -void StridedSlice::SetIsScale(const std::vector &is_scale) { - this->primitive->value.AsStridedSlice()->isScale = is_scale; -} - -#else - -int StridedSlice::GetBeginMask() const { return this->primitive->value_as_StridedSlice()->beginMask(); } -int StridedSlice::GetEndMask() const { return this->primitive->value_as_StridedSlice()->endMask(); } -int StridedSlice::GetEllipsisMask() const { return this->primitive->value_as_StridedSlice()->ellipsisMask(); } -int StridedSlice::GetNewAxisMask() const { return this->primitive->value_as_StridedSlice()->newAxisMask(); } -int StridedSlice::GetShrinkAxisMask() const { return this->primitive->value_as_StridedSlice()->shrinkAxisMask(); } -std::vector StridedSlice::GetBegin() const { - auto fb_vector = this->primitive->value_as_StridedSlice()->begin(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} -std::vector StridedSlice::GetEnd() const { - auto fb_vector = this->primitive->value_as_StridedSlice()->end(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} -std::vector StridedSlice::GetStride() const { - auto fb_vector = this->primitive->value_as_StridedSlice()->stride(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} -std::vector StridedSlice::GetIsScale() const { - auto fb_vector = this->primitive->value_as_StridedSlice()->isScale(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} - -void StridedSlice::SetBeginMask(int begin_mask) {} -void StridedSlice::SetEndMask(int end_mask) {} -void StridedSlice::SetEllipsisMask(int 
ellipsis_mask) {} -void StridedSlice::SetNewAxisMask(int new_axis_mask) {} -void StridedSlice::SetShrinkAxisMask(int shrink_axis_mask) {} -void StridedSlice::SetBegin(const std::vector &begin) {} -void StridedSlice::SetEnd(const std::vector &end) {} -void StridedSlice::SetStride(const std::vector &stride) {} -void StridedSlice::SetIsScale(const std::vector &is_scale) {} -#endif -namespace { -constexpr int kStridedSliceOutputNum = 1; -constexpr int kStridedSliceInputNum = 1; -} // namespace - -void StridedSlice::ApplyNewAxisMask() { - for (int i = 0; i < new_axis_mask_.size(); i++) { - if (new_axis_mask_.at(i)) { - ndim_ += 1; - in_shape_.insert(in_shape_.begin() + i, 1); - begins_.at(i) = 0; - ends_.at(i) = 1; - strides_.at(i) = 1; - - begins_.emplace_back(0); - ends_.emplace_back(in_shape_.at(ndim_ - 1)); - strides_.emplace_back(1); - - begins_mask_.at(i) = false; - ends_mask_.at(i) = false; - ellipsis_mask_.at(i) = false; - shrink_axis_mask_.at(i) = false; - } - } -} - -std::vector StridedSlice::ApplyShrinkMask(std::vector out_shape) { - auto old_out_shape = out_shape; - out_shape.clear(); - for (int i = 0; i < shrink_axis_mask_.size(); i++) { - if (shrink_axis_mask_.at(i)) { - ends_.at(i) = begins_.at(i) + 1; - strides_.at(i) = 1; - } else { - out_shape.emplace_back(old_out_shape.at(i)); - } - } - for (int i = shrink_axis_mask_.size(); i < old_out_shape.size(); i++) { - out_shape.emplace_back(old_out_shape.at(i)); - } - return out_shape; -} - -/*only one bit will be used if multiple bits are true.*/ -void StridedSlice::ApplyEllipsisMask() { - for (int i = 0; i < ellipsis_mask_.size(); i++) { - if (ellipsis_mask_.at(i)) { - begins_.at(i) = 0; - ends_.at(i) = in_shape_.at(i); - break; - } - } -} - -void StridedSlice::ApplyBeginMask() { - for (int i = 0; i < ndim_; i++) { - if (begins_mask_.at(i)) { - begins_.at(i) = 0; - } - } -} - -void StridedSlice::ApplyEndMask() { - for (int i = 0; i < ndim_; i++) { - if (ends_mask_.at(i)) { - ends_.at(i) = in_shape_.at(i); - 
} - } -} - -int StridedSlice::InferShape(std::vector inputs, std::vector outputs) { - MS_ASSERT(this->primitive != nullptr); - if (outputs.size() != kStridedSliceOutputNum) { - MS_LOG(ERROR) << "Invalid output size:" << outputs.size(); - return 1; - } - if (inputs.size() != kStridedSliceInputNum) { - MS_LOG(ERROR) << "Invalid input size " << inputs.size(); - return 1; - } - auto input = inputs.at(0); - MS_ASSERT(input != nullptr); - auto input_shape = input->shape(); - std::vector output_shape; - ndim_ = static_cast(GetBegin().size()); - - MS_ASSERT(ndim_ == static_cast(strided_slice_prim->end()->size())); - MS_ASSERT(ndim_ == static_cast(strided_slice_prim->stride()->size())); - MS_ASSERT(ndim_ == static_cast(input_shape.size())); - - for (int i = 0; i < ndim_; i++) { - in_shape_.emplace_back(input_shape.at(i)); - begins_.emplace_back((GetBegin())[i]); - ends_.emplace_back((GetEnd())[i]); - strides_.emplace_back((GetStride())[i]); - } - - // set all mask to original input shape - begins_mask_.resize(ndim_); - ends_mask_.resize(ndim_); - ellipsis_mask_.resize(ndim_); - new_axis_mask_.resize(ndim_); - shrink_axis_mask_.resize(ndim_); - - // convert bit to vector - for (int i = 0; i < ndim_; i++) { - begins_mask_.at(i) = static_cast(GetBeginMask()) & (1 << i); - ends_mask_.at(i) = static_cast(GetEndMask()) & (1 << i); - ellipsis_mask_.at(i) = static_cast(GetEllipsisMask()) & (1 << i); - new_axis_mask_.at(i) = static_cast(GetNewAxisMask()) & (1 << i); - shrink_axis_mask_.at(i) = static_cast(GetShrinkAxisMask()) & (1 << i); - } - - ApplyNewAxisMask(); - ApplyBeginMask(); - ApplyEndMask(); - ApplyEllipsisMask(); - - output_shape.clear(); - output_shape.resize(in_shape_.size()); - for (int i = 0; i < in_shape_.size(); i++) { - if (i < ndim_ && new_axis_mask_.at(i)) { - output_shape.at(i) = 1; - } else { - output_shape.at(i) = (ends_.at(i) - begins_.at(i)) / strides_.at(i); - } - } - - output_shape = ApplyShrinkMask(output_shape); - - 
outputs.front()->set_shape(output_shape); - outputs.front()->set_data_type(input->data_type()); - outputs[0]->SetFormat(input->GetFormat()); - - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/tile.cc b/mindspore/lite/c_ops/tile.cc deleted file mode 100644 index 5cb201497f..0000000000 --- a/mindspore/lite/c_ops/tile.cc +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/tile.h" -#include - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -std::vector Tile::GetMultiples() const { return this->primitive->value.AsTile()->multiples; } - -void Tile::SetMultiples(const std::vector &multiples) { this->primitive->value.AsTile()->multiples = multiples; } - -#else - -std::vector Tile::GetMultiples() const { - auto fb_vector = this->primitive->value_as_Tile()->multiples(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} - -void Tile::SetMultiples(const std::vector &multiples) {} -#endif -int Tile::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - - std::vector out_shape; - std::vector multiples; - std::copy(GetMultiples().begin(), GetMultiples().end(), std::back_inserter(multiples)); - for (size_t i = 0; i < input->shape().size(); ++i) { - int 
tmp = input->shape()[i] * multiples[i]; - out_shape.push_back(tmp); - } - - output->SetFormat(input->GetFormat()); - output->set_shape(out_shape); - output->set_data_type(input->data_type()); - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/topk.cc b/mindspore/lite/c_ops/topk.cc deleted file mode 100644 index 44fe02acac..0000000000 --- a/mindspore/lite/c_ops/topk.cc +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/topk.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -int TopK::GetK() const { return this->primitive->value.AsTopK()->k; } -bool TopK::GetSorted() const { return this->primitive->value.AsTopK()->sorted; } - -void TopK::SetK(int k) { this->primitive->value.AsTopK()->k = k; } -void TopK::SetSorted(bool sorted) { this->primitive->value.AsTopK()->sorted = sorted; } - -#else - -int TopK::GetK() const { return this->primitive->value_as_TopK()->k(); } -bool TopK::GetSorted() const { return this->primitive->value_as_TopK()->sorted(); } - -void TopK::SetK(int k) {} -void TopK::SetSorted(bool sorted) {} -#endif -int TopK::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - if (inputs_.size() != kSingleNum || outputs_.size() != kDoubleNum) { - MS_LOG(ERROR) << "input size: " << inputs_.size() << ", output size: " << outputs_.size(); - return 1; - } - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output0 = outputs_.front(); - MS_ASSERT(output0 != nullptr); - auto output1 = outputs_.at(1); - MS_ASSERT(output1 != nullptr); - - MS_ASSERT(topk_prim != nullptr); - - auto out_shape = input->shape(); - out_shape[out_shape.size() - 1] = GetK(); - - output0->set_shape(out_shape); - output0->set_data_type(input->data_type()); - output0->SetFormat(input->GetFormat()); - - output1->set_shape(out_shape); - output1->set_data_type(kNumberTypeInt32); - output1->SetFormat(input->GetFormat()); - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/transpose.cc b/mindspore/lite/c_ops/transpose.cc deleted file mode 100644 index a7d1a028bc..0000000000 --- a/mindspore/lite/c_ops/transpose.cc +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/transpose.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -std::vector Transpose::GetPerm() const { return this->primitive->value.AsTranspose()->perm; } -bool Transpose::GetConjugate() const { return this->primitive->value.AsTranspose()->conjugate; } - -void Transpose::SetPerm(const std::vector &perm) { this->primitive->value.AsTranspose()->perm = perm; } -void Transpose::SetConjugate(bool conjugate) { this->primitive->value.AsTranspose()->conjugate = conjugate; } - -#else - -std::vector Transpose::GetPerm() const { - auto fb_vector = this->primitive->value_as_Transpose()->perm(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} -bool Transpose::GetConjugate() const { return this->primitive->value_as_Transpose()->conjugate(); } - -void Transpose::SetPerm(const std::vector &perm) {} -void Transpose::SetConjugate(bool conjugate) {} -#endif -int Transpose::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - - MS_ASSERT(inputs_.size() == kSingleNum); - MS_ASSERT(outputs_.size() == kSingleNum); - - int conjugate = GetConjugate(); - if (conjugate) { - MS_LOG(ERROR) << "Transpose conjugate is not support currently"; - return 1; - } - std::vector perm; - perm.insert(perm.begin(), GetPerm().begin(), GetPerm().end()); - - std::vector in_shape = input->shape(); - std::vector out_shape; - out_shape.resize(perm.size()); - for (int i = 0; i < 
perm.size(); ++i) { - out_shape[i] = in_shape[perm[i]]; - } - - output->set_shape(out_shape); - output->set_data_type(input->data_type()); - output->SetFormat(input->GetFormat()); - - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/unique.cc b/mindspore/lite/c_ops/unique.cc deleted file mode 100644 index 58cc3e7135..0000000000 --- a/mindspore/lite/c_ops/unique.cc +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/unique.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -int Unique::GetOutType() const { return this->primitive->value.AsUnique()->outType; } - -void Unique::SetOutType(int out_type) { this->primitive->value.AsUnique()->outType = out_type; } - -#else - -int Unique::GetOutType() const { return this->primitive->value_as_Unique()->outType(); } - -void Unique::SetOutType(int out_type) {} -#endif -int Unique::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - if (inputs_.size() != kSingleNum || outputs_.size() != kDoubleNum) { - MS_LOG(ERROR) << "input size: " << inputs_.size() << ", output size: " << outputs_.size(); - return 1; - } - auto &input = inputs_.at(0); - MS_ASSERT(input != nullptr); - auto &output0 = outputs_.at(0); - MS_ASSERT(output0 != nullptr); - auto &output1 = outputs_.at(1); - MS_ASSERT(output1 != nullptr); - output0->set_shape(input->shape()); - output0->set_data_type(input->data_type()); - output1->set_shape(input->shape()); - output1->set_data_type(kNumberTypeInt32); - output1->SetFormat(input->GetFormat()); - output0->SetFormat(input->GetFormat()); - - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/unsqueeze.cc b/mindspore/lite/c_ops/unsqueeze.cc deleted file mode 100644 index 27ad72490c..0000000000 --- a/mindspore/lite/c_ops/unsqueeze.cc +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/unsqueeze.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -std::vector Unsqueeze::GetAxis() const { return this->primitive->value.AsUnsqueeze()->axis; } - -void Unsqueeze::SetAxis(const std::vector &axis) { this->primitive->value.AsUnsqueeze()->axis = axis; } - -#else -bool predicate(int n) { return n != 1; } -std::vector Unsqueeze::GetAxis() const { - auto fb_vector = this->primitive->value_as_Unsqueeze()->axis(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} - -void Unsqueeze::SetAxis(const std::vector &axis) {} -#endif -int Unsqueeze::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - if (inputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "input size is invalid"; - } - if (outputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "output size is invalid"; - } - - auto dims = GetAxis().data(); - auto in_shape = input->shape(); - auto in_rank = in_shape.size(); - auto dim_rank = GetAxis().size(); - std::vector out_shape; - - if (dim_rank == 0) { - std::copy_if(in_shape.begin(), in_shape.end(), out_shape.begin(), [](int n) -> bool { return n != 1; }); - } else { - auto sz = in_rank + dim_rank; - int in_itr = 0; - int ax_itr = 0; - for (int i = 0; i < sz; i++) { - if (ax_itr < dim_rank && dims[ax_itr] == i) { - out_shape.emplace_back(1); - ax_itr++; - } else if (ax_itr < dim_rank && dims[ax_itr] + sz == i) { - out_shape.emplace_back(1); - ax_itr++; - } else { - if (in_shape[in_itr] > 1) { - out_shape.emplace_back(in_shape[in_itr]); - } - in_itr++; - } - } - } - - output->SetFormat(input->GetFormat()); - output->set_shape(out_shape); - output->set_data_type(input->data_type()); - return 0; -} -} // namespace mindspore diff --git 
a/mindspore/lite/c_ops/unstack.cc b/mindspore/lite/c_ops/unstack.cc deleted file mode 100644 index 337586b173..0000000000 --- a/mindspore/lite/c_ops/unstack.cc +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "c_ops/unstack.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -int Unstack::GetNum() const { return this->primitive->value.AsUnstack()->num; } -int Unstack::GetAxis() const { return this->primitive->value.AsUnstack()->axis; } - -void Unstack::SetNum(int num) { this->primitive->value.AsUnstack()->num = num; } -void Unstack::SetAxis(int axis) { this->primitive->value.AsUnstack()->axis = axis; } - -#else - -int Unstack::GetNum() const { return this->primitive->value_as_Unstack()->num(); } -int Unstack::GetAxis() const { return this->primitive->value_as_Unstack()->axis(); } - -void Unstack::SetNum(int num) {} -void Unstack::SetAxis(int axis) {} -#endif -int Unstack::InferShape(std::vector inputs, std::vector outputs) { - auto input = inputs.at(0); - MS_ASSERT(input != nullptr); - auto input_shape = input->shape(); - int axis = GetAxis() < 0 ? 
GetAxis() + input_shape.size() : GetAxis(); - if (axis < 0 || axis >= input_shape.size()) { - MS_LOG(ERROR) << "Invalid axis " << GetAxis(); - return 1; - } - - std::vector output_shape; - for (size_t i = 0; i < input_shape.size(); ++i) { - if (i != axis) { - output_shape.push_back(input_shape.at(i)); - } - } - for (auto &out : outputs) { - MS_ASSERT(out != nullptr); - out->set_shape(output_shape); - out->set_data_type(input->data_type()); - out->SetFormat(input->GetFormat()); - } - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/c_ops/where.cc b/mindspore/lite/c_ops/where.cc deleted file mode 100644 index 6b9de95b02..0000000000 --- a/mindspore/lite/c_ops/where.cc +++ /dev/null @@ -1,93 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "c_ops/where.h" - -namespace mindspore { -#ifdef PRIMITIVE_WRITEABLE -std::vector Where::GetCondition() const { return this->primitive->value.AsWhere()->condition; } - -void Where::SetCondition(const std::vector &condition) { - this->primitive->value.AsWhere()->condition = condition; -} - -#else - -std::vector Where::GetCondition() const { - auto fb_vector = this->primitive->value_as_Where()->condition(); - return std::vector(fb_vector->begin(), fb_vector->end()); -} - -void Where::SetCondition(const std::vector &condition) {} -#endif -int Where::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - if (inputs_.size() != kSingleNum || outputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "where input or output number invalid, Input size:" << inputs_.size() - << ", output size: " << outputs_.size(); - return 1; - } - if (inputs_.size() < 3) { - MS_LOG(ERROR) << "Input shape tensors should b"; - return 1; - } - auto input0 = inputs_.at(0); - auto input1 = inputs_.at(1); - auto input2 = inputs_.at(2); - int num = input0->ElementsNum(); - int num1 = input1->ElementsNum(); - int num2 = input2->ElementsNum(); - int nummax = num > num1 ? num : (num1 > num2 ? 
num1 : num2); - - auto shape_tmp = inputs_.at(0)->shape(); - auto shape_tmp1 = inputs_.at(1)->shape(); - auto shape_tmp2 = inputs_.at(2)->shape(); - int axisout = 0; - int temp = 0; - for (int j = 0; j < shape_tmp.size(); j++) { - if (shape_tmp[j] == shape_tmp1[j] && shape_tmp[j] != shape_tmp2[j]) { - axisout = j; - break; - } - if (shape_tmp[j] == shape_tmp2[j] && shape_tmp[j] != shape_tmp1[j]) { - axisout = j; - break; - } - if (shape_tmp1[j] == shape_tmp2[j] && shape_tmp[j] != shape_tmp1[j]) { - axisout = j; - break; - } - temp += 1; - if (temp == shape_tmp.size()) { - outputs_[0]->set_shape(shape_tmp); - output->set_data_type(input->data_type()); - return 0; - } - } - - auto output_shape = shape_tmp; - output_shape[axisout] = nummax; - outputs_[0]->set_shape(output_shape); - output->set_data_type(input->data_type()); - output->SetFormat(input->GetFormat()); - - return 0; -} -} // namespace mindspore diff --git a/mindspore/lite/include/model.h b/mindspore/lite/include/model.h index 67fdbb4ff0..daae904b80 100644 --- a/mindspore/lite/include/model.h +++ b/mindspore/lite/include/model.h @@ -21,6 +21,7 @@ #include #include #include "schema/model_generated.h" +#include "src/ops/primitive_c.h" namespace mindspore { #define MS_API __attribute__((visibility("default"))) @@ -34,7 +35,7 @@ class ModelImpl; /// \brief Primitive defined as prototype of operator. /// /// \note List public class and interface for reference. -class Primitive; +class PrimitiveC; /// \brief Model defined model in MindSpore Lite for managing graph. class MS_API Model { @@ -60,7 +61,7 @@ class MS_API Model { /// \param[in] name Define name of primitive to be returned. /// /// \return the pointer of MindSpore Lite Primitive. - lite::Primitive *GetOp(const std::string &name) const; + PrimitiveC *GetOp(const std::string &name) const; /// \brief Get graph defined in flatbuffers. /// @@ -97,7 +98,7 @@ class MS_API ModelBuilder { /// \param[in] inputs Define input edge of primitive to be added. 
/// /// \return ID of the added primitive. - virtual std::string AddOp(const lite::Primitive &op, const std::vector &inputs) = 0; + virtual std::string AddOp(const PrimitiveC &op, const std::vector &inputs) = 0; /// \brief Finish constructing the model. /// diff --git a/mindspore/lite/src/CMakeLists.txt b/mindspore/lite/src/CMakeLists.txt index a1d5c6152f..b3e7f96f70 100644 --- a/mindspore/lite/src/CMakeLists.txt +++ b/mindspore/lite/src/CMakeLists.txt @@ -1,64 +1,64 @@ set(LITE_SRC - ${CMAKE_CURRENT_SOURCE_DIR}/common/graph_util.cc - ${CMAKE_CURRENT_SOURCE_DIR}/common/ms_tensor_utils.cc - ${CMAKE_CURRENT_SOURCE_DIR}/runtime/allocator.cc - ${CMAKE_CURRENT_SOURCE_DIR}/runtime/runtime_api.cc - ${CMAKE_CURRENT_SOURCE_DIR}/runtime/thread_pool.cc - ${CMAKE_CURRENT_SOURCE_DIR}/runtime/workspace_pool.cc - ${CMAKE_CURRENT_SOURCE_DIR}/ir/tensor.cc - ${CMAKE_CURRENT_SOURCE_DIR}/context.cc - ${CMAKE_CURRENT_SOURCE_DIR}/executor.cc - ${CMAKE_CURRENT_SOURCE_DIR}/kernel_registry.cc - ${CMAKE_CURRENT_SOURCE_DIR}/lite_kernel.cc - ${CMAKE_CURRENT_SOURCE_DIR}/populate_parameter.cc - ${CMAKE_CURRENT_SOURCE_DIR}/scheduler.cc - ) + ${CMAKE_CURRENT_SOURCE_DIR}/common/graph_util.cc + ${CMAKE_CURRENT_SOURCE_DIR}/common/ms_tensor_utils.cc + ${CMAKE_CURRENT_SOURCE_DIR}/runtime/allocator.cc + ${CMAKE_CURRENT_SOURCE_DIR}/runtime/runtime_api.cc + ${CMAKE_CURRENT_SOURCE_DIR}/runtime/thread_pool.cc + ${CMAKE_CURRENT_SOURCE_DIR}/runtime/workspace_pool.cc + ${CMAKE_CURRENT_SOURCE_DIR}/ir/tensor.cc + ${CMAKE_CURRENT_SOURCE_DIR}/context.cc + ${CMAKE_CURRENT_SOURCE_DIR}/executor.cc + ${CMAKE_CURRENT_SOURCE_DIR}/kernel_registry.cc + ${CMAKE_CURRENT_SOURCE_DIR}/lite_kernel.cc + ${CMAKE_CURRENT_SOURCE_DIR}/populate_parameter.cc + ${CMAKE_CURRENT_SOURCE_DIR}/scheduler.cc + ) if (SUPPORT_GPU) list(APPEND LITE_SRC ${CMAKE_CURRENT_SOURCE_DIR}/runtime/kernel/opencl/subgraph_opencl_kernel.cc) list(APPEND LITE_SRC ${CMAKE_CURRENT_SOURCE_DIR}/runtime/kernel/opencl/utils.cc) -endif() +endif () set(LITE_SRC - 
${LITE_SRC} - ${CMAKE_CURRENT_SOURCE_DIR}/lite_session.cc - ${CMAKE_CURRENT_SOURCE_DIR}/model.cc - ) + ${LITE_SRC} + ${CMAKE_CURRENT_SOURCE_DIR}/lite_session.cc + ${CMAKE_CURRENT_SOURCE_DIR}/model.cc + ) if (SUPPORT_GPU) - set(LITE_SRC - ${LITE_SRC} - ${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_executor.cc - ${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_allocator.cc - ${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_runtime.cc - ${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_wrapper.cc - ) + set(LITE_SRC + ${LITE_SRC} + ${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_executor.cc + ${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_allocator.cc + ${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_runtime.cc + ${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_wrapper.cc + ) endif () set(ANF_SRC - ${ANF_SRC} - ${CMAKE_CURRENT_SOURCE_DIR}/ir/meta_tensor_extends.cc - ) + ${ANF_SRC} + ${CMAKE_CURRENT_SOURCE_DIR}/ir/meta_tensor_extends.cc + ) add_library(mindspore-lite SHARED ${LITE_SRC} ${ANF_SRC}) target_link_libraries(mindspore-lite - cpu_kernel_mid_ - ops_mid_ - ) + cpu_kernel_mid_ + c_ops_mid + ) add_subdirectory(runtime/kernel/arm) if (PLATFORM_ARM32 OR PLATFORM_ARM64) - target_link_libraries(mindspore-lite log) -endif() + target_link_libraries(mindspore-lite log) +endif () if (BUILD_MINDDATA) - target_link_libraries(mindspore-lite minddata-eager minddata-lite) + target_link_libraries(mindspore-lite minddata-eager minddata-lite) endif () add_subdirectory(ops) -if("${CMAKE_BUILD_TYPE}" STREQUAL "Release" AND (PLATFORM_ARM64 OR PLATFORM_ARM32)) -add_custom_command(TARGET mindspore-lite POST_BUILD - COMMAND ${ANDROID_NDK}/toolchains/aarch64-linux-android-4.9/prebuilt/linux-x86_64/aarch64-linux-android/bin/strip - ${TOP_DIR}/mindspore/lite/build/src/libmindspore-lite.so) -endif() +if ("${CMAKE_BUILD_TYPE}" STREQUAL "Release" AND (PLATFORM_ARM64 OR PLATFORM_ARM32)) + add_custom_command(TARGET mindspore-lite POST_BUILD + COMMAND 
${ANDROID_NDK}/toolchains/aarch64-linux-android-4.9/prebuilt/linux-x86_64/aarch64-linux-android/bin/strip + ${TOP_DIR}/mindspore/lite/build/src/libmindspore-lite.so) +endif () diff --git a/mindspore/lite/src/executor.cc b/mindspore/lite/src/executor.cc index bcd20cf07c..5314c368a4 100644 --- a/mindspore/lite/src/executor.cc +++ b/mindspore/lite/src/executor.cc @@ -49,7 +49,13 @@ int Executor::Run(std::vector &in_tensors, std::vectorname(); return ret; } - + MS_LOG(INFO) << "out_tensors"; + auto tensors = kernel->out_tensors(); + MS_LOG(INFO) << kernel->name(); + for (int i = 0; i < tensors.size(); ++i) { + auto tensor = tensors[i]; + MS_LOG(INFO) << tensor->ToString(); + } if (after != nullptr) { if (!after(PackToMSTensors(kernel->in_tensors()), PackToMSTensors(kernel->out_tensors()), {kernel->name(), kernel->type_str()})) { diff --git a/mindspore/lite/src/ir/primitive_value.cc b/mindspore/lite/src/ir/primitive_value.cc deleted file mode 100644 index ebd5d4d615..0000000000 --- a/mindspore/lite/src/ir/primitive_value.cc +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ir/primitive_value.h" - - diff --git a/mindspore/lite/src/ir/primitive_value.h b/mindspore/lite/src/ir/primitive_value.h deleted file mode 100644 index 66202d15e6..0000000000 --- a/mindspore/lite/src/ir/primitive_value.h +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_SRC_ANF_IMPORTER_PRIMITIVE_H_ -#define MINDSPORE_LITE_SRC_ANF_IMPORTER_PRIMITIVE_H_ - -#include "ir/value.h" -#include "src/ops/ops.h" - -namespace mindspore::lite { -class PrimitiveValue : public Value { - public: - explicit PrimitiveValue(const lite::Primitive *prim) : primitive(prim) {} - - const lite::Primitive *GetPrimitive() const { - return this->primitive; - } - MS_DECLARE_PARENT(PrimitiveValue, Value) - bool operator==(const Value &rhs) const override { - if (rhs.isa()) { - auto other_prim = static_cast(rhs); - return *this == other_prim; - } else { - return false; - } - } - - protected: - const lite::Primitive *primitive = nullptr; -}; -} // namespace mindspore::lite - -#endif // MINDSPORE_LITE_SRC_ANF_IMPORTER_PRIMITIVE_H_ - diff --git a/mindspore/lite/src/kernel_registry.cc b/mindspore/lite/src/kernel_registry.cc index f4fb136231..fa2ec89c0b 100644 --- a/mindspore/lite/src/kernel_registry.cc +++ b/mindspore/lite/src/kernel_registry.cc @@ -124,13 +124,13 @@ const kernel::KernelCreator *KernelRegistry::GetCreatorArrays() { return creator 
kernel::LiteKernel *KernelRegistry::GetKernel(const std::vector &in_tensors, const std::vector &out_tensors, - const lite::Primitive *primitive, const Context *ctx, + const PrimitiveC *primitive, const Context *ctx, const kernel::KernelKey &key) { MS_EXCEPTION_IF_NULL(primitive); MS_EXCEPTION_IF_NULL(ctx); auto parameter = kernel::PopulateParameter(primitive); if (parameter == nullptr) { - MS_LOG(ERROR) << "PopulateParameter return nullptr, type: " << schema::EnumNamePrimitiveType(primitive->Type()); + MS_LOG(ERROR) << "PopulateParameter return nullptr, type: " << primitive->Type(); return nullptr; } auto creator = GetCreator(key); diff --git a/mindspore/lite/src/kernel_registry.h b/mindspore/lite/src/kernel_registry.h index 2c1d608378..507138c65c 100644 --- a/mindspore/lite/src/kernel_registry.h +++ b/mindspore/lite/src/kernel_registry.h @@ -40,7 +40,7 @@ class KernelRegistry { kernel::KernelCreator creator); bool Merge(const std::unordered_map &newCreators); kernel::LiteKernel *GetKernel(const std::vector &in_tensors, - const std::vector &out_tensors, const lite::Primitive *primitive, + const std::vector &out_tensors, const PrimitiveC *primitive, const Context *ctx, const kernel::KernelKey &key); protected: diff --git a/mindspore/lite/src/lite_kernel.h b/mindspore/lite/src/lite_kernel.h index 784d423409..6fc6608177 100644 --- a/mindspore/lite/src/lite_kernel.h +++ b/mindspore/lite/src/lite_kernel.h @@ -24,8 +24,8 @@ #include "src/runtime/kernel/arm/nnacl/op_base.h" #include "include/context.h" #include "src/ir/tensor.h" -#include "src/ops/ops.h" #include "include/errorcode.h" +#include "src/ops/primitive_c.h" #ifdef ENABLE_FP16 using FLOAT_t = float16_t; @@ -59,7 +59,7 @@ class LiteKernel { LiteKernel() = default; LiteKernel(OpParameter *parameter, const std::vector &in_tensors, const std::vector &out_tensors, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : op_parameter_(parameter), 
in_tensors_(in_tensors), out_tensors_(out_tensors), @@ -81,7 +81,7 @@ class LiteKernel { virtual int Prepare() { if (!InferShapeDone()) { - (const_cast(primitive_))->InferShape(in_tensors_, out_tensors_); + (const_cast(primitive_))->InferShape(in_tensors_, out_tensors_); if (need_reinit_) { Init(); } @@ -154,7 +154,7 @@ class LiteKernel { void set_need_reinit() { need_reinit_ = true; } - const lite::Primitive *GetPrimitive() const { return primitive_; } + const mindspore::lite::PrimitiveC *GetPrimitive() const { return primitive_; } protected: bool InferShapeDone() { return !(primitive_ != nullptr && !primitive_->GetInferFlag()) && true; } @@ -162,7 +162,7 @@ class LiteKernel { KernelKey desc_; std::string name_; OpParameter *op_parameter_ = nullptr; - const lite::Primitive *primitive_ = nullptr; + const mindspore::lite::PrimitiveC *primitive_ = nullptr; const lite::Context *context_ = nullptr; // tensor will free in ~lite_session() std::vector in_tensors_; @@ -181,7 +181,7 @@ class SubGraphKernel : public LiteKernel { const std::vector &in_kernels, const std::vector &out_kernels, const std::vector &nodes, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(nullptr, inputs, outputs, ctx, primitive), nodes_(nodes) { in_kernels_ = in_kernels; out_kernels_ = out_kernels; @@ -198,7 +198,8 @@ class SubGraphKernel : public LiteKernel { typedef LiteKernel *(*KernelCreator)(const std::vector &inputs, const std::vector &outputs, OpParameter *parameter, - const lite::Context *ctx, const KernelKey &desc, const lite::Primitive *primitive); + const lite::Context *ctx, const KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive); class LiteKernelUtil { public: diff --git a/mindspore/lite/src/model.cc b/mindspore/lite/src/model.cc index 0fce835c43..a480e2408e 100644 --- a/mindspore/lite/src/model.cc +++ b/mindspore/lite/src/model.cc @@ -14,9 +14,101 @@ * limitations under the License. 
*/ +#include "src/ops/unique.h" +#include "src/ops/space_to_batch.h" +#include "src/ops/conv2d.h" +#include "src/ops/roi_pooling.h" +#include "src/ops/topk.h" +#include "src/ops/broadcast_to.h" +#include "src/ops/unsqueeze.h" +#include "src/ops/unstack.h" +#include "src/ops/depth_to_space.h" +#include "src/ops/batch_to_space.h" +#include "src/ops/prior_box.h" +#include "src/ops/lstm.h" +#include "src/ops/softmax.h" +#include "src/ops/activation.h" +#include "src/ops/deconv2d.h" +#include "src/ops/reduce.h" +#include "src/ops/pooling.h" +#include "src/ops/fused_batchnorm.h" +#include "src/ops/batch_norm.h" +#include "src/ops/power.h" +#include "src/ops/range.h" +#include "src/ops/add.h" +#include "src/ops/sub.h" +#include "src/ops/div.h" +#include "src/ops/bias_add.h" +#include "src/ops/expand_dims.h" +#include "src/ops/full_connection.h" +#include "src/ops/shape.h" +#include "src/ops/elu.h" +#include "src/ops/embedding_lookup.h" +#include "src/ops/quant_dtype_cast.h" +#include "src/ops/matmul.h" +#include "src/ops/resize.h" +#include "src/ops/tile.h" +#include "src/ops/one_hot.h" +#include "src/ops/space_to_depth.h" +#include "src/ops/split.h" +#include "src/ops/argmax.h" +#include "src/ops/argmin.h" +#include "src/ops/cast.h" +#include "src/ops/reshape.h" +#include "src/ops/scale.h" +#include "src/ops/concat.h" +#include "src/ops/nchw2nhwc.h" +#include "src/ops/slice.h" +#include "src/ops/squeeze.h" +#include "src/ops/flatten.h" +#include "src/ops/mean.h" +#include "src/ops/nhwc2nchw.h" +#include "src/ops/stack.h" +#include "src/ops/crop.h" +#include "src/ops/addn.h" +#include "src/ops/gather.h" +#include "src/ops/gather_nd.h" +#include "src/ops/local_response_normalization.h" +#include "src/ops/pad.h" +#include "src/ops/prelu.h" +#include "src/ops/caffe_p_relu.h" +#include "src/ops/reverse_sequence.h" +#include "src/ops/dedepthwise_conv2d.h" +#include "src/ops/depthwise_conv2d.h" +#include "src/ops/mul.h" +#include "src/ops/eltwise.h" +#include "src/ops/fill.h" 
+#include "src/ops/transpose.h" +#include "src/ops/log.h" +#include "src/ops/abs.h" +#include "src/ops/sin.h" +#include "src/ops/cos.h" +#include "src/ops/sqrt.h" +#include "src/ops/square.h" +#include "src/ops/exp.h" +#include "src/ops/rsqrt.h" +#include "src/ops/maximum.h" +#include "src/ops/minimum.h" +#include "src/ops/strided_slice.h" +#include "src/ops/reverse.h" +#include "src/ops/logical_and.h" +#include "src/ops/logical_or.h" +#include "src/ops/logical_not.h" +#include "src/ops/floor_div.h" +#include "src/ops/floor_mod.h" +#include "src/ops/equal.h" +#include "src/ops/not_equal.h" +#include "src/ops/less.h" +#include "src/ops/less_equal.h" +#include "src/ops/greater_equal.h" +#include "src/ops/greater.h" +#include "src/ops/floor.h" +#include "src/ops/squared_difference.h" +#include "src/ops/ceil.h" +#include "src/ops/round.h" +#include "src/ops/primitive_c.h" #include "include/model.h" #include "utils/log_adapter.h" -#include "src/ops/ops.h" namespace mindspore::lite { @@ -28,19 +120,19 @@ class ModelImpl { meta_graph_ = schema::GetMetaGraph(model_buf); } virtual ~ModelImpl(); - lite::Primitive *GetOp(const std::string &name) const; + PrimitiveC *GetOp(const std::string &name) const; const schema::MetaGraph *meta_graph() const; void FreeMetaGraph(); int BuildOps(); protected: - lite::Primitive *CopyPrimitive(const schema::Primitive *src_prim); + PrimitiveC *CopyPrimitive(const schema::Primitive *src_prim); protected: const char *model_buf_; size_t buf_size_; const schema::MetaGraph *meta_graph_ = nullptr; - std::map ops_; + std::map ops_; }; ModelImpl *ModelImpl::Import(const char *model_buf, size_t size) { @@ -72,7 +164,7 @@ ModelImpl *ModelImpl::Import(const char *model_buf, size_t size) { return model; } -lite::Primitive *ModelImpl::GetOp(const std::string &name) const { +PrimitiveC *ModelImpl::GetOp(const std::string &name) const { auto iter = ops_.find(name); if (iter == ops_.end()) { return nullptr; @@ -96,178 +188,178 @@ void 
ModelImpl::FreeMetaGraph() { const schema::MetaGraph *ModelImpl::meta_graph() const { return this->meta_graph_; } -lite::Primitive *ModelImpl::CopyPrimitive(const schema::Primitive *src_prim) { +PrimitiveC *ModelImpl::CopyPrimitive(const schema::Primitive *src_prim) { MS_EXCEPTION_IF_NULL(src_prim); auto op_type = src_prim->value_type(); switch (op_type) { case schema::PrimitiveType_SoftMax: - return new lite::SoftMax(const_cast(src_prim)); + return new SoftMax(const_cast(src_prim)); case schema::PrimitiveType_Activation: - return new lite::Activation(const_cast(src_prim)); + return new Activation(const_cast(src_prim)); case schema::PrimitiveType_Conv2D: - return new lite::Conv2D(const_cast(src_prim)); + return new Conv2D(const_cast(src_prim)); case schema::PrimitiveType_DeConv2D: - return new lite::DeConv2D(const_cast(src_prim)); + return new DeConv2D(const_cast(src_prim)); case schema::PrimitiveType_Reduce: - return new lite::Reduce(const_cast(src_prim)); + return new Reduce(const_cast(src_prim)); case schema::PrimitiveType_Pooling: - return new lite::Pooling(const_cast(src_prim)); + return new Pooling(const_cast(src_prim)); case schema::PrimitiveType_DepthwiseConv2D: - return new lite::DepthwiseConv2D(const_cast(src_prim)); + return new DepthwiseConv2D(const_cast(src_prim)); case schema::PrimitiveType_FusedBatchNorm: - return new lite::FusedBatchNorm(const_cast(src_prim)); + return new FusedBatchNorm(const_cast(src_prim)); case schema::PrimitiveType_BatchNorm: - return new lite::BatchNorm(const_cast(src_prim)); + return new BatchNorm(const_cast(src_prim)); case schema::PrimitiveType_FullConnection: - return new lite::FullConnection(const_cast(src_prim)); + return new FullConnection(const_cast(src_prim)); case schema::PrimitiveType_Power: - return new lite::Power(const_cast(src_prim)); + return new Power(const_cast(src_prim)); case schema::PrimitiveType_Range: - return new lite::Range(const_cast(src_prim)); + return new Range(const_cast(src_prim)); case 
schema::PrimitiveType_Mul: - return new lite::Mul(const_cast(src_prim)); + return new Mul(const_cast(src_prim)); case schema::PrimitiveType_Add: - return new lite::Add(const_cast(src_prim)); + return new Add(const_cast(src_prim)); case schema::PrimitiveType_Sub: - return new lite::Sub(const_cast(src_prim)); + return new Sub(const_cast(src_prim)); case schema::PrimitiveType_Div: - return new lite::Div(const_cast(src_prim)); + return new Div(const_cast(src_prim)); case schema::PrimitiveType_BiasAdd: - return new lite::BiasAdd(const_cast(src_prim)); + return new BiasAdd(const_cast(src_prim)); case schema::PrimitiveType_ExpandDims: - return new lite::ExpandDims(const_cast(src_prim)); + return new ExpandDims(const_cast(src_prim)); case schema::PrimitiveType_ArgMax: - return new lite::ArgMax(const_cast(src_prim)); + return new ArgMax(const_cast(src_prim)); case schema::PrimitiveType_ArgMin: - return new lite::ArgMin(const_cast(src_prim)); + return new ArgMin(const_cast(src_prim)); case schema::PrimitiveType_Cast: - return new lite::Cast(const_cast(src_prim)); + return new Cast(const_cast(src_prim)); case schema::PrimitiveType_Reshape: - return new lite::Reshape(const_cast(src_prim)); + return new Reshape(const_cast(src_prim)); case schema::PrimitiveType_Scale: - return new lite::Scale(const_cast(src_prim)); + return new Scale(const_cast(src_prim)); case schema::PrimitiveType_Eltwise: - return new lite::Eltwise(const_cast(src_prim)); + return new Eltwise(const_cast(src_prim)); case schema::PrimitiveType_Concat: - return new lite::Concat(const_cast(src_prim)); + return new Concat(const_cast(src_prim)); case schema::PrimitiveType_Fill: - return new lite::Fill(const_cast(src_prim)); + return new Fill(const_cast(src_prim)); case schema::PrimitiveType_Transpose: - return new lite::Transpose(const_cast(src_prim)); + return new Transpose(const_cast(src_prim)); case schema::PrimitiveType_Slice: - return new lite::Slice(const_cast(src_prim)); + return new 
SliceOp(const_cast(src_prim)); case schema::PrimitiveType_Squeeze: - return new lite::Squeeze(const_cast(src_prim)); + return new Squeeze(const_cast(src_prim)); case schema::PrimitiveType_Nchw2Nhwc: - return new lite::Nchw2Nhwc(const_cast(src_prim)); + return new Nchw2Nhwc(const_cast(src_prim)); case schema::PrimitiveType_Nhwc2Nchw: - return new lite::Nhwc2Nchw(const_cast(src_prim)); + return new Nhwc2Nchw(const_cast(src_prim)); case schema::PrimitiveType_Flatten: - return new lite::Flatten(const_cast(src_prim)); + return new Flatten(const_cast(src_prim)); case schema::PrimitiveType_Mean: - return new lite::Mean(const_cast(src_prim)); + return new Mean(const_cast(src_prim)); case schema::PrimitiveType_Stack: - return new lite::Stack(const_cast(src_prim)); + return new Stack(const_cast(src_prim)); case schema::PrimitiveType_Crop: - return new lite::Crop(const_cast(src_prim)); + return new Crop(const_cast(src_prim)); case schema::PrimitiveType_SquaredDifference: - return new lite::SquaredDifference(const_cast(src_prim)); + return new SquaredDifference(const_cast(src_prim)); case schema::PrimitiveType_AddN: - return new lite::AddN(const_cast(src_prim)); + return new AddN(const_cast(src_prim)); case schema::PrimitiveType_Abs: - return new lite::Abs(const_cast(src_prim)); + return new Abs(const_cast(src_prim)); case schema::PrimitiveType_Sin: - return new lite::Sin(const_cast(src_prim)); + return new Sin(const_cast(src_prim)); case schema::PrimitiveType_Cos: - return new lite::Cos(const_cast(src_prim)); + return new Cos(const_cast(src_prim)); case schema::PrimitiveType_Log: - return new lite::Log(const_cast(src_prim)); + return new Log(const_cast(src_prim)); case schema::PrimitiveType_Sqrt: - return new lite::Sqrt(const_cast(src_prim)); + return new Sqrt(const_cast(src_prim)); case schema::PrimitiveType_Rsqrt: - return new lite::Rsqrt(const_cast(src_prim)); + return new Rsqrt(const_cast(src_prim)); case schema::PrimitiveType_Square: - return new 
lite::Square(const_cast(src_prim)); + return new Square(const_cast(src_prim)); case schema::PrimitiveType_Exp: - return new lite::Exp(const_cast(src_prim)); + return new Exp(const_cast(src_prim)); case schema::PrimitiveType_Gather: - return new lite::Gather(const_cast(src_prim)); + return new Gather(const_cast(src_prim)); case schema::PrimitiveType_GatherNd: - return new lite::GatherNd(const_cast(src_prim)); + return new GatherNd(const_cast(src_prim)); case schema::PrimitiveType_LocalResponseNormalization: - return new lite::LocalResponseNormalization(const_cast(src_prim)); + return new LocalResponseNormalization(const_cast(src_prim)); case schema::PrimitiveType_Maximum: - return new lite::Maximum(const_cast(src_prim)); + return new Maximum(const_cast(src_prim)); case schema::PrimitiveType_Minimum: - return new lite::Minimum(const_cast(src_prim)); + return new Minimum(const_cast(src_prim)); case schema::PrimitiveType_Pad: - return new lite::Pad(const_cast(src_prim)); + return new Pad(const_cast(src_prim)); case schema::PrimitiveType_StridedSlice: - return new lite::StridedSlice(const_cast(src_prim)); + return new StridedSlice(const_cast(src_prim)); case schema::PrimitiveType_Prelu: - return new lite::Prelu(const_cast(src_prim)); + return new Prelu(const_cast(src_prim)); case schema::PrimitiveType_CaffePReLU: - return new lite::CaffePReLU(const_cast(src_prim)); + return new CaffePReLU(const_cast(src_prim)); case schema::PrimitiveType_Round: - return new lite::Round(const_cast(src_prim)); + return new Round(const_cast(src_prim)); case schema::PrimitiveType_Reverse: - return new lite::Reverse(const_cast(src_prim)); + return new Reverse(const_cast(src_prim)); case schema::PrimitiveType_ReverseSequence: - return new lite::ReverseSequence(const_cast(src_prim)); + return new ReverseSequence(const_cast(src_prim)); case schema::PrimitiveType_LogicalAnd: - return new lite::LogicalAnd(const_cast(src_prim)); + return new LogicalAnd(const_cast(src_prim)); case 
schema::PrimitiveType_LogicalOr: - return new lite::LogicalOr(const_cast(src_prim)); + return new LogicalOr(const_cast(src_prim)); case schema::PrimitiveType_LogicalNot: - return new lite::LogicalNot(const_cast(src_prim)); + return new LogicalNot(const_cast(src_prim)); case schema::PrimitiveType_FloorDiv: - return new lite::FloorDiv(const_cast(src_prim)); + return new FloorDiv(const_cast(src_prim)); case schema::PrimitiveType_FloorMod: - return new lite::FloorMod(const_cast(src_prim)); + return new FloorMod(const_cast(src_prim)); case schema::PrimitiveType_Equal: - return new lite::Equal(const_cast(src_prim)); + return new Equal(const_cast(src_prim)); case schema::PrimitiveType_NotEqual: - return new lite::NotEqual(const_cast(src_prim)); + return new NotEqual(const_cast(src_prim)); case schema::PrimitiveType_Less: - return new lite::Less(const_cast(src_prim)); + return new Less(const_cast(src_prim)); case schema::PrimitiveType_LessEqual: - return new lite::LessEqual(const_cast(src_prim)); + return new LessEqual(const_cast(src_prim)); case schema::PrimitiveType_Greater: - return new lite::Greater(const_cast(src_prim)); + return new Greater(const_cast(src_prim)); case schema::PrimitiveType_GreaterEqual: - return new lite::GreaterEqual(const_cast(src_prim)); + return new GreaterEqual(const_cast(src_prim)); case schema::PrimitiveType_Floor: - return new lite::Floor(const_cast(src_prim)); + return new Floor(const_cast(src_prim)); case schema::PrimitiveType_Ceil: - return new lite::Ceil(const_cast(src_prim)); + return new Ceil(const_cast(src_prim)); case schema::PrimitiveType_Split: - return new lite::Split(const_cast(src_prim)); + return new Split(const_cast(src_prim)); case schema::PrimitiveType_OneHot: - return new lite::OneHot(const_cast(src_prim)); + return new OneHot(const_cast(src_prim)); case schema::PrimitiveType_SpaceToDepth: - return new lite::SpaceToDepth(const_cast(src_prim)); + return new SpaceToDepth(const_cast(src_prim)); case schema::PrimitiveType_Tile: 
- return new lite::Tile(const_cast(src_prim)); + return new Tile(const_cast(src_prim)); case schema::PrimitiveType_Resize: - return new lite::Resize(const_cast(src_prim)); + return new Resize(const_cast(src_prim)); case schema::PrimitiveType_Unstack: - return new lite::Unstack(const_cast(src_prim)); + return new Unstack(const_cast(src_prim)); case schema::PrimitiveType_Unique: - return new lite::Unique(const_cast(src_prim)); + return new Unique(const_cast(src_prim)); case schema::PrimitiveType_TopK: - return new lite::TopK(const_cast(src_prim)); + return new TopK(const_cast(src_prim)); case schema::PrimitiveType_MatMul: - return new lite::MatMul(const_cast(src_prim)); + return new MatMul(const_cast(src_prim)); case schema::PrimitiveType_QuantDTypeCast: - return new lite::QuantDTypeCast(const_cast(src_prim)); + return new QuantDTypeCast(const_cast(src_prim)); case schema::PrimitiveType_EmbeddingLookup: - return new lite::EmbeddingLookup(const_cast(src_prim)); + return new EmbeddingLookup(const_cast(src_prim)); case schema::PrimitiveType_Elu: - return new lite::Elu(const_cast(src_prim)); + return new Elu(const_cast(src_prim)); case schema::PrimitiveType_DeDepthwiseConv2D: - return new lite::DeconvDepthwiseConv2D(const_cast(src_prim)); + return new DeDepthwiseConv2D(const_cast(src_prim)); case schema::PrimitiveType_Shape: - return new lite::Shape(const_cast(src_prim)); + return new Shape(const_cast(src_prim)); default: break; } @@ -334,9 +426,9 @@ Model *Model::Import(const char *model_buf, size_t size) { Model::~Model() { delete (this->model_impl_); } -lite::Primitive *Model::GetOp(const std::string &name) const { +mindspore::lite::PrimitiveC *Model::GetOp(const std::string &name) const { MS_EXCEPTION_IF_NULL(model_impl_); - return const_cast(model_impl_->GetOp(name)); + return const_cast(model_impl_->GetOp(name)); } void Model::FreeMetaGraph() { diff --git a/mindspore/lite/src/ops/CMakeLists.txt b/mindspore/lite/src/ops/CMakeLists.txt index c468336fca..06ad3db3f3 
100644 --- a/mindspore/lite/src/ops/CMakeLists.txt +++ b/mindspore/lite/src/ops/CMakeLists.txt @@ -1,3 +1,3 @@ -file(GLOB_RECURSE OPS_SRC ${CMAKE_CURRENT_SOURCE_DIR}/*.cc) +file(GLOB_RECURSE C_OPS_SRC ${CMAKE_CURRENT_SOURCE_DIR}/*.cc) -add_library(ops_mid_ OBJECT ${OPS_SRC}) \ No newline at end of file +add_library(c_ops_mid OBJECT ${C_OPS_SRC}) \ No newline at end of file diff --git a/mindspore/lite/c_ops/abs.h b/mindspore/lite/src/ops/abs.h similarity index 72% rename from mindspore/lite/c_ops/abs.h rename to mindspore/lite/src/ops/abs.h index 17ad481607..82deee4452 100644 --- a/mindspore/lite/c_ops/abs.h +++ b/mindspore/lite/src/ops/abs.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "c_ops/arithmetic_self.h" +#include "src/ops/arithmetic_self.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,14 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_ABS_H_ namespace mindspore { +namespace lite { class Abs : public ArithmeticSelf { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Abs(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} -#else - explicit Abs(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} -#endif + explicit Abs(OriginPrimitive *primitive) : ArithmeticSelf(primitive) {} }; +} // namespace lite } // namespace mindspore - #endif // LITE_MINDSPORE_LITE_C_OPS_ABS_H_ diff --git a/mindspore/lite/c_ops/activation.cc b/mindspore/lite/src/ops/activation.cc similarity index 95% rename from mindspore/lite/c_ops/activation.cc rename to mindspore/lite/src/ops/activation.cc index 1a0bcaead1..f983b3d772 100644 --- 
a/mindspore/lite/c_ops/activation.cc +++ b/mindspore/lite/src/ops/activation.cc @@ -14,9 +14,10 @@ * limitations under the License. */ -#include "c_ops/activation.h" +#include "src/ops/activation.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE int Activation::GetType() const { return this->primitive->value.AsActivation()->type; } float Activation::GetAlpha() const { return this->primitive->value.AsActivation()->alpha; } @@ -32,4 +33,5 @@ float Activation::GetAlpha() const { return this->primitive->value_as_Activation void Activation::SetType(int type) {} void Activation::SetAlpha(float alpha) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/activation.h b/mindspore/lite/src/ops/activation.h similarity index 82% rename from mindspore/lite/c_ops/activation.h rename to mindspore/lite/src/ops/activation.h index 4c2bf33ae8..a05fa5700a 100644 --- a/mindspore/lite/c_ops/activation.h +++ b/mindspore/lite/src/ops/activation.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,18 +29,16 @@ #define LITE_MINDSPORE_LITE_C_OPS_ACTIVATION_H_ namespace mindspore { +namespace lite { class Activation : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Activation(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Activation(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Activation(OriginPrimitive *primitive) : PrimitiveC(primitive) {} int GetType() const; float GetAlpha() const; void SetType(int type); void SetAlpha(float alpha); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_ACTIVATION_H_ diff --git a/mindspore/lite/c_ops/activation_grad.cc b/mindspore/lite/src/ops/activation_grad.cc similarity index 93% rename from 
mindspore/lite/c_ops/activation_grad.cc rename to mindspore/lite/src/ops/activation_grad.cc index 5632cad14d..a27d41ccbc 100644 --- a/mindspore/lite/c_ops/activation_grad.cc +++ b/mindspore/lite/src/ops/activation_grad.cc @@ -14,9 +14,10 @@ * limitations under the License. */ -#include "c_ops/activation_grad.h" +#include "src/ops/activation_grad.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE int ActivationGrad::GetType() const { return this->primitive->value.AsActivationGrad()->type; } @@ -30,4 +31,5 @@ int ActivationGrad::GetType() const { return this->primitive->value_as_Activatio void ActivationGrad::SetType(int type) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/activation_grad.h b/mindspore/lite/src/ops/activation_grad.h similarity index 81% rename from mindspore/lite/c_ops/activation_grad.h rename to mindspore/lite/src/ops/activation_grad.h index be51505afa..f0fd0a329a 100644 --- a/mindspore/lite/c_ops/activation_grad.h +++ b/mindspore/lite/src/ops/activation_grad.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,16 +29,14 @@ #define LITE_MINDSPORE_LITE_C_OPS_ACTIVATION_GRAD_H_ namespace mindspore { +namespace lite { class ActivationGrad : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit ActivationGrad(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit ActivationGrad(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit ActivationGrad(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int GetType() const; void SetType(int type); }; +} // namespace lite } // namespace mindspore - #endif // LITE_MINDSPORE_LITE_C_OPS_ACTIVATION_GRAD_H_ diff --git a/mindspore/lite/c_ops/add.cc b/mindspore/lite/src/ops/add.cc similarity index 94% rename from 
mindspore/lite/c_ops/add.cc rename to mindspore/lite/src/ops/add.cc index e69010a4a5..5a51f15760 100644 --- a/mindspore/lite/c_ops/add.cc +++ b/mindspore/lite/src/ops/add.cc @@ -14,9 +14,10 @@ * limitations under the License. */ -#include "c_ops/add.h" +#include "src/ops/add.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE int Add::GetActivationType() const { return this->primitive->value.AsAdd()->activationType; } @@ -30,4 +31,5 @@ int Add::GetActivationType() const { return this->primitive->value_as_Add()->act void Add::SetActivationType(int activation_type) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/add.h b/mindspore/lite/src/ops/add.h similarity index 83% rename from mindspore/lite/c_ops/add.h rename to mindspore/lite/src/ops/add.h index 70be03b5c3..444ef32652 100644 --- a/mindspore/lite/c_ops/add.h +++ b/mindspore/lite/src/ops/add.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "c_ops/arithmetic.h" +#include "src/ops/arithmetic.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -28,16 +28,15 @@ #define LITE_MINDSPORE_LITE_C_OPS_ADD_H_ namespace mindspore { +namespace lite { class Add : public Arithmetic { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Add(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} -#else - explicit Add(schema::Primitive *primitive) : Arithmetic(primitive) {} -#endif + explicit Add(OriginPrimitive *primitive) : Arithmetic(primitive) {} + int GetActivationType() const; void SetActivationType(int activation_type); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_ADD_H_ diff --git a/mindspore/lite/src/ops/addn.cc b/mindspore/lite/src/ops/addn.cc index a6fab39cfc..c4ad7cdbfb 100644 --- a/mindspore/lite/src/ops/addn.cc +++ b/mindspore/lite/src/ops/addn.cc @@ -14,12 +14,22 @@ * limitations under the License. 
*/ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/addn.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +int AddN::GetN() const { return this->primitive->value.AsAddN()->N; } + +void AddN::SetN(int n) { this->primitive->value.AsAddN()->N = n; } + +#else + +int AddN::GetN() const { return this->primitive->value_as_AddN()->N(); } + +void AddN::SetN(int n) {} +#endif -namespace mindspore::lite { namespace { constexpr int kLeastInputNum = 2; } @@ -48,5 +58,5 @@ int AddN::InferShape(std::vector inputs, std::vectorset_data_type(input->data_type()); return RET_OK; } -} // namespace mindspore::lite - +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/addn.h b/mindspore/lite/src/ops/addn.h similarity index 83% rename from mindspore/lite/c_ops/addn.h rename to mindspore/lite/src/ops/addn.h index 19e845c44f..8de647e12d 100644 --- a/mindspore/lite/c_ops/addn.h +++ b/mindspore/lite/src/ops/addn.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,17 +29,15 @@ #define LITE_MINDSPORE_LITE_C_OPS_ADD_N_H_ namespace mindspore { +namespace lite { class AddN : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit AddN(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit AddN(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit AddN(OriginPrimitive *primitive) : PrimitiveC(primitive) {} int InferShape(std::vector inputs_, std::vector outputs_) override; int GetN() const; void SetN(int n); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_ADD_N_H_ diff --git a/mindspore/lite/src/ops/argmax.cc b/mindspore/lite/src/ops/argmax.cc index b1179400f7..208511c8fc 100644 --- 
a/mindspore/lite/src/ops/argmax.cc +++ b/mindspore/lite/src/ops/argmax.cc @@ -14,12 +14,38 @@ * limitations under the License. */ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/argmax.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +int ArgMax::GetAxis() const { return this->primitive->value.AsArgMax()->axis; } +bool ArgMax::GetOutMaxValue() const { return this->primitive->value.AsArgMax()->outMaxValue; } +int ArgMax::GetTopK() const { return this->primitive->value.AsArgMax()->topK; } +bool ArgMax::GetKeepDims() const { return this->primitive->value.AsArgMax()->keepDims; } +int ArgMax::GetAxisType() const { return this->primitive->value.AsArgMax()->axisType; } + +void ArgMax::SetAxis(int axis) { this->primitive->value.AsArgMax()->axis = axis; } +void ArgMax::SetOutMaxValue(bool out_max_value) { this->primitive->value.AsArgMax()->outMaxValue = out_max_value; } +void ArgMax::SetTopK(int top_k) { this->primitive->value.AsArgMax()->topK = top_k; } +void ArgMax::SetKeepDims(bool keep_dims) { this->primitive->value.AsArgMax()->keepDims = keep_dims; } +void ArgMax::SetAxisType(int axis_type) { this->primitive->value.AsArgMax()->axisType = axis_type; } + +#else + +int ArgMax::GetAxis() const { return this->primitive->value_as_ArgMax()->axis(); } +bool ArgMax::GetOutMaxValue() const { return this->primitive->value_as_ArgMax()->outMaxValue(); } +int ArgMax::GetTopK() const { return this->primitive->value_as_ArgMax()->topK(); } +bool ArgMax::GetKeepDims() const { return this->primitive->value_as_ArgMax()->keepDims(); } +int ArgMax::GetAxisType() const { return this->primitive->value_as_ArgMax()->axisType(); } + +void ArgMax::SetAxis(int axis) {} +void ArgMax::SetOutMaxValue(bool out_max_value) {} +void ArgMax::SetTopK(int top_k) {} +void ArgMax::SetKeepDims(bool keep_dims) {} +void ArgMax::SetAxisType(int axis_type) {} +#endif -namespace mindspore::lite { int 
ArgMax::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); auto input = inputs_.front(); @@ -30,7 +56,6 @@ int ArgMax::InferShape(std::vector inputs_, std::vectorprimitive->value_as_ArgMax(); - std::vector output_shape(input->shape()); auto input_shape_size = input->shape().size(); int axis = argmax_prim->axis() < 0 ? argmax_prim->axis() + input_shape_size : argmax_prim->axis(); @@ -43,11 +68,10 @@ int ArgMax::InferShape(std::vector inputs_, std::vectortopK(); } - output->SetFormat(input->GetFormat()); output->set_shape(output_shape); output->set_data_type(input->data_type()); return RET_OK; } -} // namespace mindspore::lite - +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/argmax.h b/mindspore/lite/src/ops/argmax.h similarity index 86% rename from mindspore/lite/c_ops/argmax.h rename to mindspore/lite/src/ops/argmax.h index fc4e305bd5..a80566d97a 100644 --- a/mindspore/lite/c_ops/argmax.h +++ b/mindspore/lite/src/ops/argmax.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_ARG_MAX_H_ namespace mindspore { +namespace lite { class ArgMax : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit ArgMax(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit ArgMax(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit ArgMax(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetAxis() const; bool GetOutMaxValue() const; @@ -48,6 +46,7 @@ class ArgMax : public PrimitiveC { void SetKeepDims(bool keep_dims); void SetAxisType(int axis_type); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_ARG_MAX_H_ diff 
--git a/mindspore/lite/src/ops/argmin.cc b/mindspore/lite/src/ops/argmin.cc index 124a6134ba..c6584d1594 100644 --- a/mindspore/lite/src/ops/argmin.cc +++ b/mindspore/lite/src/ops/argmin.cc @@ -14,12 +14,38 @@ * limitations under the License. */ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/argmin.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +int ArgMin::GetAxis() const { return this->primitive->value.AsArgMin()->axis; } +bool ArgMin::GetOutMaxValue() const { return this->primitive->value.AsArgMin()->outMaxValue; } +int ArgMin::GetTopK() const { return this->primitive->value.AsArgMin()->topK; } +bool ArgMin::GetKeepDims() const { return this->primitive->value.AsArgMin()->keepDims; } +int ArgMin::GetAxisType() const { return this->primitive->value.AsArgMin()->axisType; } + +void ArgMin::SetAxis(int axis) { this->primitive->value.AsArgMin()->axis = axis; } +void ArgMin::SetOutMaxValue(bool out_max_value) { this->primitive->value.AsArgMin()->outMaxValue = out_max_value; } +void ArgMin::SetTopK(int top_k) { this->primitive->value.AsArgMin()->topK = top_k; } +void ArgMin::SetKeepDims(bool keep_dims) { this->primitive->value.AsArgMin()->keepDims = keep_dims; } +void ArgMin::SetAxisType(int axis_type) { this->primitive->value.AsArgMin()->axisType = axis_type; } + +#else + +int ArgMin::GetAxis() const { return this->primitive->value_as_ArgMin()->axis(); } +bool ArgMin::GetOutMaxValue() const { return this->primitive->value_as_ArgMin()->outMaxValue(); } +int ArgMin::GetTopK() const { return this->primitive->value_as_ArgMin()->topK(); } +bool ArgMin::GetKeepDims() const { return this->primitive->value_as_ArgMin()->keepDims(); } +int ArgMin::GetAxisType() const { return this->primitive->value_as_ArgMin()->axisType(); } + +void ArgMin::SetAxis(int axis) {} +void ArgMin::SetOutMaxValue(bool out_max_value) {} +void ArgMin::SetTopK(int top_k) {} +void 
ArgMin::SetKeepDims(bool keep_dims) {} +void ArgMin::SetAxisType(int axis_type) {} +#endif -namespace mindspore::lite { int ArgMin::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); auto input = inputs_.front(); @@ -42,10 +68,10 @@ int ArgMin::InferShape(std::vector inputs_, std::vectortopK(); } - output->SetFormat(input->GetFormat()); output->set_shape(output_shape); output->set_data_type(input->data_type()); return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/argmin.h b/mindspore/lite/src/ops/argmin.h similarity index 86% rename from mindspore/lite/c_ops/argmin.h rename to mindspore/lite/src/ops/argmin.h index 8442f583fb..adb5c92bf3 100644 --- a/mindspore/lite/c_ops/argmin.h +++ b/mindspore/lite/src/ops/argmin.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_ARG_MIN_H_ namespace mindspore { +namespace lite { class ArgMin : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit ArgMin(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit ArgMin(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit ArgMin(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetAxis() const; bool GetOutMaxValue() const; @@ -48,6 +46,7 @@ class ArgMin : public PrimitiveC { void SetKeepDims(bool keep_dims); void SetAxisType(int axis_type); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_ARG_MIN_H_ diff --git a/mindspore/lite/src/ops/arithmetic.cc b/mindspore/lite/src/ops/arithmetic.cc index 963040d143..cdd775dc79 100644 --- a/mindspore/lite/src/ops/arithmetic.cc +++ 
b/mindspore/lite/src/ops/arithmetic.cc @@ -14,13 +14,14 @@ * limitations under the License. */ -#include "src/ops/ops.h" +#include "src/ops/arithmetic.h" #include "include/errorcode.h" #include "utils/log_adapter.h" #include "src/ir/tensor.h" -namespace mindspore::lite { -int Arithmetic::InferShape(std::vector inputs_, std::vector outputs_) { +namespace mindspore { +namespace lite { +int Arithmetic::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); if (inputs_.size() != kDoubleNum) { MS_LOG(ERROR) << "The number of input must be " << kDoubleNum; @@ -103,5 +104,5 @@ int Arithmetic::InferShape(std::vector inputs_, std::vectorset_shape(output_shape); return RET_OK; } -} // namespace mindspore::lite - +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/arithmetic.h b/mindspore/lite/src/ops/arithmetic.h similarity index 86% rename from mindspore/lite/c_ops/arithmetic.h rename to mindspore/lite/src/ops/arithmetic.h index 9aef72a0f4..880d542226 100644 --- a/mindspore/lite/c_ops/arithmetic.h +++ b/mindspore/lite/src/ops/arithmetic.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_ARITHMETIC_H_ namespace mindspore { +namespace lite { class Arithmetic : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Arithmetic(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Arithmetic(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Arithmetic(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; bool Broadcasting() { return this->broadcasting_; } int NDims() { return this->ndim_; } @@ -50,6 +48,7 @@ class Arithmetic : public PrimitiveC { std::vector 
in_shape1_; std::vector out_shape_; }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_ARITHMETIC_H_ diff --git a/mindspore/lite/src/ops/arithmetic_self.cc b/mindspore/lite/src/ops/arithmetic_self.cc index ccbeb9191f..f4facb7692 100644 --- a/mindspore/lite/src/ops/arithmetic_self.cc +++ b/mindspore/lite/src/ops/arithmetic_self.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,12 +14,13 @@ * limitations under the License. */ -#include "src/ops/ops.h" +#include "src/ops/arithmetic_self.h" #include "include/errorcode.h" #include "utils/log_adapter.h" -#include "src/ir/tensor.h" -namespace mindspore::lite { +namespace mindspore { +namespace lite { + int ArithmeticSelf::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); auto input = inputs_.front(); @@ -32,7 +33,7 @@ int ArithmeticSelf::InferShape(std::vector inputs_, std::vecto return RET_OK; } output->set_shape(input->shape()); - return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/arithmetic_self.h b/mindspore/lite/src/ops/arithmetic_self.h similarity index 82% rename from mindspore/lite/c_ops/arithmetic_self.h rename to mindspore/lite/src/ops/arithmetic_self.h index 4b70b62168..3cc7a748cc 100644 --- a/mindspore/lite/c_ops/arithmetic_self.h +++ b/mindspore/lite/src/ops/arithmetic_self.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -28,15 +28,14 @@ #define LITE_MINDSPORE_LITE_C_OPS_ARITHMETIC_SELF_H_ namespace mindspore { +namespace lite { class ArithmeticSelf : public 
PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit ArithmeticSelf(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit ArithmeticSelf(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit ArithmeticSelf(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_ARITHMETIC_SELF_H_ diff --git a/mindspore/lite/c_ops/batch_norm.cc b/mindspore/lite/src/ops/batch_norm.cc similarity index 93% rename from mindspore/lite/c_ops/batch_norm.cc rename to mindspore/lite/src/ops/batch_norm.cc index 9d4fe336a0..fc7026ce11 100644 --- a/mindspore/lite/c_ops/batch_norm.cc +++ b/mindspore/lite/src/ops/batch_norm.cc @@ -14,9 +14,10 @@ * limitations under the License. */ -#include "c_ops/batch_norm.h" +#include "src/ops/batch_norm.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE float BatchNorm::GetEpsilon() const { return this->primitive->value.AsBatchNorm()->epsilon; } @@ -28,4 +29,5 @@ float BatchNorm::GetEpsilon() const { return this->primitive->value_as_BatchNorm void BatchNorm::SetEpsilon(float epsilon) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/batch_norm.h b/mindspore/lite/src/ops/batch_norm.h similarity index 82% rename from mindspore/lite/c_ops/batch_norm.h rename to mindspore/lite/src/ops/batch_norm.h index 6d660fb6d2..8745f96503 100644 --- a/mindspore/lite/c_ops/batch_norm.h +++ b/mindspore/lite/src/ops/batch_norm.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,16 +29,15 @@ #define LITE_MINDSPORE_LITE_C_OPS_BATCH_NORM_H_ namespace mindspore { +namespace lite { class BatchNorm : public PrimitiveC { public: -#ifdef 
PRIMITIVE_WRITEABLE - explicit BatchNorm(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit BatchNorm(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit BatchNorm(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + float GetEpsilon() const; void SetEpsilon(float epsilon); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_BATCH_NORM_H_ diff --git a/mindspore/lite/src/ops/batch_to_space.cc b/mindspore/lite/src/ops/batch_to_space.cc index e2095afad3..d9f9ebf792 100644 --- a/mindspore/lite/src/ops/batch_to_space.cc +++ b/mindspore/lite/src/ops/batch_to_space.cc @@ -14,12 +14,37 @@ * limitations under the License. */ -#include "src/ops/ops.h" +#include "src/ops/batch_to_space.h" +#include "src/common/common.h" #include "include/errorcode.h" #include "utils/log_adapter.h" #include "src/ir/tensor.h" -namespace mindspore::lite { +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +std::vector BatchToSpace::GetBlockShape() const { return this->primitive->value.AsBatchToSpace()->blockShape; } +std::vector BatchToSpace::GetCrops() const { return this->primitive->value.AsBatchToSpace()->crops; } + +void BatchToSpace::SetBlockShape(const std::vector &block_shape) { + this->primitive->value.AsBatchToSpace()->blockShape = block_shape; +} +void BatchToSpace::SetCrops(const std::vector &crops) { this->primitive->value.AsBatchToSpace()->crops = crops; } + +#else + +std::vector BatchToSpace::GetBlockShape() const { + auto fb_vector = this->primitive->value_as_BatchToSpace()->blockShape(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} +std::vector BatchToSpace::GetCrops() const { + auto fb_vector = this->primitive->value_as_BatchToSpace()->crops(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} + +void BatchToSpace::SetBlockShape(const std::vector &block_shape) {} +void BatchToSpace::SetCrops(const std::vector &crops) {} +#endif namespace { 
constexpr int kBatchToSpaceOutputNum = 1; constexpr int kBatchToSpaceInputNum = 1; @@ -27,7 +52,7 @@ constexpr int kBlockShapeSize = 2; constexpr int kCropsSize = 4; } // namespace -int BatchToSpace::InferShape(std::vector inputs, std::vector outputs) { +int BatchToSpace::InferShape(std::vector inputs, std::vector outputs) { MS_ASSERT(this->primitive != nullptr); if (outputs.size() != kBatchToSpaceOutputNum || inputs.size() != kBatchToSpaceInputNum) { MS_LOG(ERROR) << "Invalid output/input size! output size: " << outputs.size() << ",input size: " << inputs.size(); @@ -49,49 +74,50 @@ int BatchToSpace::InferShape(std::vector inputs, std::vectorprimitive->value_as_BatchToSpace(); - auto block_shape = prim->blockShape(); - if (block_shape->size() != kBlockShapeSize) { + + auto block_shape = GetBlockShape(); + if (block_shape.size() != kBlockShapeSize) { MS_LOG(ERROR) << "Block shape size should be " << kBlockShapeSize; return RET_PARAM_INVALID; } - auto crops = prim->crops(); - if (crops->size() != kCropsSize) { + auto crops = GetCrops(); + if (crops.size() != kCropsSize) { MS_LOG(ERROR) << "Crops size should be " << kCropsSize; return RET_PARAM_INVALID; } size_t mul_block_shape = 1; for (size_t i = 0; i < kBlockShapeSize; ++i) { - if (block_shape->Get(i) <= 0) { + if (block_shape[i] <= 0) { MS_LOG(ERROR) << "Input block_shape should > 0!"; return RET_PARAM_INVALID; } - if (input_shape[kNHWC_n_index] % block_shape->Get(i)) { - MS_LOG(ERROR) << "Dimension n " << input_shape[kNHWC_n_index] << " can not divide block_shape[" << i << "] " - << block_shape->Get(i); - return RET_PARAM_INVALID; + if (input_shape[NHWC_N] % block_shape[i]) { + MS_LOG(ERROR) << "Dimension n " << input_shape[NHWC_N] << " can not divide block_shape[" << i << "] " + << block_shape[i]; + return 1; } - mul_block_shape *= block_shape->Get(i); + mul_block_shape *= block_shape[i]; } - if (input_shape[kNHWC_n_index] < mul_block_shape) { - MS_LOG(ERROR) << "Dimension n " << input_shape[kNHWC_n_index] << " 
< product of block shape!"; + if (input_shape[NHWC_N] < mul_block_shape) { + MS_LOG(ERROR) << "Dimension n " << input_shape[NHWC_N] << " < product of block shape!"; return RET_PARAM_INVALID; } for (size_t i = 0; i < kCropsSize; ++i) { - if (crops->Get(i) < 0) { + if (crops[i] < 0) { MS_LOG(ERROR) << "Input crops should >= 0"; return RET_PARAM_INVALID; } } std::vector output_shape(input_shape.size()); - output_shape[kNHWC_n_index] = input_shape[kNHWC_n_index] / mul_block_shape; - output_shape[kNHWC_h_index] = input_shape[kNHWC_h_index] * block_shape->Get(0) - crops->Get(0) - crops->Get(1); - output_shape[kNHWC_w_index] = input_shape[kNHWC_w_index] * block_shape->Get(1) - crops->Get(2) - crops->Get(3); - output_shape[kNHWC_c_index] = input_shape[kNHWC_c_index]; + output_shape[NHWC_N] = input_shape[NHWC_N] / mul_block_shape; + output_shape[NHWC_H] = input_shape[NHWC_H] * block_shape[0] - crops[0] - crops[1]; + output_shape[NHWC_W] = input_shape[NHWC_W] * block_shape[1] - crops[2] - crops[3]; + output_shape[NHWC_C] = input_shape[NHWC_C]; outputs[0]->set_shape(output_shape); return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/batch_to_space.h b/mindspore/lite/src/ops/batch_to_space.h similarity index 84% rename from mindspore/lite/c_ops/batch_to_space.h rename to mindspore/lite/src/ops/batch_to_space.h index d0fb08cf59..18c3ff239c 100644 --- a/mindspore/lite/c_ops/batch_to_space.h +++ b/mindspore/lite/src/ops/batch_to_space.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,19 +29,18 @@ #define LITE_MINDSPORE_LITE_C_OPS_BATCH_TO_SPACE_H_ namespace mindspore { +namespace lite { class BatchToSpace : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit BatchToSpace(schema::PrimitiveT *primitive) : 
PrimitiveC(primitive) {} -#else - explicit BatchToSpace(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit BatchToSpace(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; std::vector GetBlockShape() const; std::vector GetCrops() const; void SetBlockShape(const std::vector &block_shape); void SetCrops(const std::vector &crops); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_BATCH_TO_SPACE_H_ diff --git a/mindspore/lite/c_ops/bias_add.cc b/mindspore/lite/src/ops/bias_add.cc similarity index 94% rename from mindspore/lite/c_ops/bias_add.cc rename to mindspore/lite/src/ops/bias_add.cc index 4bb6f1d9a0..24c0f707ad 100644 --- a/mindspore/lite/c_ops/bias_add.cc +++ b/mindspore/lite/src/ops/bias_add.cc @@ -14,9 +14,10 @@ * limitations under the License. */ -#include "c_ops/bias_add.h" +#include "src/ops/bias_add.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE std::vector BiasAdd::GetAxis() const { return this->primitive->value.AsBiasAdd()->axis; } @@ -31,4 +32,5 @@ std::vector BiasAdd::GetAxis() const { void BiasAdd::SetAxis(const std::vector &axis) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/bias_add.h b/mindspore/lite/src/ops/bias_add.h similarity index 82% rename from mindspore/lite/c_ops/bias_add.h rename to mindspore/lite/src/ops/bias_add.h index b5241ff904..e9d7ad0a60 100644 --- a/mindspore/lite/c_ops/bias_add.h +++ b/mindspore/lite/src/ops/bias_add.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,16 +29,15 @@ #define LITE_MINDSPORE_LITE_C_OPS_BIAS_ADD_H_ namespace mindspore { +namespace lite { class BiasAdd : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit 
BiasAdd(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit BiasAdd(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit BiasAdd(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + std::vector GetAxis() const; void SetAxis(const std::vector &axis); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_BIAS_ADD_H_ diff --git a/mindspore/lite/c_ops/bias_grad.cc b/mindspore/lite/src/ops/bias_grad.cc similarity index 94% rename from mindspore/lite/c_ops/bias_grad.cc rename to mindspore/lite/src/ops/bias_grad.cc index fec73f758f..9912a4747f 100644 --- a/mindspore/lite/c_ops/bias_grad.cc +++ b/mindspore/lite/src/ops/bias_grad.cc @@ -14,9 +14,10 @@ * limitations under the License. */ -#include "c_ops/bias_grad.h" +#include "src/ops/bias_grad.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE std::vector BiasGrad::GetAxis() const { return this->primitive->value.AsBiasGrad()->axis; } @@ -31,4 +32,5 @@ std::vector BiasGrad::GetAxis() const { void BiasGrad::SetAxis(const std::vector &axis) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/bias_grad.h b/mindspore/lite/src/ops/bias_grad.h similarity index 82% rename from mindspore/lite/c_ops/bias_grad.h rename to mindspore/lite/src/ops/bias_grad.h index bf2fa9ad9b..bf135f3d0a 100644 --- a/mindspore/lite/c_ops/bias_grad.h +++ b/mindspore/lite/src/ops/bias_grad.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,16 +29,15 @@ #define LITE_MINDSPORE_LITE_C_OPS_BIAS_GRAD_H_ namespace mindspore { +namespace lite { class BiasGrad : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit BiasGrad(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit BiasGrad(schema::Primitive *primitive) 
: PrimitiveC(primitive) {} -#endif + explicit BiasGrad(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + std::vector GetAxis() const; void SetAxis(const std::vector &axis); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_BIAS_GRAD_H_ diff --git a/mindspore/lite/c_ops/bn_grad_input.cc b/mindspore/lite/src/ops/bn_grad_input.cc similarity index 94% rename from mindspore/lite/c_ops/bn_grad_input.cc rename to mindspore/lite/src/ops/bn_grad_input.cc index 2b116d9eb3..cbc851f887 100644 --- a/mindspore/lite/c_ops/bn_grad_input.cc +++ b/mindspore/lite/src/ops/bn_grad_input.cc @@ -14,9 +14,10 @@ * limitations under the License. */ -#include "c_ops/bn_grad_input.h" +#include "src/ops/bn_grad_input.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE float BNGradInput::GetEps() const { return this->primitive->value.AsBNGradInput()->eps; } int BNGradInput::GetChannels() const { return this->primitive->value.AsBNGradInput()->channels; } @@ -32,4 +33,5 @@ int BNGradInput::GetChannels() const { return this->primitive->value_as_BNGradIn void BNGradInput::SetEps(float eps) {} void BNGradInput::SetChannels(int channels) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/bn_grad_input.h b/mindspore/lite/src/ops/bn_grad_input.h similarity index 82% rename from mindspore/lite/c_ops/bn_grad_input.h rename to mindspore/lite/src/ops/bn_grad_input.h index 11ff9bdb5f..0476469699 100644 --- a/mindspore/lite/c_ops/bn_grad_input.h +++ b/mindspore/lite/src/ops/bn_grad_input.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,18 +29,17 @@ #define LITE_MINDSPORE_LITE_C_OPS_B_N_GRAD_INPUT_H_ namespace mindspore { +namespace lite { class BNGradInput : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit 
BNGradInput(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit BNGradInput(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit BNGradInput(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + float GetEps() const; int GetChannels() const; void SetEps(float eps); void SetChannels(int channels); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_B_N_GRAD_INPUT_H_ diff --git a/mindspore/lite/src/ops/broadcast_to.cc b/mindspore/lite/src/ops/broadcast_to.cc index 51e5914677..8ac0ce3b9f 100644 --- a/mindspore/lite/src/ops/broadcast_to.cc +++ b/mindspore/lite/src/ops/broadcast_to.cc @@ -14,22 +14,36 @@ * limitations under the License. */ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/broadcast_to.h" -namespace mindspore::lite { +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +std::vector BroadcastTo::GetDstShape() const { return this->primitive->value.AsBroadcastTo()->dst_shape; } + +void BroadcastTo::SetDstShape(const std::vector &dst_shape) { + this->primitive->value.AsBroadcastTo()->dst_shape = dst_shape; +} + +#else + +std::vector BroadcastTo::GetDstShape() const { + auto fb_vector = this->primitive->value_as_BroadcastTo()->dst_shape(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} + +void BroadcastTo::SetDstShape(const std::vector &dst_shape) {} +#endif namespace { constexpr int kBroadcastToInputNum = 1; constexpr int kBroadcastToOutputNum = 1; } // namespace -int BroadcastTo::InferShape(std::vector inputs, std::vector outputs) { +int BroadcastTo::InferShape(std::vector inputs, std::vector outputs) { MS_ASSERT(this->primitive != nullptr); if (inputs.size() != kBroadcastToInputNum || outputs.size() != kBroadcastToOutputNum) { MS_LOG(ERROR) << "input size:" << inputs.size() << ", output size:" << outputs.size(); - return RET_PARAM_INVALID; + return 1; } auto 
input = inputs.at(0); std::vector dst_shape(this->primitive->value_as_BroadcastTo()->dst_shape()->begin(), @@ -40,19 +54,19 @@ int BroadcastTo::InferShape(std::vector inputs, std::vector dst_shape.size()) { MS_LOG(ERROR) << "input shape size " << input_shape.size() << " should <= broadcast to shape size " << dst_shape.size() << "!"; - return RET_PARAM_INVALID; + return 1; } for (int i = dst_shape.size() - 1; i >= 0; --i) { if (dst_shape[i] < 0) { MS_LOG(ERROR) << "shape[" << i << "] = " << dst_shape[i] << " ] should be > 0!"; - return RET_PARAM_INVALID; + return 1; } if (input_shape_index >= 0) { auto dim = input_shape[input_shape_index]; if (dim != dst_shape[i] && dim != 1) { MS_LOG(ERROR) << "Invalid broadcast shape!"; - return RET_PARAM_INVALID; + return 1; } } shape[i] = dst_shape[i]; @@ -61,6 +75,7 @@ int BroadcastTo::InferShape(std::vector inputs, std::vectorSetFormat(input->GetFormat()); outputs[0]->set_shape(shape); outputs[0]->set_data_type(input->data_type()); - return RET_OK; + return 0; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/broadcast_to.h b/mindspore/lite/src/ops/broadcast_to.h similarity index 83% rename from mindspore/lite/c_ops/broadcast_to.h rename to mindspore/lite/src/ops/broadcast_to.h index 94f95bf090..8a268f3292 100644 --- a/mindspore/lite/c_ops/broadcast_to.h +++ b/mindspore/lite/src/ops/broadcast_to.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,17 +29,16 @@ #define LITE_MINDSPORE_LITE_C_OPS_BROADCAST_TO_H_ namespace mindspore { +namespace lite { class BroadcastTo : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit BroadcastTo(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit BroadcastTo(schema::Primitive *primitive) : PrimitiveC(primitive) {} 
-#endif + explicit BroadcastTo(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; std::vector GetDstShape() const; void SetDstShape(const std::vector &dst_shape); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_BROADCAST_TO_H_ diff --git a/mindspore/lite/c_ops/caffe_p_relu.cc b/mindspore/lite/src/ops/caffe_p_relu.cc similarity index 93% rename from mindspore/lite/c_ops/caffe_p_relu.cc rename to mindspore/lite/src/ops/caffe_p_relu.cc index bc176215ea..0623e8abc6 100644 --- a/mindspore/lite/c_ops/caffe_p_relu.cc +++ b/mindspore/lite/src/ops/caffe_p_relu.cc @@ -14,9 +14,10 @@ * limitations under the License. */ -#include "c_ops/caffe_p_relu.h" +#include "src/ops/caffe_p_relu.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE bool CaffePReLU::GetChannelShared() const { return this->primitive->value.AsCaffePReLU()->channelShared; } @@ -30,4 +31,5 @@ bool CaffePReLU::GetChannelShared() const { return this->primitive->value_as_Caf void CaffePReLU::SetChannelShared(bool channel_shared) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/caffe_p_relu.h b/mindspore/lite/src/ops/caffe_p_relu.h similarity index 80% rename from mindspore/lite/c_ops/caffe_p_relu.h rename to mindspore/lite/src/ops/caffe_p_relu.h index b4f5002884..76f44d52f5 100644 --- a/mindspore/lite/c_ops/caffe_p_relu.h +++ b/mindspore/lite/src/ops/caffe_p_relu.h @@ -18,8 +18,8 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" -#include "c_ops/activation.h" +#include "src/ops/primitive_c.h" +#include "src/ops/activation.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -30,16 +30,15 @@ #define LITE_MINDSPORE_LITE_C_OPS_CAFFE_P_RE_L_U_H_ namespace mindspore { +namespace lite { class CaffePReLU : public Activation { public: -#ifdef PRIMITIVE_WRITEABLE - explicit 
CaffePReLU(schema::PrimitiveT *primitive) : Activation(primitive) {} -#else - explicit CaffePReLU(schema::Primitive *primitive) : Activation(primitive) {} -#endif + explicit CaffePReLU(OriginPrimitive *primitive) : Activation(primitive) {} + bool GetChannelShared() const; void SetChannelShared(bool channel_shared); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_CAFFE_P_RE_L_U_H_ diff --git a/mindspore/lite/src/ops/cast.cc b/mindspore/lite/src/ops/cast.cc index e89f3867b0..0cc8cb8dc5 100644 --- a/mindspore/lite/src/ops/cast.cc +++ b/mindspore/lite/src/ops/cast.cc @@ -14,12 +14,26 @@ * limitations under the License. */ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/cast.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +int Cast::GetSrcT() const { return this->primitive->value.AsCast()->srcT; } +int Cast::GetDstT() const { return this->primitive->value.AsCast()->dstT; } + +void Cast::SetSrcT(int src_t) { this->primitive->value.AsCast()->srcT = src_t; } +void Cast::SetDstT(int dst_t) { this->primitive->value.AsCast()->dstT = dst_t; } + +#else + +int Cast::GetSrcT() const { return this->primitive->value_as_Cast()->srcT(); } +int Cast::GetDstT() const { return this->primitive->value_as_Cast()->dstT(); } + +void Cast::SetSrcT(int src_t) {} +void Cast::SetDstT(int dst_t) {} +#endif -namespace mindspore::lite { int Cast::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); auto input = inputs_.front(); @@ -49,4 +63,5 @@ int Cast::InferShape(std::vector inputs_, std::vectorset_data_type(TypeId::kNumberTypeFloat32); return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/cast.h b/mindspore/lite/src/ops/cast.h similarity index 84% rename from mindspore/lite/c_ops/cast.h rename to mindspore/lite/src/ops/cast.h 
index b49f1fd49c..cb58ee94fb 100644 --- a/mindspore/lite/c_ops/cast.h +++ b/mindspore/lite/src/ops/cast.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,19 +29,18 @@ #define LITE_MINDSPORE_LITE_C_OPS_CAST_H_ namespace mindspore { +namespace lite { class Cast : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Cast(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Cast(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Cast(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetSrcT() const; int GetDstT() const; void SetSrcT(int src_t); void SetDstT(int dst_t); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_CAST_H_ diff --git a/mindspore/lite/src/ops/ceil.h b/mindspore/lite/src/ops/ceil.h new file mode 100644 index 0000000000..d4591a46bc --- /dev/null +++ b/mindspore/lite/src/ops/ceil.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include "ir/dtype/type_id.h" +#include "src/ops/arithmetic_self.h" +#ifdef PRIMITIVE_WRITEABLE +#include "schema/inner/model_generated.h" +#else +#include "schema/model_generated.h" +#endif + +#ifndef LITE_MINDSPORE_LITE_C_OPS_CEIL_H_ +#define LITE_MINDSPORE_LITE_C_OPS_CEIL_H_ + +namespace mindspore { +namespace lite { +class Ceil : public ArithmeticSelf { + public: + explicit Ceil(OriginPrimitive *primitive) : ArithmeticSelf(primitive) {} +}; +} // namespace lite +} // namespace mindspore + +#endif // LITE_MINDSPORE_LITE_C_OPS_CEIL_H_ diff --git a/mindspore/lite/c_ops/clip.cc b/mindspore/lite/src/ops/clip.cc similarity index 95% rename from mindspore/lite/c_ops/clip.cc rename to mindspore/lite/src/ops/clip.cc index 0eeb12e4a6..6822872c16 100644 --- a/mindspore/lite/c_ops/clip.cc +++ b/mindspore/lite/src/ops/clip.cc @@ -14,9 +14,10 @@ * limitations under the License. */ -#include "c_ops/clip.h" +#include "src/ops/clip.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE float Clip::GetMax() const { return this->primitive->value.AsClip()->max; } float Clip::GetMin() const { return this->primitive->value.AsClip()->min; } @@ -32,4 +33,5 @@ float Clip::GetMin() const { return this->primitive->value_as_Clip()->min(); } void Clip::SetMax(float max) {} void Clip::SetMin(float min) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/clip.h b/mindspore/lite/src/ops/clip.h similarity index 83% rename from mindspore/lite/c_ops/clip.h rename to mindspore/lite/src/ops/clip.h index 12893ec1ef..2414ae75f0 100644 --- a/mindspore/lite/c_ops/clip.h +++ b/mindspore/lite/src/ops/clip.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,18 +29,17 @@ #define LITE_MINDSPORE_LITE_C_OPS_CLIP_H_ namespace mindspore { 
+namespace lite { class Clip : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Clip(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Clip(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Clip(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + float GetMax() const; float GetMin() const; void SetMax(float max); void SetMin(float min); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_CLIP_H_ diff --git a/mindspore/lite/src/ops/concat.cc b/mindspore/lite/src/ops/concat.cc index f5bb88453d..2899f14825 100644 --- a/mindspore/lite/src/ops/concat.cc +++ b/mindspore/lite/src/ops/concat.cc @@ -14,12 +14,28 @@ * limitations under the License. */ -#include "src/ops/ops.h" +#include "src/ops/concat.h" #include "include/errorcode.h" #include "utils/log_adapter.h" #include "src/ir/tensor.h" +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +int Concat::GetAxis() const { return this->primitive->value.AsConcat()->axis; } +int Concat::GetN() const { return this->primitive->value.AsConcat()->n; } + +void Concat::SetAxis(int axis) { this->primitive->value.AsConcat()->axis = axis; } +void Concat::SetN(int n) { this->primitive->value.AsConcat()->n = n; } + +#else + +int Concat::GetAxis() const { return this->primitive->value_as_Concat()->axis(); } +int Concat::GetN() const { return this->primitive->value_as_Concat()->n(); } + +void Concat::SetAxis(int axis) {} +void Concat::SetN(int n) {} +#endif -namespace mindspore::lite { namespace { constexpr int kConcatOutputNum = 1; } @@ -47,7 +63,6 @@ int Concat::InferShape(std::vector inputs_, std::vectordata_type(); @@ -58,7 +73,6 @@ int Concat::InferShape(std::vector inputs_, std::vectorGetFormat() != input0_format) { MS_LOG(ERROR) << "All input format should be the same!"; return RET_PARAM_INVALID; @@ -81,4 +95,5 @@ int Concat::InferShape(std::vector inputs_, std::vectorset_shape(output_shape); return RET_OK; } 
-} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/concat.h b/mindspore/lite/src/ops/concat.h similarity index 84% rename from mindspore/lite/c_ops/concat.h rename to mindspore/lite/src/ops/concat.h index 193fb43f2b..981731beda 100644 --- a/mindspore/lite/c_ops/concat.h +++ b/mindspore/lite/src/ops/concat.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,19 +29,18 @@ #define LITE_MINDSPORE_LITE_C_OPS_CONCAT_H_ namespace mindspore { +namespace lite { class Concat : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Concat(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Concat(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Concat(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetAxis() const; int GetN() const; void SetAxis(int axis); void SetN(int n); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_CONCAT_H_ diff --git a/mindspore/lite/src/ops/canstant_of_shape.cc b/mindspore/lite/src/ops/constant_of_shape.cc similarity index 81% rename from mindspore/lite/src/ops/canstant_of_shape.cc rename to mindspore/lite/src/ops/constant_of_shape.cc index cd92c0e19b..4fc49d8a12 100644 --- a/mindspore/lite/src/ops/canstant_of_shape.cc +++ b/mindspore/lite/src/ops/constant_of_shape.cc @@ -14,16 +14,27 @@ * limitations under the License. 
*/ -#include "src/ops/ops.h" #include "include/errorcode.h" #include "utils/log_adapter.h" #include "src/ir/tensor.h" +#include "src/ops/constant_of_shape.h" namespace mindspore::lite { namespace { constexpr int kShapeInputNum = 1; constexpr int kShapeOutputNum = 1; } // namespace +#ifdef PRIMITIVE_WRITEABLE +float ConstantOfShape::GetValue() const { return this->primitive->value.AsConstantOfShape()->Value; } + +void ConstantOfShape::SetValue(float value) { this->primitive->value.AsConstantOfShape()->Value = value; } + +#else + +float ConstantOfShape::GetValue() const { return this->primitive->value_as_ConstantOfShape()->value(); } + +void ConstantOfShape::SetValue(float value) {} +#endif int ConstantOfShape::InferShape(std::vector inputs_, std::vector outputs_) { if (inputs_.size() != kShapeInputNum) { diff --git a/mindspore/lite/src/ops/constant_of_shape.h b/mindspore/lite/src/ops/constant_of_shape.h new file mode 100644 index 0000000000..f67521cbfc --- /dev/null +++ b/mindspore/lite/src/ops/constant_of_shape.h @@ -0,0 +1,41 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include "ir/dtype/type_id.h" +#include "src/ops/primitive_c.h" +#ifdef PRIMITIVE_WRITEABLE +#include "schema/inner/model_generated.h" +#else +#include "schema/model_generated.h" +#endif + +#ifndef LITE_MINDSPORE_LITE_SRC_OPS_CONSTANT_OF_SHAPE_H_ +#define LITE_MINDSPORE_LITE_SRC_OPS_CONSTANT_OF_SHAPE_H_ +namespace mindspore { +namespace lite { +class ConstantOfShape : public PrimitiveC { + public: + explicit ConstantOfShape(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; + float GetValue() const; + void SetValue(float value); +}; +} // namespace lite +} // namespace mindspore +#endif // LITE_MINDSPORE_LITE_SRC_OPS_CONSTANT_OF_SHAPE_H_ diff --git a/mindspore/lite/src/ops/conv.cc b/mindspore/lite/src/ops/conv.cc deleted file mode 100644 index 957a669573..0000000000 --- a/mindspore/lite/src/ops/conv.cc +++ /dev/null @@ -1,90 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" - -namespace mindspore::lite { -void Conv2D::ConvInferShape(int input_h, int input_w, int *output_h, int *output_w) { - MS_ASSERT(this->primitive != nullptr); - auto conv2DPrim = this->primitive->value_as_Conv2D(); - int kernel_w = conv2DPrim->kernelW(); - int kernel_h = conv2DPrim->kernelH(); - int stride_w = conv2DPrim->strideW(); - int stride_h = conv2DPrim->strideH(); - int dilate_w = conv2DPrim->dilateW(); - int dilate_h = conv2DPrim->dilateH(); - pad_l_ = conv2DPrim->padLeft(); - pad_u_ = conv2DPrim->padUp(); - pad_d_ = conv2DPrim->padDown(); - pad_r_ = conv2DPrim->padRight(); - - if (conv2DPrim->padMode() == schema::PadMode_SAME) { - *output_w = std::ceil(static_cast(input_w) / static_cast(stride_w)); - *output_h = std::ceil(static_cast(input_h) / static_cast(stride_h)); - auto pad_h_all = ((*output_h - 1) * stride_h + (kernel_h - 1) * dilate_h + 1 - input_h); - auto pad_w_all = ((*output_w - 1) * stride_w + (kernel_w - 1) * dilate_w + 1 - input_w); - pad_u_ = pad_h_all / 2; - pad_d_ = pad_h_all - pad_u_; - pad_l_ = pad_w_all / 2; - pad_r_ = pad_w_all - pad_l_; - } else { - *output_w = std::ceil((static_cast(input_w) + pad_l_ + pad_r_ - - (static_cast(kernel_w) - 1) * static_cast(dilate_w)) / static_cast(stride_w)); - *output_h = std::ceil((static_cast(input_h) + pad_u_ + pad_d_ - - (static_cast(kernel_h) - 1) * static_cast(dilate_h)) / static_cast(stride_h)); - } -} - -int Conv2D::InferShape(std::vector inputs_, std::vector outputs_) { - if (inputs_.size() != 2 && inputs_.size() != 3) { - MS_LOG(ERROR) << "Add should has two or three inputs"; - return RET_ERROR; - } - if (outputs_.size() != 1) { - MS_LOG(ERROR) << "Add should has one outputs"; - return RET_ERROR; - } - auto *input_tensor = inputs_.front(); - auto *weight_tensor = inputs_.at(1); - auto *out_tensor = outputs_.front(); - MS_ASSERT(input_tensor != nullptr); - 
MS_ASSERT(out_tensor != nullptr); - - out_tensor->SetFormat(input_tensor->GetFormat()); - out_tensor->set_data_type(input_tensor->data_type()); - if (!GetInferFlag()) { - return RET_OK; - } - auto in_shape = input_tensor->shape(); - int input_h = in_shape.at(1); - int input_w = in_shape.at(2); - int output_w = 0, output_h = 0; - - this->ConvInferShape(input_h, input_w, &output_h, &output_w); - - std::vector out_shape{input_tensor->shape()}; - out_shape.at(1) = output_h; - out_shape.at(2) = output_w; - out_shape.at(3) = weight_tensor->shape()[0]; - out_tensor->set_shape(out_shape); - - return RET_OK; -} -} // namespace mindspore::lite - diff --git a/mindspore/lite/c_ops/conv2d.cc b/mindspore/lite/src/ops/conv2d.cc similarity index 90% rename from mindspore/lite/c_ops/conv2d.cc rename to mindspore/lite/src/ops/conv2d.cc index 25c0795247..467fe256d2 100644 --- a/mindspore/lite/c_ops/conv2d.cc +++ b/mindspore/lite/src/ops/conv2d.cc @@ -14,8 +14,13 @@ * limitations under the License. */ -#include "mindspore/lite/c_ops/conv2d.h" +#include "src/ops/conv2d.h" +#include "include/errorcode.h" +#include "utils/log_adapter.h" +#include "src/ir/tensor.h" + namespace mindspore { +namespace lite { int Conv2D::PadUp() const { return this->pad_u_; } int Conv2D::PadDown() const { return this->pad_d_; } int Conv2D::PadLeft() const { return this->pad_l_; } @@ -99,18 +104,19 @@ void Conv2D::SetActivationType(int activation_type) {} #endif void Conv2D::ConvInferShape(int input_h, int input_w, int *output_h, int *output_w) { MS_ASSERT(this->primitive != nullptr); - int kernel_w = GetKernelW(); - int kernel_h = GetKernelH(); - int stride_w = GetStrideW(); - int stride_h = GetStrideH(); - int dilate_w = GetDilateW(); - int dilate_h = GetDilateH(); - pad_l_ = GetPadLeft(); - pad_u_ = GetPadUp(); - pad_d_ = GetPadDown(); - pad_r_ = GetPadRight(); + auto conv2DPrim = this->primitive->value_as_Conv2D(); + int kernel_w = conv2DPrim->kernelW(); + int kernel_h = conv2DPrim->kernelH(); + int 
stride_w = conv2DPrim->strideW(); + int stride_h = conv2DPrim->strideH(); + int dilate_w = conv2DPrim->dilateW(); + int dilate_h = conv2DPrim->dilateH(); + pad_l_ = conv2DPrim->padLeft(); + pad_u_ = conv2DPrim->padUp(); + pad_d_ = conv2DPrim->padDown(); + pad_r_ = conv2DPrim->padRight(); - if (GetPadMode() == schema::PadMode_SAME) { + if (conv2DPrim->padMode() == schema::PadMode_SAME) { *output_w = std::ceil(static_cast(input_w) / static_cast(stride_w)); *output_h = std::ceil(static_cast(input_h) / static_cast(stride_h)); auto pad_h_all = ((*output_h - 1) * stride_h + (kernel_h - 1) * dilate_h + 1 - input_h); @@ -129,14 +135,14 @@ void Conv2D::ConvInferShape(int input_h, int input_w, int *output_h, int *output } } -int Conv2D::InferShape(std::vector inputs_, std::vector outputs_) { +int Conv2D::InferShape(std::vector inputs_, std::vector outputs_) { if (inputs_.size() != 2 && inputs_.size() != 3) { MS_LOG(ERROR) << "Add should has two or three inputs"; - return 1; + return RET_ERROR; } if (outputs_.size() != 1) { MS_LOG(ERROR) << "Add should has one outputs"; - return 1; + return RET_ERROR; } auto *input_tensor = inputs_.front(); auto *weight_tensor = inputs_.at(1); @@ -144,6 +150,11 @@ int Conv2D::InferShape(std::vector inputs_, std::vector< MS_ASSERT(input_tensor != nullptr); MS_ASSERT(out_tensor != nullptr); + out_tensor->SetFormat(input_tensor->GetFormat()); + out_tensor->set_data_type(input_tensor->data_type()); + if (!GetInferFlag()) { + return RET_OK; + } auto in_shape = input_tensor->shape(); int input_h = in_shape.at(1); int input_w = in_shape.at(2); @@ -156,8 +167,8 @@ int Conv2D::InferShape(std::vector inputs_, std::vector< out_shape.at(2) = output_w; out_shape.at(3) = weight_tensor->shape()[0]; out_tensor->set_shape(out_shape); - out_tensor->SetFormat(input_tensor->GetFormat()); - out_tensor->set_data_type(input_tensor->data_type()); - return 0; + + return RET_OK; } +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/conv2d.h 
b/mindspore/lite/src/ops/conv2d.h similarity index 91% rename from mindspore/lite/c_ops/conv2d.h rename to mindspore/lite/src/ops/conv2d.h index 020d50e4d7..328fd49718 100644 --- a/mindspore/lite/c_ops/conv2d.h +++ b/mindspore/lite/src/ops/conv2d.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,10 @@ #define LITE_MINDSPORE_LITE_C_OPS_CONV2_D_H_ namespace mindspore { +namespace lite { class Conv2D : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Conv2D(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Conv2D(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Conv2D(OriginPrimitive *primitive) : PrimitiveC(primitive) {} int InferShape(std::vector inputs_, std::vector outputs_) override; int PadUp() const; int PadDown() const; @@ -86,6 +83,7 @@ class Conv2D : public PrimitiveC { int pad_l_ = 0; int pad_r_ = 0; }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_CONV2_D_H_ diff --git a/mindspore/lite/c_ops/conv2d_grad_filter.cc b/mindspore/lite/src/ops/conv2d_grad_filter.cc similarity index 98% rename from mindspore/lite/c_ops/conv2d_grad_filter.cc rename to mindspore/lite/src/ops/conv2d_grad_filter.cc index b556a25eb4..49e29c87fe 100644 --- a/mindspore/lite/c_ops/conv2d_grad_filter.cc +++ b/mindspore/lite/src/ops/conv2d_grad_filter.cc @@ -14,9 +14,10 @@ * limitations under the License. 
*/ -#include "c_ops/conv2d_grad_filter.h" +#include "src/ops/conv2d_grad_filter.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE int Conv2DGradFilter::GetFormat() const { return this->primitive->value.AsConv2DGradFilter()->format; } int Conv2DGradFilter::GetGroup() const { return this->primitive->value.AsConv2DGradFilter()->group; } @@ -104,4 +105,5 @@ void Conv2DGradFilter::SetDilateH(int dilate_h) {} void Conv2DGradFilter::SetHasBias(bool has_bias) {} void Conv2DGradFilter::SetActivationType(int activation_type) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/conv2d_grad_filter.h b/mindspore/lite/src/ops/conv2d_grad_filter.h similarity index 89% rename from mindspore/lite/c_ops/conv2d_grad_filter.h rename to mindspore/lite/src/ops/conv2d_grad_filter.h index b38df2e095..7094983ee0 100644 --- a/mindspore/lite/c_ops/conv2d_grad_filter.h +++ b/mindspore/lite/src/ops/conv2d_grad_filter.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_CONV2_D_GRAD_FILTER_H_ namespace mindspore { +namespace lite { class Conv2DGradFilter : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Conv2DGradFilter(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Conv2DGradFilter(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Conv2DGradFilter(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int GetFormat() const; int GetGroup() const; int GetChannelIn() const; @@ -71,6 +69,7 @@ class Conv2DGradFilter : public PrimitiveC { void SetHasBias(bool has_bias); void SetActivationType(int activation_type); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_CONV2_D_GRAD_FILTER_H_ diff --git 
a/mindspore/lite/c_ops/conv2d_grad_input.cc b/mindspore/lite/src/ops/conv2d_grad_input.cc similarity index 98% rename from mindspore/lite/c_ops/conv2d_grad_input.cc rename to mindspore/lite/src/ops/conv2d_grad_input.cc index c9a82ad7a8..9892f98a63 100644 --- a/mindspore/lite/c_ops/conv2d_grad_input.cc +++ b/mindspore/lite/src/ops/conv2d_grad_input.cc @@ -14,9 +14,10 @@ * limitations under the License. */ -#include "c_ops/conv2d_grad_input.h" +#include "src/ops/conv2d_grad_input.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE int Conv2DGradInput::GetFormat() const { return this->primitive->value.AsConv2DGradInput()->format; } int Conv2DGradInput::GetGroup() const { return this->primitive->value.AsConv2DGradInput()->group; } @@ -102,4 +103,5 @@ void Conv2DGradInput::SetDilateH(int dilate_h) {} void Conv2DGradInput::SetHasBias(bool has_bias) {} void Conv2DGradInput::SetActivationType(int activation_type) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/conv2d_grad_input.h b/mindspore/lite/src/ops/conv2d_grad_input.h similarity index 89% rename from mindspore/lite/c_ops/conv2d_grad_input.h rename to mindspore/lite/src/ops/conv2d_grad_input.h index bbd4e5d36a..4c06d0b858 100644 --- a/mindspore/lite/c_ops/conv2d_grad_input.h +++ b/mindspore/lite/src/ops/conv2d_grad_input.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_CONV2_D_GRAD_INPUT_H_ namespace mindspore { +namespace lite { class Conv2DGradInput : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Conv2DGradInput(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Conv2DGradInput(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Conv2DGradInput(OriginPrimitive 
*primitive) : PrimitiveC(primitive) {} + int GetFormat() const; int GetGroup() const; int GetChannelIn() const; @@ -71,6 +69,7 @@ class Conv2DGradInput : public PrimitiveC { void SetHasBias(bool has_bias); void SetActivationType(int activation_type); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_CONV2_D_GRAD_INPUT_H_ diff --git a/mindspore/lite/src/ops/convolution_depthwise.cc b/mindspore/lite/src/ops/convolution_depthwise.cc deleted file mode 100644 index 2b8c11ba39..0000000000 --- a/mindspore/lite/src/ops/convolution_depthwise.cc +++ /dev/null @@ -1,85 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" - -namespace mindspore::lite { -int DepthwiseConv2D::InferShape(std::vector inputs_, std::vector outputs_) { - if (inputs_.size() != kDoubleNum && inputs_.size() != kMultiNum) { - MS_LOG(ERROR) << "inputs number is invalid"; - return RET_INPUT_TENSOR_ERROR; - } - if (outputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "output number is invalid"; - return RET_INPUT_TENSOR_ERROR; - } - MS_ASSERT(this->primitive != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto weight = inputs_.at(1); - MS_ASSERT(weight != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - - auto in_shape = input->shape(); - int input_h = in_shape.at(1); - int input_w = in_shape.at(2); - int input_channel = in_shape.at(3); - int output_w = 0, output_h = 0; - - auto conv_prim = this->primitive->value_as_DepthwiseConv2D(); - pad_l_ = conv_prim->padLeft(); - pad_u_ = conv_prim->padUp(); - pad_d_ = conv_prim->padDown(); - pad_r_ = conv_prim->padRight(); - if (conv_prim->padMode() == schema::PadMode_SAME) { - output_h = std::ceil(static_cast(input_h) / static_cast(conv_prim->strideH())); - output_w = std::ceil(static_cast(input_w) / static_cast(conv_prim->strideW())); - auto pad_h_all = - ((output_h - 1) * conv_prim->strideH() + (conv_prim->kernelH() - 1) * conv_prim->dilateH() + 1 - input_h); - auto pad_w_all = - ((output_w - 1) * conv_prim->strideW() + (conv_prim->kernelW() - 1) * conv_prim->dilateW() + 1 - input_w); - pad_u_ = pad_h_all / 2; - pad_d_ = pad_h_all - pad_u_; - pad_l_ = pad_w_all / 2; - pad_r_ = pad_w_all - pad_l_; - } else { - output_h = - std::ceil((static_cast(input_h) + pad_u_ + pad_d_ - (static_cast(conv_prim->kernelH()) - 1) * - static_cast(conv_prim->dilateH())) / static_cast(conv_prim->strideH())); - output_w = - std::ceil((static_cast(input_w) + pad_l_ + pad_r_ - (static_cast(conv_prim->kernelW()) - 
1) * - static_cast(conv_prim->dilateW())) / static_cast(conv_prim->strideW())); - } - std::vector out_shape{input->shape()}; - out_shape.at(1) = output_h; - out_shape.at(2) = output_w; - if (conv_prim->channelMultiplier() * input_channel != weight->shape()[0]) { - MS_LOG(ERROR) << "Conv depthwise only support group equals output channel."; - return RET_ERROR; - } - out_shape.at(3) = weight->shape()[0] * weight->shape()[3]; // in_channel * out_channel - - output->set_shape(out_shape); - output->SetFormat(input->GetFormat()); - output->set_data_type(input->data_type()); - return RET_OK; -} -} // namespace mindspore::lite - diff --git a/mindspore/lite/c_ops/cos.h b/mindspore/lite/src/ops/cos.h similarity index 72% rename from mindspore/lite/c_ops/cos.h rename to mindspore/lite/src/ops/cos.h index d2f3b84a80..25f199c104 100644 --- a/mindspore/lite/c_ops/cos.h +++ b/mindspore/lite/src/ops/cos.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "c_ops/arithmetic_self.h" +#include "src/ops/arithmetic_self.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,14 +29,12 @@ #define LITE_MINDSPORE_LITE_C_OPS_COS_H_ namespace mindspore { +namespace lite { class Cos : public ArithmeticSelf { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Cos(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} -#else - explicit Cos(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} -#endif + explicit Cos(OriginPrimitive *primitive) : ArithmeticSelf(primitive) {} }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_COS_H_ diff --git a/mindspore/lite/src/ops/crop.cc b/mindspore/lite/src/ops/crop.cc index c1b2bcfb49..8c5ddb7cb7 100644 --- a/mindspore/lite/src/ops/crop.cc +++ b/mindspore/lite/src/ops/crop.cc @@ -14,17 +14,32 @@ * limitations under the License. 
*/ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/crop.h" -namespace mindspore::lite { +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +long Crop::GetAxis() const { return this->primitive->value.AsCrop()->axis; } +std::vector Crop::GetOffsets() const { return this->primitive->value.AsCrop()->offsets; } + +void Crop::SetAxis(long axis) { this->primitive->value.AsCrop()->axis = axis; } +void Crop::SetOffsets(const std::vector &offsets) { this->primitive->value.AsCrop()->offsets = offsets; } + +#else + +long Crop::GetAxis() const { return this->primitive->value_as_Crop()->axis(); } +std::vector Crop::GetOffsets() const { + auto fb_vector = this->primitive->value_as_Crop()->offsets(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} + +void Crop::SetAxis(long axis) {} +void Crop::SetOffsets(const std::vector &offsets) {} +#endif namespace { constexpr int kCropOutputNum = 1; constexpr int kCropInputNum = 2; } // namespace - int Crop::InferShape(std::vector inputs, std::vector outputs) { MS_ASSERT(this->primitive != nullptr); if (outputs.size() != kCropOutputNum || inputs.size() != kCropInputNum) { @@ -34,7 +49,7 @@ int Crop::InferShape(std::vector inputs, std::vectorset_shape(inputs[1]->shape()); outputs[0]->SetFormat(inputs[0]->GetFormat()); outputs[0]->set_data_type(inputs[0]->data_type()); - return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/crop.h b/mindspore/lite/src/ops/crop.h similarity index 84% rename from mindspore/lite/c_ops/crop.h rename to mindspore/lite/src/ops/crop.h index c7d2c32da4..87a6fb2dbd 100644 --- a/mindspore/lite/c_ops/crop.h +++ b/mindspore/lite/src/ops/crop.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include 
"schema/inner/model_generated.h" #else @@ -29,19 +29,18 @@ #define LITE_MINDSPORE_LITE_C_OPS_CROP_H_ namespace mindspore { +namespace lite { class Crop : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Crop(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Crop(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Crop(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; long GetAxis() const; std::vector GetOffsets() const; void SetAxis(long axis); void SetOffsets(const std::vector &offsets); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_CROP_H_ diff --git a/mindspore/lite/c_ops/deconv2d.cc b/mindspore/lite/src/ops/deconv2d.cc similarity index 99% rename from mindspore/lite/c_ops/deconv2d.cc rename to mindspore/lite/src/ops/deconv2d.cc index f65ad24326..972c0a60f7 100644 --- a/mindspore/lite/c_ops/deconv2d.cc +++ b/mindspore/lite/src/ops/deconv2d.cc @@ -14,9 +14,10 @@ * limitations under the License. 
*/ -#include "c_ops/deconv2d.h" +#include "src/ops/deconv2d.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE int DeConv2D::GetFormat() const { return this->primitive->value.AsDeConv2D()->format; } int DeConv2D::GetGroup() const { return this->primitive->value.AsDeConv2D()->group; } @@ -141,4 +142,5 @@ int DeConv2D::InferShape(std::vector inputs_, std::vecto output->set_data_type(input->data_type()); return 0; } +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/deconv2d.h b/mindspore/lite/src/ops/deconv2d.h similarity index 91% rename from mindspore/lite/c_ops/deconv2d.h rename to mindspore/lite/src/ops/deconv2d.h index b94e1a2a4f..de20b0ab73 100644 --- a/mindspore/lite/c_ops/deconv2d.h +++ b/mindspore/lite/src/ops/deconv2d.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_DE_CONV2_D_H_ namespace mindspore { +namespace lite { class DeConv2D : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit DeConv2D(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit DeConv2D(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit DeConv2D(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetFormat() const; int GetGroup() const; @@ -83,6 +81,7 @@ class DeConv2D : public PrimitiveC { int pad_l_ = 0; int pad_r_ = 0; }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_DE_CONV2_D_H_ diff --git a/mindspore/lite/src/ops/deconvolution.cc b/mindspore/lite/src/ops/deconvolution.cc deleted file mode 100644 index effcef756a..0000000000 --- a/mindspore/lite/src/ops/deconvolution.cc +++ /dev/null @@ -1,72 +0,0 @@ -/** - * Copyright 2019-2020 Huawei 
Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" - -namespace mindspore::lite { -int DeConv2D::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto weight = inputs_.at(1); - MS_ASSERT(weight != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - - int32_t input_h = input->Height(); - int32_t input_w = input->Width(); - - int32_t output_n = input->Batch(); - int32_t output_h = 0; - int32_t output_w = 0; - int32_t output_c = weight->Channel(); - - auto deconv = GetAttribute(); - int kernel_w = deconv->kernelW(); - int kernel_h = deconv->kernelH(); - int stride_w = deconv->strideW(); - int stride_h = deconv->strideH(); - int dilate_w = deconv->dilateW(); - int dilate_h = deconv->dilateH(); - pad_l_ = deconv->padLeft(); - pad_u_ = deconv->padUp(); - pad_d_ = deconv->padDown(); - pad_r_ = deconv->padRight(); - schema::PadMode pad_mode = deconv->padMode(); - - if (pad_mode == schema::PadMode_CAFFE) { - output_h = (input_h - 1) * stride_h + ((kernel_h - 1) * dilate_h + 1) - pad_u_ - pad_d_; - output_w = (input_w - 1) * stride_w + ((kernel_w - 1) * dilate_w + 1) - pad_l_ - pad_r_; - } else if (pad_mode == schema::PadMode_SAME) { - output_h = input_h * stride_h; - output_w = input_w * 
stride_w; - } else if (pad_mode == schema::PadMode_VALID) { - output_h = (input_h - 1) * stride_h + kernel_h; - output_w = (input_w - 1) * stride_w + kernel_w; - } else { - MS_LOG(ERROR) << "unsupported pad mode for deconv"; - } - - std::vector out_shape = {output_n, output_h, output_w, output_c}; - output->set_shape(out_shape); - output->SetFormat(input->GetFormat()); - output->set_data_type(input->data_type()); - return RET_OK; -} -} // namespace mindspore::lite diff --git a/mindspore/lite/src/ops/deconvolution_depthwise.cc b/mindspore/lite/src/ops/deconvolution_depthwise.cc deleted file mode 100644 index 4251ad6aad..0000000000 --- a/mindspore/lite/src/ops/deconvolution_depthwise.cc +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" - -namespace mindspore::lite { -int DeconvDepthwiseConv2D::InferShape(std::vector inputs_, std::vector outputs_) { - if (inputs_.size() != kDoubleNum && inputs_.size() != kMultiNum) { - MS_LOG(ERROR) << "inputs number is invalid"; - return RET_INPUT_TENSOR_ERROR; - } - if (outputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "output number is invalid"; - return RET_INPUT_TENSOR_ERROR; - } - MS_ASSERT(this->primitive != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto weight = inputs_.at(1); - MS_ASSERT(weight != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - - auto in_shape = input->shape(); - int input_h = in_shape.at(1); - int input_w = in_shape.at(2); - int input_channel = in_shape.at(3); - int output_w = 0, output_h = 0; - - auto conv_prim = this->primitive->value_as_DeDepthwiseConv2D(); - pad_l_ = conv_prim->padLeft(); - pad_u_ = conv_prim->padUp(); - pad_d_ = conv_prim->padDown(); - pad_r_ = conv_prim->padRight(); - output_h = conv_prim->strideH() * (input_h - 1) + conv_prim->kernelH() - pad_u_ - pad_d_; - output_w = conv_prim->strideW() * (input_w - 1) + conv_prim->kernelW() - pad_l_ - pad_r_; - if ((output_h + conv_prim->padUp() + conv_prim->padDown() - conv_prim->kernelH()) % conv_prim->strideH() != 0) { - output_h += (output_h + conv_prim->padLeft() + conv_prim->padRight() - conv_prim->kernelH()) % conv_prim->strideH(); - } - if ((output_w + conv_prim->padLeft() + conv_prim->padRight() - conv_prim->kernelW()) % conv_prim->strideW() != 0) { - output_w += (output_w + conv_prim->padLeft() + conv_prim->padRight() - conv_prim->kernelW()) % conv_prim->strideW(); - } - std::vector out_shape{input->shape()}; - out_shape.at(1) = output_h; - out_shape.at(2) = output_w; - if (conv_prim->channelMultiplier() * input_channel != weight->shape()[0]) { - MS_LOG(ERROR) << "Conv depthwise 
only support group equals output channel."; - return RET_ERROR; - } - out_shape.at(3) = weight->shape()[0] * weight->shape()[3]; // in_channel * out_channel - - output->set_shape(out_shape); - output->SetFormat(input->GetFormat()); - output->set_data_type(input->data_type()); - return RET_OK; -} -} // namespace mindspore::lite - diff --git a/mindspore/lite/c_ops/dedepthwise_conv2d.cc b/mindspore/lite/src/ops/dedepthwise_conv2d.cc similarity index 99% rename from mindspore/lite/c_ops/dedepthwise_conv2d.cc rename to mindspore/lite/src/ops/dedepthwise_conv2d.cc index 17c813a502..81548ea462 100644 --- a/mindspore/lite/c_ops/dedepthwise_conv2d.cc +++ b/mindspore/lite/src/ops/dedepthwise_conv2d.cc @@ -14,9 +14,10 @@ * limitations under the License. */ -#include "c_ops/dedepthwise_conv2d.h" +#include "src/ops/dedepthwise_conv2d.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE int DeDepthwiseConv2D::GetFormat() const { return this->primitive->value.AsDeDepthwiseConv2D()->format; } int DeDepthwiseConv2D::GetChannelIn() const { return this->primitive->value.AsDeDepthwiseConv2D()->channelIn; } @@ -158,4 +159,5 @@ int DeDepthwiseConv2D::InferShape(std::vector inputs_, output->set_data_type(input->data_type()); return 0; } +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/dedepthwise_conv2d.h b/mindspore/lite/src/ops/dedepthwise_conv2d.h similarity index 90% rename from mindspore/lite/c_ops/dedepthwise_conv2d.h rename to mindspore/lite/src/ops/dedepthwise_conv2d.h index e77e281797..ed317016f1 100644 --- a/mindspore/lite/c_ops/dedepthwise_conv2d.h +++ b/mindspore/lite/src/ops/dedepthwise_conv2d.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_DE_DEPTHWISE_CONV2_D_H_ namespace mindspore { +namespace lite 
{ class DeDepthwiseConv2D : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit DeDepthwiseConv2D(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit DeDepthwiseConv2D(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit DeDepthwiseConv2D(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetFormat() const; int GetChannelIn() const; @@ -81,6 +79,7 @@ class DeDepthwiseConv2D : public PrimitiveC { int pad_l_ = 0; int pad_r_ = 0; }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_DE_DEPTHWISE_CONV2_D_H_ diff --git a/mindspore/lite/src/ops/depth_to_space.cc b/mindspore/lite/src/ops/depth_to_space.cc index f09fddfb58..4bfcbf369e 100644 --- a/mindspore/lite/src/ops/depth_to_space.cc +++ b/mindspore/lite/src/ops/depth_to_space.cc @@ -14,50 +14,64 @@ * limitations under the License. */ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/depth_to_space.h" +#include "src/common/common.h" +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +int DepthToSpace::GetBlockSize() const { return this->primitive->value.AsDepthToSpace()->blockSize; } +int DepthToSpace::GetFormat() const { return this->primitive->value.AsDepthToSpace()->format; } -namespace mindspore::lite { +void DepthToSpace::SetBlockSize(int block_size) { this->primitive->value.AsDepthToSpace()->blockSize = block_size; } +void DepthToSpace::SetFormat(int format) { this->primitive->value.AsDepthToSpace()->format = format; } + +#else + +int DepthToSpace::GetBlockSize() const { return this->primitive->value_as_DepthToSpace()->blockSize(); } +int DepthToSpace::GetFormat() const { return this->primitive->value_as_DepthToSpace()->format(); } + +void DepthToSpace::SetBlockSize(int block_size) {} +void DepthToSpace::SetFormat(int format) {} +#endif 
namespace { constexpr int kDepthToSpaceOutputNum = 1; constexpr int kDepthToSpaceInputNum = 1; } // namespace -int DepthToSpace::InferShape(std::vector inputs, std::vector outputs) { +int DepthToSpace::InferShape(std::vector inputs, std::vector outputs) { MS_ASSERT(this->primitive != nullptr); if (outputs.size() != kDepthToSpaceOutputNum || inputs.size() != kDepthToSpaceInputNum) { MS_LOG(ERROR) << "Invalid output/input size! output size: " << outputs.size() << ",input size: " << inputs.size(); - return RET_PARAM_INVALID; + return 1; } auto input = inputs.at(0); if (input->GetFormat() != schema::Format_NHWC) { MS_LOG(ERROR) << "depth_to_space only support NHWC now!"; - return RET_FORMAT_ERR; + return 1; } auto input_shape = input->shape(); if (input_shape.size() != kDimension_4d) { MS_LOG(ERROR) << "input shape dimension size should == " << kDimension_4d; - return RET_PARAM_INVALID; + return 1; } - auto prim = this->primitive->value_as_DepthToSpace(); - int32_t block_size = prim->blockSize(); - if (input_shape[kNHWC_c_index] % (block_size * block_size) != 0 || input_shape[kNHWC_c_index] == 0) { - MS_LOG(ERROR) << "input dimension c size " << input_shape[kNHWC_c_index] << " should be mulitple of block_size(" + + int32_t block_size = GetBlockSize(); + if (input_shape[NHWC_C] % (block_size * block_size) != 0 || input_shape[NHWC_C] == 0) { + MS_LOG(ERROR) << "input dimension c size " << input_shape[NHWC_C] << " should be mulitple of block_size(" << block_size << ") * block_size)!"; - return RET_PARAM_INVALID; + return 1; } std::vector output_shape(input_shape.size()); - output_shape[kNHWC_n_index] = input_shape[kNHWC_n_index]; - output_shape[kNHWC_h_index] = input_shape[kNHWC_h_index] * block_size; - output_shape[kNHWC_w_index] = input_shape[kNHWC_w_index] * block_size; - output_shape[kNHWC_c_index] = input_shape[kNHWC_c_index] / (block_size * block_size); + output_shape[NHWC_N] = input_shape[NHWC_N]; + output_shape[NHWC_H] = input_shape[NHWC_H] * block_size; + 
output_shape[NHWC_W] = input_shape[NHWC_W] * block_size; + output_shape[NHWC_C] = input_shape[NHWC_C] / (block_size * block_size); outputs[0]->set_shape(output_shape); outputs[0]->set_data_type(input->data_type()); outputs[0]->SetFormat(input->GetFormat()); - return RET_OK; + return 0; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/depth_to_space.h b/mindspore/lite/src/ops/depth_to_space.h similarity index 84% rename from mindspore/lite/c_ops/depth_to_space.h rename to mindspore/lite/src/ops/depth_to_space.h index 1c02c25320..6ab1fc3075 100644 --- a/mindspore/lite/c_ops/depth_to_space.h +++ b/mindspore/lite/src/ops/depth_to_space.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,19 +29,18 @@ #define LITE_MINDSPORE_LITE_C_OPS_DEPTH_TO_SPACE_H_ namespace mindspore { +namespace lite { class DepthToSpace : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit DepthToSpace(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit DepthToSpace(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit DepthToSpace(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetBlockSize() const; int GetFormat() const; void SetBlockSize(int block_size); void SetFormat(int format); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_DEPTH_TO_SPACE_H_ diff --git a/mindspore/lite/c_ops/depthwise_conv2d.cc b/mindspore/lite/src/ops/depthwise_conv2d.cc similarity index 99% rename from mindspore/lite/c_ops/depthwise_conv2d.cc rename to mindspore/lite/src/ops/depthwise_conv2d.cc index beb6e8ddf8..0cfb482bbd 100644 --- a/mindspore/lite/c_ops/depthwise_conv2d.cc +++ 
b/mindspore/lite/src/ops/depthwise_conv2d.cc @@ -14,9 +14,10 @@ * limitations under the License. */ -#include "c_ops/depthwise_conv2d.h" +#include "src/ops/depthwise_conv2d.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE int DepthwiseConv2D::GetFormat() const { return this->primitive->value.AsDepthwiseConv2D()->format; } int DepthwiseConv2D::GetChannelIn() const { return this->primitive->value.AsDepthwiseConv2D()->channelIn; } @@ -161,4 +162,5 @@ int DepthwiseConv2D::InferShape(std::vector inputs_, output->set_data_type(input->data_type()); return 0; } +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/depthwise_conv2d.h b/mindspore/lite/src/ops/depthwise_conv2d.h similarity index 90% rename from mindspore/lite/c_ops/depthwise_conv2d.h rename to mindspore/lite/src/ops/depthwise_conv2d.h index 5de8d7224d..b61505feee 100644 --- a/mindspore/lite/c_ops/depthwise_conv2d.h +++ b/mindspore/lite/src/ops/depthwise_conv2d.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_DEPTHWISE_CONV2_D_H_ namespace mindspore { +namespace lite { class DepthwiseConv2D : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit DepthwiseConv2D(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit DepthwiseConv2D(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit DepthwiseConv2D(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetFormat() const; int GetChannelIn() const; @@ -81,6 +79,7 @@ class DepthwiseConv2D : public PrimitiveC { int pad_l_ = 0; int pad_r_ = 0; }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_DEPTHWISE_CONV2_D_H_ diff --git 
a/mindspore/lite/c_ops/detection_post_process.cc b/mindspore/lite/src/ops/detection_post_process.cc similarity index 98% rename from mindspore/lite/c_ops/detection_post_process.cc rename to mindspore/lite/src/ops/detection_post_process.cc index ca75aafc93..b1256a7840 100644 --- a/mindspore/lite/c_ops/detection_post_process.cc +++ b/mindspore/lite/src/ops/detection_post_process.cc @@ -14,9 +14,10 @@ * limitations under the License. */ -#include "c_ops/detection_post_process.h" +#include "src/ops/detection_post_process.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE int DetectionPostProcess::GetFormat() const { return this->primitive->value.AsDetectionPostProcess()->format; } int DetectionPostProcess::GetInputSize() const { return this->primitive->value.AsDetectionPostProcess()->inputSize; } @@ -128,4 +129,5 @@ void DetectionPostProcess::SetMaxClassesPreDetection(long max_classes_pre_detect void DetectionPostProcess::SetNumClasses(long num_classes) {} void DetectionPostProcess::SetUseRegularNms(bool use_regular_nms) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/detection_post_process.h b/mindspore/lite/src/ops/detection_post_process.h similarity index 88% rename from mindspore/lite/c_ops/detection_post_process.h rename to mindspore/lite/src/ops/detection_post_process.h index 6be3feb61b..2aa87f656b 100644 --- a/mindspore/lite/c_ops/detection_post_process.h +++ b/mindspore/lite/src/ops/detection_post_process.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_DETECTION_POST_PROCESS_H_ namespace mindspore { +namespace lite { class DetectionPostProcess : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit DetectionPostProcess(schema::PrimitiveT *primitive) : 
PrimitiveC(primitive) {} -#else - explicit DetectionPostProcess(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit DetectionPostProcess(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int GetFormat() const; int GetInputSize() const; float GetHScale() const; @@ -63,6 +61,7 @@ class DetectionPostProcess : public PrimitiveC { void SetNumClasses(long num_classes); void SetUseRegularNms(bool use_regular_nms); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_DETECTION_POST_PROCESS_H_ diff --git a/mindspore/lite/c_ops/div.cc b/mindspore/lite/src/ops/div.cc similarity index 94% rename from mindspore/lite/c_ops/div.cc rename to mindspore/lite/src/ops/div.cc index dc378c522d..cd042ab509 100644 --- a/mindspore/lite/c_ops/div.cc +++ b/mindspore/lite/src/ops/div.cc @@ -14,9 +14,10 @@ * limitations under the License. */ -#include "c_ops/div.h" +#include "src/ops/div.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE int Div::GetActivationType() const { return this->primitive->value.AsDiv()->activationType; } @@ -30,4 +31,5 @@ int Div::GetActivationType() const { return this->primitive->value_as_Div()->act void Div::SetActivationType(int activation_type) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/div.h b/mindspore/lite/src/ops/div.h similarity index 77% rename from mindspore/lite/c_ops/div.h rename to mindspore/lite/src/ops/div.h index fa4efffe7f..b6e0ca344f 100644 --- a/mindspore/lite/c_ops/div.h +++ b/mindspore/lite/src/ops/div.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "c_ops/arithmetic.h" +#include "src/ops/arithmetic.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" @@ -30,17 +30,15 @@ #define LITE_MINDSPORE_LITE_C_OPS_DIV_H_ namespace mindspore { +namespace lite { class Div : public Arithmetic { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Div(schema::PrimitiveT *primitive) : 
Arithmetic(primitive) {} -#else - explicit Div(schema::Primitive *primitive) : Arithmetic(primitive) {} -#endif - int InferShape(std::vector inputs_, std::vector outputs_) override; + explicit Div(OriginPrimitive *primitive) : Arithmetic(primitive) {} + int GetActivationType() const; void SetActivationType(int activation_type); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_DIV_H_ diff --git a/mindspore/lite/c_ops/dropout.cc b/mindspore/lite/src/ops/dropout.cc similarity index 93% rename from mindspore/lite/c_ops/dropout.cc rename to mindspore/lite/src/ops/dropout.cc index 97b26d46d8..83835811c7 100644 --- a/mindspore/lite/c_ops/dropout.cc +++ b/mindspore/lite/src/ops/dropout.cc @@ -14,9 +14,10 @@ * limitations under the License. */ -#include "c_ops/dropout.h" +#include "src/ops/dropout.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE float Dropout::GetRatio() const { return this->primitive->value.AsDropout()->ratio; } @@ -28,4 +29,5 @@ float Dropout::GetRatio() const { return this->primitive->value_as_Dropout()->ra void Dropout::SetRatio(float ratio) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/dropout.h b/mindspore/lite/src/ops/dropout.h similarity index 82% rename from mindspore/lite/c_ops/dropout.h rename to mindspore/lite/src/ops/dropout.h index 0a781162c2..a59302e89b 100644 --- a/mindspore/lite/c_ops/dropout.h +++ b/mindspore/lite/src/ops/dropout.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,16 +29,15 @@ #define LITE_MINDSPORE_LITE_C_OPS_DROPOUT_H_ namespace mindspore { +namespace lite { class Dropout : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Dropout(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit 
Dropout(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Dropout(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + float GetRatio() const; void SetRatio(float ratio); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_DROPOUT_H_ diff --git a/mindspore/lite/c_ops/eltwise.cc b/mindspore/lite/src/ops/eltwise.cc similarity index 93% rename from mindspore/lite/c_ops/eltwise.cc rename to mindspore/lite/src/ops/eltwise.cc index 882a54f8ac..95eafc7cfb 100644 --- a/mindspore/lite/c_ops/eltwise.cc +++ b/mindspore/lite/src/ops/eltwise.cc @@ -14,9 +14,10 @@ * limitations under the License. */ -#include "c_ops/eltwise.h" +#include "src/ops/eltwise.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE int Eltwise::GetMode() const { return this->primitive->value.AsEltwise()->mode; } @@ -28,4 +29,5 @@ int Eltwise::GetMode() const { return this->primitive->value_as_Eltwise()->mode( void Eltwise::SetMode(int mode) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/eltwise.h b/mindspore/lite/src/ops/eltwise.h similarity index 82% rename from mindspore/lite/c_ops/eltwise.h rename to mindspore/lite/src/ops/eltwise.h index 4354018566..9ea6ac81f9 100644 --- a/mindspore/lite/c_ops/eltwise.h +++ b/mindspore/lite/src/ops/eltwise.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,16 +29,15 @@ #define LITE_MINDSPORE_LITE_C_OPS_ELTWISE_H_ namespace mindspore { +namespace lite { class Eltwise : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Eltwise(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Eltwise(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Eltwise(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int 
GetMode() const; void SetMode(int mode); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_ELTWISE_H_ diff --git a/mindspore/lite/c_ops/elu.cc b/mindspore/lite/src/ops/elu.cc similarity index 93% rename from mindspore/lite/c_ops/elu.cc rename to mindspore/lite/src/ops/elu.cc index 955af3823e..2c2ad09d69 100644 --- a/mindspore/lite/c_ops/elu.cc +++ b/mindspore/lite/src/ops/elu.cc @@ -14,9 +14,10 @@ * limitations under the License. */ -#include "c_ops/elu.h" +#include "src/ops/elu.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE float Elu::GetAlpha() const { return this->primitive->value.AsElu()->alpha; } @@ -28,4 +29,5 @@ float Elu::GetAlpha() const { return this->primitive->value_as_Elu()->alpha(); } void Elu::SetAlpha(float alpha) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/elu.h b/mindspore/lite/src/ops/elu.h similarity index 82% rename from mindspore/lite/c_ops/elu.h rename to mindspore/lite/src/ops/elu.h index 7160256097..e85acee30d 100644 --- a/mindspore/lite/c_ops/elu.h +++ b/mindspore/lite/src/ops/elu.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,16 +29,15 @@ #define LITE_MINDSPORE_LITE_C_OPS_ELU_H_ namespace mindspore { +namespace lite { class Elu : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Elu(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Elu(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Elu(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + float GetAlpha() const; void SetAlpha(float alpha); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_ELU_H_ diff --git a/mindspore/lite/src/ops/embedding_lookup.cc b/mindspore/lite/src/ops/embedding_lookup.cc 
index 3a25197611..0653f1d8c1 100644 --- a/mindspore/lite/src/ops/embedding_lookup.cc +++ b/mindspore/lite/src/ops/embedding_lookup.cc @@ -14,39 +14,44 @@ * limitations under the License. */ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "src/ir/tensor.h" -#include "utils/log_adapter.h" +#include "src/ops/embedding_lookup.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +float EmbeddingLookup::GetMaxNorm() const { return this->primitive->value.AsEmbeddingLookup()->maxNorm; } + +void EmbeddingLookup::SetMaxNorm(float max_norm) { this->primitive->value.AsEmbeddingLookup()->maxNorm = max_norm; } + +#else + +float EmbeddingLookup::GetMaxNorm() const { return this->primitive->value_as_EmbeddingLookup()->maxNorm(); } + +void EmbeddingLookup::SetMaxNorm(float max_norm) {} +#endif -namespace mindspore::lite { int EmbeddingLookup::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); if (inputs_.size() < kDoubleNum) { MS_LOG(ERROR) << "Embedding Lookup should have at least two inputs"; return RET_INPUT_TENSOR_ERROR; } - if (outputs_.size() != kSingleNum) { MS_LOG(ERROR) << "Embedding Lookup should have one outputs"; return RET_INPUT_TENSOR_ERROR; } - auto params_ = inputs_.front(); MS_ASSERT(params_ != nullptr); auto ids = inputs_.back(); MS_ASSERT(ids != nullptr); auto output = outputs_.front(); MS_ASSERT(output != nullptr); - auto embedding_shape = params_->shape(); embedding_shape.erase(embedding_shape.begin()); - std::vector output_shape(ids->shape()); for (size_t i = 0; i < embedding_shape.size(); ++i) { output_shape.push_back(embedding_shape.at(i)); } - for (int i = 1; i < inputs_.size() - 1; ++i) { auto embedding_shape_t = inputs_.at(i)->shape(); embedding_shape_t.erase(embedding_shape_t.begin()); @@ -55,9 +60,9 @@ int EmbeddingLookup::InferShape(std::vector inputs_, std::vect return RET_INPUT_TENSOR_ERROR; } } - output->set_shape(output_shape); 
output->set_data_type(params_->data_type()); return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/embedding_lookup.h b/mindspore/lite/src/ops/embedding_lookup.h similarity index 83% rename from mindspore/lite/c_ops/embedding_lookup.h rename to mindspore/lite/src/ops/embedding_lookup.h index 744befbac6..76105f3f84 100644 --- a/mindspore/lite/c_ops/embedding_lookup.h +++ b/mindspore/lite/src/ops/embedding_lookup.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,17 +29,16 @@ #define LITE_MINDSPORE_LITE_C_OPS_EMBEDDING_LOOKUP_H_ namespace mindspore { +namespace lite { class EmbeddingLookup : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit EmbeddingLookup(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit EmbeddingLookup(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit EmbeddingLookup(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; float GetMaxNorm() const; void SetMaxNorm(float max_norm); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_EMBEDDING_LOOKUP_H_ diff --git a/mindspore/lite/c_ops/embedding_lookup_sparse.cc b/mindspore/lite/src/ops/embedding_lookup_sparse.cc similarity index 96% rename from mindspore/lite/c_ops/embedding_lookup_sparse.cc rename to mindspore/lite/src/ops/embedding_lookup_sparse.cc index 284bd0fa86..c443af5353 100644 --- a/mindspore/lite/c_ops/embedding_lookup_sparse.cc +++ b/mindspore/lite/src/ops/embedding_lookup_sparse.cc @@ -14,9 +14,10 @@ * limitations under the License. 
*/ -#include "c_ops/embedding_lookup_sparse.h" +#include "src/ops/embedding_lookup_sparse.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE std::vector EmbeddingLookupSparse::GetSpIds() const { return this->primitive->value.AsEmbeddingLookupSparse()->spIds; @@ -54,4 +55,5 @@ void EmbeddingLookupSparse::SetSpIds(const std::vector &sp_ids) {} void EmbeddingLookupSparse::SetSpWeights(const std::vector &sp_weights) {} void EmbeddingLookupSparse::SetMaxNortm(float max_nortm) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/embedding_lookup_sparse.h b/mindspore/lite/src/ops/embedding_lookup_sparse.h similarity index 83% rename from mindspore/lite/c_ops/embedding_lookup_sparse.h rename to mindspore/lite/src/ops/embedding_lookup_sparse.h index cf10c2d555..836ae23268 100644 --- a/mindspore/lite/c_ops/embedding_lookup_sparse.h +++ b/mindspore/lite/src/ops/embedding_lookup_sparse.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_EMBEDDING_LOOKUP_SPARSE_H_ namespace mindspore { +namespace lite { class EmbeddingLookupSparse : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit EmbeddingLookupSparse(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit EmbeddingLookupSparse(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit EmbeddingLookupSparse(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + std::vector GetSpIds() const; std::vector GetSpWeights() const; float GetMaxNortm() const; @@ -43,6 +41,7 @@ class EmbeddingLookupSparse : public PrimitiveC { void SetSpWeights(const std::vector &sp_weights); void SetMaxNortm(float max_nortm); }; +} // namespace lite } // namespace mindspore #endif // 
LITE_MINDSPORE_LITE_C_OPS_EMBEDDING_LOOKUP_SPARSE_H_ diff --git a/mindspore/lite/c_ops/equal.h b/mindspore/lite/src/ops/equal.h similarity index 78% rename from mindspore/lite/c_ops/equal.h rename to mindspore/lite/src/ops/equal.h index f535c883d0..1acf5d8713 100644 --- a/mindspore/lite/c_ops/equal.h +++ b/mindspore/lite/src/ops/equal.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "c_ops/arithmetic.h" +#include "src/ops/arithmetic.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,10 +29,12 @@ #define LITE_MINDSPORE_LITE_C_OPS_EQUAL_H_ namespace mindspore { +namespace lite { class Equal : public Arithmetic { public: - explicit Equal(schema::Primitive *primitive) : Arithmetic(primitive) {} + explicit Equal(OriginPrimitive *primitive) : Arithmetic(primitive) {} }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_EQUAL_H_ diff --git a/mindspore/lite/c_ops/exp.h b/mindspore/lite/src/ops/exp.h similarity index 72% rename from mindspore/lite/c_ops/exp.h rename to mindspore/lite/src/ops/exp.h index f245b7a3a4..4b6c10a75f 100644 --- a/mindspore/lite/c_ops/exp.h +++ b/mindspore/lite/src/ops/exp.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "c_ops/arithmetic_self.h" +#include "src/ops/arithmetic_self.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,14 +29,12 @@ #define LITE_MINDSPORE_LITE_C_OPS_EXP_H_ namespace mindspore { +namespace lite { class Exp : public ArithmeticSelf { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Exp(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} -#else - explicit Exp(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} -#endif + explicit Exp(OriginPrimitive *primitive) : ArithmeticSelf(primitive) {} }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_EXP_H_ diff --git a/mindspore/lite/src/ops/expand_dims.cc b/mindspore/lite/src/ops/expand_dims.cc index 5b0391d654..f959d3c501 100644 --- a/mindspore/lite/src/ops/expand_dims.cc +++ b/mindspore/lite/src/ops/expand_dims.cc @@ -14,12 +14,22 @@ * limitations under the License. 
*/ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/expand_dims.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +int ExpandDims::GetDim() const { return this->primitive->value.AsExpandDims()->dim; } + +void ExpandDims::SetDim(int dim) { this->primitive->value.AsExpandDims()->dim = dim; } + +#else + +int ExpandDims::GetDim() const { return this->primitive->value_as_ExpandDims()->dim(); } + +void ExpandDims::SetDim(int dim) {} +#endif -namespace mindspore::lite { int ExpandDims::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); auto input = inputs_.front(); @@ -46,7 +56,7 @@ int ExpandDims::InferShape(std::vector inputs_, std::vectorset_shape(out_shape); output->set_data_type(input->data_type()); output->SetFormat(input->GetFormat()); - return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/expand_dims.h b/mindspore/lite/src/ops/expand_dims.h similarity index 83% rename from mindspore/lite/c_ops/expand_dims.h rename to mindspore/lite/src/ops/expand_dims.h index 931182d064..3a3aa1f6b8 100644 --- a/mindspore/lite/c_ops/expand_dims.h +++ b/mindspore/lite/src/ops/expand_dims.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,17 +29,16 @@ #define LITE_MINDSPORE_LITE_C_OPS_EXPAND_DIMS_H_ namespace mindspore { +namespace lite { class ExpandDims : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit ExpandDims(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit ExpandDims(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit ExpandDims(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int 
InferShape(std::vector inputs_, std::vector outputs_) override; int GetDim() const; void SetDim(int dim); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_EXPAND_DIMS_H_ diff --git a/mindspore/lite/c_ops/fake_quant_with_min_max_vars.cc b/mindspore/lite/src/ops/fake_quant_with_min_max_vars.cc similarity index 94% rename from mindspore/lite/c_ops/fake_quant_with_min_max_vars.cc rename to mindspore/lite/src/ops/fake_quant_with_min_max_vars.cc index 957aa1f44a..a6bf9e293b 100644 --- a/mindspore/lite/c_ops/fake_quant_with_min_max_vars.cc +++ b/mindspore/lite/src/ops/fake_quant_with_min_max_vars.cc @@ -14,9 +14,10 @@ * limitations under the License. */ -#include "c_ops/fake_quant_with_min_max_vars.h" +#include "src/ops/fake_quant_with_min_max_vars.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE bool FakeQuantWithMinMaxVars::GetNarrowRange() const { return this->primitive->value.AsFakeQuantWithMinMaxVars()->narrowRange; @@ -42,4 +43,5 @@ int FakeQuantWithMinMaxVars::GetNumBits() const { void FakeQuantWithMinMaxVars::SetNarrowRange(bool narrow_range) {} void FakeQuantWithMinMaxVars::SetNumBits(int num_bits) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/fake_quant_with_min_max_vars.h b/mindspore/lite/src/ops/fake_quant_with_min_max_vars.h similarity index 82% rename from mindspore/lite/c_ops/fake_quant_with_min_max_vars.h rename to mindspore/lite/src/ops/fake_quant_with_min_max_vars.h index d29ac2c528..a8cea93ccf 100644 --- a/mindspore/lite/c_ops/fake_quant_with_min_max_vars.h +++ b/mindspore/lite/src/ops/fake_quant_with_min_max_vars.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,18 +29,17 @@ #define LITE_MINDSPORE_LITE_C_OPS_FAKE_QUANT_WITH_MIN_MAX_VARS_H_ namespace mindspore { +namespace 
lite { class FakeQuantWithMinMaxVars : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit FakeQuantWithMinMaxVars(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit FakeQuantWithMinMaxVars(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit FakeQuantWithMinMaxVars(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + bool GetNarrowRange() const; int GetNumBits() const; void SetNarrowRange(bool narrow_range); void SetNumBits(int num_bits); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_FAKE_QUANT_WITH_MIN_MAX_VARS_H_ diff --git a/mindspore/lite/src/ops/fill.cc b/mindspore/lite/src/ops/fill.cc index f4bd0c1952..9e0ed36f53 100644 --- a/mindspore/lite/src/ops/fill.cc +++ b/mindspore/lite/src/ops/fill.cc @@ -14,12 +14,25 @@ * limitations under the License. */ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/fill.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +std::vector Fill::GetDims() const { return this->primitive->value.AsFill()->dims; } + +void Fill::SetDims(const std::vector &dims) { this->primitive->value.AsFill()->dims = dims; } + +#else + +std::vector Fill::GetDims() const { + auto fb_vector = this->primitive->value_as_Fill()->dims(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} + +void Fill::SetDims(const std::vector &dims) {} +#endif -namespace mindspore::lite { int Fill::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); auto input = inputs_.front(); @@ -28,7 +41,6 @@ int Fill::InferShape(std::vector inputs_, std::vector inputs_, std::vectorset_shape(output_shape); output->set_data_type(input->data_type()); output->SetFormat(input->GetFormat()); - return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git 
a/mindspore/lite/c_ops/fill.h b/mindspore/lite/src/ops/fill.h similarity index 84% rename from mindspore/lite/c_ops/fill.h rename to mindspore/lite/src/ops/fill.h index 5227e13dba..7850d09b8e 100644 --- a/mindspore/lite/c_ops/fill.h +++ b/mindspore/lite/src/ops/fill.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,17 +29,16 @@ #define LITE_MINDSPORE_LITE_C_OPS_FILL_H_ namespace mindspore { +namespace lite { class Fill : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Fill(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Fill(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Fill(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; std::vector GetDims() const; void SetDims(const std::vector &dims); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_FILL_H_ diff --git a/mindspore/lite/src/ops/flatten.cc b/mindspore/lite/src/ops/flatten.cc index bde0cd16c5..fbc008f311 100644 --- a/mindspore/lite/src/ops/flatten.cc +++ b/mindspore/lite/src/ops/flatten.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,12 +14,11 @@ * limitations under the License. 
*/ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/flatten.h" + +namespace mindspore { +namespace lite { -namespace mindspore::lite { int Flatten::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); auto input = inputs_.front(); @@ -28,12 +27,10 @@ int Flatten::InferShape(std::vector inputs_, std::vectorshape(); std::vector output_shape(2); output_shape[0] = input_shape[0]; @@ -44,7 +41,7 @@ int Flatten::InferShape(std::vector inputs_, std::vectorset_shape(output_shape); output->set_data_type(input->data_type()); output->SetFormat(input->GetFormat()); - return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/flatten.h b/mindspore/lite/src/ops/flatten.h similarity index 74% rename from mindspore/lite/c_ops/flatten.h rename to mindspore/lite/src/ops/flatten.h index b373340198..0c0023809c 100644 --- a/mindspore/lite/c_ops/flatten.h +++ b/mindspore/lite/src/ops/flatten.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,15 +29,14 @@ #define LITE_MINDSPORE_LITE_C_OPS_FLATTEN_H_ namespace mindspore { +namespace lite { class Flatten : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Flatten(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Flatten(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Flatten(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_FLATTEN_H_ diff --git a/mindspore/lite/c_ops/floor.h b/mindspore/lite/src/ops/floor.h similarity index 72% rename from mindspore/lite/c_ops/floor.h rename to mindspore/lite/src/ops/floor.h index daf59f7783..7f61378c77 100644 --- a/mindspore/lite/c_ops/floor.h +++ b/mindspore/lite/src/ops/floor.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "c_ops/arithmetic_self.h" +#include "src/ops/arithmetic_self.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,14 +29,12 @@ #define LITE_MINDSPORE_LITE_C_OPS_FLOOR_H_ namespace mindspore { +namespace lite { class Floor : public ArithmeticSelf { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Floor(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} -#else - explicit Floor(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} -#endif + explicit Floor(OriginPrimitive *primitive) : ArithmeticSelf(primitive) {} }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_FLOOR_H_ diff --git a/mindspore/lite/c_ops/floor_div.h b/mindspore/lite/src/ops/floor_div.h similarity index 73% rename from mindspore/lite/c_ops/floor_div.h rename to mindspore/lite/src/ops/floor_div.h index a73f5379d9..4b187426b4 100644 --- a/mindspore/lite/c_ops/floor_div.h +++ b/mindspore/lite/src/ops/floor_div.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "c_ops/arithmetic.h" +#include "src/ops/arithmetic.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,14 +29,12 @@ #define LITE_MINDSPORE_LITE_C_OPS_FLOOR_DIV_H_ namespace mindspore { +namespace lite { class FloorDiv : public Arithmetic { public: -#ifdef PRIMITIVE_WRITEABLE - explicit FloorDiv(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} -#else - explicit FloorDiv(schema::Primitive *primitive) : Arithmetic(primitive) {} -#endif + explicit FloorDiv(OriginPrimitive *primitive) : Arithmetic(primitive) {} }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_FLOOR_DIV_H_ diff --git a/mindspore/lite/c_ops/floor_mod.h b/mindspore/lite/src/ops/floor_mod.h similarity index 73% rename from mindspore/lite/c_ops/floor_mod.h rename to mindspore/lite/src/ops/floor_mod.h index c2b4f2236a..d9e78e10b7 100644 --- a/mindspore/lite/c_ops/floor_mod.h +++ b/mindspore/lite/src/ops/floor_mod.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "c_ops/arithmetic.h" +#include "src/ops/arithmetic.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,14 +29,12 @@ #define LITE_MINDSPORE_LITE_C_OPS_FLOOR_MOD_H_ namespace mindspore { +namespace lite { class FloorMod : public Arithmetic { public: -#ifdef PRIMITIVE_WRITEABLE - explicit FloorMod(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} -#else - explicit FloorMod(schema::Primitive *primitive) : Arithmetic(primitive) {} -#endif + explicit FloorMod(OriginPrimitive *primitive) : Arithmetic(primitive) {} }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_FLOOR_MOD_H_ diff --git a/mindspore/lite/c_ops/full_connection.cc b/mindspore/lite/src/ops/full_connection.cc similarity index 85% rename from mindspore/lite/c_ops/full_connection.cc rename to mindspore/lite/src/ops/full_connection.cc index 4a59469638..69b5c4417e 100644 --- a/mindspore/lite/c_ops/full_connection.cc +++ b/mindspore/lite/src/ops/full_connection.cc @@ -14,27 +14,33 @@ * limitations under the License. 
*/ -#include "c_ops/full_connection.h" +#include "src/ops/full_connection.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE bool FullConnection::GetHasBias() const { return this->primitive->value.AsFullConnection()->hasBias; } int FullConnection::GetAxis() const { return this->primitive->value.AsFullConnection()->axis; } bool FullConnection::GetUseAxis() const { return this->primitive->value.AsFullConnection()->useAxis; } +int FullConnection::GetActivationType() const { return this->primitive->value.AsFullConnection()->activationType; } void FullConnection::SetHasBias(bool has_bias) { this->primitive->value.AsFullConnection()->hasBias = has_bias; } void FullConnection::SetAxis(int axis) { this->primitive->value.AsFullConnection()->axis = axis; } void FullConnection::SetUseAxis(bool use_axis) { this->primitive->value.AsFullConnection()->useAxis = use_axis; } - +void FullConnection::SetActivationType(int activationType) { + this->primitive->value.AsFullConnection()->activationType = (schema::ActivationType)activationType; +} #else bool FullConnection::GetHasBias() const { return this->primitive->value_as_FullConnection()->hasBias(); } int FullConnection::GetAxis() const { return this->primitive->value_as_FullConnection()->axis(); } bool FullConnection::GetUseAxis() const { return this->primitive->value_as_FullConnection()->useAxis(); } +int FullConnection::GetActivationType() const { return this->primitive->value_as_FullConnection()->activationType(); } void FullConnection::SetHasBias(bool has_bias) {} void FullConnection::SetAxis(int axis) {} void FullConnection::SetUseAxis(bool use_axis) {} +void FullConnection::SetActivationType(int activationType) {} #endif int FullConnection::InferShape(std::vector inputs_, std::vector outputs_) { @@ -77,4 +83,5 @@ int FullConnection::InferShape(std::vector inputs_, return 0; } +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/full_connection.h
b/mindspore/lite/src/ops/full_connection.h similarity index 84% rename from mindspore/lite/c_ops/full_connection.h rename to mindspore/lite/src/ops/full_connection.h index 9d3732988d..5c971bac78 100644 --- a/mindspore/lite/c_ops/full_connection.h +++ b/mindspore/lite/src/ops/full_connection.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,21 +29,22 @@ #define LITE_MINDSPORE_LITE_C_OPS_FULL_CONNECTION_H_ namespace mindspore { +namespace lite { class FullConnection : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit FullConnection(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit FullConnection(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit FullConnection(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; bool GetHasBias() const; int GetAxis() const; bool GetUseAxis() const; + int GetActivationType() const; void SetHasBias(bool has_bias); void SetAxis(int axis); void SetUseAxis(bool use_axis); + void SetActivationType(int activationType); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_FULL_CONNECTION_H_ diff --git a/mindspore/lite/src/ops/fullconnection.cc b/mindspore/lite/src/ops/fullconnection.cc deleted file mode 100644 index 4e32bc66d3..0000000000 --- a/mindspore/lite/src/ops/fullconnection.cc +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" - -namespace mindspore::lite { -int FullConnection::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - auto input0 = inputs_.front(); - MS_ASSERT(input0 != nullptr); - auto input1 = inputs_[1]; - MS_ASSERT(input1 != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - auto fc_prim = this->primitive->value_as_FullConnection(); - if ((fc_prim->hasBias() && inputs_.size() != kMultiNum) || (!fc_prim->hasBias() && inputs_.size() != kDoubleNum)) { - MS_LOG(ERROR) << "Input tensors num error"; - return RET_INPUT_TENSOR_ERROR; - } - auto axis = fc_prim->axis(); - auto use_axis = fc_prim->useAxis(); - if (use_axis && (axis < 1 || axis >= input0->shape().size())) { - MS_LOG(ERROR) << "FullConnection axis invalid"; - return RET_INPUT_TENSOR_ERROR; - } - int new_k = 1; - if (use_axis) { - for (int i = axis; i < input0->shape().size(); ++i) { - new_k *= input0->shape()[i]; - } - if (new_k != input1->shape()[1]) { - MS_LOG(ERROR) << "Input1 size invalid"; - return RET_PARAM_INVALID; - } - } else { - new_k = input1->shape()[1]; - } - - if (fc_prim->hasBias()) { - if (inputs_[2]->shape()[0] != input1->shape()[0]) { - MS_LOG(ERROR) << "bias size invalid"; - return RET_PARAM_INVALID; - } - } - std::vector out_shape{inputs_[0]->shape()}; - if (use_axis) { - out_shape.resize(fc_prim->axis() + 1); - out_shape[fc_prim->axis()] = input1->shape()[0]; - } else { - int total = 1; 
- for (int i = 0; i < input0->shape().size(); ++i) { - total *= input0->shape()[i]; - } - out_shape.resize(2); - auto batch_size = total / new_k; - out_shape[0] = batch_size; - out_shape[1] = input1->shape()[0]; - } - output->set_shape(out_shape); - output->set_data_type(input0->data_type()); - output->SetFormat(input0->GetFormat()); - - return RET_OK; -} -} // namespace mindspore::lite diff --git a/mindspore/lite/c_ops/fused_batchnorm.cc b/mindspore/lite/src/ops/fused_batchnorm.cc similarity index 96% rename from mindspore/lite/c_ops/fused_batchnorm.cc rename to mindspore/lite/src/ops/fused_batchnorm.cc index c8fce2de78..5e97cef7d1 100644 --- a/mindspore/lite/c_ops/fused_batchnorm.cc +++ b/mindspore/lite/src/ops/fused_batchnorm.cc @@ -14,9 +14,10 @@ * limitations under the License. */ -#include "c_ops/fused_batchnorm.h" +#include "src/ops/fused_batchnorm.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE float FusedBatchNorm::GetEpsilon() const { return this->primitive->value.AsFusedBatchNorm()->epsilon; } float FusedBatchNorm::GetMomentum() const { return this->primitive->value.AsFusedBatchNorm()->momentum; } @@ -36,4 +37,5 @@ void FusedBatchNorm::SetEpsilon(float epsilon) {} void FusedBatchNorm::SetMomentum(float momentum) {} void FusedBatchNorm::SetSpatial(int spatial) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/fused_batchnorm.h b/mindspore/lite/src/ops/fused_batchnorm.h similarity index 83% rename from mindspore/lite/c_ops/fused_batchnorm.h rename to mindspore/lite/src/ops/fused_batchnorm.h index 6614e2a43f..17381fdaa5 100644 --- a/mindspore/lite/c_ops/fused_batchnorm.h +++ b/mindspore/lite/src/ops/fused_batchnorm.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define 
LITE_MINDSPORE_LITE_C_OPS_FUSED_BATCH_NORM_H_ namespace mindspore { +namespace lite { class FusedBatchNorm : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit FusedBatchNorm(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit FusedBatchNorm(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit FusedBatchNorm(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + float GetEpsilon() const; float GetMomentum() const; int GetSpatial() const; @@ -43,6 +41,7 @@ class FusedBatchNorm : public PrimitiveC { void SetMomentum(float momentum); void SetSpatial(int spatial); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_FUSED_BATCH_NORM_H_ diff --git a/mindspore/lite/src/ops/gather.cc b/mindspore/lite/src/ops/gather.cc index efe58dc3cf..a7730db82f 100644 --- a/mindspore/lite/src/ops/gather.cc +++ b/mindspore/lite/src/ops/gather.cc @@ -14,12 +14,29 @@ * limitations under the License. */ -#include "src/ops/ops.h" +#include "src/ops/gather.h" #include "include/errorcode.h" #include "utils/log_adapter.h" #include "src/ir/tensor.h" -namespace mindspore::lite { +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +int Gather::GetAxis() const { return this->primitive->value.AsGather()->axis; } +int Gather::GetBatchDims() const { return this->primitive->value.AsGather()->batchDims; } + +void Gather::SetAxis(int axis) { this->primitive->value.AsGather()->axis = axis; } +void Gather::SetBatchDims(int batch_dims) { this->primitive->value.AsGather()->batchDims = batch_dims; } + +#else + +int Gather::GetAxis() const { return this->primitive->value_as_Gather()->axis(); } +int Gather::GetBatchDims() const { return this->primitive->value_as_Gather()->batchDims(); } + +void Gather::SetAxis(int axis) {} +void Gather::SetBatchDims(int batch_dims) {} +#endif + int Gather::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); if 
(inputs_.size() != kDoubleNum) { @@ -30,7 +47,6 @@ int Gather::InferShape(std::vector inputs_, std::vector inputs_, std::vectorprimitive->value_as_Gather(); MS_ASSERT(gather_prim != nullptr); - int axis = gather_prim->axis(); int batch_dims = gather_prim->batchDims(); if (axis < 0) { @@ -66,15 +81,13 @@ int Gather::InferShape(std::vector inputs_, std::vector out_shape{in_shape}; out_shape.erase(out_shape.begin() + axis); for (size_t i = 0; i < indices_rank; i++) { out_shape.insert(out_shape.begin() + axis, indices_shape[i]); } - output->set_shape(out_shape); - return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/gather.h b/mindspore/lite/src/ops/gather.h similarity index 84% rename from mindspore/lite/c_ops/gather.h rename to mindspore/lite/src/ops/gather.h index 67126fca54..a53b0de319 100644 --- a/mindspore/lite/c_ops/gather.h +++ b/mindspore/lite/src/ops/gather.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,19 +29,18 @@ #define LITE_MINDSPORE_LITE_C_OPS_GATHER_H_ namespace mindspore { +namespace lite { class Gather : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Gather(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Gather(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Gather(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetAxis() const; int GetBatchDims() const; void SetAxis(int axis); void SetBatchDims(int batch_dims); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_GATHER_H_ diff --git a/mindspore/lite/src/ops/gather_nd.cc b/mindspore/lite/src/ops/gather_nd.cc index 681e2d207b..36eeaacd13 100644 --- 
a/mindspore/lite/src/ops/gather_nd.cc +++ b/mindspore/lite/src/ops/gather_nd.cc @@ -14,12 +14,22 @@ * limitations under the License. */ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/gather_nd.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +int GatherNd::GetBatchDims() const { return this->primitive->value.AsGatherNd()->batchDims; } + +void GatherNd::SetBatchDims(int batch_dims) { this->primitive->value.AsGatherNd()->batchDims = batch_dims; } + +#else + +int GatherNd::GetBatchDims() const { return this->primitive->value_as_GatherNd()->batchDims(); } + +void GatherNd::SetBatchDims(int batch_dims) {} +#endif -namespace mindspore::lite { int GatherNd::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); if (inputs_.size() != kDoubleNum) { @@ -30,24 +40,20 @@ int GatherNd::InferShape(std::vector inputs_, std::vectorshape(); int in_rank = in_shape.size(); auto indices_shape = indices->shape(); int indices_rank = indices_shape.size(); - if (indices_shape[indices_rank - 1] > in_rank) { MS_LOG(ERROR) << "Input of indices data is error!"; return RET_ERROR; } - std::vector out_shape; int i = 0; for (i = 0; i < indices_rank - 1; ++i) { @@ -56,11 +62,10 @@ int GatherNd::InferShape(std::vector inputs_, std::vectorset_shape(out_shape); output->set_data_type(input->data_type()); output->SetFormat(input->GetFormat()); - return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/gather_nd.h b/mindspore/lite/src/ops/gather_nd.h similarity index 83% rename from mindspore/lite/c_ops/gather_nd.h rename to mindspore/lite/src/ops/gather_nd.h index 012b1c695d..1016e68717 100644 --- a/mindspore/lite/c_ops/gather_nd.h +++ b/mindspore/lite/src/ops/gather_nd.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include 
"mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,17 +29,16 @@ #define LITE_MINDSPORE_LITE_C_OPS_GATHER_ND_H_ namespace mindspore { +namespace lite { class GatherNd : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit GatherNd(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit GatherNd(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit GatherNd(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetBatchDims() const; void SetBatchDims(int batch_dims); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_GATHER_ND_H_ diff --git a/mindspore/lite/c_ops/greater.h b/mindspore/lite/src/ops/greater.h similarity index 73% rename from mindspore/lite/c_ops/greater.h rename to mindspore/lite/src/ops/greater.h index e8890dd219..059eb53bbf 100644 --- a/mindspore/lite/c_ops/greater.h +++ b/mindspore/lite/src/ops/greater.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "c_ops/arithmetic.h" +#include "src/ops/arithmetic.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,14 +29,12 @@ #define LITE_MINDSPORE_LITE_C_OPS_GREATER_H_ namespace mindspore { +namespace lite { class Greater : public Arithmetic { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Greater(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} -#else - explicit Greater(schema::Primitive *primitive) : Arithmetic(primitive) {} -#endif + explicit Greater(OriginPrimitive *primitive) : Arithmetic(primitive) {} }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_GREATER_H_ diff --git a/mindspore/lite/c_ops/greater_equal.h b/mindspore/lite/src/ops/greater_equal.h similarity index 73% rename from mindspore/lite/c_ops/greater_equal.h rename to mindspore/lite/src/ops/greater_equal.h index 3fa7b95a21..1cbacd9bd0 100644 --- a/mindspore/lite/c_ops/greater_equal.h +++ b/mindspore/lite/src/ops/greater_equal.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "c_ops/arithmetic.h" +#include "src/ops/arithmetic.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,14 +29,12 @@ #define LITE_MINDSPORE_LITE_C_OPS_GREATER_EQUAL_H_ namespace mindspore { +namespace lite { class GreaterEqual : public Arithmetic { public: -#ifdef PRIMITIVE_WRITEABLE - explicit GreaterEqual(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} -#else - explicit GreaterEqual(schema::Primitive *primitive) : Arithmetic(primitive) {} -#endif + explicit GreaterEqual(OriginPrimitive *primitive) : Arithmetic(primitive) {} }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_GREATER_EQUAL_H_ diff --git a/mindspore/lite/c_ops/l2_norm.cc b/mindspore/lite/src/ops/l2_norm.cc similarity index 95% rename from mindspore/lite/c_ops/l2_norm.cc rename to mindspore/lite/src/ops/l2_norm.cc index 163f74ad26..c9cbe584dd 100644 --- a/mindspore/lite/c_ops/l2_norm.cc +++ b/mindspore/lite/src/ops/l2_norm.cc @@ -14,9 +14,10 @@ * limitations under the License. 
*/ -#include "c_ops/l2_norm.h" +#include "src/ops/l2_norm.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE std::vector L2Norm::GetAxis() const { return this->primitive->value.AsL2Norm()->axis; } float L2Norm::GetEpsilon() const { return this->primitive->value.AsL2Norm()->epsilon; } @@ -35,4 +36,5 @@ float L2Norm::GetEpsilon() const { return this->primitive->value_as_L2Norm()->ep void L2Norm::SetAxis(const std::vector &axis) {} void L2Norm::SetEpsilon(float epsilon) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/l2_norm.h b/mindspore/lite/src/ops/l2_norm.h similarity index 83% rename from mindspore/lite/c_ops/l2_norm.h rename to mindspore/lite/src/ops/l2_norm.h index 2ca793e686..912795b5ba 100644 --- a/mindspore/lite/c_ops/l2_norm.h +++ b/mindspore/lite/src/ops/l2_norm.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,18 +29,17 @@ #define LITE_MINDSPORE_LITE_C_OPS_L2_NORM_H_ namespace mindspore { +namespace lite { class L2Norm : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit L2Norm(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit L2Norm(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit L2Norm(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + std::vector GetAxis() const; float GetEpsilon() const; void SetAxis(const std::vector &axis); void SetEpsilon(float epsilon); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_L2_NORM_H_ diff --git a/mindspore/lite/c_ops/leaky_relu.cc b/mindspore/lite/src/ops/leaky_relu.cc similarity index 94% rename from mindspore/lite/c_ops/leaky_relu.cc rename to mindspore/lite/src/ops/leaky_relu.cc index b339ece178..e0850f50a9 100644 --- a/mindspore/lite/c_ops/leaky_relu.cc +++ 
b/mindspore/lite/src/ops/leaky_relu.cc @@ -14,9 +14,10 @@ * limitations under the License. */ -#include "c_ops/leaky_relu.h" +#include "src/ops/leaky_relu.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE float LeakyReLU::GetNegativeSlope() const { return this->primitive->value.AsLeakyReLU()->negativeSlope; } @@ -30,4 +31,5 @@ float LeakyReLU::GetNegativeSlope() const { return this->primitive->value_as_Lea void LeakyReLU::SetNegativeSlope(float negative_slope) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/leaky_relu.h b/mindspore/lite/src/ops/leaky_relu.h similarity index 82% rename from mindspore/lite/c_ops/leaky_relu.h rename to mindspore/lite/src/ops/leaky_relu.h index 155dfc2753..4cdaa74090 100644 --- a/mindspore/lite/c_ops/leaky_relu.h +++ b/mindspore/lite/src/ops/leaky_relu.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,16 +29,15 @@ #define LITE_MINDSPORE_LITE_C_OPS_LEAKY_RE_L_U_H_ namespace mindspore { +namespace lite { class LeakyReLU : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit LeakyReLU(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit LeakyReLU(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit LeakyReLU(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + float GetNegativeSlope() const; void SetNegativeSlope(float negative_slope); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_LEAKY_RE_L_U_H_ diff --git a/mindspore/lite/c_ops/less.h b/mindspore/lite/src/ops/less.h similarity index 73% rename from mindspore/lite/c_ops/less.h rename to mindspore/lite/src/ops/less.h index 06a4cf6e1e..58c359735b 100644 --- a/mindspore/lite/c_ops/less.h +++ b/mindspore/lite/src/ops/less.h @@ -1,7 +1,7 @@ /** - * 
Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "c_ops/arithmetic.h" +#include "src/ops/arithmetic.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,14 +29,12 @@ #define LITE_MINDSPORE_LITE_C_OPS_LESS_H_ namespace mindspore { +namespace lite { class Less : public Arithmetic { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Less(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} -#else - explicit Less(schema::Primitive *primitive) : Arithmetic(primitive) {} -#endif + explicit Less(OriginPrimitive *primitive) : Arithmetic(primitive) {} }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_LESS_H_ diff --git a/mindspore/lite/c_ops/less_equal.h b/mindspore/lite/src/ops/less_equal.h similarity index 73% rename from mindspore/lite/c_ops/less_equal.h rename to mindspore/lite/src/ops/less_equal.h index 78c7170956..cd1308dcd6 100644 --- a/mindspore/lite/c_ops/less_equal.h +++ b/mindspore/lite/src/ops/less_equal.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "c_ops/arithmetic.h" +#include "src/ops/arithmetic.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,14 +29,12 @@ #define LITE_MINDSPORE_LITE_C_OPS_LESS_EQUAL_H_ namespace mindspore { +namespace lite { class LessEqual : public Arithmetic { public: -#ifdef PRIMITIVE_WRITEABLE - explicit LessEqual(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} -#else - explicit LessEqual(schema::Primitive *primitive) : Arithmetic(primitive) {} -#endif + explicit LessEqual(OriginPrimitive *primitive) : Arithmetic(primitive) {} }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_LESS_EQUAL_H_ diff --git a/mindspore/lite/c_ops/local_response_normalization.cc b/mindspore/lite/src/ops/local_response_normalization.cc similarity index 96% rename from mindspore/lite/c_ops/local_response_normalization.cc rename to mindspore/lite/src/ops/local_response_normalization.cc index 65456450c9..2e6dc944c6 100644 --- a/mindspore/lite/c_ops/local_response_normalization.cc +++ b/mindspore/lite/src/ops/local_response_normalization.cc @@ -14,9 +14,10 @@ * limitations under the License. 
*/ -#include "c_ops/local_response_normalization.h" +#include "src/ops/local_response_normalization.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE int LocalResponseNormalization::GetDepthRadius() const { return this->primitive->value.AsLocalResponseNormalization()->depth_radius; @@ -64,4 +65,5 @@ void LocalResponseNormalization::SetBias(float bias) {} void LocalResponseNormalization::SetAlpha(float alpha) {} void LocalResponseNormalization::SetBeta(float beta) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/local_response_normalization.h b/mindspore/lite/src/ops/local_response_normalization.h similarity index 83% rename from mindspore/lite/c_ops/local_response_normalization.h rename to mindspore/lite/src/ops/local_response_normalization.h index 30a28c2857..f099967195 100644 --- a/mindspore/lite/c_ops/local_response_normalization.h +++ b/mindspore/lite/src/ops/local_response_normalization.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_LOCAL_RESPONSE_NORMALIZATION_H_ namespace mindspore { +namespace lite { class LocalResponseNormalization : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit LocalResponseNormalization(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit LocalResponseNormalization(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit LocalResponseNormalization(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int GetDepthRadius() const; float GetBias() const; float GetAlpha() const; @@ -45,6 +43,7 @@ class LocalResponseNormalization : public PrimitiveC { void SetAlpha(float alpha); void SetBeta(float beta); }; +} // namespace lite } // namespace mindspore #endif // 
LITE_MINDSPORE_LITE_C_OPS_LOCAL_RESPONSE_NORMALIZATION_H_ diff --git a/mindspore/lite/c_ops/log.h b/mindspore/lite/src/ops/log.h similarity index 72% rename from mindspore/lite/c_ops/log.h rename to mindspore/lite/src/ops/log.h index 52b92764ba..d8a05dd268 100644 --- a/mindspore/lite/c_ops/log.h +++ b/mindspore/lite/src/ops/log.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "c_ops/arithmetic_self.h" +#include "src/ops/arithmetic_self.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,14 +29,12 @@ #define LITE_MINDSPORE_LITE_C_OPS_LOG_H_ namespace mindspore { +namespace lite { class Log : public ArithmeticSelf { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Log(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} -#else - explicit Log(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} -#endif + explicit Log(OriginPrimitive *primitive) : ArithmeticSelf(primitive) {} }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_LOG_H_ diff --git a/mindspore/lite/c_ops/logical_and.h b/mindspore/lite/src/ops/logical_and.h similarity index 73% rename from mindspore/lite/c_ops/logical_and.h rename to mindspore/lite/src/ops/logical_and.h index cab8648157..4dea085e5d 100644 --- a/mindspore/lite/c_ops/logical_and.h +++ b/mindspore/lite/src/ops/logical_and.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the 
"License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "c_ops/arithmetic.h" +#include "src/ops/arithmetic.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,14 +29,12 @@ #define LITE_MINDSPORE_LITE_C_OPS_LOGICAL_AND_H_ namespace mindspore { +namespace lite { class LogicalAnd : public Arithmetic { public: -#ifdef PRIMITIVE_WRITEABLE - explicit LogicalAnd(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} -#else - explicit LogicalAnd(schema::Primitive *primitive) : Arithmetic(primitive) {} -#endif + explicit LogicalAnd(OriginPrimitive *primitive) : Arithmetic(primitive) {} }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_LOGICAL_AND_H_ diff --git a/mindspore/lite/c_ops/logical_not.h b/mindspore/lite/src/ops/logical_not.h similarity index 72% rename from mindspore/lite/c_ops/logical_not.h rename to mindspore/lite/src/ops/logical_not.h index 16a8a5d702..675d490605 100644 --- a/mindspore/lite/c_ops/logical_not.h +++ b/mindspore/lite/src/ops/logical_not.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "c_ops/arithmetic_self.h" +#include "src/ops/arithmetic_self.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,14 +29,12 @@ #define LITE_MINDSPORE_LITE_C_OPS_LOGICAL_NOT_H_ namespace mindspore { +namespace lite { class LogicalNot : public ArithmeticSelf { public: -#ifdef PRIMITIVE_WRITEABLE - explicit LogicalNot(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} -#else - explicit LogicalNot(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} -#endif + explicit LogicalNot(OriginPrimitive *primitive) : ArithmeticSelf(primitive) {} }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_LOGICAL_NOT_H_ diff --git a/mindspore/lite/c_ops/logical_or.h b/mindspore/lite/src/ops/logical_or.h similarity index 73% rename from mindspore/lite/c_ops/logical_or.h rename to mindspore/lite/src/ops/logical_or.h index ab34a7fb39..158cb367c0 100644 --- a/mindspore/lite/c_ops/logical_or.h +++ b/mindspore/lite/src/ops/logical_or.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "c_ops/arithmetic.h" +#include "src/ops/arithmetic.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,14 +29,12 @@ #define LITE_MINDSPORE_LITE_C_OPS_LOGICAL_OR_H_ namespace mindspore { +namespace lite { class LogicalOr : public Arithmetic { public: -#ifdef PRIMITIVE_WRITEABLE - explicit LogicalOr(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} -#else - explicit LogicalOr(schema::Primitive *primitive) : Arithmetic(primitive) {} -#endif + explicit LogicalOr(OriginPrimitive *primitive) : Arithmetic(primitive) {} }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_LOGICAL_OR_H_ diff --git a/mindspore/lite/c_ops/lrn.cc b/mindspore/lite/src/ops/lrn.cc similarity index 96% rename from mindspore/lite/c_ops/lrn.cc rename to mindspore/lite/src/ops/lrn.cc index 04b00c065a..eb9a8fcd23 100644 --- a/mindspore/lite/c_ops/lrn.cc +++ b/mindspore/lite/src/ops/lrn.cc @@ -14,9 +14,10 @@ * limitations under the License. 
*/ -#include "c_ops/lrn.h" +#include "src/ops/lrn.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE float Lrn::GetAlpha() const { return this->primitive->value.AsLrn()->alpha; } float Lrn::GetBeta() const { return this->primitive->value.AsLrn()->beta; } @@ -40,4 +41,5 @@ void Lrn::SetBeta(float beta) {} void Lrn::SetBias(float bias) {} void Lrn::SetSize(int size) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/lrn.h b/mindspore/lite/src/ops/lrn.h similarity index 84% rename from mindspore/lite/c_ops/lrn.h rename to mindspore/lite/src/ops/lrn.h index ed61bc86bd..cc91453882 100644 --- a/mindspore/lite/c_ops/lrn.h +++ b/mindspore/lite/src/ops/lrn.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_LRN_H_ namespace mindspore { +namespace lite { class Lrn : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Lrn(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Lrn(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Lrn(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + float GetAlpha() const; float GetBeta() const; float GetBias() const; @@ -45,6 +43,7 @@ class Lrn : public PrimitiveC { void SetBias(float bias); void SetSize(int size); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_LRN_H_ diff --git a/mindspore/lite/src/ops/lstm.cc b/mindspore/lite/src/ops/lstm.cc index 22b52ad8a7..6983619636 100644 --- a/mindspore/lite/src/ops/lstm.cc +++ b/mindspore/lite/src/ops/lstm.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this 
file except in compliance with the License. @@ -14,12 +14,22 @@ * limitations under the License. */ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/lstm.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +bool Lstm::GetBidirection() const { return this->primitive->value.AsLstm()->bidirection; } + +void Lstm::SetBidirection(bool bidirection) { this->primitive->value.AsLstm()->bidirection = bidirection; } + +#else + +bool Lstm::GetBidirection() const { return this->primitive->value_as_Lstm()->bidirection(); } + +void Lstm::SetBidirection(bool bidirection) {} +#endif -namespace mindspore::lite { const int kLstmInputNum = 6; const int kLstmOutputNum = 3; int Lstm::InferShape(std::vector inputs_, std::vector outputs_) { @@ -34,17 +44,14 @@ int Lstm::InferShape(std::vector inputs_, std::vector in_shape = input->shape(); std::vector w_shape = weight_i->shape(); // layer, hidden_size * 4, input_size if (in_shape.size() != 3 || w_shape.size() != 3) { MS_LOG(ERROR) << "OpLstm input dims should be 3."; return RET_ERROR; } - auto lstm_prim = this->primitive->value_as_Lstm(); int hidden_size = w_shape[1] / 4; - // set output std::vector out_shape(in_shape); out_shape[2] = hidden_size; @@ -52,18 +59,17 @@ int Lstm::InferShape(std::vector inputs_, std::vectorset_shape(out_shape); - // set hidden state, cell state std::vector state_shape(in_shape); state_shape[0] = lstm_prim->bidirection() ? 
2 : 1; state_shape[2] = hidden_size; outputs_[1]->set_shape(state_shape); outputs_[2]->set_shape(state_shape); - for (int i = 0; i < kLstmOutputNum; i++) { outputs_[i]->set_data_type(input->data_type()); outputs_[i]->SetFormat(input->GetFormat()); } return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/lstm.h b/mindspore/lite/src/ops/lstm.h similarity index 84% rename from mindspore/lite/c_ops/lstm.h rename to mindspore/lite/src/ops/lstm.h index 0d8b8678cd..6ae0020d46 100644 --- a/mindspore/lite/c_ops/lstm.h +++ b/mindspore/lite/src/ops/lstm.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,17 +29,16 @@ #define LITE_MINDSPORE_LITE_C_OPS_LSTM_H_ namespace mindspore { +namespace lite { class Lstm : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Lstm(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Lstm(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Lstm(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; bool GetBidirection() const; void SetBidirection(bool bidirection); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_LSTM_H_ diff --git a/mindspore/lite/src/ops/matmul.cc b/mindspore/lite/src/ops/matmul.cc index f56d1c9a14..7a6d7e1452 100644 --- a/mindspore/lite/src/ops/matmul.cc +++ b/mindspore/lite/src/ops/matmul.cc @@ -14,12 +14,27 @@ * limitations under the License. 
*/ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/matmul.h" +#include + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +bool MatMul::GetTransposeA() const { return this->primitive->value.AsMatMul()->transposeA; } +bool MatMul::GetTransposeB() const { return this->primitive->value.AsMatMul()->transposeB; } + +void MatMul::SetTransposeA(bool transpose_a) { this->primitive->value.AsMatMul()->transposeA = transpose_a; } +void MatMul::SetTransposeB(bool transpose_b) { this->primitive->value.AsMatMul()->transposeB = transpose_b; } + +#else + +bool MatMul::GetTransposeA() const { return this->primitive->value_as_MatMul()->transposeA(); } +bool MatMul::GetTransposeB() const { return this->primitive->value_as_MatMul()->transposeB(); } + +void MatMul::SetTransposeA(bool transpose_a) {} +void MatMul::SetTransposeB(bool transpose_b) {} +#endif -namespace mindspore::lite { int MatMul::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); auto input0 = inputs_.front(); @@ -28,21 +43,18 @@ int MatMul::InferShape(std::vector inputs_, std::vector a_shape = input0->shape(); std::vector b_shape = input1->shape(); if (a_shape.size() < 2 || b_shape.size() < 2) { MS_LOG(ERROR) << "inputs shape is invalid"; return RET_INPUT_TENSOR_ERROR; } - for (int i = 0; i < a_shape.size() - 2; ++i) { if (a_shape[i] != b_shape[i]) { MS_LOG(ERROR) << "Op MatMul's dimensions must be equal"; return RET_INPUT_TENSOR_ERROR; } } - auto matmul_prim = this->primitive->value_as_MatMul(); if (matmul_prim->transposeA()) { std::swap(a_shape[a_shape.size() - 1], a_shape[a_shape.size() - 2]); @@ -55,7 +67,7 @@ int MatMul::InferShape(std::vector inputs_, std::vectorset_shape(c_shape); output->set_data_type(input0->data_type()); output->SetFormat(input0->GetFormat()); - return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore 
diff --git a/mindspore/lite/c_ops/matmul.h b/mindspore/lite/src/ops/matmul.h similarity index 84% rename from mindspore/lite/c_ops/matmul.h rename to mindspore/lite/src/ops/matmul.h index 08e8cdece3..d079cf587f 100644 --- a/mindspore/lite/c_ops/matmul.h +++ b/mindspore/lite/src/ops/matmul.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,19 +29,18 @@ #define LITE_MINDSPORE_LITE_C_OPS_MAT_MUL_H_ namespace mindspore { +namespace lite { class MatMul : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit MatMul(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit MatMul(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit MatMul(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; bool GetTransposeA() const; bool GetTransposeB() const; void SetTransposeA(bool transpose_a); void SetTransposeB(bool transpose_b); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_MAT_MUL_H_ diff --git a/mindspore/lite/c_ops/matrix_diag.cc b/mindspore/lite/src/ops/matrix_diag.cc similarity index 96% rename from mindspore/lite/c_ops/matrix_diag.cc rename to mindspore/lite/src/ops/matrix_diag.cc index 3b6772ae0e..0f329e1179 100644 --- a/mindspore/lite/c_ops/matrix_diag.cc +++ b/mindspore/lite/src/ops/matrix_diag.cc @@ -14,9 +14,10 @@ * limitations under the License. 
*/ -#include "c_ops/matrix_diag.h" +#include "src/ops/matrix_diag.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE int MatrixDiag::GetK() const { return this->primitive->value.AsMatrixDiag()->k; } int MatrixDiag::GetNumRows() const { return this->primitive->value.AsMatrixDiag()->numRows; } @@ -42,4 +43,5 @@ void MatrixDiag::SetNumRows(int num_rows) {} void MatrixDiag::SetNumCols(int num_cols) {} void MatrixDiag::SetPaddingValue(float padding_value) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/matrix_diag.h b/mindspore/lite/src/ops/matrix_diag.h similarity index 84% rename from mindspore/lite/c_ops/matrix_diag.h rename to mindspore/lite/src/ops/matrix_diag.h index 1ef2fcf7d1..a619c8dcfe 100644 --- a/mindspore/lite/c_ops/matrix_diag.h +++ b/mindspore/lite/src/ops/matrix_diag.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_MATRIX_DIAG_H_ namespace mindspore { +namespace lite { class MatrixDiag : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit MatrixDiag(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit MatrixDiag(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit MatrixDiag(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int GetK() const; int GetNumRows() const; int GetNumCols() const; @@ -45,6 +43,7 @@ class MatrixDiag : public PrimitiveC { void SetNumCols(int num_cols); void SetPaddingValue(float padding_value); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_MATRIX_DIAG_H_ diff --git a/mindspore/lite/c_ops/maximum.h b/mindspore/lite/src/ops/maximum.h similarity index 73% rename from mindspore/lite/c_ops/maximum.h rename to mindspore/lite/src/ops/maximum.h index 
37ab2c3541..97e8c938c3 100644 --- a/mindspore/lite/c_ops/maximum.h +++ b/mindspore/lite/src/ops/maximum.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "c_ops/arithmetic.h" +#include "src/ops/arithmetic.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,14 +29,12 @@ #define LITE_MINDSPORE_LITE_C_OPS_MAXIMUM_H_ namespace mindspore { +namespace lite { class Maximum : public Arithmetic { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Maximum(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} -#else - explicit Maximum(schema::Primitive *primitive) : Arithmetic(primitive) {} -#endif + explicit Maximum(OriginPrimitive *primitive) : Arithmetic(primitive) {} }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_MAXIMUM_H_ diff --git a/mindspore/lite/src/ops/mean.cc b/mindspore/lite/src/ops/mean.cc index 331198221f..f7f2b487e2 100644 --- a/mindspore/lite/src/ops/mean.cc +++ b/mindspore/lite/src/ops/mean.cc @@ -14,12 +14,29 @@ * limitations under the License. 
*/ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/mean.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +std::vector Mean::GetAxis() const { return this->primitive->value.AsMean()->axis; } +bool Mean::GetKeepDims() const { return this->primitive->value.AsMean()->keepDims; } + +void Mean::SetAxis(const std::vector &axis) { this->primitive->value.AsMean()->axis = axis; } +void Mean::SetKeepDims(bool keep_dims) { this->primitive->value.AsMean()->keepDims = keep_dims; } + +#else + +std::vector Mean::GetAxis() const { + auto fb_vector = this->primitive->value_as_Mean()->axis(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} +bool Mean::GetKeepDims() const { return this->primitive->value_as_Mean()->keepDims(); } + +void Mean::SetAxis(const std::vector &axis) {} +void Mean::SetKeepDims(bool keep_dims) {} +#endif -namespace mindspore::lite { namespace { constexpr size_t kInputSize = 1; constexpr size_t kOutputSize = 1; @@ -53,7 +70,6 @@ int Mean::InferShape(std::vector inputs_, std::vectorset_data_type(input->data_type()); return RET_OK; } - // reduce on selected axes for (size_t i = 0; i < in_shape.size(); i++) { bool reduce_axis = false; @@ -76,4 +92,5 @@ int Mean::InferShape(std::vector inputs_, std::vectorSetFormat(input->GetFormat()); return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/mean.h b/mindspore/lite/src/ops/mean.h similarity index 84% rename from mindspore/lite/c_ops/mean.h rename to mindspore/lite/src/ops/mean.h index 56d6efdd4f..6c5927f521 100644 --- a/mindspore/lite/c_ops/mean.h +++ b/mindspore/lite/src/ops/mean.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,19 +29,18 
@@ #define LITE_MINDSPORE_LITE_C_OPS_MEAN_H_ namespace mindspore { +namespace lite { class Mean : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Mean(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Mean(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Mean(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; std::vector GetAxis() const; bool GetKeepDims() const; void SetAxis(const std::vector &axis); void SetKeepDims(bool keep_dims); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_MEAN_H_ diff --git a/mindspore/lite/c_ops/minimum.h b/mindspore/lite/src/ops/minimum.h similarity index 73% rename from mindspore/lite/c_ops/minimum.h rename to mindspore/lite/src/ops/minimum.h index c738273fc7..36bb5ff77a 100644 --- a/mindspore/lite/c_ops/minimum.h +++ b/mindspore/lite/src/ops/minimum.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "c_ops/arithmetic.h" +#include "src/ops/arithmetic.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,14 +29,12 @@ #define LITE_MINDSPORE_LITE_C_OPS_MINIMUM_H_ namespace mindspore { +namespace lite { class Minimum : public Arithmetic { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Minimum(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} -#else - explicit Minimum(schema::Primitive *primitive) : Arithmetic(primitive) {} -#endif + explicit Minimum(OriginPrimitive *primitive) : Arithmetic(primitive) {} }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_MINIMUM_H_ diff --git a/mindspore/lite/c_ops/mul.cc b/mindspore/lite/src/ops/mul.cc similarity index 94% rename from mindspore/lite/c_ops/mul.cc rename to mindspore/lite/src/ops/mul.cc index d26152db5c..65205ecd57 100644 --- a/mindspore/lite/c_ops/mul.cc +++ b/mindspore/lite/src/ops/mul.cc @@ -14,9 +14,10 @@ * limitations under the License. 
*/ -#include "c_ops/mul.h" +#include "src/ops/mul.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE int Mul::GetActivationType() const { return this->primitive->value.AsMul()->activationType; } @@ -28,4 +29,5 @@ int Mul::GetActivationType() const { return this->primitive->value_as_Mul()->act void Mul::SetActivationType(int activation_type) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/mul.h b/mindspore/lite/src/ops/mul.h similarity index 81% rename from mindspore/lite/c_ops/mul.h rename to mindspore/lite/src/ops/mul.h index b73070d9da..2a8ede8741 100644 --- a/mindspore/lite/c_ops/mul.h +++ b/mindspore/lite/src/ops/mul.h @@ -18,8 +18,8 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" -#include "c_ops/arithmetic.h" +#include "src/ops/primitive_c.h" +#include "src/ops/arithmetic.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -30,16 +30,15 @@ #define LITE_MINDSPORE_LITE_C_OPS_MUL_H_ namespace mindspore { +namespace lite { class Mul : public Arithmetic { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Mul(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} -#else - explicit Mul(schema::Primitive *primitive) : Arithmetic(primitive) {} -#endif + explicit Mul(OriginPrimitive *primitive) : Arithmetic(primitive) {} + int GetActivationType() const; void SetActivationType(int activation_type); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_MUL_H_ diff --git a/mindspore/lite/src/ops/nchw2nhwc.cc b/mindspore/lite/src/ops/nchw2nhwc.cc index 5a420ceba8..c4bd16c581 100644 --- a/mindspore/lite/src/ops/nchw2nhwc.cc +++ b/mindspore/lite/src/ops/nchw2nhwc.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with 
the License. @@ -14,14 +14,12 @@ * limitations under the License. */ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/nchw2nhwc.h" #include "src/common/common.h" -namespace mindspore::lite { -int Nchw2Nhwc::InferShape(std::vector inputs_, std::vector outputs_) { +namespace mindspore { +namespace lite { +int Nchw2Nhwc::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); auto input = inputs_.front(); MS_ASSERT(input != nullptr); @@ -42,5 +40,5 @@ int Nchw2Nhwc::InferShape(std::vector inputs_, std::vectorset_data_type(input->data_type()); return RET_OK; } -} // namespace mindspore::lite - +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/nchw2nhwc.h b/mindspore/lite/src/ops/nchw2nhwc.h similarity index 74% rename from mindspore/lite/c_ops/nchw2nhwc.h rename to mindspore/lite/src/ops/nchw2nhwc.h index dbe2be247b..edd85e2364 100644 --- a/mindspore/lite/c_ops/nchw2nhwc.h +++ b/mindspore/lite/src/ops/nchw2nhwc.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,15 +29,14 @@ #define LITE_MINDSPORE_LITE_C_OPS_NCHW_2_NHWC_H_ namespace mindspore { +namespace lite { class Nchw2Nhwc : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Nchw2Nhwc(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Nchw2Nhwc(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Nchw2Nhwc(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_NCHW_2_NHWC_H_ diff --git a/mindspore/lite/src/ops/nhwc2nchw.cc b/mindspore/lite/src/ops/nhwc2nchw.cc index 579ce71be2..a13e858853 100644 --- a/mindspore/lite/src/ops/nhwc2nchw.cc +++ b/mindspore/lite/src/ops/nhwc2nchw.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,14 +14,12 @@ * limitations under the License. 
*/ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/nhwc2nchw.h" #include "src/common/common.h" -namespace mindspore::lite { -int Nhwc2Nchw::InferShape(std::vector inputs_, std::vector outputs_) { +namespace mindspore { +namespace lite { +int Nhwc2Nchw::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); auto input = inputs_.front(); MS_ASSERT(input != nullptr); @@ -42,4 +40,5 @@ int Nhwc2Nchw::InferShape(std::vector inputs_, std::vectorset_data_type(input->data_type()); return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/nhwc2nchw.h b/mindspore/lite/src/ops/nhwc2nchw.h similarity index 74% rename from mindspore/lite/c_ops/nhwc2nchw.h rename to mindspore/lite/src/ops/nhwc2nchw.h index 55eafe7a70..54cab1716c 100644 --- a/mindspore/lite/c_ops/nhwc2nchw.h +++ b/mindspore/lite/src/ops/nhwc2nchw.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,15 +29,14 @@ #define LITE_MINDSPORE_LITE_C_OPS_NHWC_2_NCHW_H_ namespace mindspore { +namespace lite { class Nhwc2Nchw : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Nhwc2Nchw(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Nhwc2Nchw(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Nhwc2Nchw(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_NHWC_2_NCHW_H_ diff --git a/mindspore/lite/c_ops/not_equal.h b/mindspore/lite/src/ops/not_equal.h similarity index 73% rename from mindspore/lite/c_ops/not_equal.h rename to mindspore/lite/src/ops/not_equal.h index 818fb249dc..18e1017666 100644 --- a/mindspore/lite/c_ops/not_equal.h +++ b/mindspore/lite/src/ops/not_equal.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "c_ops/arithmetic.h" +#include "src/ops/arithmetic.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,14 +29,12 @@ #define LITE_MINDSPORE_LITE_C_OPS_NOT_EQUAL_H_ namespace mindspore { +namespace lite { class NotEqual : public Arithmetic { public: -#ifdef PRIMITIVE_WRITEABLE - explicit NotEqual(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} -#else - explicit NotEqual(schema::Primitive *primitive) : Arithmetic(primitive) {} -#endif + explicit NotEqual(OriginPrimitive *primitive) : Arithmetic(primitive) {} }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_NOT_EQUAL_H_ diff --git a/mindspore/lite/src/ops/one_hot.cc b/mindspore/lite/src/ops/one_hot.cc index 878813c995..7406216deb 100644 --- a/mindspore/lite/src/ops/one_hot.cc +++ b/mindspore/lite/src/ops/one_hot.cc @@ -14,12 +14,22 @@ * limitations under the License. 
*/ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/one_hot.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +int OneHot::GetAxis() const { return this->primitive->value.AsOneHot()->axis; } + +void OneHot::SetAxis(int axis) { this->primitive->value.AsOneHot()->axis = axis; } + +#else + +int OneHot::GetAxis() const { return this->primitive->value_as_OneHot()->axis(); } + +void OneHot::SetAxis(int axis) {} +#endif -namespace mindspore::lite { namespace { constexpr size_t kOneHotInputNum = 4; } @@ -32,7 +42,6 @@ int OneHot::InferShape(std::vector inputs, std::vectoraxis(); - // indices, depth, on_value, off_value if (inputs.size() != kOneHotInputNum) { MS_LOG(ERROR) << "OneHot got inputs num " << inputs.size() << ", should be " << kOneHotInputNum; @@ -43,7 +52,6 @@ int OneHot::InferShape(std::vector inputs, std::vector(depth_tensor->Data()); - auto input = inputs.front(); if (input == nullptr) { return RET_NULL_PTR; @@ -55,20 +63,18 @@ int OneHot::InferShape(std::vector inputs, std::vector output_shape(input_shape); output_shape.insert(output_shape.cbegin() + axis, *depth); - auto output = outputs.front(); if (output == nullptr) { return RET_NULL_PTR; } output->set_shape(output_shape); - auto on_value = inputs.at(2); if (on_value == nullptr) { return RET_NULL_PTR; } output->set_data_type(on_value->data_type()); output->SetFormat(on_value->GetFormat()); - return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/one_hot.h b/mindspore/lite/src/ops/one_hot.h similarity index 83% rename from mindspore/lite/c_ops/one_hot.h rename to mindspore/lite/src/ops/one_hot.h index b93b2230d8..7193178d4a 100644 --- a/mindspore/lite/c_ops/one_hot.h +++ b/mindspore/lite/src/ops/one_hot.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" 
+#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,17 +29,16 @@ #define LITE_MINDSPORE_LITE_C_OPS_ONE_HOT_H_ namespace mindspore { +namespace lite { class OneHot : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit OneHot(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit OneHot(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit OneHot(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetAxis() const; void SetAxis(int axis); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_ONE_HOT_H_ diff --git a/mindspore/lite/src/ops/ops.cc b/mindspore/lite/src/ops/ops.cc deleted file mode 100644 index 72434c5db6..0000000000 --- a/mindspore/lite/src/ops/ops.cc +++ /dev/null @@ -1,170 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/ops.h" -#include -#include "utils/log_adapter.h" -#include "include/errorcode.h" -#include "src/ir/tensor.h" - -namespace mindspore::lite { -Primitive *Primitive::CreatePrimitive(schema::Primitive *primitive) { - MS_ASSERT(primitive != nullptr); - auto op_type = primitive->value_type(); - switch (op_type) { - case schema::PrimitiveType_SoftMax: - return new lite::SoftMax(const_cast(primitive)); - case schema::PrimitiveType_Activation: - return new lite::Activation(const_cast(primitive)); - case schema::PrimitiveType_Conv2D: - return new lite::Conv2D(const_cast(primitive)); - case schema::PrimitiveType_Reduce: - return new lite::Reduce(const_cast(primitive)); - case schema::PrimitiveType_Pooling: - return new lite::Pooling(const_cast(primitive)); - case schema::PrimitiveType_ROIPooling: - return new lite::ROIPooling(const_cast(primitive)); - case schema::PrimitiveType_DepthwiseConv2D: - return new lite::DepthwiseConv2D(const_cast(primitive)); - case schema::PrimitiveType_FusedBatchNorm: - return new lite::FusedBatchNorm(const_cast(primitive)); - case schema::PrimitiveType_BatchNorm: - return new lite::BatchNorm(const_cast(primitive)); - case schema::PrimitiveType_FullConnection: - return new lite::FullConnection(const_cast(primitive)); - case schema::PrimitiveType_Power: - return new lite::Power(const_cast(primitive)); - case schema::PrimitiveType_Pad: - return new lite::Pad(const_cast(primitive)); - case schema::PrimitiveType_Range: - return new lite::Range(const_cast(primitive)); - case schema::PrimitiveType_Mul: - return new lite::Mul(const_cast(primitive)); - case schema::PrimitiveType_Add: - return new lite::Add(const_cast(primitive)); - case schema::PrimitiveType_Sub: - return new lite::Sub(const_cast(primitive)); - case schema::PrimitiveType_Div: - return new lite::Div(const_cast(primitive)); - case schema::PrimitiveType_BiasAdd: - return new lite::BiasAdd(const_cast(primitive)); - case schema::PrimitiveType_ExpandDims: - return new 
lite::ExpandDims(const_cast(primitive)); - case schema::PrimitiveType_ArgMax: - return new lite::ArgMax(const_cast(primitive)); - case schema::PrimitiveType_ArgMin: - return new lite::ArgMin(const_cast(primitive)); - case schema::PrimitiveType_Cast: - return new lite::Cast(const_cast(primitive)); - case schema::PrimitiveType_Reshape: - return new lite::Reshape(const_cast(primitive)); - case schema::PrimitiveType_Eltwise: - return new lite::Eltwise(const_cast(primitive)); - case schema::PrimitiveType_Ceil: - return new lite::Ceil(const_cast(primitive)); - case schema::PrimitiveType_Concat: - return new lite::Concat(const_cast(primitive)); - case schema::PrimitiveType_Fill: - return new lite::Fill(const_cast(primitive)); - case schema::PrimitiveType_Nhwc2Nchw: - return new lite::Nhwc2Nchw(const_cast(primitive)); - case schema::PrimitiveType_Nchw2Nhwc: - return new lite::Nchw2Nhwc(const_cast(primitive)); - case schema::PrimitiveType_Transpose: - return new lite::Transpose(const_cast(primitive)); - case schema::PrimitiveType_Squeeze: - return new lite::Squeeze(const_cast(primitive)); - case schema::PrimitiveType_SquaredDifference: - return new lite::SquaredDifference(const_cast(primitive)); - case schema::PrimitiveType_Split: - return new lite::Split(const_cast(primitive)); - case schema::PrimitiveType_FloorDiv: - return new lite::FloorDiv(const_cast(primitive)); - case schema::PrimitiveType_FloorMod: - return new lite::FloorMod(const_cast(primitive)); - case schema::PrimitiveType_Reverse: - return new lite::Reverse(const_cast(primitive)); - case schema::PrimitiveType_Scale: - return new lite::Scale(const_cast(primitive)); - case schema::PrimitiveType_GatherNd: - return new lite::GatherNd(const_cast(primitive)); - case schema::PrimitiveType_Tile: - return new lite::Tile(const_cast(primitive)); - case schema::PrimitiveType_TopK: - return new lite::TopK(const_cast(primitive)); - case schema::PrimitiveType_Unique: - return new lite::Unique(const_cast(primitive)); - case 
schema::PrimitiveType_Unstack: - return new lite::Unstack(const_cast(primitive)); - case schema::PrimitiveType_ReverseSequence: - return new lite::ReverseSequence(const_cast(primitive)); - case schema::PrimitiveType_Round: - return new lite::Round(const_cast(primitive)); - case schema::PrimitiveType_ZerosLike: - return new lite::ZerosLike(const_cast(primitive)); - case schema::PrimitiveType_Where: - return new lite::Where(const_cast(primitive)); - case schema::PrimitiveType_Floor: - return new lite::Floor(const_cast(primitive)); - case schema::PrimitiveType_Shape: - return new lite::Shape(const_cast(primitive)); - case schema::PrimitiveType_ScatterND: - return new lite::ScatterND(const_cast(primitive)); - case schema::PrimitiveType_Unsqueeze: - return new lite::Unsqueeze(const_cast(primitive)); - case schema::PrimitiveType_Flatten: - return new lite::Flatten(const_cast(primitive)); - case schema::PrimitiveType_StridedSlice: - return new lite::StridedSlice(const_cast(primitive)); - case schema::PrimitiveType_Resize: - return new lite::Resize(const_cast(primitive)); - case schema::PrimitiveType_OneHot: - return new lite::OneHot(const_cast(primitive)); - case schema::PrimitiveType_PriorBox: - return new lite::PriorBox(const_cast(primitive)); - case schema::PrimitiveType_SpaceToDepth: - return new lite::SpaceToDepth(const_cast(primitive)); - case schema::PrimitiveType_SpaceToBatch: - return new lite::SpaceToBatch(const_cast(primitive)); - case schema::PrimitiveType_QuantDTypeCast: - return new lite::QuantDTypeCast(const_cast(primitive)); - case schema::PrimitiveType_MatMul: - return new lite::MatMul(const_cast(primitive)); - case schema::PrimitiveType_EmbeddingLookup: - return new lite::EmbeddingLookup(const_cast(primitive)); - case schema::PrimitiveType_ConstantOfShape: - return new lite::ConstantOfShape(const_cast(primitive)); - default: - break; - } - return nullptr; -} - -int Primitive::InferShape(std::vector inputs_, std::vector outputs_) { - auto input = 
inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - output->set_data_type(input->data_type()); - output->SetFormat(input->GetFormat()); - if (!GetInferFlag()) { - return RET_OK; - } - output->set_shape(input->shape()); - - return RET_OK; -} -} // namespace mindspore::lite diff --git a/mindspore/lite/src/ops/ops.h b/mindspore/lite/src/ops/ops.h deleted file mode 100644 index 5aa32fdc4e..0000000000 --- a/mindspore/lite/src/ops/ops.h +++ /dev/null @@ -1,819 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_SRC_OPS_OPS_H_ -#define MINDSPORE_LITE_SRC_OPS_OPS_H_ - -#include -#include -#include -#include "schema/model_generated.h" -#include "ir/dtype/type_id.h" - -namespace mindspore { -namespace lite::tensor { -class Tensor; -} -namespace lite { -constexpr uint32_t kSingleNum = 1; -constexpr uint32_t kDoubleNum = 2; -constexpr uint32_t kMultiNum = 3; -constexpr uint32_t kNHWC_n_index = 0; -constexpr uint32_t kNHWC_h_index = 1; -constexpr uint32_t kNHWC_w_index = 2; -constexpr uint32_t kNHWC_c_index = 3; -constexpr uint32_t kDimension_4d = 4; - -const std::set kSupportDataType = {kNumberTypeUInt8, kNumberTypeInt32, kNumberTypeFloat32, kNumberTypeFloat16}; - -class Primitive { - public: - explicit Primitive(schema::Primitive *primitive) : primitive(primitive) {} - static Primitive *CreatePrimitive(schema::Primitive *primitive); - virtual ~Primitive() {} - const schema::Primitive *Value() const { return this->primitive; } - const bool GetInferFlag() const { return this->infer_flag_; } - void SetInferFlag(bool flag) { this->infer_flag_ = flag; } - schema::PrimitiveType Type() const { return this->primitive->value_type(); } - const void *Attribute() const { return this->primitive->value(); } - virtual int InferShape(std::vector inputs_, std::vector outputs_); - - protected: - schema::Primitive *primitive; - bool infer_flag_ = true; -}; - -class ROIPooling : public Primitive { - public: - explicit ROIPooling(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::ROIPooling *GetAttribute() const { return this->primitive->value_as_ROIPooling(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; -}; - -class Conv2D : public Primitive { - public: - explicit Conv2D(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Conv2D *GetAttribute() const { return this->primitive->value_as_Conv2D(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; - int PadUp() const { return 
this->pad_u_; } - int PadDown() const { return this->pad_d_; } - int PadLeft() const { return this->pad_l_; } - int PadRight() const { return this->pad_r_; } - - protected: - void ConvInferShape(int input_h, int input_w, int *output_h, int *output_w); - - protected: - int pad_u_ = 0; - int pad_d_ = 0; - int pad_l_ = 0; - int pad_r_ = 0; -}; - -class Pooling : public Primitive { - public: - explicit Pooling(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Pooling *GetAttribute() const { return this->primitive->value_as_Pooling(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; - int PadUp() const { return this->pad_u_; } - int PadDown() const { return this->pad_d_; } - int PadLeft() const { return this->pad_l_; } - int PadRight() const { return this->pad_r_; } - - protected: - int pad_u_ = 0; - int pad_d_ = 0; - int pad_l_ = 0; - int pad_r_ = 0; -}; - -class BatchNorm : public Primitive { - public: - explicit BatchNorm(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::BatchNorm *GetAttribute() const { return this->primitive->value_as_BatchNorm(); } -}; - -class FusedBatchNorm : public Primitive { - public: - explicit FusedBatchNorm(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::FusedBatchNorm *GetAttribute() const { return this->primitive->value_as_FusedBatchNorm(); } -}; - -class Activation : public Primitive { - public: - explicit Activation(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Activation *GetAttribute() const { return this->primitive->value_as_Activation(); } -}; - -class Prelu : public Activation { - public: - explicit Prelu(schema::Primitive *primitive) : Activation(primitive) {} - const schema::Prelu *GetAttribute() const { return this->primitive->value_as_Prelu(); } -}; - -class CaffePReLU : public Activation { - public: - explicit CaffePReLU(schema::Primitive *primitive) : Activation(primitive) {} - const schema::CaffePReLU 
*GetAttribute() const { return this->primitive->value_as_CaffePReLU(); } -}; - -class Split : public Primitive { - public: - explicit Split(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Split *GetAttribute() const { return this->primitive->value_as_Split(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; -}; - -class Reshape : public Primitive { - public: - explicit Reshape(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Reshape *GetAttribute() const { return this->primitive->value_as_Reshape(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; - - private: - int CalNewShape(const tensor::Tensor *in_tensor, std::vector *out_shape) const; -}; - -class FullConnection : public Primitive { - public: - explicit FullConnection(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::FullConnection *GetAttribute() const { return this->primitive->value_as_FullConnection(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; -}; - -class SoftMax : public Primitive { - public: - explicit SoftMax(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::SoftMax *GetAttribute() const { return this->primitive->value_as_SoftMax(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; -}; - -class Reduce : public Primitive { - public: - explicit Reduce(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Reduce *GetAttribute() const { return this->primitive->value_as_Reduce(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; -}; - -class DepthwiseConv2D : public Primitive { - public: - explicit DepthwiseConv2D(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::DepthwiseConv2D *GetAttribute() const { return this->primitive->value_as_DepthwiseConv2D(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; - int PadUp() const { return 
this->pad_u_; } - int PadDown() const { return this->pad_d_; } - int PadLeft() const { return this->pad_l_; } - int PadRight() const { return this->pad_r_; } - - protected: - int pad_u_ = 0; - int pad_d_ = 0; - int pad_l_ = 0; - int pad_r_ = 0; -}; - -class DeConv2D : public Primitive { - public: - explicit DeConv2D(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::DeConv2D *GetAttribute() const { return this->primitive->value_as_DeConv2D(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; - int PadUp() const { return this->pad_u_; } - int PadDown() const { return this->pad_d_; } - int PadLeft() const { return this->pad_l_; } - int PadRight() const { return this->pad_r_; } - - protected: - int pad_u_ = 0; - int pad_d_ = 0; - int pad_l_ = 0; - int pad_r_ = 0; -}; - -class DeconvDepthwiseConv2D : public Primitive { - public: - explicit DeconvDepthwiseConv2D(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::DeDepthwiseConv2D *GetAttribute() const { return this->primitive->value_as_DeDepthwiseConv2D(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; - int PadUp() const { return this->pad_u_; } - int PadDown() const { return this->pad_d_; } - int PadLeft() const { return this->pad_l_; } - int PadRight() const { return this->pad_r_; } - - protected: - int pad_u_ = 0; - int pad_d_ = 0; - int pad_l_ = 0; - int pad_r_ = 0; -}; - -class Power : public Primitive { - public: - explicit Power(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Power *GetAttribute() const { return this->primitive->value_as_Power(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; -}; - -class Range : public Primitive { - public: - explicit Range(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Range *GetAttribute() const { return this->primitive->value_as_Range(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; -}; - 
-class AddN : public Primitive { - public: - explicit AddN(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::AddN *GetAttribute() const { return this->primitive->value_as_AddN(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; -}; - -class Arithmetic : public Primitive { - public: - explicit Arithmetic(schema::Primitive *primitive) : Primitive(primitive) {} - int InferShape(std::vector inputs_, std::vector outputs_) override; - bool Broadcasting() { return this->broadcasting_; } - int NDims() { return this->ndim_; } - std::vector InShape0() { return this->in_shape0_; } - std::vector InShape1() { return this->in_shape1_; } - std::vector OutputShape() { return this->out_shape_; } - - protected: - bool broadcasting_ = false; - int ndim_; - std::vector in_shape0_; - std::vector in_shape1_; - std::vector out_shape_; -}; - -class Add : public Arithmetic { - public: - explicit Add(schema::Primitive *primitive) : Arithmetic(primitive) {} - const schema::Add *GetAttribute() const { return this->primitive->value_as_Add(); } -}; - -class Mul : public Arithmetic { - public: - explicit Mul(schema::Primitive *primitive) : Arithmetic(primitive) {} - const schema::Mul *GetAttribute() const { return this->primitive->value_as_Mul(); } -}; - -class Sub : public Arithmetic { - public: - explicit Sub(schema::Primitive *primitive) : Arithmetic(primitive) {} - const schema::Sub *GetAttribute() const { return this->primitive->value_as_Sub(); } -}; - -class Div : public Arithmetic { - public: - explicit Div(schema::Primitive *primitive) : Arithmetic(primitive) {} - const schema::Div *GetAttribute() const { return this->primitive->value_as_Div(); } -}; - -class LogicalAnd : public Arithmetic { - public: - explicit LogicalAnd(schema::Primitive *primitive) : Arithmetic(primitive) {} - const schema::LogicalAnd *GetAttribute() const { return this->primitive->value_as_LogicalAnd(); } -}; - -class LogicalOr : public Arithmetic { - public: - explicit 
LogicalOr(schema::Primitive *primitive) : Arithmetic(primitive) {} - const schema::LogicalOr *GetAttribute() const { return this->primitive->value_as_LogicalOr(); } -}; - -class Maximum : public Arithmetic { - public: - explicit Maximum(schema::Primitive *primitive) : Arithmetic(primitive) {} - const schema::Maximum *GetAttribute() const { return this->primitive->value_as_Maximum(); } -}; - -class Minimum : public Arithmetic { - public: - explicit Minimum(schema::Primitive *primitive) : Arithmetic(primitive) {} - const schema::Minimum *GetAttribute() const { return this->primitive->value_as_Minimum(); } -}; - -class FloorDiv : public Arithmetic { - public: - explicit FloorDiv(schema::Primitive *primitive) : Arithmetic(primitive) {} - const schema::FloorDiv *GetAttribute() const { return this->primitive->value_as_FloorDiv(); } -}; - -class FloorMod : public Arithmetic { - public: - explicit FloorMod(schema::Primitive *primitive) : Arithmetic(primitive) {} - const schema::FloorMod *GetAttribute() const { return this->primitive->value_as_FloorMod(); } -}; - -class SquaredDifference : public Arithmetic { - public: - explicit SquaredDifference(schema::Primitive *primitive) : Arithmetic(primitive) {} - const schema::SquaredDifference *GetAttribute() const { return this->primitive->value_as_SquaredDifference(); } -}; - -class Equal : public Arithmetic { - public: - explicit Equal(schema::Primitive *primitive) : Arithmetic(primitive) {} - const schema::Equal *GetAttribute() const { return this->primitive->value_as_Equal(); } -}; - -class NotEqual : public Arithmetic { - public: - explicit NotEqual(schema::Primitive *primitive) : Arithmetic(primitive) {} - const schema::NotEqual *GetAttribute() const { return this->primitive->value_as_NotEqual(); } -}; - -class Less : public Arithmetic { - public: - explicit Less(schema::Primitive *primitive) : Arithmetic(primitive) {} - const schema::Less *GetAttribute() const { return this->primitive->value_as_Less(); } -}; - -class 
LessEqual : public Arithmetic { - public: - explicit LessEqual(schema::Primitive *primitive) : Arithmetic(primitive) {} - const schema::LessEqual *GetAttribute() const { return this->primitive->value_as_LessEqual(); } -}; - -class Greater : public Arithmetic { - public: - explicit Greater(schema::Primitive *primitive) : Arithmetic(primitive) {} - const schema::Greater *GetAttribute() const { return this->primitive->value_as_Greater(); } -}; - -class GreaterEqual : public Arithmetic { - public: - explicit GreaterEqual(schema::Primitive *primitive) : Arithmetic(primitive) {} - const schema::GreaterEqual *GetAttribute() const { return this->primitive->value_as_GreaterEqual(); } -}; - -class Eltwise : public Arithmetic { - public: - explicit Eltwise(schema::Primitive *primitive) : Arithmetic(primitive) {} - const schema::Eltwise *GetAttribute() const { return this->primitive->value_as_Eltwise(); } -}; - -class ArithmeticSelf : public Primitive { - public: - explicit ArithmeticSelf(schema::Primitive *primitive) : Primitive(primitive) {} - int InferShape(std::vector inputs_, std::vector outputs_) override; -}; - -class Abs : public ArithmeticSelf { - public: - explicit Abs(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} - const schema::Abs *GetAttribute() const { return this->primitive->value_as_Abs(); } -}; - -class Cos : public ArithmeticSelf { - public: - explicit Cos(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} - const schema::Cos *GetAttribute() const { return this->primitive->value_as_Cos(); } -}; - -class Exp : public ArithmeticSelf { - public: - explicit Exp(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} - const schema::Exp *GetAttribute() const { return this->primitive->value_as_Exp(); } -}; - -class Log : public ArithmeticSelf { - public: - explicit Log(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} - const schema::Log *GetAttribute() const { return this->primitive->value_as_Log(); } -}; - -class Square : 
public ArithmeticSelf { - public: - explicit Square(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} - const schema::Square *GetAttribute() const { return this->primitive->value_as_Square(); } -}; - -class Sqrt : public ArithmeticSelf { - public: - explicit Sqrt(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} - const schema::Sqrt *GetAttribute() const { return this->primitive->value_as_Sqrt(); } -}; - -class Rsqrt : public ArithmeticSelf { - public: - explicit Rsqrt(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} - const schema::Rsqrt *GetAttribute() const { return this->primitive->value_as_Rsqrt(); } -}; - -class Sin : public ArithmeticSelf { - public: - explicit Sin(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} - const schema::Sin *GetAttribute() const { return this->primitive->value_as_Sin(); } -}; - -class LogicalNot : public ArithmeticSelf { - public: - explicit LogicalNot(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} - const schema::LogicalNot *GetAttribute() const { return this->primitive->value_as_LogicalNot(); } -}; - -class Floor : public ArithmeticSelf { - public: - explicit Floor(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} - const schema::Floor *GetAttribute() const { return this->primitive->value_as_Floor(); } -}; - -class Ceil : public ArithmeticSelf { - public: - explicit Ceil(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} - const schema::Ceil *GetAttribute() const { return this->primitive->value_as_Ceil(); } -}; - -class RealDiv : public Arithmetic { - public: - explicit RealDiv(schema::Primitive *primitive) : Arithmetic(primitive) {} - const schema::RealDiv *GetAttribute() const { return this->primitive->value_as_RealDiv(); } -}; - -class BiasAdd : public Primitive { - public: - explicit BiasAdd(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::BiasAdd *GetAttribute() const { return this->primitive->value_as_BiasAdd(); } -}; - 
-class ExpandDims : public Primitive { - public: - explicit ExpandDims(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::ExpandDims *GetAttribute() const { return this->primitive->value_as_ExpandDims(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; -}; - -class Unsqueeze : public Primitive { - public: - explicit Unsqueeze(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Unsqueeze *GetAttribute() const { return this->primitive->value_as_Unsqueeze(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; -}; - -class Cast : public Primitive { - public: - explicit Cast(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Cast *GetAttribute() const { return this->primitive->value_as_Cast(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; -}; - -class Concat : public Primitive { - public: - explicit Concat(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Concat *GetAttribute() const { return this->primitive->value_as_Concat(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; -}; - -class Fill : public Primitive { - public: - explicit Fill(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Fill *GetAttribute() const { return this->primitive->value_as_Fill(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; -}; - -class Mean : public Primitive { - public: - explicit Mean(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Mean *GetAttribute() const { return this->primitive->value_as_Mean(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; -}; - -class ArgMax : public Primitive { - public: - explicit ArgMax(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::ArgMax *GetAttribute() const { return this->primitive->value_as_ArgMax(); } - int InferShape(std::vector inputs_, std::vector outputs_) 
override; -}; - -class ArgMin : public Primitive { - public: - explicit ArgMin(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::ArgMin *GetAttribute() const { return this->primitive->value_as_ArgMin(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; -}; - -class MatMul : public Primitive { - public: - explicit MatMul(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::MatMul *GetAttribute() const { return this->primitive->value_as_MatMul(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; -}; - -class Nchw2Nhwc : public Primitive { - public: - explicit Nchw2Nhwc(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Nchw2Nhwc *GetAttribute() const { return this->primitive->value_as_Nchw2Nhwc(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; -}; - -class Nhwc2Nchw : public Primitive { - public: - explicit Nhwc2Nchw(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Nhwc2Nchw *GetAttribute() const { return this->primitive->value_as_Nhwc2Nchw(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; -}; - -class Rank : public Primitive { - public: - explicit Rank(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Rank *GetAttribute() const { return this->primitive->value_as_Rank(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; -}; - -class Pad : public Primitive { - public: - explicit Pad(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Pad *GetAttribute() const { return this->primitive->value_as_Pad(); } - int InferShape(std::vector inputs, std::vector outputs) override; -}; - -class Gather : public Primitive { - public: - explicit Gather(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Gather *GatherAttribute() const { return this->primitive->value_as_Gather(); } - int InferShape(std::vector inputs, 
std::vector outputs) override; -}; - -class GatherNd : public Primitive { - public: - explicit GatherNd(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::GatherNd *GetAttribute() const { return this->primitive->value_as_GatherNd(); } - int InferShape(std::vector inputs, std::vector outputs) override; -}; - -class Slice : public Primitive { - public: - explicit Slice(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Slice *GetAttribute() const { return this->primitive->value_as_Slice(); } - int InferShape(std::vector inputs, std::vector outputs) override; -}; - -class BroadcastTo : public Primitive { - public: - explicit BroadcastTo(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::BroadcastTo *GetAttribute() const { return this->primitive->value_as_BroadcastTo(); } - int InferShape(std::vector inputs, std::vector outputs) override; -}; - -class Squeeze : public Primitive { - public: - explicit Squeeze(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Squeeze *SqueezeAttribute() const { return this->primitive->value_as_Squeeze(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; -}; - -class Transpose : public Primitive { - public: - explicit Transpose(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Transpose *GetAttribute() const { return this->primitive->value_as_Transpose(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; -}; - -class LocalResponseNormalization : public Primitive { - public: - explicit LocalResponseNormalization(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::LocalResponseNormalization *GetAttribute() const { - return this->primitive->value_as_LocalResponseNormalization(); - } -}; - -class Tile : public Primitive { - public: - explicit Tile(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Tile *GetAttribute() const { return 
this->primitive->value_as_Tile(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; -}; - -class Reverse : public Primitive { - public: - explicit Reverse(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Reverse *GetAttribute() const { return this->primitive->value_as_Reverse(); } -}; - -class TopK : public Primitive { - public: - explicit TopK(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::TopK *GetAttribute() const { return this->primitive->value_as_TopK(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; -}; - -class Scale : public Primitive { - public: - explicit Scale(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Scale *GetAttribute() const { return this->primitive->value_as_Scale(); } -}; - -class Stack : public Primitive { - public: - explicit Stack(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Stack *GetAttribute() const { return this->primitive->value_as_Stack(); } - int InferShape(std::vector inputs, std::vector outputs) override; -}; - -class Unstack : public Primitive { - public: - explicit Unstack(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Unstack *GetAttribute() const { return this->primitive->value_as_Unstack(); } - int InferShape(std::vector inputs, std::vector outputs) override; -}; - -class Unique : public Primitive { - public: - explicit Unique(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Unique *GetAttribute() const { return this->primitive->value_as_Unique(); } - int InferShape(std::vector inputs, std::vector outputs) override; -}; - -class ReverseSequence : public Primitive { - public: - explicit ReverseSequence(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::ReverseSequence *GetAttribute() const { return this->primitive->value_as_ReverseSequence(); } - int InferShape(std::vector inputs, std::vector outputs) override; -}; 
- -class DepthToSpace : public Primitive { - public: - explicit DepthToSpace(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::DepthToSpace *GetAttribute() const { return this->primitive->value_as_DepthToSpace(); } - int InferShape(std::vector inputs, std::vector outputs) override; -}; - -class Resize : public Primitive { - public: - explicit Resize(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Resize *GetAttrbute() const { return this->primitive->value_as_Resize(); } - int InferShape(std::vector inputs, std::vector outputs) override; -}; - -class Round : public ArithmeticSelf { - public: - explicit Round(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} - const schema::Round *GetAttribute() const { return this->primitive->value_as_Round(); } -}; - -class ZerosLike : public Primitive { - public: - explicit ZerosLike(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::ZerosLike *GetAttribute() const { return this->primitive->value_as_ZerosLike(); } - int InferShape(std::vector inputs, std::vector outputs) override; -}; - -class Where : public Primitive { - public: - explicit Where(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Where *GetAttribute() const { return this->primitive->value_as_Where(); } - int InferShape(std::vector inputs, std::vector outputs) override; -}; - -class BatchToSpace : public Primitive { - public: - explicit BatchToSpace(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::BatchToSpace *GetAttribute() const { return this->primitive->value_as_BatchToSpace(); } - int InferShape(std::vector inputs, std::vector outputs) override; -}; - -class SpaceToBatch : public Primitive { - public: - explicit SpaceToBatch(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::SpaceToBatch *GetAttribute() const { return this->primitive->value_as_SpaceToBatch(); } - int InferShape(std::vector inputs, std::vector outputs) 
override; - std::vector BlockSizes() { return block_sizes_; } - std::vector Paddings() { return block_sizes_; } - std::vector InShape() { return block_sizes_; } - std::vector PaddedInShape() { return block_sizes_; } - - private: - std::vector block_sizes_; - std::vector paddings_; - std::vector in_shape_; - std::vector padded_in_shape_; -}; - -class Crop : public Primitive { - public: - explicit Crop(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Crop *GetAttribute() const { return this->primitive->value_as_Crop(); } - int InferShape(std::vector inputs, std::vector outputs) override; -}; - -class Shape : public Primitive { - public: - explicit Shape(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Shape *GetAttribute() const { return this->primitive->value_as_Shape(); } - int InferShape(std::vector inputs, std::vector outputs) override; -}; - -class ConstantOfShape : public Primitive { - public: - explicit ConstantOfShape(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::ConstantOfShape *GetAttribute() const { return this->primitive->value_as_ConstantOfShape(); } - int InferShape(std::vector inputs, std::vector outputs) override; -}; - -class ScatterND : public Primitive { - public: - explicit ScatterND(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::ScatterND *GetAttribute() const { return this->primitive->value_as_ScatterND(); } - int InferShape(std::vector inputs, std::vector outputs) override; -}; - -class Flatten : public Primitive { - public: - explicit Flatten(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Flatten *GetAttribute() const { return this->primitive->value_as_Flatten(); } - int InferShape(std::vector inputs, std::vector outputs) override; -}; - -class OneHot : public Primitive { - public: - explicit OneHot(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::OneHot *GetAttribute() const { return 
this->primitive->value_as_OneHot(); } - int InferShape(std::vector inputs, std::vector outputs) override; -}; - -class StridedSlice : public Primitive { - public: - explicit StridedSlice(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::StridedSlice *GetAttribute() const { return this->primitive->value_as_StridedSlice(); } - int InferShape(std::vector inputs, std::vector outputs) override; - int NDims() { return this->ndim_; } - void ApplyNewAxisMask(); - std::vector ApplyShrinkMask(std::vector out_shape); - void ApplyBeginMask(); - void ApplyEndMask(); - void ApplyEllipsisMask(); - std::vector GetInShape() { return this->in_shape_; } - std::vector GetBegins() { return this->begins_; } - std::vector GetEnds() { return this->ends_; } - std::vector GetStrides() { return this->strides_; } - - protected: - int ndim_; - std::vector in_shape_; - std::vector begins_; - std::vector ends_; - std::vector strides_; - std::vector begins_mask_; - std::vector ends_mask_; - std::vector ellipsis_mask_; - std::vector new_axis_mask_; - std::vector shrink_axis_mask_; -}; - -class PriorBox : public Primitive { - public: - explicit PriorBox(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::PriorBox *GetAttrbute() const { return this->primitive->value_as_PriorBox(); } - int InferShape(std::vector inputs, std::vector outputs) override; -}; - -class SpaceToDepth : public Primitive { - public: - explicit SpaceToDepth(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::SpaceToDepth *GetAttribute() const { return this->primitive->value_as_SpaceToDepth(); } - int InferShape(std::vector inputs, std::vector outputs) override; -}; - -class QuantDTypeCast : public Primitive { - public: - explicit QuantDTypeCast(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::QuantDTypeCast *GetAttribute() const { return this->primitive->value_as_QuantDTypeCast(); } - int InferShape(std::vector inputs, std::vector outputs) 
override; -}; - -class Lstm : public Primitive { - public: - explicit Lstm(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Lstm *GetAttribute() const { return this->primitive->value_as_Lstm(); } - int InferShape(std::vector inputs, std::vector outputs) override; -}; - -class Elu : public Primitive { - public: - explicit Elu(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::Elu *GetAttribute() const { return this->primitive->value_as_Elu(); } -}; - -class EmbeddingLookup : public Primitive { - public: - explicit EmbeddingLookup(schema::Primitive *primitive) : Primitive(primitive) {} - const schema::EmbeddingLookup *GetAttribute() const { return this->primitive->value_as_EmbeddingLookup(); } - int InferShape(std::vector inputs_, std::vector outputs_) override; -}; -} // namespace lite -} // namespace mindspore -#endif // MINDSPORE_LITE_SRC_OPS_OPS_H_ diff --git a/mindspore/lite/src/ops/pad.cc b/mindspore/lite/src/ops/pad.cc index 109e7ab59a..e35a050b64 100644 --- a/mindspore/lite/src/ops/pad.cc +++ b/mindspore/lite/src/ops/pad.cc @@ -14,12 +14,32 @@ * limitations under the License. 
*/ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/pad.h" -namespace mindspore::lite { +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +std::vector Pad::GetPaddings() const { return this->primitive->value.AsPad()->paddings; } +int Pad::GetPaddingMode() const { return this->primitive->value.AsPad()->paddingMode; } +float Pad::GetConstantValue() const { return this->primitive->value.AsPad()->constantValue; } + +void Pad::SetPaddings(const std::vector &paddings) { this->primitive->value.AsPad()->paddings = paddings; } +void Pad::SetPaddingMode(int padding_mode) { this->primitive->value.AsPad()->paddingMode = padding_mode; } +void Pad::SetConstantValue(float constant_value) { this->primitive->value.AsPad()->constantValue = constant_value; } + +#else + +std::vector Pad::GetPaddings() const { + auto fb_vector = this->primitive->value_as_Pad()->paddings(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} +int Pad::GetPaddingMode() const { return this->primitive->value_as_Pad()->paddingMode(); } +float Pad::GetConstantValue() const { return this->primitive->value_as_Pad()->constantValue(); } + +void Pad::SetPaddings(const std::vector &paddings) {} +void Pad::SetPaddingMode(int padding_mode) {} +void Pad::SetConstantValue(float constant_value) {} +#endif namespace { const size_t kPaddingsSize = 8; const size_t kInputRank = 4; @@ -37,7 +57,6 @@ int Pad::InferShape(std::vector inputs, std::vector inputs, std::vector inputs, std::vectorset_data_type(input->data_type()); return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/pad.h b/mindspore/lite/src/ops/pad.h similarity index 85% rename from mindspore/lite/c_ops/pad.h rename to mindspore/lite/src/ops/pad.h index e0b1a54138..94359d6fd9 100644 --- a/mindspore/lite/c_ops/pad.h +++ b/mindspore/lite/src/ops/pad.h @@ -18,7 +18,7 @@ 
#include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_PAD_H_ namespace mindspore { +namespace lite { class Pad : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Pad(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Pad(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Pad(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; std::vector GetPaddings() const; int GetPaddingMode() const; @@ -44,6 +42,7 @@ class Pad : public PrimitiveC { void SetPaddingMode(int padding_mode); void SetConstantValue(float constant_value); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_PAD_H_ diff --git a/mindspore/lite/c_ops/permute.cc b/mindspore/lite/src/ops/permute.cc similarity index 94% rename from mindspore/lite/c_ops/permute.cc rename to mindspore/lite/src/ops/permute.cc index 3578071fd0..ea2045243b 100644 --- a/mindspore/lite/c_ops/permute.cc +++ b/mindspore/lite/src/ops/permute.cc @@ -14,9 +14,10 @@ * limitations under the License. 
*/ -#include "c_ops/permute.h" +#include "src/ops/permute.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE std::vector Permute::GetOrder() const { return this->primitive->value.AsPermute()->order; } @@ -31,4 +32,5 @@ std::vector Permute::GetOrder() const { void Permute::SetOrder(const std::vector &order) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/permute.h b/mindspore/lite/src/ops/permute.h similarity index 82% rename from mindspore/lite/c_ops/permute.h rename to mindspore/lite/src/ops/permute.h index 0348a41d45..53c0bafa58 100644 --- a/mindspore/lite/c_ops/permute.h +++ b/mindspore/lite/src/ops/permute.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,16 +29,15 @@ #define LITE_MINDSPORE_LITE_C_OPS_PERMUTE_H_ namespace mindspore { +namespace lite { class Permute : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Permute(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Permute(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Permute(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + std::vector GetOrder() const; void SetOrder(const std::vector &order); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_PERMUTE_H_ diff --git a/mindspore/lite/src/ops/pooling.cc b/mindspore/lite/src/ops/pooling.cc index 9fa8c16215..0f7807b386 100644 --- a/mindspore/lite/src/ops/pooling.cc +++ b/mindspore/lite/src/ops/pooling.cc @@ -14,12 +14,81 @@ * limitations under the License. 
*/ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/pooling.h" + +namespace mindspore { +namespace lite { + +#ifdef PRIMITIVE_WRITEABLE +int Pooling::GetFormat() const { return this->primitive->value.AsPooling()->format; } +int Pooling::GetPoolingMode() const { return this->primitive->value.AsPooling()->poolingMode; } +bool Pooling::GetGlobal() const { return this->primitive->value.AsPooling()->global; } +int Pooling::GetWindowW() const { return this->primitive->value.AsPooling()->windowW; } +int Pooling::GetWindowH() const { return this->primitive->value.AsPooling()->windowH; } +int Pooling::GetStrideW() const { return this->primitive->value.AsPooling()->strideW; } +int Pooling::GetStrideH() const { return this->primitive->value.AsPooling()->strideH; } +int Pooling::GetPadMode() const { return this->primitive->value.AsPooling()->padMode; } +int Pooling::GetPadUp() const { return this->primitive->value.AsPooling()->padUp; } +int Pooling::GetPadDown() const { return this->primitive->value.AsPooling()->padDown; } +int Pooling::GetPadLeft() const { return this->primitive->value.AsPooling()->padLeft; } +int Pooling::GetPadRight() const { return this->primitive->value.AsPooling()->padRight; } +int Pooling::GetRoundMode() const { return this->primitive->value.AsPooling()->roundMode; } + +void Pooling::SetFormat(int format) { this->primitive->value.AsPooling()->format = (schema::Format)format; } +void Pooling::SetPoolingMode(int pooling_mode) { + this->primitive->value.AsPooling()->poolingMode = (schema::PoolMode)pooling_mode; +} +void Pooling::SetGlobal(bool global) { this->primitive->value.AsPooling()->global = global; } +void Pooling::SetWindowW(int window_w) { this->primitive->value.AsPooling()->windowW = window_w; } +void Pooling::SetWindowH(int window_h) { this->primitive->value.AsPooling()->windowH = window_h; } +void Pooling::SetStrideW(int stride_w) { 
this->primitive->value.AsPooling()->strideW = stride_w; } +void Pooling::SetStrideH(int stride_h) { this->primitive->value.AsPooling()->strideH = stride_h; } +void Pooling::SetPadMode(int pad_mode) { this->primitive->value.AsPooling()->padMode = (schema::PadMode)pad_mode; } +void Pooling::SetPadUp(int pad_up) { this->primitive->value.AsPooling()->padUp = pad_up; } +void Pooling::SetPadDown(int pad_down) { this->primitive->value.AsPooling()->padDown = pad_down; } +void Pooling::SetPadLeft(int pad_left) { this->primitive->value.AsPooling()->padLeft = pad_left; } +void Pooling::SetPadRight(int pad_right) { this->primitive->value.AsPooling()->padRight = pad_right; } +void Pooling::SetRoundMode(int round_mode) { + this->primitive->value.AsPooling()->roundMode = (schema::RoundMode)round_mode; +} + +#else + +int Pooling::GetFormat() const { return this->primitive->value_as_Pooling()->format(); } +int Pooling::GetPoolingMode() const { return this->primitive->value_as_Pooling()->poolingMode(); } +bool Pooling::GetGlobal() const { return this->primitive->value_as_Pooling()->global(); } +int Pooling::GetWindowW() const { return this->primitive->value_as_Pooling()->windowW(); } +int Pooling::GetWindowH() const { return this->primitive->value_as_Pooling()->windowH(); } +int Pooling::GetStrideW() const { return this->primitive->value_as_Pooling()->strideW(); } +int Pooling::GetStrideH() const { return this->primitive->value_as_Pooling()->strideH(); } +int Pooling::GetPadMode() const { return this->primitive->value_as_Pooling()->padMode(); } +int Pooling::GetPadUp() const { return this->primitive->value_as_Pooling()->padUp(); } +int Pooling::GetPadDown() const { return this->primitive->value_as_Pooling()->padDown(); } +int Pooling::GetPadLeft() const { return this->primitive->value_as_Pooling()->padLeft(); } +int Pooling::GetPadRight() const { return this->primitive->value_as_Pooling()->padRight(); } +int Pooling::GetRoundMode() const { return 
this->primitive->value_as_Pooling()->roundMode(); } + +void Pooling::SetFormat(int format) {} +void Pooling::SetPoolingMode(int pooling_mode) {} +void Pooling::SetGlobal(bool global) {} +void Pooling::SetWindowW(int window_w) {} +void Pooling::SetWindowH(int window_h) {} +void Pooling::SetStrideW(int stride_w) {} +void Pooling::SetStrideH(int stride_h) {} +void Pooling::SetPadMode(int pad_mode) {} +void Pooling::SetPadUp(int pad_up) {} +void Pooling::SetPadDown(int pad_down) {} +void Pooling::SetPadLeft(int pad_left) {} +void Pooling::SetPadRight(int pad_right) {} +void Pooling::SetRoundMode(int round_mode) {} + +int Pooling::PadUp() const { return this->pad_u_; } +int Pooling::PadDown() const { return this->pad_d_; } +int Pooling::PadLeft() const { return this->pad_l_; } +int Pooling::PadRight() const { return this->pad_r_; } + +#endif -namespace mindspore::lite { int Pooling::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); auto input = inputs_.front(); @@ -28,7 +97,6 @@ int Pooling::InferShape(std::vector inputs_, std::vectorshape().at(1); int input_w = input->shape().at(2); - auto pooling_prim = this->primitive->value_as_Pooling(); MS_ASSERT(pooling_prim != nullptr); auto window_h = pooling_prim->windowH(); @@ -37,7 +105,6 @@ int Pooling::InferShape(std::vector inputs_, std::vectorpadLeft(); @@ -65,16 +132,15 @@ int Pooling::InferShape(std::vector inputs_, std::vectorshape(); input_shape.at(1) = output_h; input_shape.at(2) = output_w; output->set_shape(input_shape); output->set_data_type(input->data_type()); - // todo: temp fix output->SetFormat(schema::Format_NHWC); return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/pooling.h b/mindspore/lite/src/ops/pooling.h similarity index 82% rename from mindspore/lite/c_ops/pooling.h rename to mindspore/lite/src/ops/pooling.h index 587a0c61e7..84e42590a6 100644 --- a/mindspore/lite/c_ops/pooling.h 
+++ b/mindspore/lite/src/ops/pooling.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_POOLING_H_ namespace mindspore { +namespace lite { class Pooling : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Pooling(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Pooling(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Pooling(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetFormat() const; int GetPoolingMode() const; @@ -64,10 +62,10 @@ class Pooling : public PrimitiveC { void SetPadRight(int pad_right); void SetRoundMode(int round_mode); - int PadUp() const { return this->pad_u_; } - int PadDown() const { return this->pad_d_; } - int PadLeft() const { return this->pad_l_; } - int PadRight() const { return this->pad_r_; } + int PadUp() const; + int PadDown() const; + int PadLeft() const; + int PadRight() const; protected: int pad_u_ = 0; @@ -75,6 +73,7 @@ class Pooling : public PrimitiveC { int pad_l_ = 0; int pad_r_ = 0; }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_POOLING_H_ diff --git a/mindspore/lite/c_ops/pooling_grad.cc b/mindspore/lite/src/ops/pooling_grad.cc similarity index 98% rename from mindspore/lite/c_ops/pooling_grad.cc rename to mindspore/lite/src/ops/pooling_grad.cc index 93cb55ec6b..345c5d04da 100644 --- a/mindspore/lite/c_ops/pooling_grad.cc +++ b/mindspore/lite/src/ops/pooling_grad.cc @@ -14,9 +14,10 @@ * limitations under the License. 
*/ -#include "c_ops/pooling_grad.h" +#include "src/ops/pooling_grad.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE int PoolingGrad::GetFormat() const { return this->primitive->value.AsPoolingGrad()->format; } int PoolingGrad::GetPoolingMode() const { return this->primitive->value.AsPoolingGrad()->poolingMode; } @@ -82,4 +83,5 @@ void PoolingGrad::SetPadLeft(int pad_left) {} void PoolingGrad::SetPadRight(int pad_right) {} void PoolingGrad::SetRoundMode(int round_mode) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/pooling_grad.h b/mindspore/lite/src/ops/pooling_grad.h similarity index 88% rename from mindspore/lite/c_ops/pooling_grad.h rename to mindspore/lite/src/ops/pooling_grad.h index 4b0f5d3ea5..617a3a62d5 100644 --- a/mindspore/lite/c_ops/pooling_grad.h +++ b/mindspore/lite/src/ops/pooling_grad.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_POOLING_GRAD_H_ namespace mindspore { +namespace lite { class PoolingGrad : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit PoolingGrad(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit PoolingGrad(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit PoolingGrad(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int GetFormat() const; int GetPoolingMode() const; bool GetGlobal() const; @@ -63,6 +61,7 @@ class PoolingGrad : public PrimitiveC { void SetPadRight(int pad_right); void SetRoundMode(int round_mode); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_POOLING_GRAD_H_ diff --git a/mindspore/lite/src/ops/power.cc b/mindspore/lite/src/ops/power.cc index 91630b7edc..7f9aeb6796 100644 --- a/mindspore/lite/src/ops/power.cc 
+++ b/mindspore/lite/src/ops/power.cc @@ -13,13 +13,31 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" -namespace mindspore::lite { +#include "src/ops/power.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +float Power::GetPower() const { return this->primitive->value.AsPower()->power; } +float Power::GetScale() const { return this->primitive->value.AsPower()->scale; } +float Power::GetShift() const { return this->primitive->value.AsPower()->shift; } + +void Power::SetPower(float power) { this->primitive->value.AsPower()->power = power; } +void Power::SetScale(float scale) { this->primitive->value.AsPower()->scale = scale; } +void Power::SetShift(float shift) { this->primitive->value.AsPower()->shift = shift; } + +#else + +float Power::GetPower() const { return this->primitive->value_as_Power()->power(); } +float Power::GetScale() const { return this->primitive->value_as_Power()->scale(); } +float Power::GetShift() const { return this->primitive->value_as_Power()->shift(); } + +void Power::SetPower(float power) {} +void Power::SetScale(float scale) {} +void Power::SetShift(float shift) {} +#endif + int Power::InferShape(std::vector inputs, std::vector outputs) { MS_ASSERT(this->primitive != nullptr); auto x_tensor = inputs[0]; @@ -37,10 +55,10 @@ int Power::InferShape(std::vector inputs, std::vectorSetFormat(x_tensor->GetFormat()); output_tensor->set_shape(x_tensor->shape()); output_tensor->set_data_type(x_tensor->data_type()); return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/power.h b/mindspore/lite/src/ops/power.h similarity index 84% rename from mindspore/lite/c_ops/power.h rename to mindspore/lite/src/ops/power.h index 923194c97e..cb68962659 100644 --- 
a/mindspore/lite/c_ops/power.h +++ b/mindspore/lite/src/ops/power.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_POWER_H_ namespace mindspore { +namespace lite { class Power : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Power(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Power(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Power(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; float GetPower() const; float GetScale() const; @@ -44,6 +42,7 @@ class Power : public PrimitiveC { void SetScale(float scale); void SetShift(float shift); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_POWER_H_ diff --git a/mindspore/lite/c_ops/power_grad.cc b/mindspore/lite/src/ops/power_grad.cc similarity index 95% rename from mindspore/lite/c_ops/power_grad.cc rename to mindspore/lite/src/ops/power_grad.cc index b8cc0711b9..b898221ae2 100644 --- a/mindspore/lite/c_ops/power_grad.cc +++ b/mindspore/lite/src/ops/power_grad.cc @@ -14,9 +14,10 @@ * limitations under the License. 
*/ -#include "c_ops/power_grad.h" +#include "src/ops/power_grad.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE float PowerGrad::GetPower() const { return this->primitive->value.AsPowerGrad()->power; } float PowerGrad::GetScale() const { return this->primitive->value.AsPowerGrad()->scale; } @@ -36,4 +37,5 @@ void PowerGrad::SetPower(float power) {} void PowerGrad::SetScale(float scale) {} void PowerGrad::SetShift(float shift) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/power_grad.h b/mindspore/lite/src/ops/power_grad.h similarity index 83% rename from mindspore/lite/c_ops/power_grad.h rename to mindspore/lite/src/ops/power_grad.h index e5952cb87b..bb55b93521 100644 --- a/mindspore/lite/c_ops/power_grad.h +++ b/mindspore/lite/src/ops/power_grad.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_POWER_GRAD_H_ namespace mindspore { +namespace lite { class PowerGrad : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit PowerGrad(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit PowerGrad(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit PowerGrad(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + float GetPower() const; float GetScale() const; float GetShift() const; @@ -43,6 +41,7 @@ class PowerGrad : public PrimitiveC { void SetScale(float scale); void SetShift(float shift); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_POWER_GRAD_H_ diff --git a/mindspore/lite/c_ops/prelu.cc b/mindspore/lite/src/ops/prelu.cc similarity index 94% rename from mindspore/lite/c_ops/prelu.cc rename to mindspore/lite/src/ops/prelu.cc index 3433939ee9..f0c53e8cd3 100644 --- 
a/mindspore/lite/c_ops/prelu.cc +++ b/mindspore/lite/src/ops/prelu.cc @@ -14,9 +14,10 @@ * limitations under the License. */ -#include "c_ops/prelu.h" +#include "src/ops/prelu.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE std::vector Prelu::GetSlope() const { return this->primitive->value.AsPrelu()->slope; } @@ -31,4 +32,5 @@ std::vector Prelu::GetSlope() const { void Prelu::SetSlope(const std::vector &slope) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/prelu.h b/mindspore/lite/src/ops/prelu.h similarity index 81% rename from mindspore/lite/c_ops/prelu.h rename to mindspore/lite/src/ops/prelu.h index c7b5910c2b..87bd188b21 100644 --- a/mindspore/lite/c_ops/prelu.h +++ b/mindspore/lite/src/ops/prelu.h @@ -18,8 +18,9 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" -#include "c_ops/activation.h" +#include "src/ops/primitive_c.h" +#include "src/ops/activation.h" + #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -30,16 +31,14 @@ #define LITE_MINDSPORE_LITE_C_OPS_PRELU_H_ namespace mindspore { +namespace lite { class Prelu : public Activation { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Prelu(schema::PrimitiveT *primitive) : Activation(primitive) {} -#else - explicit Prelu(schema::Primitive *primitive) : Activation(primitive) {} -#endif + explicit Prelu(OriginPrimitive *primitive) : Activation(primitive) {} + std::vector GetSlope() const; void SetSlope(const std::vector &slope); }; +} // namespace lite } // namespace mindspore - #endif // LITE_MINDSPORE_LITE_C_OPS_PRELU_H_ diff --git a/mindspore/lite/src/ops/primitive_c.cc b/mindspore/lite/src/ops/primitive_c.cc new file mode 100644 index 0000000000..cd5cd35fca --- /dev/null +++ b/mindspore/lite/src/ops/primitive_c.cc @@ -0,0 +1,273 @@ +/** + * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). 
+ * + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "src/ops/primitive_c.h" +#include "src/ops/space_to_batch.h" +#include "src/ops/conv2d.h" +#include "src/ops/roi_pooling.h" +#include "src/ops/topk.h" +#include "src/ops/broadcast_to.h" +#include "src/ops/unsqueeze.h" +#include "src/ops/unstack.h" +#include "src/ops/depth_to_space.h" +#include "src/ops/batch_to_space.h" +#include "src/ops/prior_box.h" +#include "src/ops/lstm.h" +#include "src/ops/softmax.h" +#include "src/ops/activation.h" +#include "src/ops/deconv2d.h" +#include "src/ops/reduce.h" +#include "src/ops/pooling.h" +#include "src/ops/fused_batchnorm.h" +#include "src/ops/batch_norm.h" +#include "src/ops/power.h" +#include "src/ops/range.h" +#include "src/ops/add.h" +#include "src/ops/sub.h" +#include "src/ops/div.h" +#include "src/ops/bias_add.h" +#include "src/ops/expand_dims.h" +#include "src/ops/full_connection.h" +#include "src/ops/shape.h" +#include "src/ops/elu.h" +#include "src/ops/embedding_lookup.h" +#include "src/ops/quant_dtype_cast.h" +#include "src/ops/matmul.h" +#include "src/ops/resize.h" +#include "src/ops/tile.h" +#include "src/ops/one_hot.h" +#include "src/ops/space_to_depth.h" +#include "src/ops/split.h" +#include "src/ops/argmax.h" +#include "src/ops/argmin.h" +#include "src/ops/cast.h" +#include "src/ops/reshape.h" +#include "src/ops/scale.h" +#include "src/ops/concat.h" +#include "src/ops/nchw2nhwc.h" 
+#include "src/ops/slice.h" +#include "src/ops/squeeze.h" +#include "src/ops/flatten.h" +#include "src/ops/mean.h" +#include "src/ops/nhwc2nchw.h" +#include "src/ops/stack.h" +#include "src/ops/crop.h" +#include "src/ops/addn.h" +#include "src/ops/gather.h" +#include "src/ops/gather_nd.h" +#include "src/ops/local_response_normalization.h" +#include "src/ops/pad.h" +#include "src/ops/prelu.h" +#include "src/ops/caffe_p_relu.h" +#include "src/ops/reverse_sequence.h" +#include "src/ops/dedepthwise_conv2d.h" +#include "src/ops/depthwise_conv2d.h" +#include "src/ops/mul.h" +#include "src/ops/eltwise.h" +#include "src/ops/fill.h" +#include "src/ops/transpose.h" +#include "src/ops/log.h" +#include "src/ops/abs.h" +#include "src/ops/sin.h" +#include "src/ops/cos.h" +#include "src/ops/sqrt.h" +#include "src/ops/square.h" +#include "src/ops/exp.h" +#include "src/ops/rsqrt.h" +#include "src/ops/maximum.h" +#include "src/ops/minimum.h" +#include "src/ops/strided_slice.h" +#include "src/ops/reverse.h" +#include "src/ops/logical_and.h" +#include "src/ops/logical_or.h" +#include "src/ops/logical_not.h" +#include "src/ops/floor_div.h" +#include "src/ops/floor_mod.h" +#include "src/ops/equal.h" +#include "src/ops/not_equal.h" +#include "src/ops/less.h" +#include "src/ops/less_equal.h" +#include "src/ops/greater_equal.h" +#include "src/ops/greater.h" +#include "src/ops/floor.h" +#include "src/ops/squared_difference.h" +#include "src/ops/ceil.h" +#include "src/ops/round.h" +#include "src/ops/unique.h" +#include "src/ops/zeros_like.h" +#include "src/ops/where.h" +#include "src/ops/scatter_nd.h" +#include "src/ops/constant_of_shape.h" + +namespace mindspore { +namespace lite { +int PrimitiveC::InferShape(std::vector inputs_, std::vector outputs_) { + auto input = inputs_.front(); + MS_ASSERT(input != nullptr); + auto output = outputs_.front(); + MS_ASSERT(output != nullptr); + output->set_shape(input->shape()); + output->set_data_type(input->data_type()); + 
output->SetFormat(input->GetFormat()); + return 0; +} +int PrimitiveC::Type() const { +#ifdef PRIMITIVE_WRITEABLE + return this->primitive->value.type; +#else + return this->primitive->value_type(); +#endif +} + +bool PrimitiveC::GetInferFlag() const { return this->infer_flag_; } + +void PrimitiveC::SetInferFlag(bool flag) { this->infer_flag_ = flag; } + +PrimitiveC *PrimitiveC::CreatePrimitive(OriginPrimitive *primitive) { + MS_ASSERT(primitive != nullptr); + auto op_type = primitive->value_type(); + switch (op_type) { + case schema::PrimitiveType_SoftMax: + return new SoftMax(const_cast(primitive)); + case schema::PrimitiveType_Activation: + return new Activation(const_cast(primitive)); + case schema::PrimitiveType_Conv2D: + return new Conv2D(const_cast(primitive)); + case schema::PrimitiveType_Reduce: + return new Reduce(const_cast(primitive)); + case schema::PrimitiveType_Pooling: + return new Pooling(const_cast(primitive)); + case schema::PrimitiveType_ROIPooling: + return new ROIPooling(const_cast(primitive)); + case schema::PrimitiveType_DepthwiseConv2D: + return new DepthwiseConv2D(const_cast(primitive)); + case schema::PrimitiveType_FusedBatchNorm: + return new FusedBatchNorm(const_cast(primitive)); + case schema::PrimitiveType_BatchNorm: + return new BatchNorm(const_cast(primitive)); + case schema::PrimitiveType_FullConnection: + return new FullConnection(const_cast(primitive)); + case schema::PrimitiveType_Power: + return new Power(const_cast(primitive)); + case schema::PrimitiveType_Pad: + return new Pad(const_cast(primitive)); + case schema::PrimitiveType_Range: + return new Range(const_cast(primitive)); + case schema::PrimitiveType_Mul: + return new Mul(const_cast(primitive)); + case schema::PrimitiveType_Add: + return new Add(const_cast(primitive)); + case schema::PrimitiveType_Sub: + return new Sub(const_cast(primitive)); + case schema::PrimitiveType_Div: + return new Div(const_cast(primitive)); + case schema::PrimitiveType_BiasAdd: + return new 
BiasAdd(const_cast(primitive)); + case schema::PrimitiveType_ExpandDims: + return new ExpandDims(const_cast(primitive)); + case schema::PrimitiveType_ArgMax: + return new ArgMax(const_cast(primitive)); + case schema::PrimitiveType_ArgMin: + return new ArgMin(const_cast(primitive)); + case schema::PrimitiveType_Cast: + return new Cast(const_cast(primitive)); + case schema::PrimitiveType_Reshape: + return new Reshape(const_cast(primitive)); + case schema::PrimitiveType_Eltwise: + return new Eltwise(const_cast(primitive)); + case schema::PrimitiveType_Ceil: + return new Ceil(const_cast(primitive)); + case schema::PrimitiveType_Concat: + return new Concat(const_cast(primitive)); + case schema::PrimitiveType_Fill: + return new Fill(const_cast(primitive)); + case schema::PrimitiveType_Nhwc2Nchw: + return new Nhwc2Nchw(const_cast(primitive)); + case schema::PrimitiveType_Nchw2Nhwc: + return new Nchw2Nhwc(const_cast(primitive)); + case schema::PrimitiveType_Transpose: + return new Transpose(const_cast(primitive)); + case schema::PrimitiveType_Squeeze: + return new Squeeze(const_cast(primitive)); + case schema::PrimitiveType_SquaredDifference: + return new SquaredDifference(const_cast(primitive)); + case schema::PrimitiveType_Split: + return new Split(const_cast(primitive)); + case schema::PrimitiveType_FloorDiv: + return new FloorDiv(const_cast(primitive)); + case schema::PrimitiveType_FloorMod: + return new FloorMod(const_cast(primitive)); + case schema::PrimitiveType_Reverse: + return new Reverse(const_cast(primitive)); + case schema::PrimitiveType_Scale: + return new Scale(const_cast(primitive)); + case schema::PrimitiveType_GatherNd: + return new GatherNd(const_cast(primitive)); + case schema::PrimitiveType_Tile: + return new Tile(const_cast(primitive)); + case schema::PrimitiveType_TopK: + return new TopK(const_cast(primitive)); + case schema::PrimitiveType_Unique: + return new Unique(const_cast(primitive)); + case schema::PrimitiveType_Unstack: + return new 
Unstack(const_cast(primitive)); + case schema::PrimitiveType_ReverseSequence: + return new ReverseSequence(const_cast(primitive)); + case schema::PrimitiveType_Round: + return new Round(const_cast(primitive)); + case schema::PrimitiveType_ZerosLike: + return new ZerosLike(const_cast(primitive)); + case schema::PrimitiveType_Where: + return new Where(const_cast(primitive)); + case schema::PrimitiveType_Floor: + return new Floor(const_cast(primitive)); + case schema::PrimitiveType_Shape: + return new Shape(const_cast(primitive)); + case schema::PrimitiveType_ScatterND: + return new ScatterND(const_cast(primitive)); + case schema::PrimitiveType_Unsqueeze: + return new Unsqueeze(const_cast(primitive)); + case schema::PrimitiveType_Flatten: + return new Flatten(const_cast(primitive)); + case schema::PrimitiveType_StridedSlice: + return new StridedSlice(const_cast(primitive)); + case schema::PrimitiveType_Resize: + return new Resize(const_cast(primitive)); + case schema::PrimitiveType_OneHot: + return new OneHot(const_cast(primitive)); + case schema::PrimitiveType_PriorBox: + return new PriorBox(const_cast(primitive)); + case schema::PrimitiveType_SpaceToDepth: + return new SpaceToDepth(const_cast(primitive)); + case schema::PrimitiveType_SpaceToBatch: + return new SpaceToBatch(const_cast(primitive)); + case schema::PrimitiveType_QuantDTypeCast: + return new QuantDTypeCast(const_cast(primitive)); + case schema::PrimitiveType_MatMul: + return new MatMul(const_cast(primitive)); + case schema::PrimitiveType_EmbeddingLookup: + return new EmbeddingLookup(const_cast(primitive)); + case schema::PrimitiveType_ConstantOfShape: + return new ConstantOfShape(const_cast(primitive)); + default: + break; + } + return nullptr; +} +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/primitive_c.h b/mindspore/lite/src/ops/primitive_c.h similarity index 53% rename from mindspore/lite/c_ops/primitive_c.h rename to mindspore/lite/src/ops/primitive_c.h index 
363aed120a..72a030074f 100644 --- a/mindspore/lite/c_ops/primitive_c.h +++ b/mindspore/lite/src/ops/primitive_c.h @@ -22,52 +22,49 @@ #include #include #include "src/ir/tensor.h" -#include "ir/primitive.h" -#include "ir/value.h" +#include "include/errorcode.h" +#include "utils/log_adapter.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" +using OriginPrimitive = mindspore::schema::PrimitiveT; #else #include "schema/model_generated.h" +using OriginPrimitive = mindspore::schema::Primitive; #endif + namespace mindspore { +namespace lite { constexpr uint32_t kSingleNum = 1; constexpr uint32_t kDoubleNum = 2; constexpr uint32_t kMultiNum = 3; constexpr uint32_t kDimension_4d = 4; -enum NCHW_SHAPE { NCHW_N = 0, NCHW_C = 1, NCHW_H = 2, NCHW_W = 3 }; -enum NHWC_SHAPE { NHWC_N = 0, NHWC_H = 1, NHWC_W = 2, NHWC_C = 3 }; -enum HWCK_SHAPE { HWCK_H = 0, HWCK_W = 1, HWCK_C = 2, HWCK_K = 3 }; -enum HWKC_SHAPE { HWKC_H = 0, HWKC_W = 1, HWKC_K = 2, HWKC_C = 3 }; -enum KCHW_SHAPE { KCHW_K = 0, KCHW_C = 1, KCHW_H = 2, KCHW_W = 3 }; -enum CKHW_SHAPE { CKHW_C = 0, CKHW_K = 1, CKHW_H = 2, CKHW_W = 3 }; -enum CHWK_SHAPE { CHWK_C = 0, CHWK_H = 1, CHWK_W = 2, CHWK_K = 3 }; -enum KHWC_SHAPE { KHWC_K = 0, KHWC_H = 1, KHWC_W = 2, KHWC_C = 3 }; const std::set kSupportDataType = {kNumberTypeUInt8, kNumberTypeInt32, kNumberTypeFloat32}; -class PrimitiveC : public Primitive { +// #if LITE_OPTIMIZE +class PrimitiveC { public: - explicit PrimitiveC(const std::string &name) : Primitive(name) {} + PrimitiveC() = default; + + explicit PrimitiveC(OriginPrimitive *primitive) : primitive(primitive) {} + + static PrimitiveC *CreatePrimitive(OriginPrimitive *primitive); -#ifdef PRIMITIVE_WRITEABLE - explicit PrimitiveC(schema::PrimitiveT *primitive) : Primitive(""), primitive(primitive) {} -#else - explicit PrimitiveC(schema::Primitive *primitive) : Primitive(""), primitive(primitive) {} -#endif - static Primitive *CreatePrimitive(schema::Primitive *primitive); virtual ~PrimitiveC() 
{} - const bool GetInferFlag() const { return this->infer_flag_; } - void SetInferFlag(bool flag) { this->infer_flag_ = flag; } - virtual int InferShape(std::vector inputs_, std::vector outputs_) = 0; + + bool GetInferFlag() const; + + void SetInferFlag(bool flag); + + virtual int InferShape(std::vector inputs_, std::vector outputs_); + + int Type() const; protected: -#ifdef PRIMITIVE_WRITEABLE - schema::PrimitiveT *primitive; -#else - schema::Primitive *primitive; -#endif + OriginPrimitive *primitive; bool infer_flag_ = true; }; +} // namespace lite } // namespace mindspore #endif // MINDSPORE_CORE_C_OPS_PRIMITIVE_C_H_ diff --git a/mindspore/lite/src/ops/prior_box.cc b/mindspore/lite/src/ops/prior_box.cc index 96e5b65413..bd7b921897 100644 --- a/mindspore/lite/src/ops/prior_box.cc +++ b/mindspore/lite/src/ops/prior_box.cc @@ -14,30 +14,98 @@ * limitations under the License. */ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/prior_box.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +std::vector PriorBox::GetMinSizes() const { return this->primitive->value.AsPriorBox()->min_sizes; } +std::vector PriorBox::GetMaxSizes() const { return this->primitive->value.AsPriorBox()->max_sizes; } +std::vector PriorBox::GetAspectRatios() const { return this->primitive->value.AsPriorBox()->aspect_ratios; } +std::vector PriorBox::GetVariances() const { return this->primitive->value.AsPriorBox()->variances; } +int PriorBox::GetImageSizeW() const { return this->primitive->value.AsPriorBox()->image_size_w; } +int PriorBox::GetImageSizeH() const { return this->primitive->value.AsPriorBox()->image_size_h; } +float PriorBox::GetStepW() const { return this->primitive->value.AsPriorBox()->step_w; } +float PriorBox::GetStepH() const { return this->primitive->value.AsPriorBox()->step_h; } +bool PriorBox::GetClip() const { return this->primitive->value.AsPriorBox()->clip; } +bool
PriorBox::GetFlip() const { return this->primitive->value.AsPriorBox()->flip; } +float PriorBox::GetOffset() const { return this->primitive->value.AsPriorBox()->offset; } + +void PriorBox::SetMinSizes(const std::vector &min_sizes) { + this->primitive->value.AsPriorBox()->min_sizes = min_sizes; +} +void PriorBox::SetMaxSizes(const std::vector &max_sizes) { + this->primitive->value.AsPriorBox()->max_sizes = max_sizes; +} +void PriorBox::SetAspectRatios(const std::vector &aspect_ratios) { + this->primitive->value.AsPriorBox()->aspect_ratios = aspect_ratios; +} +void PriorBox::SetVariances(const std::vector &variances) { + this->primitive->value.AsPriorBox()->variances = variances; +} +void PriorBox::SetImageSizeW(int image_size_w) { this->primitive->value.AsPriorBox()->image_size_w = image_size_w; } +void PriorBox::SetImageSizeH(int image_size_h) { this->primitive->value.AsPriorBox()->image_size_h = image_size_h; } +void PriorBox::SetStepW(float step_w) { this->primitive->value.AsPriorBox()->step_w = step_w; } +void PriorBox::SetStepH(float step_h) { this->primitive->value.AsPriorBox()->step_h = step_h; } +void PriorBox::SetClip(bool clip) { this->primitive->value.AsPriorBox()->clip = clip; } +void PriorBox::SetFlip(bool flip) { this->primitive->value.AsPriorBox()->flip = flip; } +void PriorBox::SetOffset(float offset) { this->primitive->value.AsPriorBox()->offset = offset; } + +#else + +std::vector PriorBox::GetMinSizes() const { + auto fb_vector = this->primitive->value_as_PriorBox()->min_sizes(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} +std::vector PriorBox::GetMaxSizes() const { + auto fb_vector = this->primitive->value_as_PriorBox()->max_sizes(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} +std::vector PriorBox::GetAspectRatios() const { + auto fb_vector = this->primitive->value_as_PriorBox()->aspect_ratios(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} +std::vector PriorBox::GetVariances() const { + 
auto fb_vector = this->primitive->value_as_PriorBox()->variances(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} +int PriorBox::GetImageSizeW() const { return this->primitive->value_as_PriorBox()->image_size_w(); } +int PriorBox::GetImageSizeH() const { return this->primitive->value_as_PriorBox()->image_size_h(); } +float PriorBox::GetStepW() const { return this->primitive->value_as_PriorBox()->step_w(); } +float PriorBox::GetStepH() const { return this->primitive->value_as_PriorBox()->step_h(); } +bool PriorBox::GetClip() const { return this->primitive->value_as_PriorBox()->clip(); } +bool PriorBox::GetFlip() const { return this->primitive->value_as_PriorBox()->flip(); } +float PriorBox::GetOffset() const { return this->primitive->value_as_PriorBox()->offset(); } + +void PriorBox::SetMinSizes(const std::vector &min_sizes) {} +void PriorBox::SetMaxSizes(const std::vector &max_sizes) {} +void PriorBox::SetAspectRatios(const std::vector &aspect_ratios) {} +void PriorBox::SetVariances(const std::vector &variances) {} +void PriorBox::SetImageSizeW(int image_size_w) {} +void PriorBox::SetImageSizeH(int image_size_h) {} +void PriorBox::SetStepW(float step_w) {} +void PriorBox::SetStepH(float step_h) {} +void PriorBox::SetClip(bool clip) {} +void PriorBox::SetFlip(bool flip) {} +void PriorBox::SetOffset(float offset) {} +#endif -namespace mindspore::lite { namespace { constexpr int kPriorBoxPoints = 4; constexpr int kPriorBoxN = 1; constexpr int kPriorBoxW = 1; constexpr int kPriorBoxC = 2; } // namespace - int PriorBox::InferShape(std::vector inputs_, std::vector outputs_) { - auto param = GetAttrbute(); + auto param = this->primitive->value_as_PriorBox(); MS_ASSERT(param != nullptr); std::vector different_aspect_ratios{1.0f}; auto aspect_ratios = param->aspect_ratios(); MS_ASSERT(aspect_ratios != nullptr); for (auto i = 0; i < aspect_ratios->size(); i++) { float ratio = (*aspect_ratios)[i]; - bool exist = std::any_of(different_aspect_ratios.begin(), 
different_aspect_ratios.end(), [&](float v) { - return abs(ratio - v) < 1e-6; - }); + bool exist = std::any_of(different_aspect_ratios.begin(), different_aspect_ratios.end(), + [&](float v) { return abs(ratio - v) < 1e-6; }); if (!exist) { different_aspect_ratios.emplace_back(ratio); if (param->flip()) { @@ -49,14 +117,13 @@ int PriorBox::InferShape(std::vector inputs_, std::vectorHeight() * input->Width() * num_priors_box * kPriorBoxPoints; - std::vector output_shape{kPriorBoxN, h, kPriorBoxW, kPriorBoxC}; auto output = outputs_.at(0); MS_ASSERT(output != nullptr); - output->set_shape(output_shape); output->set_data_type(kNumberTypeFloat32); output->SetFormat(input->GetFormat()); return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/prior_box.h b/mindspore/lite/src/ops/prior_box.h similarity index 89% rename from mindspore/lite/c_ops/prior_box.h rename to mindspore/lite/src/ops/prior_box.h index e0d72a5a8b..78eaafc849 100644 --- a/mindspore/lite/c_ops/prior_box.h +++ b/mindspore/lite/src/ops/prior_box.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_PRIOR_BOX_H_ namespace mindspore { +namespace lite { class PriorBox : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit PriorBox(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit PriorBox(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit PriorBox(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; std::vector GetMinSizes() const; std::vector GetMaxSizes() const; @@ -60,6 +58,7 @@ class PriorBox : public PrimitiveC { void SetFlip(bool flip); void SetOffset(float offset); }; +} // 
namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_PRIOR_BOX_H_ diff --git a/mindspore/lite/src/ops/quant_dtype_cast.cc b/mindspore/lite/src/ops/quant_dtype_cast.cc index 82a9fa6548..50cd868c6d 100644 --- a/mindspore/lite/src/ops/quant_dtype_cast.cc +++ b/mindspore/lite/src/ops/quant_dtype_cast.cc @@ -14,12 +14,26 @@ * limitations under the License. */ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/quant_dtype_cast.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +int QuantDTypeCast::GetSrcT() const { return this->primitive->value.AsQuantDTypeCast()->srcT; } +int QuantDTypeCast::GetDstT() const { return this->primitive->value.AsQuantDTypeCast()->dstT; } + +void QuantDTypeCast::SetSrcT(int src_t) { this->primitive->value.AsQuantDTypeCast()->srcT = src_t; } +void QuantDTypeCast::SetDstT(int dst_t) { this->primitive->value.AsQuantDTypeCast()->dstT = dst_t; } + +#else + +int QuantDTypeCast::GetSrcT() const { return this->primitive->value_as_QuantDTypeCast()->srcT(); } +int QuantDTypeCast::GetDstT() const { return this->primitive->value_as_QuantDTypeCast()->dstT(); } + +void QuantDTypeCast::SetSrcT(int src_t) {} +void QuantDTypeCast::SetDstT(int dst_t) {} +#endif -namespace mindspore::lite { int QuantDTypeCast::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); auto input = inputs_.front(); @@ -33,4 +47,5 @@ int QuantDTypeCast::InferShape(std::vector inputs_, std::vecto output->SetFormat(input->GetFormat()); return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/quant_dtype_cast.h b/mindspore/lite/src/ops/quant_dtype_cast.h similarity index 83% rename from mindspore/lite/c_ops/quant_dtype_cast.h rename to mindspore/lite/src/ops/quant_dtype_cast.h index e7e128e9e2..2970ac5e37 100644 --- 
a/mindspore/lite/c_ops/quant_dtype_cast.h +++ b/mindspore/lite/src/ops/quant_dtype_cast.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,19 +29,18 @@ #define LITE_MINDSPORE_LITE_C_OPS_QUANT_D_TYPE_CAST_H_ namespace mindspore { +namespace lite { class QuantDTypeCast : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit QuantDTypeCast(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit QuantDTypeCast(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit QuantDTypeCast(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetSrcT() const; int GetDstT() const; void SetSrcT(int src_t); void SetDstT(int dst_t); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_QUANT_D_TYPE_CAST_H_ diff --git a/mindspore/lite/src/ops/range.cc b/mindspore/lite/src/ops/range.cc index 53180a8d51..29173b35f5 100644 --- a/mindspore/lite/src/ops/range.cc +++ b/mindspore/lite/src/ops/range.cc @@ -14,12 +14,34 @@ * limitations under the License. 
*/ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/range.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +int Range::GetDType() const { return this->primitive->value.AsRange()->dType; } +int Range::GetStart() const { return this->primitive->value.AsRange()->start; } +int Range::GetLimit() const { return this->primitive->value.AsRange()->limit; } +int Range::GetDelta() const { return this->primitive->value.AsRange()->delta; } + +void Range::SetDType(int d_type) { this->primitive->value.AsRange()->dType = d_type; } +void Range::SetStart(int start) { this->primitive->value.AsRange()->start = start; } +void Range::SetLimit(int limit) { this->primitive->value.AsRange()->limit = limit; } +void Range::SetDelta(int delta) { this->primitive->value.AsRange()->delta = delta; } + +#else + +int Range::GetDType() const { return this->primitive->value_as_Range()->dType(); } +int Range::GetStart() const { return this->primitive->value_as_Range()->start(); } +int Range::GetLimit() const { return this->primitive->value_as_Range()->limit(); } +int Range::GetDelta() const { return this->primitive->value_as_Range()->delta(); } + +void Range::SetDType(int d_type) {} +void Range::SetStart(int start) {} +void Range::SetLimit(int limit) {} +void Range::SetDelta(int delta) {} +#endif -namespace mindspore::lite { int Range::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); auto input = inputs_.front(); @@ -28,14 +50,13 @@ int Range::InferShape(std::vector inputs_, std::vectorprimitive->value_as_Range(); MS_ASSERT(range_prim != nullptr); - int shape_size = std::ceil(static_cast(range_prim->limit() - range_prim->start()) / range_prim->delta()); std::vector in_shape(1); in_shape.push_back(shape_size); output->set_shape(in_shape); output->set_data_type(input->data_type()); output->SetFormat(input->GetFormat()); - return RET_OK; } -} // 
namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/range.h b/mindspore/lite/src/ops/range.h similarity index 85% rename from mindspore/lite/c_ops/range.h rename to mindspore/lite/src/ops/range.h index 7317d20880..237c764b2f 100644 --- a/mindspore/lite/c_ops/range.h +++ b/mindspore/lite/src/ops/range.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_RANGE_H_ namespace mindspore { +namespace lite { class Range : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Range(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Range(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Range(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetDType() const; int GetStart() const; @@ -46,6 +44,7 @@ class Range : public PrimitiveC { void SetLimit(int limit); void SetDelta(int delta); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_RANGE_H_ diff --git a/mindspore/lite/src/ops/rank.cc b/mindspore/lite/src/ops/rank.cc index 5939396d16..5ee331bc76 100644 --- a/mindspore/lite/src/ops/rank.cc +++ b/mindspore/lite/src/ops/rank.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,12 +14,11 @@ * limitations under the License. 
*/ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/rank.h" + +namespace mindspore { +namespace lite { -namespace mindspore::lite { int Rank::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); auto input = inputs_.front(); @@ -30,7 +29,7 @@ int Rank::InferShape(std::vector inputs_, std::vectorset_shape(in_shape); output->set_data_type(input->data_type()); output->SetFormat(input->GetFormat()); - return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/rank.h b/mindspore/lite/src/ops/rank.h similarity index 74% rename from mindspore/lite/c_ops/rank.h rename to mindspore/lite/src/ops/rank.h index bd593e37e1..3c979f9d54 100644 --- a/mindspore/lite/c_ops/rank.h +++ b/mindspore/lite/src/ops/rank.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,15 +29,14 @@ #define LITE_MINDSPORE_LITE_C_OPS_RANK_H_ namespace mindspore { +namespace lite { class Rank : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Rank(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Rank(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Rank(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_RANK_H_ diff --git a/mindspore/lite/src/ops/reduce.cc b/mindspore/lite/src/ops/reduce.cc index 2edd135d8b..652d962ade 100644 --- a/mindspore/lite/src/ops/reduce.cc +++ b/mindspore/lite/src/ops/reduce.cc @@ -14,12 +14,33 @@ * limitations under the License. 
*/ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/reduce.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +std::vector Reduce::GetAxes() const { return this->primitive->value.AsReduce()->axes; } +int Reduce::GetKeepDims() const { return this->primitive->value.AsReduce()->keepDims; } +int Reduce::GetMode() const { return this->primitive->value.AsReduce()->mode; } + +void Reduce::SetAxes(const std::vector &axes) { this->primitive->value.AsReduce()->axes = axes; } +void Reduce::SetKeepDims(int keep_dims) { this->primitive->value.AsReduce()->keepDims = keep_dims; } +void Reduce::SetMode(int mode) { this->primitive->value.AsReduce()->mode = (schema::ReduceMode)mode; } + +#else + +std::vector Reduce::GetAxes() const { + auto fb_vector = this->primitive->value_as_Reduce()->axes(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} +int Reduce::GetKeepDims() const { return this->primitive->value_as_Reduce()->keepDims(); } +int Reduce::GetMode() const { return this->primitive->value_as_Reduce()->mode(); } + +void Reduce::SetAxes(const std::vector &axes) {} +void Reduce::SetKeepDims(int keep_dims) {} +void Reduce::SetMode(int mode) {} +#endif -namespace mindspore::lite { namespace { constexpr size_t kInputSize = 1; constexpr size_t kOutputSize = 1; @@ -58,7 +79,6 @@ int Reduce::InferShape(std::vector inputs_, std::vectorset_data_type(input->data_type()); return RET_OK; } - // reduce on selected axes for (size_t i = 0; i < in_shape.size(); i++) { bool reduce_axis = false; @@ -77,7 +97,7 @@ int Reduce::InferShape(std::vector inputs_, std::vectorset_shape(out_shape); - return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/reduce.h b/mindspore/lite/src/ops/reduce.h similarity index 85% rename from mindspore/lite/c_ops/reduce.h rename to mindspore/lite/src/ops/reduce.h index 
0b50be1219..5cce3a8fb6 100644 --- a/mindspore/lite/c_ops/reduce.h +++ b/mindspore/lite/src/ops/reduce.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_REDUCE_H_ namespace mindspore { +namespace lite { class Reduce : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Reduce(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Reduce(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Reduce(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; std::vector GetAxes() const; int GetKeepDims() const; @@ -44,6 +42,7 @@ class Reduce : public PrimitiveC { void SetKeepDims(int keep_dims); void SetMode(int mode); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_REDUCE_H_ diff --git a/mindspore/lite/src/ops/reshape.cc b/mindspore/lite/src/ops/reshape.cc index 1794daed85..e9662f22b0 100644 --- a/mindspore/lite/src/ops/reshape.cc +++ b/mindspore/lite/src/ops/reshape.cc @@ -14,18 +14,38 @@ * limitations under the License. 
*/ -#include "src/ops/ops.h" +#include "src/ops/reshape.h" +#include #include "include/errorcode.h" #include "utils/log_adapter.h" #include "src/ir/tensor.h" -namespace mindspore::lite { +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +int Reshape::GetFormat() const { return this->primitive->value.AsReshape()->format; } +std::vector Reshape::GetShape() const { return this->primitive->value.AsReshape()->shape; } + +void Reshape::SetFormat(int format) { this->primitive->value.AsReshape()->format = format; } +void Reshape::SetShape(const std::vector &shape) { this->primitive->value.AsReshape()->shape = shape; } + +#else + +int Reshape::GetFormat() const { return this->primitive->value_as_Reshape()->format(); } +std::vector Reshape::GetShape() const { + auto fb_vector = this->primitive->value_as_Reshape()->shape(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} + +void Reshape::SetFormat(int format) {} +void Reshape::SetShape(const std::vector &shape) {} +#endif + int Reshape::CalNewShape(const tensor::Tensor *in_tensor, std::vector *out_shape) const { size_t in_shape_size = 1; for (size_t i = 0; i < in_tensor->shape().size(); i++) { in_shape_size *= in_tensor->shape()[i]; } - int64_t inferIndex = -1; size_t out_shapeSize = 1; for (size_t i = 0; i < out_shape->size(); i++) { @@ -46,7 +66,6 @@ int Reshape::CalNewShape(const tensor::Tensor *in_tensor, std::vector *out_ out_shapeSize *= out_shape->at(i); } } - if (inferIndex == -1 && out_shapeSize != in_shape_size) { MS_LOG(ERROR) << "output shapeSize: " << out_shapeSize << " should be equal to input shapeSize: " << in_shape_size; return RET_INFER_ERR; @@ -56,11 +75,9 @@ int Reshape::CalNewShape(const tensor::Tensor *in_tensor, std::vector *out_ } return RET_OK; } - template void CalShape(const T *data, const std::vector &inputs, std::vector *out_shape, int shape_size) { int input_count = inputs[0]->ElementsNum(); - int index = 0; int size = 1; for (size_t i = 0; i < shape_size; i++) { 
@@ -75,7 +92,6 @@ void CalShape(const T *data, const std::vector &inputs, std::v (*out_shape)[index] = input_count / size; } } - int Reshape::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); auto input = inputs_.front(); @@ -89,7 +105,6 @@ int Reshape::InferShape(std::vector inputs_, std::vectorprimitive->value_as_Reshape(); MS_ASSERT(reshape_prim != nullptr); - std::vector out_shape; if (inputs_.size() == kDoubleNum) { auto shape_tensor = inputs_.at(1); @@ -130,14 +145,13 @@ int Reshape::InferShape(std::vector inputs_, std::vectorset_shape(out_shape); return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/reshape.h b/mindspore/lite/src/ops/reshape.h similarity index 85% rename from mindspore/lite/c_ops/reshape.h rename to mindspore/lite/src/ops/reshape.h index 7fa1244a2d..c7f77974f6 100644 --- a/mindspore/lite/c_ops/reshape.h +++ b/mindspore/lite/src/ops/reshape.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_RESHAPE_H_ namespace mindspore { +namespace lite { class Reshape : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Reshape(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Reshape(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Reshape(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetFormat() const; std::vector GetShape() const; @@ -45,6 +43,7 @@ class Reshape : public PrimitiveC { private: int CalNewShape(const lite::tensor::Tensor *in_tensor, std::vector *out_shape) const; }; +} // namespace lite } // namespace mindspore #endif // 
LITE_MINDSPORE_LITE_C_OPS_RESHAPE_H_ diff --git a/mindspore/lite/src/ops/resize.cc b/mindspore/lite/src/ops/resize.cc index 3ceae2ea1d..be21df3a8c 100644 --- a/mindspore/lite/src/ops/resize.cc +++ b/mindspore/lite/src/ops/resize.cc @@ -14,31 +14,60 @@ * limitations under the License. */ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" -#include "src/runtime/kernel/arm/nnacl/op_base.h" +#include "src/ops/resize.h" -namespace mindspore::lite { +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +int Resize::GetFormat() const { return this->primitive->value.AsResize()->format; } +int Resize::GetMethod() const { return this->primitive->value.AsResize()->method; } +long Resize::GetNewHeight() const { return this->primitive->value.AsResize()->newHeight; } +long Resize::GetNewWidth() const { return this->primitive->value.AsResize()->newWidth; } +bool Resize::GetAlignCorners() const { return this->primitive->value.AsResize()->alignCorners; } +bool Resize::GetPreserveAspectRatio() const { return this->primitive->value.AsResize()->preserveAspectRatio; } + +void Resize::SetFormat(int format) { this->primitive->value.AsResize()->format = (schema::Format)format; } +void Resize::SetMethod(int method) { this->primitive->value.AsResize()->method = (schema::ResizeMethod)method; } +void Resize::SetNewHeight(long new_height) { this->primitive->value.AsResize()->newHeight = new_height; } +void Resize::SetNewWidth(long new_width) { this->primitive->value.AsResize()->newWidth = new_width; } +void Resize::SetAlignCorners(bool align_corners) { this->primitive->value.AsResize()->alignCorners = align_corners; } +void Resize::SetPreserveAspectRatio(bool preserve_aspect_ratio) { + this->primitive->value.AsResize()->preserveAspectRatio = preserve_aspect_ratio; +} + +#else + +int Resize::GetFormat() const { return this->primitive->value_as_Resize()->format(); } +int Resize::GetMethod() const { return 
this->primitive->value_as_Resize()->method(); } +long Resize::GetNewHeight() const { return this->primitive->value_as_Resize()->newHeight(); } +long Resize::GetNewWidth() const { return this->primitive->value_as_Resize()->newWidth(); } +bool Resize::GetAlignCorners() const { return this->primitive->value_as_Resize()->alignCorners(); } +bool Resize::GetPreserveAspectRatio() const { return this->primitive->value_as_Resize()->preserveAspectRatio(); } + +void Resize::SetFormat(int format) {} +void Resize::SetMethod(int method) {} +void Resize::SetNewHeight(long new_height) {} +void Resize::SetNewWidth(long new_width) {} +void Resize::SetAlignCorners(bool align_corners) {} +void Resize::SetPreserveAspectRatio(bool preserve_aspect_ratio) {} +#endif namespace { constexpr int kInputRank = 4; } // namespace -int Resize::InferShape(std::vector inputs_, std::vector outputs_) { +int Resize::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); auto input = inputs_.front(); if (input == nullptr) { - return RET_NULL_PTR; + return RET_NULL_PTR; } MS_ASSERT(input->shape().size() == kInputRank); auto output = outputs_.front(); if (output == nullptr) { - return RET_NULL_PTR; + return RET_NULL_PTR; } - auto resize = GetAttrbute(); - auto new_height = resize->newHeight(); - auto new_width = resize->newWidth(); + auto new_height = GetNewHeight(); + auto new_width = GetNewWidth(); std::vector output_shape; output_shape.push_back(input->Batch()); @@ -49,6 +78,7 @@ int Resize::InferShape(std::vector inputs_, std::vectorset_data_type(input->data_type()); output->SetFormat(input->GetFormat()); - return RET_OK; + return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/resize.h b/mindspore/lite/src/ops/resize.h similarity index 86% rename from mindspore/lite/c_ops/resize.h rename to mindspore/lite/src/ops/resize.h index b04c6fceea..0f365a7c06 100644 --- a/mindspore/lite/c_ops/resize.h +++
b/mindspore/lite/src/ops/resize.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_RESIZE_H_ namespace mindspore { +namespace lite { class Resize : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Resize(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Resize(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Resize(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetFormat() const; int GetMethod() const; @@ -50,6 +48,7 @@ class Resize : public PrimitiveC { void SetAlignCorners(bool align_corners); void SetPreserveAspectRatio(bool preserve_aspect_ratio); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_RESIZE_H_ diff --git a/mindspore/lite/c_ops/reverse.cc b/mindspore/lite/src/ops/reverse.cc similarity index 94% rename from mindspore/lite/c_ops/reverse.cc rename to mindspore/lite/src/ops/reverse.cc index fc8c161cd7..ad941dc705 100644 --- a/mindspore/lite/c_ops/reverse.cc +++ b/mindspore/lite/src/ops/reverse.cc @@ -14,9 +14,10 @@ * limitations under the License. 
*/ -#include "c_ops/reverse.h" +#include "src/ops/reverse.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE std::vector Reverse::GetAxis() const { return this->primitive->value.AsReverse()->axis; } @@ -31,4 +32,5 @@ std::vector Reverse::GetAxis() const { void Reverse::SetAxis(const std::vector &axis) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/reverse.h b/mindspore/lite/src/ops/reverse.h similarity index 82% rename from mindspore/lite/c_ops/reverse.h rename to mindspore/lite/src/ops/reverse.h index b8ac1fa151..44c45c6843 100644 --- a/mindspore/lite/c_ops/reverse.h +++ b/mindspore/lite/src/ops/reverse.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,16 +29,15 @@ #define LITE_MINDSPORE_LITE_C_OPS_REVERSE_H_ namespace mindspore { +namespace lite { class Reverse : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Reverse(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Reverse(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Reverse(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + std::vector GetAxis() const; void SetAxis(const std::vector &axis); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_REVERSE_H_ diff --git a/mindspore/lite/src/ops/reverse_sequence.cc b/mindspore/lite/src/ops/reverse_sequence.cc index e7ff1e7e62..f0512cd522 100644 --- a/mindspore/lite/src/ops/reverse_sequence.cc +++ b/mindspore/lite/src/ops/reverse_sequence.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -14,22 +14,48 @@ * limitations under the License. */ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/reverse_sequence.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +int ReverseSequence::GetSeqAxis() const { return this->primitive->value.AsReverseSequence()->seqAxis; } +int ReverseSequence::GetBatchAxis() const { return this->primitive->value.AsReverseSequence()->batchAxis; } +std::vector ReverseSequence::GetSeqLengths() const { + return this->primitive->value.AsReverseSequence()->seqLengths; +} + +void ReverseSequence::SetSeqAxis(int seq_axis) { this->primitive->value.AsReverseSequence()->seqAxis = seq_axis; } +void ReverseSequence::SetBatchAxis(int batch_axis) { + this->primitive->value.AsReverseSequence()->batchAxis = batch_axis; +} +void ReverseSequence::SetSeqLengths(const std::vector &seq_lengths) { + this->primitive->value.AsReverseSequence()->seqLengths = seq_lengths; +} + +#else + +int ReverseSequence::GetSeqAxis() const { return this->primitive->value_as_ReverseSequence()->seqAxis(); } +int ReverseSequence::GetBatchAxis() const { return this->primitive->value_as_ReverseSequence()->batchAxis(); } +std::vector ReverseSequence::GetSeqLengths() const { + auto fb_vector = this->primitive->value_as_ReverseSequence()->seqLengths(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} + +void ReverseSequence::SetSeqAxis(int seq_axis) {} +void ReverseSequence::SetBatchAxis(int batch_axis) {} +void ReverseSequence::SetSeqLengths(const std::vector &seq_lengths) {} +#endif -namespace mindspore::lite { int ReverseSequence::InferShape(std::vector inputs, std::vector outputs) { auto input = inputs.front(); auto output = outputs.front(); MS_ASSERT(input != nullptr); MS_ASSERT(output != nullptr); - output->set_shape(input->shape()); output->set_data_type(input->data_type()); output->SetFormat(input->GetFormat()); return RET_OK; } -} // namespace 
mindspore::lite - +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/reverse_sequence.h b/mindspore/lite/src/ops/reverse_sequence.h similarity index 84% rename from mindspore/lite/c_ops/reverse_sequence.h rename to mindspore/lite/src/ops/reverse_sequence.h index 962d125873..95c798da02 100644 --- a/mindspore/lite/c_ops/reverse_sequence.h +++ b/mindspore/lite/src/ops/reverse_sequence.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_REVERSE_SEQUENCE_H_ namespace mindspore { +namespace lite { class ReverseSequence : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit ReverseSequence(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit ReverseSequence(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit ReverseSequence(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetSeqAxis() const; int GetBatchAxis() const; @@ -44,6 +42,7 @@ class ReverseSequence : public PrimitiveC { void SetBatchAxis(int batch_axis); void SetSeqLengths(const std::vector &seq_lengths); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_REVERSE_SEQUENCE_H_ diff --git a/mindspore/lite/src/ops/roi_pooling.cc b/mindspore/lite/src/ops/roi_pooling.cc index ed73ac0a1e..114c10c2b2 100644 --- a/mindspore/lite/src/ops/roi_pooling.cc +++ b/mindspore/lite/src/ops/roi_pooling.cc @@ -14,12 +14,30 @@ * limitations under the License. 
*/ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/roi_pooling.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +int ROIPooling::GetPooledH() const { return this->primitive->value.AsROIPooling()->pooledH; } +int ROIPooling::GetPooledW() const { return this->primitive->value.AsROIPooling()->pooledW; } +float ROIPooling::GetScale() const { return this->primitive->value.AsROIPooling()->scale; } + +void ROIPooling::SetPooledH(int pooled_h) { this->primitive->value.AsROIPooling()->pooledH = pooled_h; } +void ROIPooling::SetPooledW(int pooled_w) { this->primitive->value.AsROIPooling()->pooledW = pooled_w; } +void ROIPooling::SetScale(float scale) { this->primitive->value.AsROIPooling()->scale = scale; } + +#else + +int ROIPooling::GetPooledH() const { return this->primitive->value_as_ROIPooling()->pooledH(); } +int ROIPooling::GetPooledW() const { return this->primitive->value_as_ROIPooling()->pooledW(); } +float ROIPooling::GetScale() const { return this->primitive->value_as_ROIPooling()->scale(); } + +void ROIPooling::SetPooledH(int pooled_h) {} +void ROIPooling::SetPooledW(int pooled_w) {} +void ROIPooling::SetScale(float scale) {} +#endif -namespace mindspore::lite { int ROIPooling::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); if (inputs_.size() != kDoubleNum) { @@ -38,12 +56,10 @@ int ROIPooling::InferShape(std::vector inputs_, std::vectorprimitive->value_as_ROIPooling(); auto new_h = ROIPooling->pooledH(); auto new_w = ROIPooling->pooledW(); - auto shape_data = roi->shape(); - std::vector output_shape; output_shape.push_back(shape_data[0]); output_shape.push_back(new_h); @@ -52,7 +68,7 @@ int ROIPooling::InferShape(std::vector inputs_, std::vectorset_shape(output_shape); output->set_data_type(input->data_type()); output->SetFormat(input->GetFormat()); - return RET_OK; } -} // namespace mindspore::lite +} 
// namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/roi_pooling.h b/mindspore/lite/src/ops/roi_pooling.h similarity index 84% rename from mindspore/lite/c_ops/roi_pooling.h rename to mindspore/lite/src/ops/roi_pooling.h index 16f833c2ac..9daa506f0b 100644 --- a/mindspore/lite/c_ops/roi_pooling.h +++ b/mindspore/lite/src/ops/roi_pooling.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_R_O_I_POOLING_H_ namespace mindspore { +namespace lite { class ROIPooling : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit ROIPooling(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit ROIPooling(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit ROIPooling(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetPooledH() const; int GetPooledW() const; @@ -44,6 +42,7 @@ class ROIPooling : public PrimitiveC { void SetPooledW(int pooled_w); void SetScale(float scale); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_R_O_I_POOLING_H_ diff --git a/mindspore/lite/src/ops/round.h b/mindspore/lite/src/ops/round.h new file mode 100644 index 0000000000..48740d9412 --- /dev/null +++ b/mindspore/lite/src/ops/round.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "ir/dtype/type_id.h" +#include "src/ops/arithmetic_self.h" +#ifdef PRIMITIVE_WRITEABLE +#include "schema/inner/model_generated.h" +#else +#include "schema/model_generated.h" +#endif + +#ifndef LITE_MINDSPORE_LITE_C_OPS_ROUND_H_ +#define LITE_MINDSPORE_LITE_C_OPS_ROUND_H_ + +namespace mindspore { +namespace lite { +class Round : public ArithmeticSelf { + public: + explicit Round(OriginPrimitive *primitive) : ArithmeticSelf(primitive) {} +}; +} // namespace lite +} // namespace mindspore + +#endif // LITE_MINDSPORE_LITE_C_OPS_ROUND_H_ diff --git a/mindspore/lite/c_ops/rsqrt.h b/mindspore/lite/src/ops/rsqrt.h similarity index 72% rename from mindspore/lite/c_ops/rsqrt.h rename to mindspore/lite/src/ops/rsqrt.h index f108e2db86..6f27497e93 100644 --- a/mindspore/lite/c_ops/rsqrt.h +++ b/mindspore/lite/src/ops/rsqrt.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "c_ops/arithmetic_self.h" +#include "src/ops/arithmetic_self.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,14 +29,12 @@ #define LITE_MINDSPORE_LITE_C_OPS_RSQRT_H_ namespace mindspore { +namespace lite { class Rsqrt : public ArithmeticSelf { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Rsqrt(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} -#else - explicit Rsqrt(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} -#endif + explicit Rsqrt(OriginPrimitive *primitive) : ArithmeticSelf(primitive) {} }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_RSQRT_H_ diff --git a/mindspore/lite/c_ops/scale.cc b/mindspore/lite/src/ops/scale.cc similarity index 93% rename from mindspore/lite/c_ops/scale.cc rename to mindspore/lite/src/ops/scale.cc index 4b5e82c741..13cefbe69f 100644 --- a/mindspore/lite/c_ops/scale.cc +++ b/mindspore/lite/src/ops/scale.cc @@ -14,9 +14,10 @@ * limitations under the License. 
*/ -#include "c_ops/scale.h" +#include "src/ops/scale.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE int Scale::GetAxis() const { return this->primitive->value.AsScale()->axis; } @@ -28,4 +29,5 @@ int Scale::GetAxis() const { return this->primitive->value_as_Scale()->axis(); } void Scale::SetAxis(int axis) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/scale.h b/mindspore/lite/src/ops/scale.h similarity index 82% rename from mindspore/lite/c_ops/scale.h rename to mindspore/lite/src/ops/scale.h index 6c6daff345..2940cb4700 100644 --- a/mindspore/lite/c_ops/scale.h +++ b/mindspore/lite/src/ops/scale.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,16 +29,15 @@ #define LITE_MINDSPORE_LITE_C_OPS_SCALE_H_ namespace mindspore { +namespace lite { class Scale : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Scale(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Scale(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Scale(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int GetAxis() const; void SetAxis(int axis); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_SCALE_H_ diff --git a/mindspore/lite/src/ops/scatter_nd.cc b/mindspore/lite/src/ops/scatter_nd.cc index 5384edbf91..0fa21c636a 100644 --- a/mindspore/lite/src/ops/scatter_nd.cc +++ b/mindspore/lite/src/ops/scatter_nd.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,12 +14,11 @@ * limitations under the License. 
*/ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/scatter_nd.h" + +namespace mindspore { +namespace lite { -namespace mindspore::lite { namespace { constexpr int kScatterNDInputNum = 3; constexpr int kScatterNDOutputNum = 1; @@ -27,7 +26,6 @@ constexpr int kScatterShapeIndex = 0; constexpr int kScatterIndicesIndex = 1; constexpr int kScatterUpdateIndex = 2; } // namespace - int ScatterND::InferShape(std::vector inputs_, std::vector outputs_) { if (inputs_.size() != kScatterNDInputNum) { MS_LOG(ERROR) << "inputs number is not equal to " << kScatterNDInputNum; @@ -58,7 +56,7 @@ int ScatterND::InferShape(std::vector inputs_, std::vectorset_shape(out_shape); output->set_data_type(update->data_type()); output->SetFormat(update->GetFormat()); - return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/scatter_nd.h b/mindspore/lite/src/ops/scatter_nd.h similarity index 74% rename from mindspore/lite/c_ops/scatter_nd.h rename to mindspore/lite/src/ops/scatter_nd.h index 3ce4b86dd6..cd5eef9b9f 100644 --- a/mindspore/lite/c_ops/scatter_nd.h +++ b/mindspore/lite/src/ops/scatter_nd.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,15 +29,14 @@ #define LITE_MINDSPORE_LITE_C_OPS_SCATTER_ND_H_ namespace mindspore { +namespace lite { class ScatterND : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit ScatterND(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit ScatterND(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit ScatterND(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_SCATTER_ND_H_ diff --git a/mindspore/lite/src/ops/shape.cc b/mindspore/lite/src/ops/shape.cc index e5aef734c6..20e72f08e0 100644 --- a/mindspore/lite/src/ops/shape.cc +++ b/mindspore/lite/src/ops/shape.cc @@ -14,16 +14,17 @@ * limitations under the License. 
*/ -#include "src/ops/ops.h" +#include "src/ops/shape.h" #include "include/errorcode.h" #include "utils/log_adapter.h" #include "src/ir/tensor.h" -namespace mindspore::lite { +namespace mindspore { +namespace lite { + namespace { constexpr int kShapeInputNum = 1; constexpr int kShapeOutputNum = 1; - } // namespace int Shape::InferShape(std::vector inputs_, std::vector outputs_) { if (inputs_.size() != kShapeInputNum) { @@ -34,7 +35,6 @@ int Shape::InferShape(std::vector inputs_, std::vectorset_data_type(kNumberTypeInt32); @@ -42,20 +42,17 @@ int Shape::InferShape(std::vector inputs_, std::vector out_shape; out_shape.push_back(static_cast(in_tensor->shape().size())); - auto ret_shape = out_tensor->set_shape(out_shape); if (ret_shape != 1 || size_t(out_tensor->shape()[0]) != in_tensor->shape().size()) { MS_LOG(ERROR) << "Set shape fails."; return RET_ERROR; } - return RET_OK; } -} // namespace mindspore::lite - +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/shape.h b/mindspore/lite/src/ops/shape.h similarity index 74% rename from mindspore/lite/c_ops/shape.h rename to mindspore/lite/src/ops/shape.h index 06ab67591d..f96e8cb7ea 100644 --- a/mindspore/lite/c_ops/shape.h +++ b/mindspore/lite/src/ops/shape.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,15 +29,14 @@ #define LITE_MINDSPORE_LITE_C_OPS_SHAPE_H_ namespace mindspore { +namespace lite { class Shape : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Shape(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Shape(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Shape(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_SHAPE_H_ diff --git a/mindspore/lite/c_ops/sin.h b/mindspore/lite/src/ops/sin.h similarity index 72% rename from mindspore/lite/c_ops/sin.h rename to mindspore/lite/src/ops/sin.h index ba93a69045..d9753f7871 100644 --- a/mindspore/lite/c_ops/sin.h +++ b/mindspore/lite/src/ops/sin.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "c_ops/arithmetic_self.h" +#include "src/ops/arithmetic_self.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,14 +29,12 @@ #define LITE_MINDSPORE_LITE_C_OPS_SIN_H_ namespace mindspore { +namespace lite { class Sin : public ArithmeticSelf { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Sin(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} -#else - explicit Sin(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} -#endif + explicit Sin(OriginPrimitive *primitive) : ArithmeticSelf(primitive) {} }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_SIN_H_ diff --git a/mindspore/lite/src/ops/slice.cc b/mindspore/lite/src/ops/slice.cc index f2c4be8921..3190bb57dd 100644 --- a/mindspore/lite/src/ops/slice.cc +++ b/mindspore/lite/src/ops/slice.cc @@ -14,18 +14,44 @@ * limitations under the License. 
*/ -#include "src/ops/ops.h" +#include "src/ops/slice.h" #include "include/errorcode.h" #include "utils/log_adapter.h" #include "src/ir/tensor.h" -namespace mindspore::lite { +namespace mindspore { +namespace lite { namespace { constexpr int kSliceInputNum = 1; constexpr int kSliceOutputNum = 1; } // namespace +#ifdef PRIMITIVE_WRITEABLE +int SliceOp::GetFormat() const { return this->primitive->value.AsSlice()->format; } +std::vector SliceOp::GetBegin() const { return this->primitive->value.AsSlice()->begin; } +std::vector SliceOp::GetSize() const { return this->primitive->value.AsSlice()->size; } -int Slice::InferShape(std::vector inputs, std::vector outputs) { +void SliceOp::SetFormat(int format) { this->primitive->value.AsSlice()->format = format; } +void SliceOp::SetBegin(const std::vector &begin) { this->primitive->value.AsSlice()->begin = begin; } +void SliceOp::SetSize(const std::vector &size) { this->primitive->value.AsSlice()->size = size; } + +#else + +int SliceOp::GetFormat() const { return this->primitive->value_as_Slice()->format(); } +std::vector SliceOp::GetBegin() const { + auto fb_vector = this->primitive->value_as_Slice()->begin(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} +std::vector SliceOp::GetSize() const { + auto fb_vector = this->primitive->value_as_Slice()->size(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} + +void SliceOp::SetFormat(int format) {} +void SliceOp::SetBegin(const std::vector &begin) {} +void SliceOp::SetSize(const std::vector &size) {} +#endif + +int SliceOp::InferShape(std::vector inputs, std::vector outputs) { MS_ASSERT(this->primitive != nullptr); if (inputs.size() != kSliceInputNum || outputs.size() != kSliceOutputNum) { MS_LOG(ERROR) << "input size:" << inputs.size() << ",output size:" << outputs.size(); @@ -38,9 +64,8 @@ int Slice::InferShape(std::vector inputs, std::vectorshape(); - auto slice_prim = this->primitive->value_as_Slice(); - std::vector 
slice_begin(slice_prim->begin()->begin(), slice_prim->begin()->end()); - std::vector slice_size(slice_prim->size()->begin(), slice_prim->size()->end()); + std::vector slice_begin(GetBegin().begin(), GetBegin().end()); + std::vector slice_size(GetSize().begin(), GetSize().end()); std::vector output_shape(input_shape.size()); for (int i = 0; i < input_shape.size(); ++i) { if (slice_size[i] < 0 && slice_size[i] != -1) { @@ -68,4 +93,5 @@ int Slice::InferShape(std::vector inputs, std::vectorset_shape(output_shape); return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/slice.h b/mindspore/lite/src/ops/slice.h similarity index 85% rename from mindspore/lite/c_ops/slice.h rename to mindspore/lite/src/ops/slice.h index 7ac75f8d12..2710a321ba 100644 --- a/mindspore/lite/c_ops/slice.h +++ b/mindspore/lite/src/ops/slice.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_SLICE_H_ namespace mindspore { +namespace lite { class SliceOp : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit SliceOp(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit SliceOp(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit SliceOp(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetFormat() const; std::vector GetBegin() const; @@ -44,6 +42,6 @@ class SliceOp : public PrimitiveC { void SetBegin(const std::vector &begin); void SetSize(const std::vector &size); }; +} // namespace lite } // namespace mindspore - #endif // LITE_MINDSPORE_LITE_C_OPS_SLICE_H_ diff --git a/mindspore/lite/src/ops/softmax.cc b/mindspore/lite/src/ops/softmax.cc index 
3772b1bb64..338f0a5375 100644 --- a/mindspore/lite/src/ops/softmax.cc +++ b/mindspore/lite/src/ops/softmax.cc @@ -14,12 +14,22 @@ * limitations under the License. */ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/softmax.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +int SoftMax::GetAxis() const { return this->primitive->value.AsSoftMax()->axis; } + +void SoftMax::SetAxis(int axis) { this->primitive->value.AsSoftMax()->axis = axis; } + +#else + +int SoftMax::GetAxis() const { return this->primitive->value_as_SoftMax()->axis(); } + +void SoftMax::SetAxis(int axis) {} +#endif -namespace mindspore::lite { int SoftMax::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); auto input = inputs_.front(); @@ -32,7 +42,7 @@ int SoftMax::InferShape(std::vector inputs_, std::vectorset_shape(input->shape()); - return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/softmax.h b/mindspore/lite/src/ops/softmax.h similarity index 83% rename from mindspore/lite/c_ops/softmax.h rename to mindspore/lite/src/ops/softmax.h index e1bb81838f..96b439edf3 100644 --- a/mindspore/lite/c_ops/softmax.h +++ b/mindspore/lite/src/ops/softmax.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,17 +29,16 @@ #define LITE_MINDSPORE_LITE_C_OPS_SOFT_MAX_H_ namespace mindspore { +namespace lite { class SoftMax : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit SoftMax(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit SoftMax(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit SoftMax(OriginPrimitive *primitive) : 
PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetAxis() const; void SetAxis(int axis); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_SOFT_MAX_H_ diff --git a/mindspore/lite/c_ops/softmax_cross_entropy.cc b/mindspore/lite/src/ops/softmax_cross_entropy.cc similarity index 93% rename from mindspore/lite/c_ops/softmax_cross_entropy.cc rename to mindspore/lite/src/ops/softmax_cross_entropy.cc index 53827142f4..9e647e5c67 100644 --- a/mindspore/lite/c_ops/softmax_cross_entropy.cc +++ b/mindspore/lite/src/ops/softmax_cross_entropy.cc @@ -14,9 +14,10 @@ * limitations under the License. */ -#include "c_ops/softmax_cross_entropy.h" +#include "src/ops/softmax_cross_entropy.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE std::vector SoftmaxCrossEntropy::GetAxis() const { return this->primitive->value.AsSoftmaxCrossEntropy()->axis; } @@ -33,4 +34,5 @@ std::vector SoftmaxCrossEntropy::GetAxis() const { void SoftmaxCrossEntropy::SetAxis(const std::vector &axis) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/softmax_cross_entropy.h b/mindspore/lite/src/ops/softmax_cross_entropy.h similarity index 81% rename from mindspore/lite/c_ops/softmax_cross_entropy.h rename to mindspore/lite/src/ops/softmax_cross_entropy.h index 327591692f..d3816cdd88 100644 --- a/mindspore/lite/c_ops/softmax_cross_entropy.h +++ b/mindspore/lite/src/ops/softmax_cross_entropy.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,16 +29,15 @@ #define LITE_MINDSPORE_LITE_C_OPS_SOFTMAX_CROSS_ENTROPY_H_ namespace mindspore { +namespace lite { class SoftmaxCrossEntropy : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit SoftmaxCrossEntropy(schema::PrimitiveT *primitive) 
: PrimitiveC(primitive) {} -#else - explicit SoftmaxCrossEntropy(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit SoftmaxCrossEntropy(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + std::vector GetAxis() const; void SetAxis(const std::vector &axis); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_SOFTMAX_CROSS_ENTROPY_H_ diff --git a/mindspore/lite/src/ops/space_to_batch.cc b/mindspore/lite/src/ops/space_to_batch.cc index c3cecc75d9..08d411f4c6 100644 --- a/mindspore/lite/src/ops/space_to_batch.cc +++ b/mindspore/lite/src/ops/space_to_batch.cc @@ -14,13 +14,36 @@ * limitations under the License. */ -#include -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/space_to_batch.h" +#include "src/common/common.h" -namespace mindspore::lite { +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +std::vector SpaceToBatch::GetBlockShape() const { return this->primitive->value.AsSpaceToBatch()->blockShape; } +std::vector SpaceToBatch::GetPaddings() const { return this->primitive->value.AsSpaceToBatch()->paddings; } + +void SpaceToBatch::SetBlockShape(const std::vector &block_shape) { + this->primitive->value.AsSpaceToBatch()->blockShape = block_shape; +} +void SpaceToBatch::SetPaddings(const std::vector &paddings) { + this->primitive->value.AsSpaceToBatch()->paddings = paddings; +} + +#else + +std::vector SpaceToBatch::GetBlockShape() const { + auto fb_vector = this->primitive->value_as_SpaceToBatch()->blockShape(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} +std::vector SpaceToBatch::GetPaddings() const { + auto fb_vector = this->primitive->value_as_SpaceToBatch()->paddings(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} + +void SpaceToBatch::SetBlockShape(const std::vector &block_shape) {} +void SpaceToBatch::SetPaddings(const std::vector &paddings) {} +#endif 
namespace { constexpr int kSpaceToBatchNDOutputNum = 1; constexpr int kSpaceToBatchNDInputNum = 1; @@ -32,39 +55,38 @@ int SpaceToBatch::InferShape(std::vector inputs, std::ve MS_ASSERT(this->primitive != nullptr); if (outputs.size() != kSpaceToBatchNDOutputNum || inputs.size() != kSpaceToBatchNDInputNum) { MS_LOG(ERROR) << "Invalid output/input size! output size: " << outputs.size() << ",input size: " << inputs.size(); - return RET_PARAM_INVALID; + return 1; } auto input = inputs.at(0); if (input->GetFormat() != schema::Format_NHWC) { MS_LOG(ERROR) << "space_to_batch only support NHWC now!"; - return RET_FORMAT_ERR; + return 1; } auto input_shape = input->shape(); if (input_shape.size() != kDimension_4d) { MS_LOG(ERROR) << "input shape dimension size should == " << kDimension_4d; - return RET_PARAM_INVALID; + return 1; } - auto prim = this->primitive->value_as_SpaceToBatch(); - if (prim->blockShape()->size() != kBlockSizesSize) { + if (GetBlockShape().size() != kBlockSizesSize) { MS_LOG(ERROR) << "Block shape size should be " << kBlockSizesSize; - return RET_PARAM_INVALID; + return 1; } - if (prim->paddings()->size() != kPaddingsSize) { + if (GetPaddings().size() != kPaddingsSize) { MS_LOG(ERROR) << "Crops size should be " << kPaddingsSize; - return RET_PARAM_INVALID; + return 1; } - for (auto iter = prim->blockShape()->begin(); iter != prim->blockShape()->end(); ++iter) { - block_sizes_.emplace_back(*iter); + for (int &iter : GetBlockShape()) { + block_sizes_.emplace_back(iter); } in_shape_.clear(); padded_in_shape_.clear(); paddings_.clear(); - in_shape_.emplace_back(input_shape.at(kNHWC_n_index)); - padded_in_shape_.emplace_back(input_shape.at(kNHWC_n_index)); + in_shape_.emplace_back(input_shape.at(NHWC_N)); + padded_in_shape_.emplace_back(input_shape.at(NHWC_N)); for (int i = 0; i < kBlockSizesSize; i++) { in_shape_.emplace_back(input_shape.at(i + 1)); padded_in_shape_.emplace_back(input_shape.at(i + 1) + (paddings_.at(2 * i) + paddings_.at(2 * i + 1))); @@ 
-72,20 +94,20 @@ int SpaceToBatch::InferShape(std::vector inputs, std::ve paddings_.emplace_back(paddings_.at(2 * i + 1)); if (paddings_.back() % block_sizes_.at(i)) { MS_LOG(ERROR) << "Padded shape does not divide block size " << block_sizes_.at(i); - return RET_PARAM_INVALID; + return 1; } } - in_shape_.emplace_back(input_shape.at(kNHWC_c_index)); - padded_in_shape_.emplace_back(input_shape.at(kNHWC_c_index)); + in_shape_.emplace_back(input_shape.at(NHWC_C)); + padded_in_shape_.emplace_back(input_shape.at(NHWC_C)); std::vector output_shape(input_shape.size()); - output_shape[kNHWC_n_index] = - input_shape[kNHWC_n_index] * (block_sizes_[kNHWC_n_index] * block_sizes_[kNHWC_h_index]); - output_shape[kNHWC_h_index] = input_shape[kNHWC_h_index] / block_sizes_[kNHWC_n_index]; - output_shape[kNHWC_w_index] = input_shape[kNHWC_w_index] / block_sizes_[kNHWC_h_index]; - output_shape[kNHWC_c_index] = input_shape[kNHWC_c_index]; + output_shape[NHWC_N] = input_shape[NHWC_N] * (block_sizes_[NHWC_N] * block_sizes_[NHWC_H]); + output_shape[NHWC_H] = input_shape[NHWC_H] / block_sizes_[NHWC_N]; + output_shape[NHWC_W] = input_shape[NHWC_W] / block_sizes_[NHWC_H]; + output_shape[NHWC_C] = input_shape[NHWC_C]; outputs[0]->set_shape(output_shape); outputs[0]->set_data_type(input->data_type()); - return RET_OK; + return 0; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/space_to_batch.h b/mindspore/lite/src/ops/space_to_batch.h similarity index 87% rename from mindspore/lite/c_ops/space_to_batch.h rename to mindspore/lite/src/ops/space_to_batch.h index 0f5d7ee45d..4402dc3ddd 100644 --- a/mindspore/lite/c_ops/space_to_batch.h +++ b/mindspore/lite/src/ops/space_to_batch.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define 
LITE_MINDSPORE_LITE_C_OPS_SPACE_TO_BATCH_H_ namespace mindspore { +namespace lite { class SpaceToBatch : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit SpaceToBatch(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit SpaceToBatch(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit SpaceToBatch(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; std::vector GetBlockShape() const; std::vector GetPaddings() const; @@ -53,6 +51,7 @@ class SpaceToBatch : public PrimitiveC { std::vector in_shape_; std::vector padded_in_shape_; }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_SPACE_TO_BATCH_H_ diff --git a/mindspore/lite/c_ops/space_to_batch_nd.cc b/mindspore/lite/src/ops/space_to_batch_nd.cc similarity index 95% rename from mindspore/lite/c_ops/space_to_batch_nd.cc rename to mindspore/lite/src/ops/space_to_batch_nd.cc index 02e4231786..2f6e14ddde 100644 --- a/mindspore/lite/c_ops/space_to_batch_nd.cc +++ b/mindspore/lite/src/ops/space_to_batch_nd.cc @@ -14,9 +14,10 @@ * limitations under the License. 
*/ -#include "c_ops/space_to_batch_nd.h" +#include "src/ops/space_to_batch_nd.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE std::vector SpaceToBatchND::GetBlockShape() const { return this->primitive->value.AsSpaceToBatchND()->blockShape; } std::vector SpaceToBatchND::GetPaddings() const { return this->primitive->value.AsSpaceToBatchND()->paddings; } @@ -42,4 +43,5 @@ std::vector SpaceToBatchND::GetPaddings() const { void SpaceToBatchND::SetBlockShape(const std::vector &block_shape) {} void SpaceToBatchND::SetPaddings(const std::vector &paddings) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/space_to_batch_nd.h b/mindspore/lite/src/ops/space_to_batch_nd.h similarity index 83% rename from mindspore/lite/c_ops/space_to_batch_nd.h rename to mindspore/lite/src/ops/space_to_batch_nd.h index e99ea2954e..c02a20490a 100644 --- a/mindspore/lite/c_ops/space_to_batch_nd.h +++ b/mindspore/lite/src/ops/space_to_batch_nd.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,18 +29,17 @@ #define LITE_MINDSPORE_LITE_C_OPS_SPACE_TO_BATCH_N_D_H_ namespace mindspore { +namespace lite { class SpaceToBatchND : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit SpaceToBatchND(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit SpaceToBatchND(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit SpaceToBatchND(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + std::vector GetBlockShape() const; std::vector GetPaddings() const; void SetBlockShape(const std::vector &block_shape); void SetPaddings(const std::vector &paddings); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_SPACE_TO_BATCH_N_D_H_ diff --git a/mindspore/lite/src/ops/space_to_depth.cc 
b/mindspore/lite/src/ops/space_to_depth.cc index 647c64c909..e0df66b25f 100644 --- a/mindspore/lite/src/ops/space_to_depth.cc +++ b/mindspore/lite/src/ops/space_to_depth.cc @@ -14,49 +14,63 @@ * limitations under the License. */ -#include -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/space_to_depth.h" +#include "src/common/common.h" -namespace mindspore::lite { +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +int SpaceToDepth::GetBlockSize() const { return this->primitive->value.AsSpaceToDepth()->blockSize; } +int SpaceToDepth::GetFormat() const { return this->primitive->value.AsSpaceToDepth()->format; } + +void SpaceToDepth::SetBlockSize(int block_size) { this->primitive->value.AsSpaceToDepth()->blockSize = block_size; } +void SpaceToDepth::SetFormat(int format) { this->primitive->value.AsSpaceToDepth()->format = format; } + +#else + +int SpaceToDepth::GetBlockSize() const { return this->primitive->value_as_SpaceToDepth()->blockSize(); } +int SpaceToDepth::GetFormat() const { return this->primitive->value_as_SpaceToDepth()->format(); } + +void SpaceToDepth::SetBlockSize(int block_size) {} +void SpaceToDepth::SetFormat(int format) {} +#endif namespace { constexpr int kSpaceToDepthOutputNum = 1; constexpr int kSpaceToDepthInputNum = 1; -} +} // namespace int SpaceToDepth::InferShape(std::vector inputs, std::vector outputs) { MS_ASSERT(this->primitive != nullptr); if (outputs.size() != kSpaceToDepthOutputNum || inputs.size() != kSpaceToDepthInputNum) { MS_LOG(ERROR) << "Invalid output/input size! 
output size: " << outputs.size() << ",input size: " << inputs.size(); - return RET_PARAM_INVALID; + return 1; } auto input = inputs.at(0); if (input->GetFormat() != schema::Format_NHWC) { MS_LOG(ERROR) << "space_to_depth only support NHWC now!"; - return RET_FORMAT_ERR; + return 1; } auto input_shape = input->shape(); if (input_shape.size() != kDimension_4d) { MS_LOG(ERROR) << "input shape dimension size should == " << kDimension_4d; - return RET_PARAM_INVALID; + return 1; } - auto prim = this->primitive->value_as_SpaceToDepth(); - int32_t block_size = prim->blockSize(); - if (input_shape[kNHWC_c_index] % (block_size * block_size) != 0 || input_shape[kNHWC_c_index] == 0) { - MS_LOG(ERROR) << "input dimension c size " << input_shape[kNHWC_c_index] << " should be mulitple of block_size(" + + int32_t block_size = GetBlockSize(); + if (input_shape[NHWC_C] % (block_size * block_size) != 0 || input_shape[NHWC_C] == 0) { + MS_LOG(ERROR) << "input dimension c size " << input_shape[NHWC_C] << " should be mulitple of block_size(" << block_size << ") * block_size)!"; - return RET_PARAM_INVALID; + return 1; } std::vector output_shape(input_shape.size()); - output_shape[kNHWC_n_index] = input_shape[kNHWC_n_index]; - output_shape[kNHWC_h_index] = input_shape[kNHWC_h_index] / block_size; - output_shape[kNHWC_w_index] = input_shape[kNHWC_w_index] / block_size; - output_shape[kNHWC_c_index] = input_shape[kNHWC_c_index] * (block_size * block_size); + output_shape[NHWC_N] = input_shape[NHWC_N]; + output_shape[NHWC_H] = input_shape[NHWC_H] / block_size; + output_shape[NHWC_W] = input_shape[NHWC_W] / block_size; + output_shape[NHWC_C] = input_shape[NHWC_C] * (block_size * block_size); outputs[0]->set_shape(output_shape); outputs[0]->set_data_type(input->data_type()); - return RET_OK; + return 0; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/space_to_depth.h b/mindspore/lite/src/ops/space_to_depth.h similarity index 
84% rename from mindspore/lite/c_ops/space_to_depth.h rename to mindspore/lite/src/ops/space_to_depth.h index eff8c58ce8..eddc3e4e2c 100644 --- a/mindspore/lite/c_ops/space_to_depth.h +++ b/mindspore/lite/src/ops/space_to_depth.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,19 +29,18 @@ #define LITE_MINDSPORE_LITE_C_OPS_SPACE_TO_DEPTH_H_ namespace mindspore { +namespace lite { class SpaceToDepth : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit SpaceToDepth(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit SpaceToDepth(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit SpaceToDepth(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetBlockSize() const; int GetFormat() const; void SetBlockSize(int block_size); void SetFormat(int format); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_SPACE_TO_DEPTH_H_ diff --git a/mindspore/lite/c_ops/sparse_to_dense.cc b/mindspore/lite/src/ops/sparse_to_dense.cc similarity index 97% rename from mindspore/lite/c_ops/sparse_to_dense.cc rename to mindspore/lite/src/ops/sparse_to_dense.cc index b26c63f584..8b5aab94f1 100644 --- a/mindspore/lite/c_ops/sparse_to_dense.cc +++ b/mindspore/lite/src/ops/sparse_to_dense.cc @@ -14,9 +14,10 @@ * limitations under the License. 
*/ -#include "c_ops/sparse_to_dense.h" +#include "src/ops/sparse_to_dense.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE std::vector SparseToDense::GetOutputShape() const { return this->primitive->value.AsSparseToDense()->outputShape; } std::vector SparseToDense::GetSparseValue() const { return this->primitive->value.AsSparseToDense()->sparseValue; } @@ -59,4 +60,5 @@ void SparseToDense::SetSparseValue(const std::vector &sparse_value) {} void SparseToDense::SetDefaultValue(const std::vector &default_value) {} void SparseToDense::SetValidateIndices(bool validate_indices) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/sparse_to_dense.h b/mindspore/lite/src/ops/sparse_to_dense.h similarity index 85% rename from mindspore/lite/c_ops/sparse_to_dense.h rename to mindspore/lite/src/ops/sparse_to_dense.h index e3705cf73c..07b2094365 100644 --- a/mindspore/lite/c_ops/sparse_to_dense.h +++ b/mindspore/lite/src/ops/sparse_to_dense.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_SPARSE_TO_DENSE_H_ namespace mindspore { +namespace lite { class SparseToDense : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit SparseToDense(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit SparseToDense(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit SparseToDense(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + std::vector GetOutputShape() const; std::vector GetSparseValue() const; std::vector GetDefaultValue() const; @@ -45,6 +43,7 @@ class SparseToDense : public PrimitiveC { void SetDefaultValue(const std::vector &default_value); void SetValidateIndices(bool validate_indices); }; +} // namespace lite } // namespace mindspore 
#endif // LITE_MINDSPORE_LITE_C_OPS_SPARSE_TO_DENSE_H_ diff --git a/mindspore/lite/src/ops/split.cc b/mindspore/lite/src/ops/split.cc index f8b175fa96..8d3afd8aa7 100644 --- a/mindspore/lite/src/ops/split.cc +++ b/mindspore/lite/src/ops/split.cc @@ -14,12 +14,35 @@ * limitations under the License. */ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/split.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +int Split::GetNumberSplit() const { return this->primitive->value.AsSplit()->numberSplit; } +std::vector Split::GetSizeSplits() const { return this->primitive->value.AsSplit()->sizeSplits; } +int Split::GetSplitDim() const { return this->primitive->value.AsSplit()->splitDim; } + +void Split::SetNumberSplit(int number_split) { this->primitive->value.AsSplit()->numberSplit = number_split; } +void Split::SetSizeSplits(const std::vector &size_splits) { + this->primitive->value.AsSplit()->sizeSplits = size_splits; +} +void Split::SetSplitDim(int split_dim) { this->primitive->value.AsSplit()->splitDim = split_dim; } + +#else + +int Split::GetNumberSplit() const { return this->primitive->value_as_Split()->numberSplit(); } +std::vector Split::GetSizeSplits() const { + auto fb_vector = this->primitive->value_as_Split()->sizeSplits(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} +int Split::GetSplitDim() const { return this->primitive->value_as_Split()->splitDim(); } + +void Split::SetNumberSplit(int number_split) {} +void Split::SetSizeSplits(const std::vector &size_splits) {} +void Split::SetSplitDim(int split_dim) {} +#endif -namespace mindspore::lite { namespace { constexpr int kSplitInputNum = 1; } // namespace @@ -47,7 +70,6 @@ int Split::InferShape(std::vector inputs_, std::vector input_shape = input->shape(); std::vector size_split; size_split.insert(size_split.begin(), spilt_prim->sizeSplits()->begin(), spilt_prim->sizeSplits()->end()); - 
for (int i = 0; i < number_split; ++i) { std::vector output_shape; output_shape.insert(output_shape.begin(), input_shape.begin(), input_shape.end()); @@ -59,4 +81,5 @@ int Split::InferShape(std::vector inputs_, std::vector #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_SPLIT_H_ namespace mindspore { +namespace lite { class Split : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Split(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Split(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Split(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetNumberSplit() const; std::vector GetSizeSplits() const; @@ -44,6 +42,7 @@ class Split : public PrimitiveC { void SetSizeSplits(const std::vector &size_splits); void SetSplitDim(int split_dim); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_SPLIT_H_ diff --git a/mindspore/lite/c_ops/sqrt.h b/mindspore/lite/src/ops/sqrt.h similarity index 72% rename from mindspore/lite/c_ops/sqrt.h rename to mindspore/lite/src/ops/sqrt.h index ffb1f11e0e..2d861251cb 100644 --- a/mindspore/lite/c_ops/sqrt.h +++ b/mindspore/lite/src/ops/sqrt.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "c_ops/arithmetic_self.h" +#include "src/ops/arithmetic_self.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,14 +29,12 @@ #define LITE_MINDSPORE_LITE_C_OPS_SQRT_H_ namespace mindspore { +namespace lite { class Sqrt : public ArithmeticSelf { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Sqrt(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} -#else - explicit Sqrt(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} -#endif + explicit Sqrt(OriginPrimitive *primitive) : ArithmeticSelf(primitive) {} }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_SQRT_H_ diff --git a/mindspore/lite/c_ops/square.h b/mindspore/lite/src/ops/square.h similarity index 72% rename from mindspore/lite/c_ops/square.h rename to mindspore/lite/src/ops/square.h index 0e2953295b..eb847039c0 100644 --- a/mindspore/lite/c_ops/square.h +++ b/mindspore/lite/src/ops/square.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "c_ops/arithmetic_self.h" +#include "src/ops/arithmetic_self.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,14 +29,12 @@ #define LITE_MINDSPORE_LITE_C_OPS_SQUARE_H_ namespace mindspore { +namespace lite { class Square : public ArithmeticSelf { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Square(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} -#else - explicit Square(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} -#endif + explicit Square(OriginPrimitive *primitive) : ArithmeticSelf(primitive) {} }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_SQUARE_H_ diff --git a/mindspore/lite/c_ops/squared_difference.h b/mindspore/lite/src/ops/squared_difference.h similarity index 73% rename from mindspore/lite/c_ops/squared_difference.h rename to mindspore/lite/src/ops/squared_difference.h index 6c2198e63d..3f4768fc11 100644 --- a/mindspore/lite/c_ops/squared_difference.h +++ b/mindspore/lite/src/ops/squared_difference.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "c_ops/arithmetic.h" +#include "src/ops/arithmetic.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,14 +29,12 @@ #define LITE_MINDSPORE_LITE_C_OPS_SQUARED_DIFFERENCE_H_ namespace mindspore { +namespace lite { class SquaredDifference : public Arithmetic { public: -#ifdef PRIMITIVE_WRITEABLE - explicit SquaredDifference(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} -#else - explicit SquaredDifference(schema::Primitive *primitive) : Arithmetic(primitive) {} -#endif + explicit SquaredDifference(OriginPrimitive *primitive) : Arithmetic(primitive) {} }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_SQUARED_DIFFERENCE_H_ diff --git a/mindspore/lite/src/ops/squeeze.cc b/mindspore/lite/src/ops/squeeze.cc index 4446f3ead5..efa625aea0 100644 --- a/mindspore/lite/src/ops/squeeze.cc +++ b/mindspore/lite/src/ops/squeeze.cc @@ -14,12 +14,25 @@ * limitations under the License. 
*/ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/squeeze.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +std::vector Squeeze::GetAxis() const { return this->primitive->value.AsSqueeze()->axis; } + +void Squeeze::SetAxis(const std::vector &axis) { this->primitive->value.AsSqueeze()->axis = axis; } + +#else + +std::vector Squeeze::GetAxis() const { + auto fb_vector = this->primitive->value_as_Squeeze()->axis(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} + +void Squeeze::SetAxis(const std::vector &axis) {} +#endif -namespace mindspore::lite { namespace { constexpr int kSqueezeInputNum = 1; constexpr int kSqueezeOutputNum = 1; @@ -37,7 +50,6 @@ int Squeeze::InferShape(std::vector inputs_, std::vectorshape(); std::vector out_shape; - // todo: getAxis auto squeeze_prim = this->primitive->value_as_Squeeze(); MS_EXCEPTION_IF_NULL(squeeze_prim); @@ -46,7 +58,6 @@ int Squeeze::InferShape(std::vector inputs_, std::vectorbegin(); iter != axis->end(); iter++) { axes_.push_back(*iter); } - if (axes_.size() == 0) { for (int i = 0; i < in_shape.size(); i++) { if (in_shape[i] != 1) { @@ -65,11 +76,10 @@ int Squeeze::InferShape(std::vector inputs_, std::vectorset_shape(out_shape); outputs_.front()->set_data_type(in_tensor->data_type()); outputs_.front()->SetFormat(in_tensor->GetFormat()); - return 0; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/squeeze.h b/mindspore/lite/src/ops/squeeze.h similarity index 83% rename from mindspore/lite/c_ops/squeeze.h rename to mindspore/lite/src/ops/squeeze.h index f2397d94d4..c9b576f876 100644 --- a/mindspore/lite/c_ops/squeeze.h +++ b/mindspore/lite/src/ops/squeeze.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE 
#include "schema/inner/model_generated.h" #else @@ -29,17 +29,16 @@ #define LITE_MINDSPORE_LITE_C_OPS_SQUEEZE_H_ namespace mindspore { +namespace lite { class Squeeze : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Squeeze(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Squeeze(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Squeeze(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; std::vector GetAxis() const; void SetAxis(const std::vector &axis); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_SQUEEZE_H_ diff --git a/mindspore/lite/src/ops/stack.cc b/mindspore/lite/src/ops/stack.cc index 4b6ac93249..985aaa79be 100644 --- a/mindspore/lite/src/ops/stack.cc +++ b/mindspore/lite/src/ops/stack.cc @@ -14,17 +14,37 @@ * limitations under the License. */ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/stack.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +int Stack::GetAxis() const { return this->primitive->value.AsStack()->axis; } +int Stack::GetN() const { return this->primitive->value.AsStack()->n; } +std::vector Stack::GetIsScale() const { return this->primitive->value.AsStack()->isScale; } + +void Stack::SetAxis(int axis) { this->primitive->value.AsStack()->axis = axis; } +void Stack::SetN(int n) { this->primitive->value.AsStack()->n = n; } +void Stack::SetIsScale(const std::vector &is_scale) { this->primitive->value.AsStack()->isScale = is_scale; } + +#else + +int Stack::GetAxis() const { return this->primitive->value_as_Stack()->axis(); } +int Stack::GetN() const { return this->primitive->value_as_Stack()->n(); } +std::vector Stack::GetIsScale() const { + auto fb_vector = this->primitive->value_as_Stack()->isScale(); + return std::vector(fb_vector->begin(), 
fb_vector->end()); +} + +void Stack::SetAxis(int axis) {} +void Stack::SetN(int n) {} +void Stack::SetIsScale(const std::vector &is_scale) {} +#endif -namespace mindspore::lite { namespace { constexpr int kStackOutputNum = 1; constexpr int kStackMinInputNum = 2; } // namespace - int Stack::InferShape(std::vector inputs, std::vector outputs) { MS_ASSERT(this->primitive != nullptr); if (outputs.size() != kStackOutputNum) { @@ -50,7 +70,6 @@ int Stack::InferShape(std::vector inputs, std::vectorshape(); if (input_shape_tmp.size() != input_shape.size()) { MS_LOG(ERROR) << "All input shape size should be the same!"; @@ -63,12 +82,11 @@ int Stack::InferShape(std::vector inputs, std::vectorset_shape(output_shape); outputs[0]->set_data_type(input->data_type()); outputs[0]->SetFormat(input->GetFormat()); - return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/stack.h b/mindspore/lite/src/ops/stack.h similarity index 85% rename from mindspore/lite/c_ops/stack.h rename to mindspore/lite/src/ops/stack.h index 5bc06c2002..14c78a079e 100644 --- a/mindspore/lite/c_ops/stack.h +++ b/mindspore/lite/src/ops/stack.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_STACK_H_ namespace mindspore { +namespace lite { class Stack : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Stack(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Stack(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Stack(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetAxis() const; int GetN() const; @@ -44,6 +42,7 @@ class Stack : public PrimitiveC { void SetN(int n); 
void SetIsScale(const std::vector &is_scale); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_STACK_H_ diff --git a/mindspore/lite/src/ops/strided_slice.cc b/mindspore/lite/src/ops/strided_slice.cc index 6127c8a601..8722d88930 100644 --- a/mindspore/lite/src/ops/strided_slice.cc +++ b/mindspore/lite/src/ops/strided_slice.cc @@ -14,14 +14,75 @@ * limitations under the License. */ -#include -#include "include/errorcode.h" -#include "src/ops/ops.h" -#include "src/runtime/kernel/arm/nnacl/op_base.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" - -namespace mindspore::lite { +#include "src/ops/strided_slice.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +int StridedSlice::GetBeginMask() const { return this->primitive->value.AsStridedSlice()->beginMask; } +int StridedSlice::GetEndMask() const { return this->primitive->value.AsStridedSlice()->endMask; } +int StridedSlice::GetEllipsisMask() const { return this->primitive->value.AsStridedSlice()->ellipsisMask; } +int StridedSlice::GetNewAxisMask() const { return this->primitive->value.AsStridedSlice()->newAxisMask; } +int StridedSlice::GetShrinkAxisMask() const { return this->primitive->value.AsStridedSlice()->shrinkAxisMask; } +std::vector StridedSlice::GetBegin() const { return this->primitive->value.AsStridedSlice()->begin; } +std::vector StridedSlice::GetEnd() const { return this->primitive->value.AsStridedSlice()->end; } +std::vector StridedSlice::GetStride() const { return this->primitive->value.AsStridedSlice()->stride; } +std::vector StridedSlice::GetIsScale() const { return this->primitive->value.AsStridedSlice()->isScale; } + +void StridedSlice::SetBeginMask(int begin_mask) { this->primitive->value.AsStridedSlice()->beginMask = begin_mask; } +void StridedSlice::SetEndMask(int end_mask) { this->primitive->value.AsStridedSlice()->endMask = end_mask; } +void StridedSlice::SetEllipsisMask(int ellipsis_mask) { + 
this->primitive->value.AsStridedSlice()->ellipsisMask = ellipsis_mask; +} +void StridedSlice::SetNewAxisMask(int new_axis_mask) { + this->primitive->value.AsStridedSlice()->newAxisMask = new_axis_mask; +} +void StridedSlice::SetShrinkAxisMask(int shrink_axis_mask) { + this->primitive->value.AsStridedSlice()->shrinkAxisMask = shrink_axis_mask; +} +void StridedSlice::SetBegin(const std::vector &begin) { this->primitive->value.AsStridedSlice()->begin = begin; } +void StridedSlice::SetEnd(const std::vector &end) { this->primitive->value.AsStridedSlice()->end = end; } +void StridedSlice::SetStride(const std::vector &stride) { + this->primitive->value.AsStridedSlice()->stride = stride; +} +void StridedSlice::SetIsScale(const std::vector &is_scale) { + this->primitive->value.AsStridedSlice()->isScale = is_scale; +} + +#else + +int StridedSlice::GetBeginMask() const { return this->primitive->value_as_StridedSlice()->beginMask(); } +int StridedSlice::GetEndMask() const { return this->primitive->value_as_StridedSlice()->endMask(); } +int StridedSlice::GetEllipsisMask() const { return this->primitive->value_as_StridedSlice()->ellipsisMask(); } +int StridedSlice::GetNewAxisMask() const { return this->primitive->value_as_StridedSlice()->newAxisMask(); } +int StridedSlice::GetShrinkAxisMask() const { return this->primitive->value_as_StridedSlice()->shrinkAxisMask(); } +std::vector StridedSlice::GetBegin() const { + auto fb_vector = this->primitive->value_as_StridedSlice()->begin(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} +std::vector StridedSlice::GetEnd() const { + auto fb_vector = this->primitive->value_as_StridedSlice()->end(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} +std::vector StridedSlice::GetStride() const { + auto fb_vector = this->primitive->value_as_StridedSlice()->stride(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} +std::vector StridedSlice::GetIsScale() const { + auto fb_vector = 
this->primitive->value_as_StridedSlice()->isScale(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} + +void StridedSlice::SetBeginMask(int begin_mask) {} +void StridedSlice::SetEndMask(int end_mask) {} +void StridedSlice::SetEllipsisMask(int ellipsis_mask) {} +void StridedSlice::SetNewAxisMask(int new_axis_mask) {} +void StridedSlice::SetShrinkAxisMask(int shrink_axis_mask) {} +void StridedSlice::SetBegin(const std::vector &begin) {} +void StridedSlice::SetEnd(const std::vector &end) {} +void StridedSlice::SetStride(const std::vector &stride) {} +void StridedSlice::SetIsScale(const std::vector &is_scale) {} +#endif namespace { constexpr int kStridedSliceOutputNum = 1; constexpr int kStridedSliceInputNum = 1; @@ -92,7 +153,7 @@ void StridedSlice::ApplyEndMask() { } } -int StridedSlice::InferShape(std::vector inputs, std::vector outputs) { +int StridedSlice::InferShape(std::vector inputs, std::vector outputs) { MS_ASSERT(this->primitive != nullptr); if (outputs.size() != kStridedSliceOutputNum) { MS_LOG(ERROR) << "Invalid output size:" << outputs.size(); @@ -106,8 +167,7 @@ int StridedSlice::InferShape(std::vector inputs, std::vectorshape(); std::vector output_shape; - auto strided_slice_prim = this->primitive->value_as_StridedSlice(); - ndim_ = static_cast(strided_slice_prim->begin()->size()); + ndim_ = static_cast(GetBegin().size()); MS_ASSERT(ndim_ == static_cast(strided_slice_prim->end()->size())); MS_ASSERT(ndim_ == static_cast(strided_slice_prim->stride()->size())); @@ -115,9 +175,9 @@ int StridedSlice::InferShape(std::vector inputs, std::vectorbegin()))[i]); - ends_.emplace_back((*(strided_slice_prim->end()))[i]); - strides_.emplace_back((*(strided_slice_prim->stride()))[i]); + begins_.emplace_back((GetBegin())[i]); + ends_.emplace_back((GetEnd())[i]); + strides_.emplace_back((GetStride())[i]); } // set all mask to original input shape @@ -129,11 +189,11 @@ int StridedSlice::InferShape(std::vector inputs, 
std::vector(strided_slice_prim->beginMask()) & (1 << i); - ends_mask_.at(i) = static_cast(strided_slice_prim->endMask()) & (1 << i); - ellipsis_mask_.at(i) = static_cast(strided_slice_prim->ellipsisMask()) & (1 << i); - new_axis_mask_.at(i) = static_cast(strided_slice_prim->newAxisMask()) & (1 << i); - shrink_axis_mask_.at(i) = static_cast(strided_slice_prim->shrinkAxisMask()) & (1 << i); + begins_mask_.at(i) = static_cast(GetBeginMask()) & (1 << i); + ends_mask_.at(i) = static_cast(GetEndMask()) & (1 << i); + ellipsis_mask_.at(i) = static_cast(GetEllipsisMask()) & (1 << i); + new_axis_mask_.at(i) = static_cast(GetNewAxisMask()) & (1 << i); + shrink_axis_mask_.at(i) = static_cast(GetShrinkAxisMask()) & (1 << i); } ApplyNewAxisMask(); @@ -159,4 +219,5 @@ int StridedSlice::InferShape(std::vector inputs, std::vector #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,13 +29,11 @@ #define LITE_MINDSPORE_LITE_C_OPS_STRIDED_SLICE_H_ namespace mindspore { +namespace lite { class StridedSlice : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit StridedSlice(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit StridedSlice(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit StridedSlice(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetBeginMask() const; int GetEndMask() const; @@ -79,6 +77,7 @@ class StridedSlice : public PrimitiveC { std::vector new_axis_mask_; std::vector shrink_axis_mask_; }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_STRIDED_SLICE_H_ diff --git a/mindspore/lite/c_ops/sub.cc b/mindspore/lite/src/ops/sub.cc similarity index 94% rename from mindspore/lite/c_ops/sub.cc rename to mindspore/lite/src/ops/sub.cc index 
330e149cda..6ad68eb2b4 100644 --- a/mindspore/lite/c_ops/sub.cc +++ b/mindspore/lite/src/ops/sub.cc @@ -14,9 +14,10 @@ * limitations under the License. */ -#include "c_ops/sub.h" +#include "src/ops/sub.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE int Sub::GetActivationType() const { return this->primitive->value.AsSub()->activationType; } @@ -30,4 +31,5 @@ int Sub::GetActivationType() const { return this->primitive->value_as_Sub()->act void Sub::SetActivationType(int activation_type) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/sub.h b/mindspore/lite/src/ops/sub.h similarity index 81% rename from mindspore/lite/c_ops/sub.h rename to mindspore/lite/src/ops/sub.h index f8703cb60c..2724a83356 100644 --- a/mindspore/lite/c_ops/sub.h +++ b/mindspore/lite/src/ops/sub.h @@ -18,8 +18,8 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" -#include "c_ops/arithmetic.h" +#include "src/ops/primitive_c.h" +#include "src/ops/arithmetic.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" @@ -31,16 +31,15 @@ #define LITE_MINDSPORE_LITE_C_OPS_SUB_H_ namespace mindspore { +namespace lite { class Sub : public Arithmetic { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Sub(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} -#else - explicit Sub(schema::Primitive *primitive) : Arithmetic(primitive) {} -#endif + explicit Sub(OriginPrimitive *primitive) : Arithmetic(primitive) {} + int GetActivationType() const; void SetActivationType(int activation_type); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_SUB_H_ diff --git a/mindspore/lite/src/ops/tile.cc b/mindspore/lite/src/ops/tile.cc index 42f6bd2071..5332882a9e 100644 --- a/mindspore/lite/src/ops/tile.cc +++ b/mindspore/lite/src/ops/tile.cc @@ -14,12 +14,26 @@ * limitations under the License. 
*/ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/tile.h" +#include + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +std::vector Tile::GetMultiples() const { return this->primitive->value.AsTile()->multiples; } + +void Tile::SetMultiples(const std::vector &multiples) { this->primitive->value.AsTile()->multiples = multiples; } + +#else + +std::vector Tile::GetMultiples() const { + auto fb_vector = this->primitive->value_as_Tile()->multiples(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} + +void Tile::SetMultiples(const std::vector &multiples) {} +#endif -namespace mindspore::lite { int Tile::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); auto input = inputs_.front(); @@ -28,7 +42,6 @@ int Tile::InferShape(std::vector inputs_, std::vectorprimitive->value_as_Tile(); MS_ASSERT(tile_prim != nullptr); - std::vector out_shape; std::vector multiples; std::copy(tile_prim->multiples()->begin(), tile_prim->multiples()->end(), std::back_inserter(multiples)); @@ -36,10 +49,10 @@ int Tile::InferShape(std::vector inputs_, std::vectorshape()[i] * multiples[i]; out_shape.push_back(tmp); } - output->SetFormat(input->GetFormat()); output->set_shape(out_shape); output->set_data_type(input->data_type()); return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/tile.h b/mindspore/lite/src/ops/tile.h similarity index 84% rename from mindspore/lite/c_ops/tile.h rename to mindspore/lite/src/ops/tile.h index b84e76afa5..8c43a0a04f 100644 --- a/mindspore/lite/c_ops/tile.h +++ b/mindspore/lite/src/ops/tile.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,17 +29,16 
@@ #define LITE_MINDSPORE_LITE_C_OPS_TILE_H_ namespace mindspore { +namespace lite { class Tile : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Tile(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Tile(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Tile(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; std::vector GetMultiples() const; void SetMultiples(const std::vector &multiples); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_TILE_H_ diff --git a/mindspore/lite/src/ops/topk.cc b/mindspore/lite/src/ops/topk.cc index e3dbee034e..a5e1abdc04 100644 --- a/mindspore/lite/src/ops/topk.cc +++ b/mindspore/lite/src/ops/topk.cc @@ -14,12 +14,26 @@ * limitations under the License. */ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/topk.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +int TopK::GetK() const { return this->primitive->value.AsTopK()->k; } +bool TopK::GetSorted() const { return this->primitive->value.AsTopK()->sorted; } + +void TopK::SetK(int k) { this->primitive->value.AsTopK()->k = k; } +void TopK::SetSorted(bool sorted) { this->primitive->value.AsTopK()->sorted = sorted; } + +#else + +int TopK::GetK() const { return this->primitive->value_as_TopK()->k(); } +bool TopK::GetSorted() const { return this->primitive->value_as_TopK()->sorted(); } + +void TopK::SetK(int k) {} +void TopK::SetSorted(bool sorted) {} +#endif -namespace mindspore::lite { int TopK::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); if (inputs_.size() != kSingleNum || outputs_.size() != kDoubleNum) { @@ -34,18 +48,15 @@ int TopK::InferShape(std::vector inputs_, std::vectorprimitive->value_as_TopK(); MS_ASSERT(topk_prim != nullptr); - auto 
out_shape = input->shape(); out_shape[out_shape.size() - 1] = topk_prim->k(); - output0->set_shape(out_shape); output0->set_data_type(input->data_type()); output0->SetFormat(input->GetFormat()); - output1->set_shape(out_shape); output1->set_data_type(kNumberTypeInt32); output1->SetFormat(input->GetFormat()); - return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/topk.h b/mindspore/lite/src/ops/topk.h similarity index 84% rename from mindspore/lite/c_ops/topk.h rename to mindspore/lite/src/ops/topk.h index 6e08babe58..3b7ec46cce 100644 --- a/mindspore/lite/c_ops/topk.h +++ b/mindspore/lite/src/ops/topk.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,19 +29,18 @@ #define LITE_MINDSPORE_LITE_C_OPS_TOP_K_H_ namespace mindspore { +namespace lite { class TopK : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit TopK(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit TopK(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit TopK(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetK() const; bool GetSorted() const; void SetK(int k); void SetSorted(bool sorted); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_TOP_K_H_ diff --git a/mindspore/lite/src/ops/transpose.cc b/mindspore/lite/src/ops/transpose.cc index 086b49c145..f8830d26ca 100644 --- a/mindspore/lite/src/ops/transpose.cc +++ b/mindspore/lite/src/ops/transpose.cc @@ -14,12 +14,31 @@ * limitations under the License. 
*/ -#include "src/ops/ops.h" +#include "src/ops/transpose.h" #include "include/errorcode.h" #include "utils/log_adapter.h" -#include "src/ir/tensor.h" -namespace mindspore::lite { +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +std::vector Transpose::GetPerm() const { return this->primitive->value.AsTranspose()->perm; } +bool Transpose::GetConjugate() const { return this->primitive->value.AsTranspose()->conjugate; } + +void Transpose::SetPerm(const std::vector &perm) { this->primitive->value.AsTranspose()->perm = perm; } +void Transpose::SetConjugate(bool conjugate) { this->primitive->value.AsTranspose()->conjugate = conjugate; } + +#else + +std::vector Transpose::GetPerm() const { + auto fb_vector = this->primitive->value_as_Transpose()->perm(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} +bool Transpose::GetConjugate() const { return this->primitive->value_as_Transpose()->conjugate(); } + +void Transpose::SetPerm(const std::vector &perm) {} +void Transpose::SetConjugate(bool conjugate) {} +#endif + int Transpose::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); auto input = inputs_.front(); @@ -41,16 +60,14 @@ int Transpose::InferShape(std::vector inputs_, std::vector perm; perm.insert(perm.begin(), transpore_prim->perm()->begin(), transpore_prim->perm()->end()); - std::vector in_shape = input->shape(); std::vector out_shape; out_shape.resize(perm.size()); for (int i = 0; i < perm.size(); ++i) { out_shape[i] = in_shape[perm[i]]; } - output->set_shape(out_shape); - return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/transpose.h b/mindspore/lite/src/ops/transpose.h similarity index 84% rename from mindspore/lite/c_ops/transpose.h rename to mindspore/lite/src/ops/transpose.h index dc595a16fe..6092c9d2da 100644 --- a/mindspore/lite/c_ops/transpose.h +++ b/mindspore/lite/src/ops/transpose.h @@ -18,7 +18,7 
@@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,19 +29,18 @@ #define LITE_MINDSPORE_LITE_C_OPS_TRANSPOSE_H_ namespace mindspore { +namespace lite { class Transpose : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Transpose(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Transpose(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Transpose(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; std::vector GetPerm() const; bool GetConjugate() const; void SetPerm(const std::vector &perm); void SetConjugate(bool conjugate); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_TRANSPOSE_H_ diff --git a/mindspore/lite/src/ops/unique.cc b/mindspore/lite/src/ops/unique.cc index 139e0e7113..e2561690fe 100644 --- a/mindspore/lite/src/ops/unique.cc +++ b/mindspore/lite/src/ops/unique.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,12 +14,22 @@ * limitations under the License. 
*/ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/unique.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +int Unique::GetOutType() const { return this->primitive->value.AsUnique()->outType; } + +void Unique::SetOutType(int out_type) { this->primitive->value.AsUnique()->outType = out_type; } + +#else + +int Unique::GetOutType() const { return this->primitive->value_as_Unique()->outType(); } + +void Unique::SetOutType(int out_type) {} +#endif -namespace mindspore::lite { int Unique::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); if (inputs_.size() != kSingleNum || outputs_.size() != kDoubleNum) { @@ -38,7 +48,7 @@ int Unique::InferShape(std::vector inputs_, std::vectorset_data_type(kNumberTypeInt32); output1->SetFormat(input->GetFormat()); output0->SetFormat(input->GetFormat()); - return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/unique.h b/mindspore/lite/src/ops/unique.h similarity index 83% rename from mindspore/lite/c_ops/unique.h rename to mindspore/lite/src/ops/unique.h index 96cb0ce584..3091d96718 100644 --- a/mindspore/lite/c_ops/unique.h +++ b/mindspore/lite/src/ops/unique.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,17 +29,16 @@ #define LITE_MINDSPORE_LITE_C_OPS_UNIQUE_H_ namespace mindspore { +namespace lite { class Unique : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Unique(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Unique(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Unique(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int 
InferShape(std::vector inputs_, std::vector outputs_) override; int GetOutType() const; void SetOutType(int out_type); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_UNIQUE_H_ diff --git a/mindspore/lite/src/ops/unsqueeze.cc b/mindspore/lite/src/ops/unsqueeze.cc index 9d85f843a7..140368e2e4 100644 --- a/mindspore/lite/src/ops/unsqueeze.cc +++ b/mindspore/lite/src/ops/unsqueeze.cc @@ -14,12 +14,28 @@ * limitations under the License. */ -#include "src/ops/ops.h" +#include "src/ops/unsqueeze.h" #include "include/errorcode.h" #include "utils/log_adapter.h" #include "src/ir/tensor.h" -namespace mindspore::lite { +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +std::vector Unsqueeze::GetAxis() const { return this->primitive->value.AsUnsqueeze()->axis; } + +void Unsqueeze::SetAxis(const std::vector &axis) { this->primitive->value.AsUnsqueeze()->axis = axis; } + +#else +bool predicate(int n) { return n != 1; } +std::vector Unsqueeze::GetAxis() const { + auto fb_vector = this->primitive->value_as_Unsqueeze()->axis(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} + +void Unsqueeze::SetAxis(const std::vector &axis) {} +#endif + int Unsqueeze::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); auto input = inputs_.front(); @@ -43,7 +59,6 @@ int Unsqueeze::InferShape(std::vector inputs_, std::vectoraxis()->size(); std::vector out_shape; - if (dim_rank == 0) { for (auto d : in_shape) { if (d != 1) { @@ -69,8 +84,8 @@ int Unsqueeze::InferShape(std::vector inputs_, std::vectorset_shape(out_shape); return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/unsqueeze.h b/mindspore/lite/src/ops/unsqueeze.h similarity index 83% rename from mindspore/lite/c_ops/unsqueeze.h rename to mindspore/lite/src/ops/unsqueeze.h index bfeba00a57..98c02165f9 100644 --- a/mindspore/lite/c_ops/unsqueeze.h +++ 
b/mindspore/lite/src/ops/unsqueeze.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,17 +29,16 @@ #define LITE_MINDSPORE_LITE_C_OPS_UNSQUEEZE_H_ namespace mindspore { +namespace lite { class Unsqueeze : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Unsqueeze(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Unsqueeze(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Unsqueeze(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; std::vector GetAxis() const; void SetAxis(const std::vector &axis); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_UNSQUEEZE_H_ diff --git a/mindspore/lite/src/ops/unstack.cc b/mindspore/lite/src/ops/unstack.cc index e11f0d71e1..63444e50d0 100644 --- a/mindspore/lite/src/ops/unstack.cc +++ b/mindspore/lite/src/ops/unstack.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,12 +14,26 @@ * limitations under the License. 
*/ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/unstack.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +int Unstack::GetNum() const { return this->primitive->value.AsUnstack()->num; } +int Unstack::GetAxis() const { return this->primitive->value.AsUnstack()->axis; } + +void Unstack::SetNum(int num) { this->primitive->value.AsUnstack()->num = num; } +void Unstack::SetAxis(int axis) { this->primitive->value.AsUnstack()->axis = axis; } + +#else + +int Unstack::GetNum() const { return this->primitive->value_as_Unstack()->num(); } +int Unstack::GetAxis() const { return this->primitive->value_as_Unstack()->axis(); } + +void Unstack::SetNum(int num) {} +void Unstack::SetAxis(int axis) {} +#endif -namespace mindspore::lite { int Unstack::InferShape(std::vector inputs, std::vector outputs) { auto input = inputs.at(0); MS_ASSERT(input != nullptr); @@ -30,7 +44,6 @@ int Unstack::InferShape(std::vector inputs, std::vectoraxis(); return RET_PARAM_INVALID; } - std::vector output_shape; for (size_t i = 0; i < input_shape.size(); ++i) { if (i != axis) { @@ -45,4 +58,5 @@ int Unstack::InferShape(std::vector inputs, std::vector #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,19 +29,18 @@ #define LITE_MINDSPORE_LITE_C_OPS_UNSTACK_H_ namespace mindspore { +namespace lite { class Unstack : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Unstack(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Unstack(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Unstack(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; int GetNum() const; int GetAxis() const; void SetNum(int num); void 
SetAxis(int axis); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_UNSTACK_H_ diff --git a/mindspore/lite/c_ops/upsample.cc b/mindspore/lite/src/ops/upsample.cc similarity index 74% rename from mindspore/lite/c_ops/upsample.cc rename to mindspore/lite/src/ops/upsample.cc index e7aaac2a79..96941e22c6 100644 --- a/mindspore/lite/c_ops/upsample.cc +++ b/mindspore/lite/src/ops/upsample.cc @@ -14,25 +14,27 @@ * limitations under the License. */ -#include "c_ops/upsample.h" +#include "src/ops/upsample.h" namespace mindspore { +namespace lite { #ifdef PRIMITIVE_WRITEABLE -string Upsample::GetMode() const { return this->primitive->value.AsUpsample()->mode; } +std::string Upsample::GetMode() const { return this->primitive->value.AsUpsample()->mode; } std::vector Upsample::GetScales() const { return this->primitive->value.AsUpsample()->scales; } -void Upsample::SetMode(string mode) { this->primitive->value.AsUpsample()->mode = mode; } +void Upsample::SetMode(std::string mode) { this->primitive->value.AsUpsample()->mode = mode; } void Upsample::SetScales(const std::vector &scales) { this->primitive->value.AsUpsample()->scales = scales; } #else -string Upsample::GetMode() const { return this->primitive->value_as_Upsample()->mode()->str(); } +std::string Upsample::GetMode() const { return this->primitive->value_as_Upsample()->mode()->str(); } std::vector Upsample::GetScales() const { auto fb_vector = this->primitive->value_as_Upsample()->scales(); return std::vector(fb_vector->begin(), fb_vector->end()); } -void Upsample::SetMode(string mode) {} +void Upsample::SetMode(std::string mode) {} void Upsample::SetScales(const std::vector &scales) {} #endif +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/upsample.h b/mindspore/lite/src/ops/upsample.h similarity index 79% rename from mindspore/lite/c_ops/upsample.h rename to mindspore/lite/src/ops/upsample.h index c113e46800..ced71016c5 100644 --- 
a/mindspore/lite/c_ops/upsample.h +++ b/mindspore/lite/src/ops/upsample.h @@ -19,7 +19,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -30,18 +30,17 @@ #define LITE_MINDSPORE_LITE_C_OPS_UPSAMPLE_H_ namespace mindspore { +namespace lite { class Upsample : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Upsample(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Upsample(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif - string GetMode() const; + explicit Upsample(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + + std::string GetMode() const; std::vector GetScales() const; - void SetMode(string mode); + void SetMode(std::string mode); void SetScales(const std::vector &scales); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_UPSAMPLE_H_ diff --git a/mindspore/lite/src/ops/where.cc b/mindspore/lite/src/ops/where.cc index ef3d3d0d03..36b8c8caa4 100644 --- a/mindspore/lite/src/ops/where.cc +++ b/mindspore/lite/src/ops/where.cc @@ -14,12 +14,27 @@ * limitations under the License. 
*/ -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" +#include "src/ops/where.h" + +namespace mindspore { +namespace lite { +#ifdef PRIMITIVE_WRITEABLE +std::vector Where::GetCondition() const { return this->primitive->value.AsWhere()->condition; } + +void Where::SetCondition(const std::vector &condition) { + this->primitive->value.AsWhere()->condition = condition; +} + +#else + +std::vector Where::GetCondition() const { + auto fb_vector = this->primitive->value_as_Where()->condition(); + return std::vector(fb_vector->begin(), fb_vector->end()); +} + +void Where::SetCondition(const std::vector &condition) {} +#endif -namespace mindspore::lite { int Where::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); auto input = inputs_.front(); @@ -42,7 +57,6 @@ int Where::InferShape(std::vector inputs_, std::vectorElementsNum(); int num2 = input2->ElementsNum(); int nummax = num > num1 ? num : (num1 > num2 ? 
num1 : num2); - auto shape_tmp = inputs_.at(0)->shape(); auto shape_tmp1 = inputs_.at(1)->shape(); auto shape_tmp2 = inputs_.at(2)->shape(); @@ -68,13 +82,12 @@ int Where::InferShape(std::vector inputs_, std::vectorset_shape(output_shape); output->set_data_type(input->data_type()); output->SetFormat(input->GetFormat()); - return RET_OK; } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/c_ops/where.h b/mindspore/lite/src/ops/where.h similarity index 84% rename from mindspore/lite/c_ops/where.h rename to mindspore/lite/src/ops/where.h index ea7403cb9c..ef9a9f179e 100644 --- a/mindspore/lite/c_ops/where.h +++ b/mindspore/lite/src/ops/where.h @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,17 +29,16 @@ #define LITE_MINDSPORE_LITE_C_OPS_WHERE_H_ namespace mindspore { +namespace lite { class Where : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit Where(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit Where(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit Where(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; std::vector GetCondition() const; void SetCondition(const std::vector &condition); }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_WHERE_H_ diff --git a/mindspore/lite/c_ops/zeros_like.cc b/mindspore/lite/src/ops/zeros_like.cc similarity index 83% rename from mindspore/lite/c_ops/zeros_like.cc rename to mindspore/lite/src/ops/zeros_like.cc index 20cc2a8ceb..83fe130c37 100644 --- a/mindspore/lite/c_ops/zeros_like.cc +++ b/mindspore/lite/src/ops/zeros_like.cc @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 
Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * @@ -14,9 +14,10 @@ * limitations under the License. */ -#include "c_ops/zeros_like.h" +#include "src/ops/zeros_like.h" namespace mindspore { +namespace lite { int ZerosLike::InferShape(std::vector inputs_, std::vector outputs_) { MS_ASSERT(this->primitive != nullptr); auto input = inputs_.front(); @@ -26,12 +27,13 @@ int ZerosLike::InferShape(std::vector inputs_, std::vect if (inputs_.size() != kSingleNum || outputs_.size() != kSingleNum) { MS_LOG(ERROR) << "zeroslike input or output number invalid, Input size:" << inputs_.size() << ", output size: " << outputs_.size(); - return 1; + return RET_INPUT_TENSOR_ERROR; } output->set_shape(input->shape()); output->set_data_type(input->data_type()); output->SetFormat(input->GetFormat()); - return 0; + return RET_OK; } +} // namespace lite } // namespace mindspore diff --git a/mindspore/lite/c_ops/zeros_like.h b/mindspore/lite/src/ops/zeros_like.h similarity index 74% rename from mindspore/lite/c_ops/zeros_like.h rename to mindspore/lite/src/ops/zeros_like.h index 0968fede79..bc323972af 100644 --- a/mindspore/lite/c_ops/zeros_like.h +++ b/mindspore/lite/src/ops/zeros_like.h @@ -1,7 +1,7 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * - * Licensed under the Apache License, Version 2.0 (the License); + * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * @@ -18,7 +18,7 @@ #include #include #include "ir/dtype/type_id.h" -#include "mindspore/lite/c_ops/primitive_c.h" +#include "src/ops/primitive_c.h" #ifdef PRIMITIVE_WRITEABLE #include "schema/inner/model_generated.h" #else @@ -29,15 +29,14 @@ #define LITE_MINDSPORE_LITE_C_OPS_ZEROS_LIKE_H_ namespace mindspore { +namespace lite { class ZerosLike : public PrimitiveC { public: -#ifdef PRIMITIVE_WRITEABLE - explicit ZerosLike(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - explicit ZerosLike(schema::Primitive *primitive) : PrimitiveC(primitive) {} -#endif + explicit ZerosLike(OriginPrimitive *primitive) : PrimitiveC(primitive) {} + int InferShape(std::vector inputs_, std::vector outputs_) override; }; +} // namespace lite } // namespace mindspore #endif // LITE_MINDSPORE_LITE_C_OPS_ZEROS_LIKE_H_ diff --git a/mindspore/lite/src/ops/zeroslike.cc b/mindspore/lite/src/ops/zeroslike.cc deleted file mode 100644 index d14b96b385..0000000000 --- a/mindspore/lite/src/ops/zeroslike.cc +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/ops.h" -#include "include/errorcode.h" -#include "utils/log_adapter.h" -#include "src/ir/tensor.h" - -namespace mindspore::lite { -int ZerosLike::InferShape(std::vector inputs_, std::vector outputs_) { - MS_ASSERT(this->primitive != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - if (inputs_.size() != kSingleNum || outputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "zeroslike input or output number invalid, Input size:" << inputs_.size() - << ", output size: " << outputs_.size(); - return RET_INPUT_TENSOR_ERROR; - } - output->set_shape(input->shape()); - output->set_data_type(input->data_type()); - output->SetFormat(input->GetFormat()); - - return RET_OK; -} -} // namespace mindspore::lite diff --git a/mindspore/lite/src/param_value_lite.h b/mindspore/lite/src/param_value_lite.h index e1908e4d8d..ba4094584c 100644 --- a/mindspore/lite/src/param_value_lite.h +++ b/mindspore/lite/src/param_value_lite.h @@ -65,7 +65,7 @@ class ParamValueLite : public Value { } bool operator==(const Value &other) const override { - this == &other; + return this == &other; } private: diff --git a/mindspore/lite/src/populate_parameter.cc b/mindspore/lite/src/populate_parameter.cc index 0ee685edf1..b6c1702a24 100644 --- a/mindspore/lite/src/populate_parameter.cc +++ b/mindspore/lite/src/populate_parameter.cc @@ -14,9 +14,100 @@ * limitations under the License. 
*/ +#include "src/ops/constant_of_shape.h" +#include "src/ops/space_to_batch.h" +#include "src/ops/conv2d.h" +#include "src/ops/roi_pooling.h" +#include "src/ops/topk.h" +#include "src/ops/broadcast_to.h" +#include "src/ops/unsqueeze.h" +#include "src/ops/unstack.h" +#include "src/ops/depth_to_space.h" +#include "src/ops/batch_to_space.h" +#include "src/ops/prior_box.h" +#include "src/ops/lstm.h" +#include "src/ops/softmax.h" +#include "src/ops/activation.h" +#include "src/ops/deconv2d.h" +#include "src/ops/reduce.h" +#include "src/ops/pooling.h" +#include "src/ops/fused_batchnorm.h" +#include "src/ops/batch_norm.h" +#include "src/ops/power.h" +#include "src/ops/range.h" +#include "src/ops/add.h" +#include "src/ops/sub.h" +#include "src/ops/div.h" +#include "src/ops/bias_add.h" +#include "src/ops/expand_dims.h" +#include "src/ops/full_connection.h" +#include "src/ops/shape.h" +#include "src/ops/elu.h" +#include "src/ops/embedding_lookup.h" +#include "src/ops/quant_dtype_cast.h" +#include "src/ops/matmul.h" +#include "src/ops/resize.h" +#include "src/ops/tile.h" +#include "src/ops/one_hot.h" +#include "src/ops/space_to_depth.h" +#include "src/ops/split.h" +#include "src/ops/argmax.h" +#include "src/ops/argmin.h" +#include "src/ops/cast.h" +#include "src/ops/reshape.h" +#include "src/ops/scale.h" +#include "src/ops/concat.h" +#include "src/ops/nchw2nhwc.h" +#include "src/ops/slice.h" +#include "src/ops/squeeze.h" +#include "src/ops/flatten.h" +#include "src/ops/mean.h" +#include "src/ops/nhwc2nchw.h" +#include "src/ops/stack.h" +#include "src/ops/crop.h" +#include "src/ops/addn.h" +#include "src/ops/gather.h" +#include "src/ops/gather_nd.h" +#include "src/ops/local_response_normalization.h" +#include "src/ops/pad.h" +#include "src/ops/prelu.h" +#include "src/ops/caffe_p_relu.h" +#include "src/ops/reverse_sequence.h" +#include "src/ops/dedepthwise_conv2d.h" +#include "src/ops/depthwise_conv2d.h" +#include "src/ops/mul.h" +#include "src/ops/eltwise.h" +#include 
"src/ops/fill.h" +#include "src/ops/transpose.h" +#include "src/ops/log.h" +#include "src/ops/abs.h" +#include "src/ops/sin.h" +#include "src/ops/cos.h" +#include "src/ops/sqrt.h" +#include "src/ops/square.h" +#include "src/ops/exp.h" +#include "src/ops/rsqrt.h" +#include "src/ops/maximum.h" +#include "src/ops/minimum.h" +#include "src/ops/strided_slice.h" +#include "src/ops/reverse.h" +#include "src/ops/logical_and.h" +#include "src/ops/logical_or.h" +#include "src/ops/logical_not.h" +#include "src/ops/floor_div.h" +#include "src/ops/floor_mod.h" +#include "src/ops/equal.h" +#include "src/ops/not_equal.h" +#include "src/ops/less.h" +#include "src/ops/less_equal.h" +#include "src/ops/greater_equal.h" +#include "src/ops/greater.h" +#include "src/ops/floor.h" +#include "src/ops/squared_difference.h" +#include "src/ops/ceil.h" +#include "src/ops/round.h" +#include "src/ops/primitive_c.h" #include "src/populate_parameter.h" -#include -#include "src/ops/ops.h" #include "utils/log_adapter.h" #include "schema/ops_generated.h" #include "src/runtime/kernel/arm/nnacl/op_base.h" @@ -78,112 +169,113 @@ namespace mindspore::kernel { -OpParameter *PopulateROIPoolingParameter(const lite::Primitive *primitive) { - auto pooling_primitive = primitive->Value()->value_as_ROIPooling(); - ROIPoolingParameter *param = new (std::nothrow) ROIPoolingParameter(); +OpParameter *PopulateROIPoolingParameter(const mindspore::lite::PrimitiveC *primitive) { + const auto param = dynamic_cast(primitive); + auto *roi_pooling_param = new (std::nothrow) ROIPoolingParameter(); if (param == nullptr) { MS_LOG(ERROR) << "new PoolingParameter failed."; return nullptr; } - param->op_parameter_.type_ = primitive->Type(); - param->pooledH_ = pooling_primitive->pooledH(); - param->pooledW_ = pooling_primitive->pooledW(); - param->scale_ = pooling_primitive->scale(); - return reinterpret_cast(param); + roi_pooling_param->op_parameter_.type_ = param->Type(); + roi_pooling_param->pooledH_ = param->GetPooledW(); + 
roi_pooling_param->pooledW_ = param->GetPooledW(); + roi_pooling_param->scale_ = param->GetScale(); + return reinterpret_cast(roi_pooling_param); } -OpParameter *PopulateBatchNorm(const lite::Primitive *primitive) { - BatchNormParameter *batch_norm_param = new (std::nothrow) BatchNormParameter(); +OpParameter *PopulateBatchNorm(const mindspore::lite::PrimitiveC *primitive) { + const auto param = dynamic_cast(primitive); + auto *batch_norm_param = new (std::nothrow) BatchNormParameter(); if (batch_norm_param == nullptr) { MS_LOG(ERROR) << "new BatchNormParameter failed."; return nullptr; } batch_norm_param->op_parameter_.type_ = primitive->Type(); - auto param = primitive->Value()->value_as_BatchNorm(); - batch_norm_param->epsilon_ = param->epsilon(); + batch_norm_param->epsilon_ = param->GetEpsilon(); return reinterpret_cast(batch_norm_param); } -OpParameter *PopulateFillParameter(const lite::Primitive *primitive) { - auto param = primitive->Value()->value_as_Fill(); - FillParameter *fill_param = new (std::nothrow) FillParameter(); +OpParameter *PopulateFillParameter(const mindspore::lite::PrimitiveC *primitive) { + const auto param = dynamic_cast(primitive); + auto *fill_param = new (std::nothrow) FillParameter(); if (fill_param == nullptr) { MS_LOG(ERROR) << "new FillParameter failed."; return nullptr; } fill_param->op_parameter_.type_ = primitive->Type(); - auto flatDims = param->dims(); - fill_param->num_dims_ = flatDims->size(); + auto flatDims = param->GetDims(); + fill_param->num_dims_ = flatDims.size(); int i = 0; - for (auto iter = flatDims->begin(); iter != flatDims->end(); iter++) { + for (auto iter = flatDims.begin(); iter != flatDims.end(); iter++) { fill_param->dims_[i++] = *iter; } return reinterpret_cast(fill_param); } -OpParameter *PopulateExpandDimsParameter(const lite::Primitive *primitive) { - auto param = primitive->Value()->value_as_ExpandDims(); - ExpandDimsParameter *expand_dims_param = new (std::nothrow) ExpandDimsParameter(); +OpParameter 
*PopulateExpandDimsParameter(const mindspore::lite::PrimitiveC *primitive) { + auto param = dynamic_cast(primitive); + auto *expand_dims_param = new (std::nothrow) ExpandDimsParameter(); if (expand_dims_param == nullptr) { MS_LOG(ERROR) << "new ExpandDimsParameter failed."; return nullptr; } expand_dims_param->op_parameter_.type_ = primitive->Type(); - expand_dims_param->dim_ = param->dim(); + expand_dims_param->dim_ = param->GetDim(); return reinterpret_cast(expand_dims_param); } -OpParameter *PopulateCaffePReLUParameter(const lite::Primitive *primitive) { - auto param = primitive->Value()->value_as_CaffePReLU(); - CaffePreluParameter *caffePrelu_param = new (std::nothrow) CaffePreluParameter(); +OpParameter *PopulateCaffePReLUParameter(const mindspore::lite::PrimitiveC *primitive) { + auto param = dynamic_cast(primitive); + auto *caffePrelu_param = new (std::nothrow) CaffePreluParameter(); if (caffePrelu_param == nullptr) { MS_LOG(ERROR) << "new caffePReluParameter failed."; return nullptr; } caffePrelu_param->op_parameter_.type_ = primitive->Type(); - caffePrelu_param->channelShared = param->channelShared(); + caffePrelu_param->channelShared = param->GetChannelShared(); return reinterpret_cast(caffePrelu_param); } -OpParameter *PopulatePreluParameter(const lite::Primitive *primitive) { - auto param = primitive->Value()->value_as_Prelu(); - PreluParameter *Prelu_param = new (std::nothrow) PreluParameter(); - if (Prelu_param == nullptr) { +OpParameter *PopulatePreluParameter(const mindspore::lite::PrimitiveC *primitive) { + auto param = dynamic_cast(primitive); + auto *prelu_param = new (std::nothrow) PreluParameter(); + if (prelu_param == nullptr) { MS_LOG(ERROR) << "new caffePReluParameter failed."; return nullptr; } - Prelu_param->op_parameter_.type_ = primitive->Type(); - auto temp = param->slope(); - for (int i = 0; i < temp->size(); i++) { - Prelu_param->slope_[i] = temp->Get(i); + prelu_param->op_parameter_.type_ = primitive->Type(); + auto temp = 
param->GetSlope(); + for (int i = 0; i < temp.size(); i++) { + prelu_param->slope_[i] = temp[i]; } - return reinterpret_cast(Prelu_param); + return reinterpret_cast(prelu_param); } -OpParameter *PopulatePoolingParameter(const lite::Primitive *primitive) { - auto pooling_primitive = primitive->Value()->value_as_Pooling(); +OpParameter *PopulatePoolingParameter(const mindspore::lite::PrimitiveC *primitive) { + auto pooling_primitive = dynamic_cast(primitive); // todo use malloc instead - PoolingParameter *pooling_param = new (std::nothrow) PoolingParameter(); + auto *pooling_param = new (std::nothrow) PoolingParameter(); if (pooling_param == nullptr) { MS_LOG(ERROR) << "new PoolingParameter failed."; return nullptr; } pooling_param->op_parameter_.type_ = primitive->Type(); - pooling_param->global_ = pooling_primitive->global(); - pooling_param->window_w_ = pooling_primitive->windowW(); - pooling_param->window_h_ = pooling_primitive->windowH(); + pooling_param->global_ = pooling_primitive->GetGlobal(); + pooling_param->window_w_ = pooling_primitive->GetWindowW(); + pooling_param->window_h_ = pooling_primitive->GetWindowH(); + // todo format auto pooling_lite_primitive = (lite::Pooling *)primitive; MS_ASSERT(nullptr != pooling_lite_primitive); pooling_param->pad_u_ = pooling_lite_primitive->PadUp(); pooling_param->pad_d_ = pooling_lite_primitive->PadDown(); pooling_param->pad_l_ = pooling_lite_primitive->PadLeft(); pooling_param->pad_r_ = pooling_lite_primitive->PadRight(); - pooling_param->stride_w_ = pooling_primitive->strideW(); - pooling_param->stride_h_ = pooling_primitive->strideH(); + pooling_param->stride_w_ = pooling_primitive->GetStrideW(); + pooling_param->stride_h_ = pooling_primitive->GetStrideH(); - auto is_global = pooling_primitive->global(); + auto is_global = pooling_primitive->GetGlobal(); pooling_param->global_ = is_global; - auto pool_mode = pooling_primitive->poolingMode(); + auto pool_mode = pooling_primitive->GetPoolingMode(); switch (pool_mode) 
{ case schema::PoolMode_MAX_POOLING: pooling_param->max_pooling_ = true; @@ -199,7 +291,7 @@ OpParameter *PopulatePoolingParameter(const lite::Primitive *primitive) { break; } - auto round_mode = pooling_primitive->roundMode(); + auto round_mode = pooling_primitive->GetRoundMode(); switch (round_mode) { case schema::RoundMode_FLOOR: pooling_param->round_floor_ = true; @@ -217,9 +309,9 @@ OpParameter *PopulatePoolingParameter(const lite::Primitive *primitive) { return reinterpret_cast(pooling_param); } -OpParameter *PopulateFullconnectionParameter(const lite::Primitive *primitive) { - auto param = primitive->Value()->value_as_FullConnection(); - MatMulParameter *matmul_param = new (std::nothrow) MatMulParameter(); +OpParameter *PopulateFullconnectionParameter(const mindspore::lite::PrimitiveC *primitive) { + auto param = dynamic_cast(primitive); + auto *matmul_param = new (std::nothrow) MatMulParameter(); if (matmul_param == nullptr) { MS_LOG(ERROR) << "new FullconnectionParameter failed."; return nullptr; @@ -227,10 +319,10 @@ OpParameter *PopulateFullconnectionParameter(const lite::Primitive *primitive) { matmul_param->op_parameter_.type_ = primitive->Type(); matmul_param->b_transpose_ = true; matmul_param->a_transpose_ = false; - matmul_param->has_bias_ = param->hasBias(); - if (param->activationType() == schema::ActivationType_RELU) { + matmul_param->has_bias_ = param->GetHasBias(); + if (param->GetActivationType() == schema::ActivationType_RELU) { matmul_param->act_type_ = ActType_Relu; - } else if (param->activationType() == schema::ActivationType_RELU6) { + } else if (param->GetActivationType() == schema::ActivationType_RELU6) { matmul_param->act_type_ = ActType_Relu6; } else { matmul_param->act_type_ = ActType_No; @@ -239,35 +331,35 @@ OpParameter *PopulateFullconnectionParameter(const lite::Primitive *primitive) { return reinterpret_cast(matmul_param); } -OpParameter *PopulateMatMulParameter(const lite::Primitive *primitive) { - auto param = 
primitive->Value()->value_as_MatMul(); - MatMulParameter *matmul_param = new (std::nothrow) MatMulParameter(); +OpParameter *PopulateMatMulParameter(const mindspore::lite::PrimitiveC *primitive) { + auto param = dynamic_cast(primitive); + auto *matmul_param = new (std::nothrow) MatMulParameter(); if (matmul_param == nullptr) { MS_LOG(ERROR) << "new FullconnectionParameter failed."; return nullptr; } matmul_param->op_parameter_.type_ = primitive->Type(); - matmul_param->b_transpose_ = param->transposeB(); - matmul_param->a_transpose_ = param->transposeA(); + matmul_param->b_transpose_ = param->GetTransposeB(); + matmul_param->a_transpose_ = param->GetTransposeA(); matmul_param->has_bias_ = false; matmul_param->act_type_ = ActType_No; return reinterpret_cast(matmul_param); } -OpParameter *PopulateConvParameter(const lite::Primitive *primitive) { - ConvParameter *conv_param = new (std::nothrow) ConvParameter(); +OpParameter *PopulateConvParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *conv_param = new (std::nothrow) ConvParameter(); if (conv_param == nullptr) { MS_LOG(ERROR) << "new ConvParameter failed."; return nullptr; } conv_param->op_parameter_.type_ = primitive->Type(); - auto conv_primitive = primitive->Value()->value_as_Conv2D(); - conv_param->kernel_h_ = conv_primitive->kernelH(); - conv_param->kernel_w_ = conv_primitive->kernelW(); + auto conv_primitive = dynamic_cast(primitive); + conv_param->kernel_h_ = conv_primitive->GetKernelH(); + conv_param->kernel_w_ = conv_primitive->GetKernelW(); // todo format - conv_param->group_ = conv_primitive->group(); - conv_param->stride_h_ = conv_primitive->strideH(); - conv_param->stride_w_ = conv_primitive->strideW(); + conv_param->group_ = conv_primitive->GetGroup(); + conv_param->stride_h_ = conv_primitive->GetStrideH(); + conv_param->stride_w_ = conv_primitive->GetStrideW(); auto conv2d_lite_primitive = (lite::Conv2D *)primitive; MS_ASSERT(nullptr != conv2d_lite_primitive); @@ -277,12 +369,12 @@ 
OpParameter *PopulateConvParameter(const lite::Primitive *primitive) { conv_param->pad_r_ = conv2d_lite_primitive->PadRight(); conv_param->pad_h_ = conv2d_lite_primitive->PadUp(); conv_param->pad_w_ = conv2d_lite_primitive->PadLeft(); - conv_param->dilation_h_ = conv_primitive->dilateH(); - conv_param->dilation_w_ = conv_primitive->dilateW(); - conv_param->input_channel_ = conv_primitive->channelIn(); - conv_param->output_channel_ = conv_primitive->channelOut(); - conv_param->group_ = conv_primitive->group(); - auto act_type = conv_primitive->activationType(); + conv_param->dilation_h_ = conv_primitive->GetDilateH(); + conv_param->dilation_w_ = conv_primitive->GetDilateW(); + conv_param->input_channel_ = conv_primitive->GetChannelIn(); + conv_param->output_channel_ = conv_primitive->GetChannelOut(); + conv_param->group_ = conv_primitive->GetGroup(); + auto act_type = conv_primitive->GetActivationType(); switch (act_type) { case schema::ActivationType_RELU: conv_param->is_relu_ = true; @@ -300,21 +392,22 @@ OpParameter *PopulateConvParameter(const lite::Primitive *primitive) { return reinterpret_cast(conv_param); } -OpParameter *PopulateConvDwParameter(const lite::Primitive *primitive) { - ConvParameter *conv_param = new (std::nothrow) ConvParameter(); +OpParameter *PopulateConvDwParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *conv_param = new (std::nothrow) ConvParameter(); if (conv_param == nullptr) { MS_LOG(ERROR) << "new ConvParameter failed."; return nullptr; } conv_param->op_parameter_.type_ = primitive->Type(); - auto conv_primitive = primitive->Value()->value_as_DepthwiseConv2D(); - conv_param->kernel_h_ = conv_primitive->kernelH(); - conv_param->kernel_w_ = conv_primitive->kernelW(); + + auto conv_primitive = dynamic_cast(primitive); + conv_param->kernel_h_ = conv_primitive->GetKernelH(); + conv_param->kernel_w_ = conv_primitive->GetKernelW(); // todo format, group - conv_param->stride_h_ = conv_primitive->strideH(); - 
conv_param->stride_w_ = conv_primitive->strideW(); + conv_param->stride_h_ = conv_primitive->GetStrideH(); + conv_param->stride_w_ = conv_primitive->GetStrideW(); - auto pad_mode = conv_primitive->padMode(); + auto pad_mode = conv_primitive->GetPadMode(); auto convdw_lite_primitive = (lite::DepthwiseConv2D *)primitive; MS_ASSERT(nullptr != convdw_lite_primitive); conv_param->pad_u_ = convdw_lite_primitive->PadUp(); @@ -323,9 +416,9 @@ OpParameter *PopulateConvDwParameter(const lite::Primitive *primitive) { conv_param->pad_r_ = convdw_lite_primitive->PadRight(); conv_param->pad_h_ = convdw_lite_primitive->PadUp(); conv_param->pad_w_ = convdw_lite_primitive->PadLeft(); - conv_param->dilation_h_ = conv_primitive->dilateH(); - conv_param->dilation_w_ = conv_primitive->dilateW(); - auto act_type = conv_primitive->activationType(); + conv_param->dilation_h_ = conv_primitive->GetDilateH(); + conv_param->dilation_w_ = conv_primitive->GetDilateW(); + auto act_type = conv_primitive->GetActivationType(); switch (act_type) { case schema::ActivationType_RELU: conv_param->is_relu_ = true; @@ -343,21 +436,21 @@ OpParameter *PopulateConvDwParameter(const lite::Primitive *primitive) { return reinterpret_cast(conv_param); } -OpParameter *PopulateDeconvDwParameter(const lite::Primitive *primitive) { - ConvParameter *conv_param = new ConvParameter(); +OpParameter *PopulateDeconvDwParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *conv_param = new ConvParameter(); if (conv_param == nullptr) { MS_LOG(ERROR) << "new ConvParameter failed."; return nullptr; } conv_param->op_parameter_.type_ = primitive->Type(); - auto conv_primitive = primitive->Value()->value_as_DeDepthwiseConv2D(); - conv_param->kernel_h_ = conv_primitive->kernelH(); - conv_param->kernel_w_ = conv_primitive->kernelW(); + auto conv_primitive = dynamic_cast(primitive); + conv_param->kernel_h_ = conv_primitive->GetKernelH(); + conv_param->kernel_w_ = conv_primitive->GetKernelW(); // todo format, group - 
conv_param->stride_h_ = conv_primitive->strideH(); - conv_param->stride_w_ = conv_primitive->strideW(); + conv_param->stride_h_ = conv_primitive->GetStrideH(); + conv_param->stride_w_ = conv_primitive->GetStrideW(); - auto deconvdw_lite_primitive = (lite::DeconvDepthwiseConv2D *)primitive; + auto deconvdw_lite_primitive = (mindspore::lite::DeDepthwiseConv2D *)primitive; MS_ASSERT(nullptr != deconvdw_lite_primitive); conv_param->pad_u_ = deconvdw_lite_primitive->PadUp(); conv_param->pad_d_ = deconvdw_lite_primitive->PadDown(); @@ -365,9 +458,9 @@ OpParameter *PopulateDeconvDwParameter(const lite::Primitive *primitive) { conv_param->pad_r_ = deconvdw_lite_primitive->PadRight(); conv_param->pad_h_ = deconvdw_lite_primitive->PadUp(); conv_param->pad_w_ = deconvdw_lite_primitive->PadLeft(); - conv_param->dilation_h_ = conv_primitive->dilateH(); - conv_param->dilation_w_ = conv_primitive->dilateW(); - auto act_type = conv_primitive->activationType(); + conv_param->dilation_h_ = conv_primitive->GetDilateH(); + conv_param->dilation_w_ = conv_primitive->GetDilateW(); + auto act_type = conv_primitive->GetActivationType(); switch (act_type) { case schema::ActivationType_RELU: conv_param->is_relu_ = true; @@ -385,18 +478,18 @@ OpParameter *PopulateDeconvDwParameter(const lite::Primitive *primitive) { return reinterpret_cast(conv_param); } -OpParameter *PopulateDeconvParameter(const lite::Primitive *primitive) { - ConvParameter *conv_param = new ConvParameter(); +OpParameter *PopulateDeconvParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *conv_param = new ConvParameter(); if (conv_param == nullptr) { MS_LOG(ERROR) << "new ConvParameter failed."; return nullptr; } conv_param->op_parameter_.type_ = primitive->Type(); - auto conv_primitive = primitive->Value()->value_as_DeConv2D(); - conv_param->kernel_h_ = conv_primitive->kernelH(); - conv_param->kernel_w_ = conv_primitive->kernelW(); - conv_param->stride_h_ = conv_primitive->strideH(); - conv_param->stride_w_ = 
conv_primitive->strideW(); + auto conv_primitive = dynamic_cast(primitive); + conv_param->kernel_h_ = conv_primitive->GetKernelH(); + conv_param->kernel_w_ = conv_primitive->GetKernelW(); + conv_param->stride_h_ = conv_primitive->GetStrideH(); + conv_param->stride_w_ = conv_primitive->GetStrideW(); auto deconv_lite_primitive = (lite::DeConv2D *)primitive; MS_ASSERT(nullptr != deconvdw_lite_primitive); @@ -404,9 +497,9 @@ OpParameter *PopulateDeconvParameter(const lite::Primitive *primitive) { conv_param->pad_d_ = deconv_lite_primitive->PadDown(); conv_param->pad_l_ = deconv_lite_primitive->PadLeft(); conv_param->pad_r_ = deconv_lite_primitive->PadRight(); - conv_param->dilation_h_ = conv_primitive->dilateH(); - conv_param->dilation_w_ = conv_primitive->dilateW(); - auto act_type = conv_primitive->activationType(); + conv_param->dilation_h_ = conv_primitive->GetDilateH(); + conv_param->dilation_w_ = conv_primitive->GetDilateW(); + auto act_type = conv_primitive->GetActivationType(); switch (act_type) { case schema::ActivationType_RELU: conv_param->is_relu_ = true; @@ -422,7 +515,7 @@ OpParameter *PopulateDeconvParameter(const lite::Primitive *primitive) { break; } - auto pad_mode = conv_primitive->padMode(); + auto pad_mode = conv_primitive->GetPadMode(); switch (pad_mode) { case schema::PadMode_SAME: conv_param->pad_h_ = (conv_param->kernel_h_ - 1) / 2; @@ -444,84 +537,84 @@ OpParameter *PopulateDeconvParameter(const lite::Primitive *primitive) { return reinterpret_cast(conv_param); } -OpParameter *PopulateSoftmaxParameter(const lite::Primitive *primitive) { - auto softmax_primitive = primitive->Value()->value_as_SoftMax(); - SoftmaxParameter *softmax_param = new (std::nothrow) SoftmaxParameter(); +OpParameter *PopulateSoftmaxParameter(const mindspore::lite::PrimitiveC *primitive) { + auto softmax_primitive = dynamic_cast(primitive); + auto *softmax_param = new (std::nothrow) SoftmaxParameter(); if (softmax_param == nullptr) { MS_LOG(ERROR) << "new SoftmaxParameter 
failed."; return nullptr; } softmax_param->op_parameter_.type_ = primitive->Type(); - softmax_param->axis_ = softmax_primitive->axis(); + softmax_param->axis_ = softmax_primitive->GetAxis(); return reinterpret_cast(softmax_param); } -OpParameter *PopulateReduceParameter(const lite::Primitive *primitive) { - ReduceParameter *reduce_param = new (std::nothrow) ReduceParameter(); +OpParameter *PopulateReduceParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *reduce_param = new (std::nothrow) ReduceParameter(); if (reduce_param == nullptr) { MS_LOG(ERROR) << "new ReduceParameter failed."; return nullptr; } reduce_param->op_parameter_.type_ = primitive->Type(); - auto reduce = primitive->Value()->value_as_Reduce(); - reduce_param->keep_dims_ = reduce->keepDims(); - auto axisVector = reduce->axes(); - if (axisVector->size() > REDUCE_MAX_AXES_NUM) { - MS_LOG(ERROR) << "Reduce axes size " << axisVector->size() << " exceed limit " << REDUCE_MAX_AXES_NUM; + auto reduce = dynamic_cast(primitive); + reduce_param->keep_dims_ = reduce->GetKeepDims(); + auto axisVector = reduce->GetAxes(); + if (axisVector.size() > REDUCE_MAX_AXES_NUM) { + MS_LOG(ERROR) << "Reduce axes size " << axisVector.size() << " exceed limit " << REDUCE_MAX_AXES_NUM; delete (reduce_param); return nullptr; } - reduce_param->num_axes_ = static_cast(axisVector->size()); + reduce_param->num_axes_ = static_cast(axisVector.size()); int i = 0; - for (auto iter = axisVector->begin(); iter != axisVector->end(); iter++) { + for (auto iter = axisVector.begin(); iter != axisVector.end(); iter++) { reduce_param->axes_[i++] = *iter; } - reduce_param->mode_ = static_cast(reduce->mode()); + reduce_param->mode_ = static_cast(reduce->GetMode()); return reinterpret_cast(reduce_param); } -OpParameter *PopulateMeanParameter(const lite::Primitive *primitive) { - ReduceParameter *mean_param = new (std::nothrow) ReduceParameter(); +OpParameter *PopulateMeanParameter(const mindspore::lite::PrimitiveC *primitive) { + 
auto *mean_param = new (std::nothrow) ReduceParameter(); if (mean_param == nullptr) { MS_LOG(ERROR) << "new ReduceParameter failed."; return nullptr; } mean_param->op_parameter_.type_ = primitive->Type(); - auto mean = primitive->Value()->value_as_Mean(); - mean_param->keep_dims_ = mean->keepDims(); - auto axisVector = mean->axis(); - if (axisVector->size() > REDUCE_MAX_AXES_NUM) { - MS_LOG(ERROR) << "Reduce axes size " << axisVector->size() << " exceed limit " << REDUCE_MAX_AXES_NUM; + auto mean = dynamic_cast(primitive); + mean_param->keep_dims_ = mean->GetKeepDims(); + auto axisVector = mean->GetAxis(); + if (axisVector.size() > REDUCE_MAX_AXES_NUM) { + MS_LOG(ERROR) << "Reduce axes size " << axisVector.size() << " exceed limit " << REDUCE_MAX_AXES_NUM; delete (mean_param); return nullptr; } - mean_param->num_axes_ = static_cast(axisVector->size()); + mean_param->num_axes_ = static_cast(axisVector.size()); int i = 0; - for (auto iter = axisVector->begin(); iter != axisVector->end(); iter++) { + for (auto iter = axisVector.begin(); iter != axisVector.end(); iter++) { mean_param->axes_[i++] = *iter; } mean_param->mode_ = static_cast(schema::ReduceMode_ReduceMean); return reinterpret_cast(mean_param); } -OpParameter *PopulatePadParameter(const lite::Primitive *primitive) { - PadParameter *pad_param = new (std::nothrow) PadParameter(); +OpParameter *PopulatePadParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *pad_param = new (std::nothrow) PadParameter(); if (pad_param == nullptr) { MS_LOG(ERROR) << "new PadParameter failed."; return nullptr; } pad_param->op_parameter_.type_ = primitive->Type(); - auto pad_node = primitive->Value()->value_as_Pad(); - pad_param->pad_mode_ = pad_node->paddingMode(); + auto pad_node = dynamic_cast(primitive); + pad_param->pad_mode_ = pad_node->GetPaddingMode(); if (pad_param->pad_mode_ == schema::PaddingMode_CONSTANT) { - pad_param->constant_value_ = pad_node->constantValue(); + pad_param->constant_value_ = 
pad_node->GetConstantValue(); } else { MS_LOG(ERROR) << "Invalid padding mode: " << pad_param->pad_mode_; delete (pad_param); return nullptr; } - auto size = pad_node->paddings()->size(); + auto size = pad_node->GetPaddings().size(); if (size > MAX_PAD_SIZE) { MS_LOG(ERROR) << "Invalid padding size: " << size; delete (pad_param); @@ -529,37 +622,37 @@ OpParameter *PopulatePadParameter(const lite::Primitive *primitive) { } for (size_t i = 0; i < size; i++) { - pad_param->paddings_[MAX_PAD_SIZE - size + i] = (*(pad_node->paddings()))[i]; + pad_param->paddings_[MAX_PAD_SIZE - size + i] = pad_node->GetPaddings()[i]; } return reinterpret_cast(pad_param); } -OpParameter *PopulateActivationParameter(const lite::Primitive *primitive) { - ActivationParameter *act_param = new (std::nothrow) ActivationParameter(); +OpParameter *PopulateActivationParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *act_param = new (std::nothrow) ActivationParameter(); if (act_param == nullptr) { MS_LOG(ERROR) << "new ActivationParameter failed."; return nullptr; } - auto activation = primitive->Value()->value_as_Activation(); - act_param->type_ = static_cast(activation->type()); - act_param->alpha_ = activation->alpha(); + auto activation = dynamic_cast(primitive); + act_param->type_ = static_cast(activation->GetType()); + act_param->alpha_ = activation->GetAlpha(); return reinterpret_cast(act_param); } -OpParameter *PopulateFusedBatchNorm(const lite::Primitive *primitive) { - BatchNormParameter *batch_norm_param = new (std::nothrow) BatchNormParameter(); +OpParameter *PopulateFusedBatchNorm(const mindspore::lite::PrimitiveC *primitive) { + auto *batch_norm_param = new (std::nothrow) BatchNormParameter(); if (batch_norm_param == nullptr) { MS_LOG(ERROR) << "new FusedBatchNormParameter failed."; return nullptr; } batch_norm_param->op_parameter_.type_ = primitive->Type(); - auto param = primitive->Value()->value_as_FusedBatchNorm(); - batch_norm_param->epsilon_ = param->epsilon(); + 
auto param = dynamic_cast(primitive); + batch_norm_param->epsilon_ = param->GetEpsilon(); return reinterpret_cast(batch_norm_param); } -OpParameter *PopulateArithmetic(const lite::Primitive *primitive) { - ArithmeticParameter *arithmetic_param = new (std::nothrow) ArithmeticParameter(); +OpParameter *PopulateArithmetic(const mindspore::lite::PrimitiveC *primitive) { + auto *arithmetic_param = new (std::nothrow) ArithmeticParameter(); if (arithmetic_param == nullptr) { MS_LOG(ERROR) << "new ArithmeticParameter failed."; return nullptr; @@ -569,16 +662,16 @@ OpParameter *PopulateArithmetic(const lite::Primitive *primitive) { arithmetic_param->ndim_ = ((lite::Arithmetic *)primitive)->NDims(); switch (primitive->Type()) { case schema::PrimitiveType_Add: - arithmetic_param->activation_type_ = primitive->Value()->value_as_Add()->activationType(); + arithmetic_param->activation_type_ = dynamic_cast(primitive)->GetActivationType(); break; case schema::PrimitiveType_Sub: - arithmetic_param->activation_type_ = primitive->Value()->value_as_Sub()->activationType(); + arithmetic_param->activation_type_ = dynamic_cast(primitive)->GetActivationType(); break; case schema::PrimitiveType_Mul: - arithmetic_param->activation_type_ = primitive->Value()->value_as_Mul()->activationType(); + arithmetic_param->activation_type_ = dynamic_cast(primitive)->GetActivationType(); break; case schema::PrimitiveType_Div: - arithmetic_param->activation_type_ = primitive->Value()->value_as_Div()->activationType(); + arithmetic_param->activation_type_ = dynamic_cast(primitive)->GetActivationType(); break; default: arithmetic_param->activation_type_ = 0; @@ -593,14 +686,14 @@ OpParameter *PopulateArithmetic(const lite::Primitive *primitive) { return reinterpret_cast(arithmetic_param); } -OpParameter *PopulateEltwiseParameter(const lite::Primitive *primitive) { - ArithmeticParameter *arithmetic_param = new (std::nothrow) ArithmeticParameter(); +OpParameter *PopulateEltwiseParameter(const 
mindspore::lite::PrimitiveC *primitive) { + auto *arithmetic_param = new (std::nothrow) ArithmeticParameter(); if (arithmetic_param == nullptr) { MS_LOG(ERROR) << "new ArithmeticParameter failed."; return nullptr; } - auto eltwise = primitive->Value()->value_as_Eltwise(); - switch (eltwise->mode()) { + auto eltwise = dynamic_cast(primitive); + switch (eltwise->GetMode()) { case schema::EltwiseMode_PROD: arithmetic_param->op_parameter_.type_ = schema::PrimitiveType_Mul; break; @@ -617,8 +710,8 @@ OpParameter *PopulateEltwiseParameter(const lite::Primitive *primitive) { return reinterpret_cast(arithmetic_param); } -OpParameter *PopulateArithmeticSelf(const lite::Primitive *primitive) { - ArithmeticSelfParameter *arithmetic_self_param = new (std::nothrow) ArithmeticSelfParameter(); +OpParameter *PopulateArithmeticSelf(const mindspore::lite::PrimitiveC *primitive) { + auto *arithmetic_self_param = new (std::nothrow) ArithmeticSelfParameter(); if (arithmetic_self_param == nullptr) { MS_LOG(ERROR) << "new ArithmeticParameter failed."; return nullptr; @@ -627,138 +720,138 @@ OpParameter *PopulateArithmeticSelf(const lite::Primitive *primitive) { return reinterpret_cast(arithmetic_self_param); } -OpParameter *PopulatePowerParameter(const lite::Primitive *primitive) { - PowerParameter *power_param = new (std::nothrow) PowerParameter(); +OpParameter *PopulatePowerParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *power_param = new (std::nothrow) PowerParameter(); if (power_param == nullptr) { MS_LOG(ERROR) << "new PowerParameter failed."; return nullptr; } power_param->op_parameter_.type_ = primitive->Type(); - auto power = primitive->Value()->value_as_Power(); - power_param->power_ = power->power(); - power_param->scale_ = power->scale(); - power_param->shift_ = power->shift(); + auto power = dynamic_cast(primitive); + power_param->power_ = power->GetPower(); + power_param->scale_ = power->GetScale(); + power_param->shift_ = power->GetShift(); return 
reinterpret_cast(power_param); } -OpParameter *PopulateArgMaxParameter(const lite::Primitive *primitive) { - ArgMinMaxParameter *arg_param = new (std::nothrow) ArgMinMaxParameter(); +OpParameter *PopulateArgMaxParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *arg_param = new (std::nothrow) ArgMinMaxParameter(); if (arg_param == nullptr) { MS_LOG(ERROR) << "new ArgMinMaxParameter failed."; return nullptr; } arg_param->op_parameter_.type_ = primitive->Type(); - auto param = primitive->Value()->value_as_ArgMax(); - arg_param->axis_ = param->axis(); - arg_param->topk_ = param->topK(); - arg_param->axis_type_ = param->axisType(); - arg_param->out_value_ = param->outMaxValue(); - arg_param->keep_dims_ = param->keepDims(); + auto param = dynamic_cast(primitive); + arg_param->axis_ = param->GetAxis(); + arg_param->topk_ = param->GetTopK(); + arg_param->axis_type_ = param->GetAxisType(); + arg_param->out_value_ = param->GetOutMaxValue(); + arg_param->keep_dims_ = param->GetKeepDims(); return reinterpret_cast(arg_param); } -OpParameter *PopulateArgMinParameter(const lite::Primitive *primitive) { - ArgMinMaxParameter *arg_param = new (std::nothrow) ArgMinMaxParameter(); +OpParameter *PopulateArgMinParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *arg_param = new (std::nothrow) ArgMinMaxParameter(); if (arg_param == nullptr) { MS_LOG(ERROR) << "new ArgMinMaxParameter failed."; return nullptr; } arg_param->op_parameter_.type_ = primitive->Type(); - auto param = primitive->Value()->value_as_ArgMin(); - arg_param->axis_ = param->axis(); - arg_param->topk_ = param->topK(); - arg_param->axis_type_ = param->axisType(); - arg_param->out_value_ = param->outMaxValue(); - arg_param->keep_dims_ = param->keepDims(); + auto param = dynamic_cast(primitive); + arg_param->axis_ = param->GetAxis(); + arg_param->topk_ = param->GetTopK(); + arg_param->axis_type_ = param->GetAxisType(); + arg_param->out_value_ = param->GetOutMaxValue(); + arg_param->keep_dims_ = 
param->GetKeepDims(); return reinterpret_cast(arg_param); } -OpParameter *PopulateCastParameter(const lite::Primitive *primitive) { - CastParameter *cast_param = new (std::nothrow) CastParameter(); +OpParameter *PopulateCastParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *cast_param = new (std::nothrow) CastParameter(); if (cast_param == nullptr) { MS_LOG(ERROR) << "new CastParameter failed."; return nullptr; } cast_param->op_parameter_.type_ = primitive->Type(); - auto param = primitive->Value()->value_as_Cast(); - cast_param->src_type_ = param->srcT(); - cast_param->dst_type_ = param->dstT(); + auto param = dynamic_cast(primitive); + cast_param->src_type_ = param->GetSrcT(); + cast_param->dst_type_ = param->GetDstT(); return reinterpret_cast(cast_param); } -OpParameter *PopulateLocalResponseNormParameter(const lite::Primitive *primitive) { - auto local_response_norm_attr = primitive->Value()->value_as_LocalResponseNormalization(); - LocalResponseNormParameter *lrn_param = new (std::nothrow) LocalResponseNormParameter(); +OpParameter *PopulateLocalResponseNormParameter(const mindspore::lite::PrimitiveC *primitive) { + auto local_response_norm_attr = dynamic_cast(primitive); + auto *lrn_param = new (std::nothrow) LocalResponseNormParameter(); if (lrn_param == nullptr) { MS_LOG(ERROR) << "new LocalResponseNormParameter failed."; return nullptr; } lrn_param->op_parameter_.type_ = primitive->Type(); - lrn_param->depth_radius_ = local_response_norm_attr->depth_radius(); - lrn_param->bias_ = local_response_norm_attr->bias(); - lrn_param->alpha_ = local_response_norm_attr->alpha(); - lrn_param->beta_ = local_response_norm_attr->beta(); + lrn_param->depth_radius_ = local_response_norm_attr->GetDepthRadius(); + lrn_param->bias_ = local_response_norm_attr->GetBias(); + lrn_param->alpha_ = local_response_norm_attr->GetAlpha(); + lrn_param->beta_ = local_response_norm_attr->GetBeta(); return reinterpret_cast(lrn_param); } -OpParameter 
*PopulateRangeParameter(const lite::Primitive *primitive) { - auto range_attr = primitive->Value()->value_as_Range(); - RangeParameter *range_param = new (std::nothrow) RangeParameter(); +OpParameter *PopulateRangeParameter(const mindspore::lite::PrimitiveC *primitive) { + auto range_attr = dynamic_cast(primitive); + auto *range_param = new (std::nothrow) RangeParameter(); if (range_param == nullptr) { MS_LOG(ERROR) << "new RangeParameter failed."; return nullptr; } range_param->op_parameter_.type_ = primitive->Type(); - range_param->start_ = range_attr->start(); - range_param->limit_ = range_attr->limit(); - range_param->delta_ = range_attr->delta(); - range_param->dType_ = range_attr->dType(); + range_param->start_ = range_attr->GetStart(); + range_param->limit_ = range_attr->GetLimit(); + range_param->delta_ = range_attr->GetDelta(); + range_param->dType_ = range_attr->GetDType(); return reinterpret_cast(range_param); } -OpParameter *PopulateConcatParameter(const lite::Primitive *primitive) { - ConcatParameter *concat_param = new (std::nothrow) ConcatParameter(); +OpParameter *PopulateConcatParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *concat_param = new (std::nothrow) ConcatParameter(); if (concat_param == nullptr) { MS_LOG(ERROR) << "new ConcatParameter failed."; return nullptr; } concat_param->op_parameter_.type_ = primitive->Type(); - auto param = primitive->Value()->value_as_Concat(); - concat_param->axis_ = param->axis(); + auto param = dynamic_cast(primitive); + concat_param->axis_ = param->GetAxis(); return reinterpret_cast(concat_param); } -OpParameter *PopulateTileParameter(const lite::Primitive *primitive) { - TileParameter *tile_param = new (std::nothrow) TileParameter(); +OpParameter *PopulateTileParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *tile_param = new (std::nothrow) TileParameter(); if (tile_param == nullptr) { MS_LOG(ERROR) << "new TileParameter failed."; return nullptr; } 
tile_param->op_parameter_.type_ = primitive->Type(); - auto param = primitive->Value()->value_as_Tile(); - auto multiples = param->multiples(); - tile_param->in_dim_ = multiples->size(); + auto param = dynamic_cast(primitive); + auto multiples = param->GetMultiples(); + tile_param->in_dim_ = multiples.size(); for (size_t i = 0; i < tile_param->in_dim_; ++i) { - tile_param->multiples_[i] = multiples->Get(i); + tile_param->multiples_[i] = multiples[i]; } return reinterpret_cast(tile_param); } -OpParameter *PopulateTopKParameter(const lite::Primitive *primitive) { - TopkParameter *topk_param = new (std::nothrow) TopkParameter(); +OpParameter *PopulateTopKParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *topk_param = new (std::nothrow) TopkParameter(); if (topk_param == nullptr) { MS_LOG(ERROR) << "new TopkParameter failed."; return nullptr; } topk_param->op_parameter_.type_ = primitive->Type(); - auto param = primitive->Value()->value_as_TopK(); - topk_param->k_ = param->k(); - topk_param->sorted_ = param->sorted(); + auto param = dynamic_cast(primitive); + topk_param->k_ = param->GetK(); + topk_param->sorted_ = param->GetSorted(); return reinterpret_cast(topk_param); } -OpParameter *PopulateNhwc2NchwParameter(const lite::Primitive *primitive) { - OpParameter *parameter = new (std::nothrow) OpParameter(); +OpParameter *PopulateNhwc2NchwParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *parameter = new (std::nothrow) OpParameter(); if (parameter == nullptr) { MS_LOG(ERROR) << "new Nhwc2NchwParameter failed."; return nullptr; @@ -767,8 +860,8 @@ OpParameter *PopulateNhwc2NchwParameter(const lite::Primitive *primitive) { return parameter; } -OpParameter *PopulateNchw2NhwcParameter(const lite::Primitive *primitive) { - OpParameter *parameter = new (std::nothrow) OpParameter(); +OpParameter *PopulateNchw2NhwcParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *parameter = new (std::nothrow) OpParameter(); if (parameter == 
nullptr) { MS_LOG(ERROR) << "new Nchw2NhwcParameter failed."; return nullptr; @@ -777,45 +870,45 @@ OpParameter *PopulateNchw2NhwcParameter(const lite::Primitive *primitive) { return parameter; } -OpParameter *PopulateTransposeParameter(const lite::Primitive *primitive) { - TransposeParameter *transpose_param = new (std::nothrow) TransposeParameter(); +OpParameter *PopulateTransposeParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *transpose_param = new (std::nothrow) TransposeParameter(); if (transpose_param == nullptr) { MS_LOG(ERROR) << "new TransposeParameter failed."; return nullptr; } - auto param = primitive->Value()->value_as_Transpose(); + auto param = dynamic_cast(primitive); transpose_param->op_parameter_.type_ = primitive->Type(); - auto perm_vector_ = param->perm(); + auto perm_vector_ = param->GetPerm(); int i = 0; - for (auto iter = perm_vector_->begin(); iter != perm_vector_->end(); iter++) { + for (auto iter = perm_vector_.begin(); iter != perm_vector_.end(); iter++) { transpose_param->perm_[i++] = *iter; } transpose_param->num_axes_ = i; - transpose_param->conjugate_ = param->conjugate(); + transpose_param->conjugate_ = param->GetConjugate(); return reinterpret_cast(transpose_param); } -OpParameter *PopulateSplitParameter(const lite::Primitive *primitive) { - SplitParameter *split_param = new (std::nothrow) SplitParameter(); +OpParameter *PopulateSplitParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *split_param = new (std::nothrow) SplitParameter(); if (split_param == nullptr) { MS_LOG(ERROR) << "new SplitParameter failed."; return nullptr; } - auto param = primitive->Value()->value_as_Split(); + auto param = dynamic_cast(primitive); split_param->op_parameter_.type_ = primitive->Type(); - split_param->num_split_ = param->numberSplit(); - auto split_sizes_vector_ = param->sizeSplits(); + split_param->num_split_ = param->GetNumberSplit(); + auto split_sizes_vector_ = param->GetSizeSplits(); int i = 0; - for (auto iter 
= split_sizes_vector_->begin(); iter != split_sizes_vector_->end(); iter++) { + for (auto iter = split_sizes_vector_.begin(); iter != split_sizes_vector_.end(); iter++) { split_param->split_sizes_[i++] = *iter; } - split_param->split_dim_ = param->splitDim(); - split_param->num_split_ = param->numberSplit(); + split_param->split_dim_ = param->GetSplitDim(); + split_param->num_split_ = param->GetNumberSplit(); return reinterpret_cast(split_param); } -OpParameter *PopulateSqueezeParameter(const lite::Primitive *primitive) { - SqueezeParameter *squeeze_param = new (std::nothrow) SqueezeParameter(); +OpParameter *PopulateSqueezeParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *squeeze_param = new (std::nothrow) SqueezeParameter(); if (squeeze_param == nullptr) { MS_LOG(ERROR) << "new SqueezeParameter failed."; return nullptr; @@ -824,53 +917,53 @@ OpParameter *PopulateSqueezeParameter(const lite::Primitive *primitive) { return reinterpret_cast(squeeze_param); } -OpParameter *PopulateScaleParameter(const lite::Primitive *primitive) { +OpParameter *PopulateScaleParameter(const mindspore::lite::PrimitiveC *primitive) { if (primitive == nullptr) { MS_LOG(ERROR) << "input primitive is nullptr"; return nullptr; } - ScaleParameter *scale_param = new (std::nothrow) ScaleParameter(); + auto *scale_param = new (std::nothrow) ScaleParameter(); if (scale_param == nullptr) { MS_LOG(ERROR) << "new ScaleParameter failed."; return nullptr; } scale_param->op_parameter_.type_ = primitive->Type(); - auto param = primitive->Value()->value_as_Scale(); + auto param = dynamic_cast(primitive); if (param == nullptr) { MS_LOG(ERROR) << "value_as_Scale return nullptr"; return nullptr; } - scale_param->axis_ = param->axis(); + scale_param->axis_ = param->GetAxis(); return reinterpret_cast(scale_param); } -OpParameter *PopulateGatherParameter(const lite::Primitive *primitive) { - auto gather_attr = primitive->Value()->value_as_Gather(); - GatherParameter *gather_param = new 
(std::nothrow) GatherParameter(); +OpParameter *PopulateGatherParameter(const mindspore::lite::PrimitiveC *primitive) { + auto gather_attr = dynamic_cast(primitive); + auto *gather_param = new (std::nothrow) GatherParameter(); if (gather_param == nullptr) { MS_LOG(ERROR) << "new GatherParameter failed."; return nullptr; } gather_param->op_parameter_.type_ = primitive->Type(); - gather_param->axis_ = gather_attr->axis(); - gather_param->batchDims_ = gather_attr->batchDims(); + gather_param->axis_ = gather_attr->GetAxis(); + gather_param->batchDims_ = gather_attr->GetBatchDims(); return reinterpret_cast(gather_param); } -OpParameter *PopulateGatherNdParameter(const lite::Primitive *primitive) { - GatherNdParameter *gather_nd_param = new (std::nothrow) GatherNdParameter(); +OpParameter *PopulateGatherNdParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *gather_nd_param = new (std::nothrow) GatherNdParameter(); if (gather_nd_param == nullptr) { MS_LOG(ERROR) << "new GatherNDParameter failed."; return nullptr; } gather_nd_param->op_parameter_.type_ = primitive->Type(); - auto gatherNd_attr = primitive->Value()->value_as_GatherNd(); - gather_nd_param->batchDims_ = gatherNd_attr->batchDims(); + auto gatherNd_attr = dynamic_cast(primitive); + gather_nd_param->batchDims_ = gatherNd_attr->GetBatchDims(); return reinterpret_cast(gather_nd_param); } -OpParameter *PopulateScatterNDParameter(const lite::Primitive *primitive) { - ScatterNDParameter *scatter_nd_param = new (std::nothrow) ScatterNDParameter(); +OpParameter *PopulateScatterNDParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *scatter_nd_param = new (std::nothrow) ScatterNDParameter(); if (scatter_nd_param == nullptr) { MS_LOG(ERROR) << "new ScatterNDParameter failed."; return nullptr; @@ -880,46 +973,46 @@ OpParameter *PopulateScatterNDParameter(const lite::Primitive *primitive) { return reinterpret_cast(scatter_nd_param); } -OpParameter *PopulateSliceParameter(const lite::Primitive 
*primitive) { - SliceParameter *slice_param = new (std::nothrow) SliceParameter(); +OpParameter *PopulateSliceParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *slice_param = new (std::nothrow) SliceParameter(); if (slice_param == nullptr) { MS_LOG(ERROR) << "new SliceParameter failed."; return nullptr; } - auto param = primitive->Value()->value_as_Slice(); + auto param = dynamic_cast(primitive); slice_param->op_parameter_.type_ = primitive->Type(); - auto param_begin = param->begin(); - auto param_size = param->size(); - if (param_begin->size() != param_size->size()) { + auto param_begin = param->GetBegin(); + auto param_size = param->GetSize(); + if (param_begin.size() != param_size.size()) { delete slice_param; return nullptr; } - slice_param->param_length_ = static_cast(param_begin->size()); + slice_param->param_length_ = static_cast(param_begin.size()); for (int32_t i = 0; i < slice_param->param_length_; ++i) { - slice_param->begin_[i] = param_begin->Get(i); - slice_param->size_[i] = param_size->Get(i); + slice_param->begin_[i] = param_begin[i]; + slice_param->size_[i] = param_size[i]; } return reinterpret_cast(slice_param); } -OpParameter *PopulateBroadcastToParameter(const lite::Primitive *primitive) { - BroadcastToParameter *broadcast_param = new (std::nothrow) BroadcastToParameter(); +OpParameter *PopulateBroadcastToParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *broadcast_param = new (std::nothrow) BroadcastToParameter(); if (broadcast_param == nullptr) { MS_LOG(ERROR) << "new BroadcastToParameter failed."; return nullptr; } - auto param = primitive->Value()->value_as_BroadcastTo(); + auto param = dynamic_cast(primitive); broadcast_param->op_parameter_.type_ = primitive->Type(); - auto dst_shape = param->dst_shape(); - broadcast_param->shape_size_ = dst_shape->size(); + auto dst_shape = param->GetDstShape(); + broadcast_param->shape_size_ = dst_shape.size(); for (size_t i = 0; i < broadcast_param->shape_size_; ++i) { - 
broadcast_param->shape_[i] = dst_shape->Get(i); + broadcast_param->shape_[i] = dst_shape[i]; } return reinterpret_cast(broadcast_param); } -OpParameter *PopulateReshapeParameter(const lite::Primitive *primitive) { - ReshapeParameter *reshape_param = new (std::nothrow) ReshapeParameter(); +OpParameter *PopulateReshapeParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *reshape_param = new (std::nothrow) ReshapeParameter(); if (reshape_param == nullptr) { MS_LOG(ERROR) << "new ReshapeParameter failed."; return nullptr; @@ -928,8 +1021,8 @@ OpParameter *PopulateReshapeParameter(const lite::Primitive *primitive) { return reinterpret_cast(reshape_param); } -OpParameter *PopulateShapeParameter(const lite::Primitive *primitive) { - ShapeParameter *shape_param = new (std::nothrow) ShapeParameter(); +OpParameter *PopulateShapeParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *shape_param = new (std::nothrow) ShapeParameter(); if (shape_param == nullptr) { MS_LOG(ERROR) << "new ShapeParameter failed."; return nullptr; @@ -938,92 +1031,92 @@ OpParameter *PopulateShapeParameter(const lite::Primitive *primitive) { return reinterpret_cast(shape_param); } -OpParameter *PopulateConstantOfShapeParameter(const lite::Primitive *primitive) { - auto attr = primitive->Value()->value_as_ConstantOfShape(); +OpParameter *PopulateConstantOfShapeParameter(const mindspore::lite::PrimitiveC *primitive) { + auto attr = dynamic_cast(primitive); ConstantOfShapeParameter *param = new (std::nothrow) ConstantOfShapeParameter(); if (param == nullptr) { MS_LOG(ERROR) << "new ConstantOfShapeParameter failed."; return nullptr; } param->op_parameter_.type_ = primitive->Type(); - param->value_ = attr->value(); + param->value_ = attr->GetValue(); return reinterpret_cast(param); } -OpParameter *PopulateReverseParameter(const lite::Primitive *primitive) { - auto reverse_attr = primitive->Value()->value_as_Reverse(); +OpParameter *PopulateReverseParameter(const 
mindspore::lite::PrimitiveC *primitive) { + auto reverse_attr = dynamic_cast(primitive); ReverseParameter *reverse_param = new (std::nothrow) ReverseParameter(); if (reverse_param == nullptr) { MS_LOG(ERROR) << "new ReverseParameter failed."; return nullptr; } reverse_param->op_parameter_.type_ = primitive->Type(); - auto flatAxis = reverse_attr->axis(); - reverse_param->num_axis_ = flatAxis->size(); + auto flatAxis = reverse_attr->GetAxis(); + reverse_param->num_axis_ = flatAxis.size(); int i = 0; - for (auto iter = flatAxis->begin(); iter != flatAxis->end(); iter++) { + for (auto iter = flatAxis.begin(); iter != flatAxis.end(); iter++) { reverse_param->axis_[i++] = *iter; } return reinterpret_cast(reverse_param); } -OpParameter *PopulateUnsqueezeParameter(const lite::Primitive *primitive) { - auto unsqueeze_attr = primitive->Value()->value_as_Unsqueeze(); - UnsqueezeParameter *unsqueeze_param = new (std::nothrow) UnsqueezeParameter(); +OpParameter *PopulateUnsqueezeParameter(const mindspore::lite::PrimitiveC *primitive) { + auto unsqueeze_attr = dynamic_cast(primitive); + auto *unsqueeze_param = new (std::nothrow) UnsqueezeParameter(); if (unsqueeze_param == nullptr) { MS_LOG(ERROR) << "new ReverseParameter failed."; return nullptr; } unsqueeze_param->op_parameter_.type_ = primitive->Type(); - auto flatAxis = unsqueeze_attr->axis(); - unsqueeze_param->num_dim_ = flatAxis->size(); + auto flatAxis = unsqueeze_attr->GetAxis(); + unsqueeze_param->num_dim_ = flatAxis.size(); int i = 0; - for (auto iter = flatAxis->begin(); iter != flatAxis->end(); iter++) { + for (auto iter = flatAxis.begin(); iter != flatAxis.end(); iter++) { unsqueeze_param->dims_[i++] = *iter; } return reinterpret_cast(unsqueeze_param); } -OpParameter *PopulateStackParameter(const lite::Primitive *primitive) { - StackParameter *stack_param = new (std::nothrow) StackParameter(); +OpParameter *PopulateStackParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *stack_param = new 
(std::nothrow) StackParameter(); if (stack_param == nullptr) { MS_LOG(ERROR) << "new StackParameter failed."; return nullptr; } - auto param = primitive->Value()->value_as_Stack(); + auto param = dynamic_cast(primitive); stack_param->op_parameter_.type_ = primitive->Type(); - stack_param->axis_ = param->axis(); + stack_param->axis_ = param->GetAxis(); return reinterpret_cast(stack_param); } -OpParameter *PopulateUnstackParameter(const lite::Primitive *primitive) { - UnstackParameter *unstack_param = new (std::nothrow) UnstackParameter(); +OpParameter *PopulateUnstackParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *unstack_param = new (std::nothrow) UnstackParameter(); if (unstack_param == nullptr) { MS_LOG(ERROR) << "new UnstackParameter failed."; return nullptr; } - auto param = primitive->Value()->value_as_Unstack(); + auto param = dynamic_cast(primitive); unstack_param->op_parameter_.type_ = primitive->Type(); - unstack_param->num_ = param->num(); - unstack_param->axis_ = param->axis(); + unstack_param->num_ = param->GetNum(); + unstack_param->axis_ = param->GetAxis(); return reinterpret_cast(unstack_param); } -OpParameter *PopulateReverseSequenceParameter(const lite::Primitive *primitive) { - ReverseSequenceParameter *reverse_sequence_param = new (std::nothrow) ReverseSequenceParameter(); +OpParameter *PopulateReverseSequenceParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *reverse_sequence_param = new (std::nothrow) ReverseSequenceParameter(); if (reverse_sequence_param == nullptr) { MS_LOG(ERROR) << "new ReverseSequenceParameter failed."; return nullptr; } - auto param = primitive->Value()->value_as_ReverseSequence(); + auto param = dynamic_cast(primitive); reverse_sequence_param->op_parameter_.type_ = primitive->Type(); - reverse_sequence_param->seq_axis_ = param->seqAxis(); - reverse_sequence_param->batch_axis_ = param->batchAxis(); + reverse_sequence_param->seq_axis_ = param->GetSeqAxis(); + 
reverse_sequence_param->batch_axis_ = param->GetBatchAxis(); return reinterpret_cast(reverse_sequence_param); } -OpParameter *PopulateUniqueParameter(const lite::Primitive *primitive) { - UniqueParameter *unique_param = new (std::nothrow) UniqueParameter(); +OpParameter *PopulateUniqueParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *unique_param = new (std::nothrow) UniqueParameter(); if (unique_param == nullptr) { MS_LOG(ERROR) << "new PopulateUniqueParam failed."; return nullptr; @@ -1032,140 +1125,140 @@ OpParameter *PopulateUniqueParameter(const lite::Primitive *primitive) { return reinterpret_cast(unique_param); } -OpParameter *PopulateDepthToSpaceParameter(const lite::Primitive *primitive) { - DepthToSpaceParameter *depth_space_param = new (std::nothrow) DepthToSpaceParameter(); +OpParameter *PopulateDepthToSpaceParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *depth_space_param = new (std::nothrow) DepthToSpaceParameter(); if (depth_space_param == nullptr) { MS_LOG(ERROR) << "new DepthToSpaceParameter failed."; return nullptr; } - auto param = primitive->Value()->value_as_DepthToSpace(); + auto param = dynamic_cast(primitive); depth_space_param->op_parameter_.type_ = primitive->Type(); - depth_space_param->block_size_ = param->blockSize(); + depth_space_param->block_size_ = param->GetBlockSize(); return reinterpret_cast(depth_space_param); } -OpParameter *PopulateSpaceToDepthParameter(const lite::Primitive *primitive) { - SpaceToDepthParameter *space_depth_param = new (std::nothrow) SpaceToDepthParameter(); +OpParameter *PopulateSpaceToDepthParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *space_depth_param = new (std::nothrow) SpaceToDepthParameter(); if (space_depth_param == nullptr) { MS_LOG(ERROR) << "new SpaceToDepthspace_depth_param failed."; return nullptr; } space_depth_param->op_parameter_.type_ = primitive->Type(); - auto param = primitive->Value()->value_as_SpaceToDepth(); + auto param = 
dynamic_cast(primitive); space_depth_param->op_parameter_.type_ = primitive->Type(); - space_depth_param->block_size_ = param->blockSize(); - if (param->format() != schema::Format_NHWC) { + space_depth_param->block_size_ = param->GetBlockSize(); + if (param->GetFormat() != schema::Format_NHWC) { MS_LOG(ERROR) << "Currently only NHWC format is supported."; return nullptr; } return reinterpret_cast(space_depth_param); } -OpParameter *PopulateSpaceToBatchParameter(const lite::Primitive *primitive) { - SpaceToBatchParameter *space_batch_param = new (std::nothrow) SpaceToBatchParameter(); +OpParameter *PopulateSpaceToBatchParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *space_batch_param = new (std::nothrow) SpaceToBatchParameter(); if (space_batch_param == nullptr) { MS_LOG(ERROR) << "new SpaceToBatchParameter failed."; return nullptr; } space_batch_param->op_parameter_.type_ = primitive->Type(); space_batch_param->op_parameter_.type_ = primitive->Type(); - auto block_sizes = ((lite::SpaceToBatch *)primitive)->BlockSizes(); + auto block_sizes = ((mindspore::lite::SpaceToBatch *)primitive)->BlockSizes(); (void)memcpy(space_batch_param->block_sizes_, (block_sizes.data()), block_sizes.size() * sizeof(int)); - auto paddings = ((lite::SpaceToBatch *)primitive)->Paddings(); + auto paddings = ((mindspore::lite::SpaceToBatch *)primitive)->Paddings(); (void)memcpy(space_batch_param->paddings_, (paddings.data()), paddings.size() * sizeof(int)); - auto in_shape = ((lite::SpaceToBatch *)primitive)->InShape(); + auto in_shape = ((mindspore::lite::SpaceToBatch *)primitive)->InShape(); (void)memcpy(space_batch_param->in_shape_, (in_shape.data()), in_shape.size() * sizeof(int)); - auto padded_in_shape = ((lite::SpaceToBatch *)primitive)->PaddedInShape(); + auto padded_in_shape = ((mindspore::lite::SpaceToBatch *)primitive)->PaddedInShape(); (void)memcpy(space_batch_param->padded_in_shape_, (padded_in_shape.data()), padded_in_shape.size() * sizeof(int)); return 
reinterpret_cast(space_batch_param); } -OpParameter *PopulateResizeParameter(const lite::Primitive *primitive) { - ResizeParameter *resize_param = new (std::nothrow) ResizeParameter(); +OpParameter *PopulateResizeParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *resize_param = new (std::nothrow) ResizeParameter(); if (resize_param == nullptr) { MS_LOG(ERROR) << "new ResizeParameter failed."; return nullptr; } resize_param->op_parameter_.type_ = primitive->Type(); - auto param = primitive->Value()->value_as_Resize(); - resize_param->method_ = static_cast(param->method()); - resize_param->new_height_ = param->newHeight(); - resize_param->new_width_ = param->newWidth(); - resize_param->align_corners_ = param->alignCorners(); - resize_param->preserve_aspect_ratio_ = param->preserveAspectRatio(); + auto param = dynamic_cast(primitive); + resize_param->method_ = static_cast(param->GetMethod()); + resize_param->new_height_ = param->GetNewHeight(); + resize_param->new_width_ = param->GetNewWidth(); + resize_param->align_corners_ = param->GetAlignCorners(); + resize_param->preserve_aspect_ratio_ = param->GetPreserveAspectRatio(); return reinterpret_cast(resize_param); } -OpParameter *PopulateBatchToSpaceParameter(const lite::Primitive *primitive) { - BatchToSpaceParameter *batch_space_param = new (std::nothrow) BatchToSpaceParameter(); +OpParameter *PopulateBatchToSpaceParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *batch_space_param = new (std::nothrow) BatchToSpaceParameter(); if (batch_space_param == nullptr) { MS_LOG(ERROR) << "New BatchToSpaceParameter fail!"; return nullptr; } batch_space_param->op_parameter_.type_ = primitive->Type(); - auto param = primitive->Value()->value_as_BatchToSpace(); - auto block_shape = param->blockShape(); - if (block_shape->size() != BATCH_TO_SPACE_BLOCK_SHAPE_SIZE) { + auto param = dynamic_cast(primitive); + auto block_shape = param->GetBlockShape(); + if (block_shape.size() != 
BATCH_TO_SPACE_BLOCK_SHAPE_SIZE) { MS_LOG(ERROR) << "batch_to_space blockShape size should be " << BATCH_TO_SPACE_BLOCK_SHAPE_SIZE; return nullptr; } - auto crops = param->crops(); - if (crops->size() != BATCH_TO_SPACE_CROPS_SIZE) { + auto crops = param->GetCrops(); + if (crops.size() != BATCH_TO_SPACE_CROPS_SIZE) { MS_LOG(ERROR) << "batch_to_space crops size should be " << BATCH_TO_SPACE_CROPS_SIZE; return nullptr; } for (int i = 0; i < BATCH_TO_SPACE_BLOCK_SHAPE_SIZE; ++i) { - batch_space_param->block_shape_[i] = block_shape->Get(i); + batch_space_param->block_shape_[i] = block_shape[i]; } for (int i = 0; i < BATCH_TO_SPACE_CROPS_SIZE; ++i) { - batch_space_param->crops_[i] = crops->Get(i); + batch_space_param->crops_[i] = crops[i]; } return reinterpret_cast(batch_space_param); } -OpParameter *PopulateCropParameter(const lite::Primitive *primitive) { - auto param = primitive->Value()->value_as_Crop(); - auto param_offset = param->offsets(); - if (param_offset->size() > CROP_OFFSET_MAX_SIZE) { - MS_LOG(ERROR) << "crop_param offset size(" << param_offset->size() << ") should <= " << CROP_OFFSET_MAX_SIZE; +OpParameter *PopulateCropParameter(const mindspore::lite::PrimitiveC *primitive) { + auto param = dynamic_cast(primitive); + auto param_offset = param->GetOffsets(); + if (param_offset.size() > CROP_OFFSET_MAX_SIZE) { + MS_LOG(ERROR) << "crop_param offset size(" << param_offset.size() << ") should <= " << CROP_OFFSET_MAX_SIZE; return nullptr; } - CropParameter *crop_param = new (std::nothrow) CropParameter(); + auto *crop_param = new (std::nothrow) CropParameter(); if (crop_param == nullptr) { MS_LOG(ERROR) << "new CropParameter fail!"; return nullptr; } crop_param->op_parameter_.type_ = primitive->Type(); - crop_param->axis_ = param->axis(); - crop_param->offset_size_ = param_offset->size(); - for (int i = 0; i < param_offset->size(); ++i) { - crop_param->offset_[i] = param_offset->Get(i); + crop_param->axis_ = param->GetAxis(); + crop_param->offset_size_ = 
param_offset.size(); + for (int i = 0; i < param_offset.size(); ++i) { + crop_param->offset_[i] = param_offset[i]; } return reinterpret_cast(crop_param); } -OpParameter *PopulateOneHotParameter(const lite::Primitive *primitive) { - OneHotParameter *one_hot_param = new (std::nothrow) OneHotParameter(); +OpParameter *PopulateOneHotParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *one_hot_param = new (std::nothrow) OneHotParameter(); if (one_hot_param == nullptr) { MS_LOG(ERROR) << "new OneHotParameter fail!"; return nullptr; } one_hot_param->op_parameter_.type_ = primitive->Type(); - auto param = primitive->Value()->value_as_OneHot(); + auto param = dynamic_cast(primitive); if (param == nullptr) { delete (one_hot_param); MS_LOG(ERROR) << "get OneHot param nullptr."; return nullptr; } - one_hot_param->axis_ = param->axis(); + one_hot_param->axis_ = param->GetAxis(); return reinterpret_cast(one_hot_param); } -OpParameter *PopulateFlattenParameter(const lite::Primitive *primitive) { - FlattenParameter *flatten_param = new (std::nothrow) FlattenParameter(); +OpParameter *PopulateFlattenParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *flatten_param = new (std::nothrow) FlattenParameter(); if (flatten_param == nullptr) { MS_LOG(ERROR) << "new FlattenParameter fail!"; return nullptr; @@ -1174,21 +1267,21 @@ OpParameter *PopulateFlattenParameter(const lite::Primitive *primitive) { return reinterpret_cast(flatten_param); } -OpParameter *PopulateQuantDTypeCastParameter(const lite::Primitive *primitive) { - QuantDTypeCastParameter *parameter = new (std::nothrow) QuantDTypeCastParameter(); +OpParameter *PopulateQuantDTypeCastParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *parameter = new (std::nothrow) QuantDTypeCastParameter(); if (parameter == nullptr) { MS_LOG(ERROR) << "new QuantDTypeCastParameter fail!"; return nullptr; } parameter->op_parameter_.type_ = primitive->Type(); - auto quant_dtype_cast_param = 
primitive->Value()->value_as_QuantDTypeCast(); - parameter->srcT = quant_dtype_cast_param->srcT(); - parameter->dstT = quant_dtype_cast_param->dstT(); + auto quant_dtype_cast_param = dynamic_cast(primitive); + parameter->srcT = quant_dtype_cast_param->GetSrcT(); + parameter->dstT = quant_dtype_cast_param->GetDstT(); return reinterpret_cast(parameter); } -OpParameter *PopulateStridedSliceParameter(const lite::Primitive *primitive) { - StridedSliceParameter *strided_slice_param = new (std::nothrow) StridedSliceParameter(); +OpParameter *PopulateStridedSliceParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *strided_slice_param = new (std::nothrow) StridedSliceParameter(); if (strided_slice_param == nullptr) { MS_LOG(ERROR) << "new StridedSliceParameter failed."; return nullptr; @@ -1207,7 +1300,7 @@ OpParameter *PopulateStridedSliceParameter(const lite::Primitive *primitive) { return reinterpret_cast(strided_slice_param); } -OpParameter *PopulateAddNParameter(const lite::Primitive *primitive) { +OpParameter *PopulateAddNParameter(const mindspore::lite::PrimitiveC *primitive) { auto addn_param = new (std::nothrow) OpParameter(); if (addn_param == nullptr) { MS_LOG(ERROR) << "new OpParameter fail!"; @@ -1217,86 +1310,86 @@ OpParameter *PopulateAddNParameter(const lite::Primitive *primitive) { return reinterpret_cast(addn_param); } -OpParameter *PopulatePriorBoxParameter(const lite::Primitive *primitive) { - PriorBoxParameter *prior_box_param = new (std::nothrow) PriorBoxParameter(); +OpParameter *PopulatePriorBoxParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *prior_box_param = new (std::nothrow) PriorBoxParameter(); if (prior_box_param == nullptr) { MS_LOG(ERROR) << "new PriorBoxParameter failed."; return nullptr; } prior_box_param->op_parameter_.type_ = primitive->Type(); - auto prior_box_attr = primitive->Value()->value_as_PriorBox(); + auto prior_box_attr = dynamic_cast(primitive); - if (prior_box_attr->min_sizes()->size() > 
PRIOR_BOX_MAX_NUM) { + if (prior_box_attr->GetMinSizes().size() > PRIOR_BOX_MAX_NUM) { MS_LOG(ERROR) << "PriorBox min_sizes size exceeds max num " << PRIOR_BOX_MAX_NUM << ", got " - << prior_box_attr->min_sizes(); + << prior_box_attr->GetMinSizes(); delete (prior_box_param); return nullptr; } - prior_box_param->min_sizes_size = prior_box_attr->min_sizes()->size(); - if (prior_box_attr->max_sizes()->size() > PRIOR_BOX_MAX_NUM) { + prior_box_param->min_sizes_size = prior_box_attr->GetMinSizes().size(); + if (prior_box_attr->GetMaxSizes().size() > PRIOR_BOX_MAX_NUM) { MS_LOG(ERROR) << "PriorBox max_sizes size exceeds max num " << PRIOR_BOX_MAX_NUM << ", got " - << prior_box_attr->max_sizes(); + << prior_box_attr->GetMaxSizes(); delete (prior_box_param); return nullptr; } - prior_box_param->max_sizes_size = prior_box_attr->max_sizes()->size(); - (void)memcpy(prior_box_param->max_sizes, prior_box_attr->max_sizes()->data(), - prior_box_attr->max_sizes()->size() * sizeof(int32_t)); - (void)memcpy(prior_box_param->min_sizes, prior_box_attr->min_sizes()->data(), - prior_box_attr->min_sizes()->size() * sizeof(int32_t)); + prior_box_param->max_sizes_size = prior_box_attr->GetMaxSizes().size(); + (void)memcpy(prior_box_param->max_sizes, prior_box_attr->GetMaxSizes().data(), + prior_box_attr->GetMaxSizes().size() * sizeof(int32_t)); + (void)memcpy(prior_box_param->min_sizes, prior_box_attr->GetMinSizes().data(), + prior_box_attr->GetMinSizes().size() * sizeof(int32_t)); - if (prior_box_attr->aspect_ratios()->size() > PRIOR_BOX_MAX_NUM) { + if (prior_box_attr->GetAspectRatios().size() > PRIOR_BOX_MAX_NUM) { MS_LOG(ERROR) << "PriorBox aspect_ratios size exceeds max num " << PRIOR_BOX_MAX_NUM << ", got " - << prior_box_attr->aspect_ratios(); + << prior_box_attr->GetAspectRatios(); delete (prior_box_param); return nullptr; } - prior_box_param->aspect_ratios_size = prior_box_attr->aspect_ratios()->size(); - (void)memcpy(prior_box_param->aspect_ratios, 
prior_box_attr->aspect_ratios()->data(), - prior_box_attr->aspect_ratios()->size() * sizeof(float)); - if (prior_box_attr->variances()->size() != PRIOR_BOX_VAR_NUM) { + prior_box_param->aspect_ratios_size = prior_box_attr->GetAspectRatios().size(); + (void)memcpy(prior_box_param->aspect_ratios, prior_box_attr->GetAspectRatios().data(), + prior_box_attr->GetAspectRatios().size() * sizeof(float)); + if (prior_box_attr->GetVariances().size() != PRIOR_BOX_VAR_NUM) { MS_LOG(ERROR) << "PriorBox variances size should be " << PRIOR_BOX_VAR_NUM << ", got " - << prior_box_attr->variances()->size(); + << prior_box_attr->GetVariances().size(); delete (prior_box_param); return nullptr; } - (void)memcpy(prior_box_param->variances, prior_box_attr->variances()->data(), PRIOR_BOX_VAR_NUM * sizeof(float)); - prior_box_param->flip = prior_box_attr->flip(); - prior_box_param->clip = prior_box_attr->clip(); - prior_box_param->offset = prior_box_attr->offset(); - prior_box_param->image_size_h = prior_box_attr->image_size_h(); - prior_box_param->image_size_w = prior_box_attr->image_size_w(); - prior_box_param->step_h = prior_box_attr->step_h(); - prior_box_param->step_w = prior_box_attr->step_w(); + (void)memcpy(prior_box_param->variances, prior_box_attr->GetVariances().data(), PRIOR_BOX_VAR_NUM * sizeof(float)); + prior_box_param->flip = prior_box_attr->GetFlip(); + prior_box_param->clip = prior_box_attr->GetClip(); + prior_box_param->offset = prior_box_attr->GetOffset(); + prior_box_param->image_size_h = prior_box_attr->GetImageSizeH(); + prior_box_param->image_size_w = prior_box_attr->GetImageSizeW(); + prior_box_param->step_h = prior_box_attr->GetStepH(); + prior_box_param->step_w = prior_box_attr->GetStepW(); return reinterpret_cast(prior_box_param); } -OpParameter *PopulateLstmParameter(const lite::Primitive *primitive) { - LstmParameter *lstm_param = new (std::nothrow) LstmParameter(); +OpParameter *PopulateLstmParameter(const mindspore::lite::PrimitiveC *primitive) { + auto 
*lstm_param = new (std::nothrow) LstmParameter(); if (lstm_param == nullptr) { MS_LOG(ERROR) << "new LstmParameter fail!"; return nullptr; } lstm_param->op_parameter_.type_ = primitive->Type(); - auto param = primitive->Value()->value_as_Lstm(); + auto param = dynamic_cast(primitive); if (param == nullptr) { delete (lstm_param); MS_LOG(ERROR) << "get Lstm param nullptr."; return nullptr; } - lstm_param->bidirectional_ = param->bidirection(); + lstm_param->bidirectional_ = param->GetBidirection(); return reinterpret_cast(lstm_param); } -OpParameter *PopulateEmbeddingLookupParameter(const lite::Primitive *primitive) { - EmbeddingLookupParameter *embedding_lookup_parameter = new (std::nothrow) EmbeddingLookupParameter(); +OpParameter *PopulateEmbeddingLookupParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *embedding_lookup_parameter = new (std::nothrow) EmbeddingLookupParameter(); if (embedding_lookup_parameter == nullptr) { MS_LOG(ERROR) << "new EmbeddingLookupParameter failed"; return nullptr; } embedding_lookup_parameter->op_parameter_.type_ = primitive->Type(); - auto param = primitive->Value()->value_as_EmbeddingLookup(); - embedding_lookup_parameter->max_norm_ = param->maxNorm(); + auto param = dynamic_cast(primitive); + embedding_lookup_parameter->max_norm_ = param->GetMaxNorm(); if (embedding_lookup_parameter->max_norm_ < 0) { MS_LOG(ERROR) << "Embedding lookup max norm should be positive number, got " << embedding_lookup_parameter->max_norm_; @@ -1305,8 +1398,8 @@ OpParameter *PopulateEmbeddingLookupParameter(const lite::Primitive *primitive) return reinterpret_cast(embedding_lookup_parameter); } -OpParameter *PopulateBiasAddParameter(const lite::Primitive *primitive) { - ArithmeticParameter *arithmetic_param = new (std::nothrow) ArithmeticParameter(); +OpParameter *PopulateBiasAddParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *arithmetic_param = new (std::nothrow) ArithmeticParameter(); if (arithmetic_param == nullptr) { 
MS_LOG(ERROR) << "new Bias Add Parameter failed"; return nullptr; @@ -1316,15 +1409,15 @@ OpParameter *PopulateBiasAddParameter(const lite::Primitive *primitive) { return reinterpret_cast(arithmetic_param); } -OpParameter *PopulateEluParameter(const lite::Primitive *primitive) { - EluParameter *elu_parameter = new (std::nothrow) EluParameter(); +OpParameter *PopulateEluParameter(const mindspore::lite::PrimitiveC *primitive) { + auto *elu_parameter = new (std::nothrow) EluParameter(); if (elu_parameter == nullptr) { MS_LOG(ERROR) << "new EluParameter failed"; return nullptr; } elu_parameter->op_parameter_.type_ = primitive->Type(); - auto param = primitive->Value()->value_as_Elu(); - elu_parameter->alpha_ = param->alpha(); + auto param = dynamic_cast(primitive); + elu_parameter->alpha_ = param->GetAlpha(); return reinterpret_cast(elu_parameter); } @@ -1430,11 +1523,11 @@ PopulateParameterRegistry *PopulateParameterRegistry::GetInstance() { return &populate_parameter_instance; } -PopulateParameterFunc PopulateParameterRegistry::GetParameterFunc(const schema::PrimitiveType &type) { - return populate_parameter_funcs_[type]; +PopulateParameterFunc PopulateParameterRegistry::GetParameterFunc(int type) { + return populate_parameter_funcs_[schema::PrimitiveType(type)]; } -OpParameter *PopulateParameter(const lite::Primitive *primitive) { +OpParameter *PopulateParameter(const mindspore::lite::PrimitiveC *primitive) { if (primitive == nullptr) { MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; return nullptr; diff --git a/mindspore/lite/src/populate_parameter.h b/mindspore/lite/src/populate_parameter.h index 4bfde44d8d..2e27d65978 100644 --- a/mindspore/lite/src/populate_parameter.h +++ b/mindspore/lite/src/populate_parameter.h @@ -18,11 +18,12 @@ #define MINDSPORE_LITE_SRC_POPULATE_PARAMETER_H_ #include "schema/model_generated.h" -#include "src/ops/ops.h" + #include "src/runtime/kernel/arm/nnacl/op_base.h" +#include "src/ops/primitive_c.h" 
namespace mindspore::kernel { -typedef OpParameter *(*PopulateParameterFunc)(const lite::Primitive *); +typedef OpParameter *(*PopulateParameterFunc)(const mindspore::lite::PrimitiveC *); class PopulateParameterRegistry { public: @@ -30,12 +31,12 @@ class PopulateParameterRegistry { ~PopulateParameterRegistry() = default; static PopulateParameterRegistry *GetInstance(); - PopulateParameterFunc GetParameterFunc(const schema::PrimitiveType &type); + PopulateParameterFunc GetParameterFunc(int type); protected: PopulateParameterFunc populate_parameter_funcs_[schema::PrimitiveType_MAX + 1]; }; -OpParameter *PopulateParameter(const lite::Primitive *primitive); +OpParameter *PopulateParameter(const mindspore::lite::PrimitiveC *primitive); } // namespace mindspore::kernel #endif // MINDSPORE_LITE_SRC_POPULATE_PARAMETER_H_ diff --git a/mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.cc index 6fbf69ddbd..3e0c460505 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.cc @@ -108,7 +108,8 @@ void ArgMinMaxBaseCPUKernel::FreeTmpMemory() { kernel::LiteKernel *CpuArgMinMaxInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *op_parameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (op_parameter == nullptr) { MS_LOG(ERROR) << "Input op_parameter is nullptr!"; return nullptr; @@ -132,7 +133,8 @@ kernel::LiteKernel *CpuArgMinMaxInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *op_parameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (op_parameter == nullptr) { MS_LOG(ERROR) << 
"Input op_parameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.h b/mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.h index 9630fd4c79..8301f8401f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.h @@ -25,7 +25,7 @@ class ArgMinMaxBaseCPUKernel : public LiteKernel { public: ArgMinMaxBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), data_from_allocator_(false) {} virtual ~ArgMinMaxBaseCPUKernel() { FreeTmpMemory(); } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.cc index 56ca65067a..bad7c321dd 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.cc @@ -50,7 +50,8 @@ int BatchToSpaceBaseCPUKernel::ReSize() { kernel::LiteKernel *CpuBatchToSpaceInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *op_parameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(desc.type == schema::PrimitiveType_BatchToSpace); if (op_parameter == nullptr) { MS_LOG(ERROR) << "Input op_parameter is nullptr!"; @@ -75,7 +76,8 @@ kernel::LiteKernel *CpuBatchToSpaceInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *op_parameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(desc.type == 
schema::PrimitiveType_BatchToSpace); if (op_parameter == nullptr) { MS_LOG(ERROR) << "Input op_parameter is nullptr!"; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.h b/mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.h index 7a98ce6ff9..36a3ae5125 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.h @@ -26,7 +26,7 @@ class BatchToSpaceBaseCPUKernel : public LiteKernel { public: BatchToSpaceBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} virtual ~BatchToSpaceBaseCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/caffeprelu_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/caffeprelu_base.cc index dd4f9ed296..6ffc7803d8 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/caffeprelu_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/caffeprelu_base.cc @@ -31,7 +31,8 @@ int CaffePreluBaseCPUKernel::Init() { return RET_OK; } kernel::LiteKernel *CpuCaffePreluFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/caffeprelu_base.h b/mindspore/lite/src/runtime/kernel/arm/base/caffeprelu_base.h index a575e012e9..c47b10bbd1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/caffeprelu_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/caffeprelu_base.h @@ -29,7 +29,7 @@ class CaffePreluBaseCPUKernel : public 
LiteKernel { public: CaffePreluBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { prelu_param_ = reinterpret_cast(parameter); } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/concat_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/concat_base.cc index 6c00261268..4e63b094c3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/concat_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/concat_base.cc @@ -39,7 +39,8 @@ int ConcatBaseCPUKernel::ReSize() { kernel::LiteKernel *CpuConcatInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; @@ -63,7 +64,8 @@ kernel::LiteKernel *CpuConcatInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; @@ -87,7 +89,8 @@ kernel::LiteKernel *CpuConcatInt32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; @@ -108,7 +111,6 @@ kernel::LiteKernel 
*CpuConcatFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { concat_param_ = reinterpret_cast(op_parameter_); } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.h b/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.h index c4adf97885..72969ef360 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.h @@ -39,7 +39,7 @@ class ConvolutionBaseCPUKernel : public LiteKernel { public: ConvolutionBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { op_parameter_->thread_num_ = ctx->thread_num_; conv_param_ = reinterpret_cast(op_parameter_); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/crop_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/crop_base.cc index 0fc913bcfa..2322083c11 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/crop_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/crop_base.cc @@ -33,7 +33,8 @@ int CropBaseCPUKernel::Init() { return RET_OK; } kernel::LiteKernel *CpuCropInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; @@ -57,7 +58,8 @@ kernel::LiteKernel *CpuCropInt8KernelCreator(const std::vector &inputs, const std::vector 
&outputs, OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; @@ -81,7 +83,8 @@ kernel::LiteKernel *CpuCropInt32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/crop_base.h b/mindspore/lite/src/runtime/kernel/arm/base/crop_base.h index 82344dcc43..7168bd78ea 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/crop_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/crop_base.h @@ -28,7 +28,7 @@ class CropBaseCPUKernel : public LiteKernel { public: CropBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {} ~CropBaseCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.cc index 3289b11a95..4c84f3414e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.cc @@ -64,7 +64,8 @@ int DepthToSpaceBaseCPUKernel::ReSize() { kernel::LiteKernel *CpuDepthToSpaceInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *op_parameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const 
lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(desc.type == schema::PrimitiveType_DepthToSpace); if (op_parameter == nullptr) { MS_LOG(ERROR) << "Input op_parameter is nullptr!"; @@ -89,7 +90,8 @@ kernel::LiteKernel *CpuDepthToSpaceInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *op_parameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(desc.type == schema::PrimitiveType_DepthToSpace); if (op_parameter == nullptr) { MS_LOG(ERROR) << "Input op_parameter is nullptr!"; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.h b/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.h index ffc666dda3..86c4b215d4 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.h @@ -26,7 +26,7 @@ class DepthToSpaceBaseCPUKernel : public LiteKernel { public: DepthToSpaceBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} virtual ~DepthToSpaceBaseCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.cc index 41c13d0e0a..b33a03dceb 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.cc @@ -36,7 +36,7 @@ kernel::LiteKernel *CpuFullConnectionInt8KernelCreator(const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, const kernel::KernelKey &desc, - const lite::Primitive 
*primitive) { + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Concat); auto kernel = new (std::nothrow) FullconnectionInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive); @@ -58,7 +58,7 @@ kernel::LiteKernel *CpuFullConnectionFp32KernelCreator(const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, const kernel::KernelKey &desc, - const lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Concat); auto kernel = new (std::nothrow) FullconnectionCPUKernel(opParameter, inputs, outputs, ctx, primitive); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.h b/mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.h index e08206c1a9..1107f4c2e9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.h @@ -29,7 +29,7 @@ class FullconnectionBaseCPUKernel : public LiteKernel { public: FullconnectionBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { fc_param_ = reinterpret_cast(op_parameter_); } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/matmul_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/matmul_base.cc index e7f4b7c0be..4d73c7c658 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/matmul_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/matmul_base.cc @@ -29,7 +29,7 @@ namespace mindspore::kernel { kernel::LiteKernel *CpuMatmulKernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, const 
kernel::KernelKey &desc, - const lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Concat); auto input_tensor = inputs.at(kInputIndex); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/matmul_base.h b/mindspore/lite/src/runtime/kernel/arm/base/matmul_base.h index 5d089a4216..96cc639a13 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/matmul_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/matmul_base.h @@ -29,7 +29,7 @@ class MatmulBaseCPUKernel : public LiteKernel { public: MatmulBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { params_ = reinterpret_cast(op_parameter_); } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/pad.cc b/mindspore/lite/src/runtime/kernel/arm/base/pad.cc index 4a4933c95e..b1971db1db 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/pad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/pad.cc @@ -31,7 +31,8 @@ namespace mindspore::kernel { kernel::LiteKernel *CpuPadInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Pad); auto *kernel = new (std::nothrow) PadInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive); @@ -52,7 +53,8 @@ kernel::LiteKernel *CpuPadInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + 
const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Pad); auto *kernel = new (std::nothrow) PadCPUKernel(opParameter, inputs, outputs, ctx, primitive); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.cc index c59f5b9f22..c16eea8dc4 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.cc @@ -77,8 +77,8 @@ int PoolingBaseCPUKernel::ReSize() { pooling_param_->output_h_ = out_tensor->Height(); pooling_param_->output_w_ = out_tensor->Width(); if (pooling_param_->global_) { - pooling_param_->window_h_ = pooling_param_->input_h_; - pooling_param_->window_w_ = pooling_param_->input_w_; + pooling_param_->window_h_ = pooling_param_->input_h_; + pooling_param_->window_w_ = pooling_param_->input_w_; } return RET_OK; } @@ -86,7 +86,8 @@ int PoolingBaseCPUKernel::ReSize() { kernel::LiteKernel *CpuPoolingInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; @@ -110,7 +111,8 @@ kernel::LiteKernel *CpuPoolingInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.h b/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.h index 
afcb5bd91a..9c982cda39 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.h @@ -30,7 +30,7 @@ class PoolingBaseCPUKernel : public LiteKernel { public: PoolingBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { pooling_param_ = reinterpret_cast(op_parameter_); } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/prelu_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/prelu_base.cc index 4b232f6f3b..f9c2b40a5c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/prelu_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/prelu_base.cc @@ -32,7 +32,8 @@ int PreluBaseCPUKernel::Init() { return RET_OK; } kernel::LiteKernel *CpuPreluInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/prelu_base.h b/mindspore/lite/src/runtime/kernel/arm/base/prelu_base.h index 817219707d..6c85e6cf5e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/prelu_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/prelu_base.h @@ -29,7 +29,7 @@ class PreluBaseCPUKernel : public LiteKernel { public: PreluBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx) { prelu_param_ = 
reinterpret_cast(op_parameter_); } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/prior_box.cc b/mindspore/lite/src/runtime/kernel/arm/base/prior_box.cc index bd733a58b5..5329feae5a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/prior_box.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/prior_box.cc @@ -169,7 +169,8 @@ int PriorBoxCPUKernel::Run() { kernel::LiteKernel *CpuPriorBoxKernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/prior_box.h b/mindspore/lite/src/runtime/kernel/arm/base/prior_box.h index 4685090c56..54a04cc4ea 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/prior_box.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/prior_box.h @@ -29,7 +29,7 @@ class PriorBoxCPUKernel : public LiteKernel { public: PriorBoxCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { prior_box_param_ = reinterpret_cast(op_parameter_); } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.cc b/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.cc index a7d587c542..cb0c97d0f7 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.cc @@ -132,7 +132,7 @@ kernel::LiteKernel *CpuQuantDTypeCastFp32KernelCreator(const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, const kernel::KernelKey &desc, - const 
lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.h b/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.h index e7ca5edfc0..73510c6825 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.h @@ -25,7 +25,7 @@ class QuantDTypeCastCPUKernel : public LiteKernel { public: QuantDTypeCastCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_num_(ctx->thread_num_) {} ~QuantDTypeCastCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/reduce_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/reduce_base.cc index 3e330d80c4..8277d3de58 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/reduce_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/reduce_base.cc @@ -109,7 +109,8 @@ int ReduceBaseCPUKernel::Init() { kernel::LiteKernel *CpuReduceFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Reduce); if (opParameter == nullptr) { @@ -138,7 +139,8 @@ kernel::LiteKernel *CpuReduceFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC 
*primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Mean); if (opParameter == nullptr) { @@ -167,7 +169,8 @@ kernel::LiteKernel *CpuMeanFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Reduce); if (opParameter == nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/arm/base/reduce_base.h b/mindspore/lite/src/runtime/kernel/arm/base/reduce_base.h index 3410dfff0f..47dc708a1b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/reduce_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/reduce_base.h @@ -27,7 +27,7 @@ class ReduceBaseCPUKernel : public LiteKernel { public: ReduceBaseCPUKernel(OpParameter *param, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(param, inputs, outputs, ctx, primitive) {} virtual ~ReduceBaseCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.cc index 6553aa1a6c..8232f34149 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.cc @@ -33,7 +33,8 @@ int ReshapeBaseCPUKernel::Init() { return RET_OK; } kernel::LiteKernel *CpuReshapeInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return 
nullptr; @@ -57,7 +58,8 @@ kernel::LiteKernel *CpuReshapeInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; @@ -81,7 +83,8 @@ kernel::LiteKernel *CpuReshapeInt32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.h b/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.h index db5e536ada..9ee70203da 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.h @@ -28,7 +28,7 @@ class ReshapeBaseCPUKernel : public LiteKernel { public: ReshapeBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx) { reshape_param_ = reinterpret_cast(op_parameter_); } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/resize_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/resize_base.cc index 8ae036b326..3f8992caa0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/resize_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/resize_base.cc @@ -107,7 +107,8 @@ int ResizeBaseCPUKernel::Init() { kernel::LiteKernel *CpuResizeFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter 
*opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; @@ -132,7 +133,8 @@ kernel::LiteKernel *CpuResizeFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/resize_base.h b/mindspore/lite/src/runtime/kernel/arm/base/resize_base.h index 85a3537ba4..bf9662cb72 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/resize_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/resize_base.h @@ -28,7 +28,7 @@ class ResizeBaseCPUKernel : public LiteKernel { public: ResizeBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), context_(ctx) {} virtual ~ResizeBaseCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/slice_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/slice_base.cc index 02c032c6df..cc43cf68ca 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/slice_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/slice_base.cc @@ -64,7 +64,8 @@ int SliceBaseCPUKernel::ReSize() { kernel::LiteKernel *CpuSliceInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, 
+ const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; @@ -88,7 +89,8 @@ kernel::LiteKernel *CpuSliceInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/slice_base.h b/mindspore/lite/src/runtime/kernel/arm/base/slice_base.h index a617bc3329..a266946bdb 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/slice_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/slice_base.h @@ -26,7 +26,7 @@ class SliceBaseCPUKernel : public LiteKernel { public: SliceBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) { param_ = reinterpret_cast(op_parameter_); } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.cc index a35be0765c..18946e0ee9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.cc @@ -56,7 +56,8 @@ int SoftmaxBaseCPUKernel::ReSize() { kernel::LiteKernel *CpuSoftmaxInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; @@ 
-80,7 +81,8 @@ kernel::LiteKernel *CpuSoftmaxInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.h b/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.h index a933e7a25a..9213b8e239 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.h @@ -26,7 +26,7 @@ class SoftmaxBaseCPUKernel : public LiteKernel { public: SoftmaxBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { softmax_param_ = reinterpret_cast(op_parameter_); } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/split_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/split_base.cc index b7786daeca..7abcfe010c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/split_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/split_base.cc @@ -63,7 +63,8 @@ int SplitBaseCPUKernel::ReSize() { kernel::LiteKernel *CpuSplitInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; @@ -87,7 +88,8 @@ kernel::LiteKernel *CpuSplitInt8KernelCreator(const std::vector &inputs, const 
std::vector &outputs, OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; @@ -111,7 +113,8 @@ kernel::LiteKernel *CpuSplitInt32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/split_base.h b/mindspore/lite/src/runtime/kernel/arm/base/split_base.h index d5ae60ddc2..c2ccd0e730 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/split_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/split_base.h @@ -28,7 +28,7 @@ class SplitBaseCPUKernel : public LiteKernel { public: SplitBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { param = reinterpret_cast(op_parameter_); } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/squeeze_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/squeeze_base.cc index 3d285787da..ff1d65ebfc 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/squeeze_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/squeeze_base.cc @@ -32,7 +32,8 @@ int SqueezeBaseCPUKernel::Init() { return RET_OK; } kernel::LiteKernel *CpuSqueezeInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const 
lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/squeeze_base.h b/mindspore/lite/src/runtime/kernel/arm/base/squeeze_base.h index bbea214792..0c00ed45b7 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/squeeze_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/squeeze_base.h @@ -29,7 +29,7 @@ class SqueezeBaseCPUKernel : public LiteKernel { public: SqueezeBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {} virtual ~SqueezeBaseCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.cc b/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.cc index 429b5a3f8b..46c2c6524c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.cc @@ -70,7 +70,8 @@ int StridedSliceCPUKernel::Run() { kernel::LiteKernel *CpuStridedSliceKernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(desc.type == schema::PrimitiveType_StridedSlice); if (opParameter == nullptr) { MS_LOG(ERROR) << "opParameter null pointer dereferencing."; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.h b/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.h index 891fd94b74..9e3d8d693f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.h +++ 
b/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.h @@ -26,7 +26,7 @@ class StridedSliceCPUKernel : public LiteKernel { public: StridedSliceCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~StridedSliceCPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.cc index 3263ebab04..e2d7a9d68a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.cc @@ -310,7 +310,8 @@ int ArithmeticFP16CPUKernel::Run() { kernel::LiteKernel *CpuArithmeticFp16KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *parameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (parameter == nullptr) { MS_LOG(ERROR) << "input parameter is null!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.h index 8a58bb0880..c978f24971 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.h @@ -31,7 +31,7 @@ class ArithmeticFP16CPUKernel : public LiteKernel { public: ArithmeticFP16CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) { arithmeticParameter_ = reinterpret_cast(parameter); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.cc 
b/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.cc index 04125aa12e..68ad986d6f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.cc @@ -97,7 +97,8 @@ int CastFp16CPUKernel::Run() { kernel::LiteKernel *CpuCastFp16KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.h index e3e1002890..f8a32d1965 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.h @@ -24,7 +24,7 @@ class CastFp16CPUKernel : public LiteKernel { public: CastFp16CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~CastFp16CPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.cc index d9e61256e5..84571dc24f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.cc @@ -143,7 +143,8 @@ int ConcatFp16CPUKernel::Run() { kernel::LiteKernel *CpuConcatFp16KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { 
MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.h index 598bf3e9cb..eaf87c7b24 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.h @@ -29,7 +29,7 @@ class ConcatFp16CPUKernel : public ConcatBaseCPUKernel { public: ConcatFp16CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ConcatBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~ConcatFp16CPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_1x1_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_1x1_fp16.h index 0f55393c5f..080994e210 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_1x1_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_1x1_fp16.h @@ -30,7 +30,7 @@ class Convolution1x1FP16CPUKernel : public ConvolutionBaseFP16CPUKernel { public: Convolution1x1FP16CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive) { matmul_param_ = new MatMulParameter(); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_3x3_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_3x3_fp16.h index 85f541c9f4..fcb89b45dc 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_3x3_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_3x3_fp16.h @@ -28,7 +28,7 @@ class Convolution3x3FP16CPUKernel : public ConvolutionBaseFP16CPUKernel { public: Convolution3x3FP16CPUKernel(OpParameter *parameter, const 
std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~Convolution3x3FP16CPUKernel() override { FreeTmpBuffer(); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_base_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_base_fp16.h index b109431384..f5af08f5d1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_base_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_base_fp16.h @@ -28,7 +28,7 @@ class ConvolutionBaseFP16CPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionBaseFP16CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~ConvolutionBaseFP16CPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc index 68a6f8ed45..cfe10e7c3c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc @@ -192,7 +192,8 @@ int ConvolutionDepthwiseFp16CPUKernel::Run() { kernel::LiteKernel *CpuConvDwFp16KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_DepthwiseConv2D); auto kernel = new (std::nothrow) ConvolutionDepthwiseFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive); diff 
--git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.h index 47334a1498..f325a30ece 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.h @@ -38,7 +38,7 @@ class ConvolutionDepthwiseFp16CPUKernel : public ConvolutionBaseFP16CPUKernel { public: ConvolutionDepthwiseFp16CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~ConvolutionDepthwiseFp16CPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc index 0c64b98adc..97bc671e0c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc @@ -217,7 +217,8 @@ int ConvolutionFP16CPUKernel::Run() { kernel::LiteKernel *CpuConvFp16KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Conv2D); auto conv_param = reinterpret_cast(opParameter); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.h index 05b78d9dc1..7ea1052b17 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.h @@ -27,7 +27,7 @@ class ConvolutionFP16CPUKernel : public 
ConvolutionBaseFP16CPUKernel { public: ConvolutionFP16CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~ConvolutionFP16CPUKernel() override { FreeTmpBuffer(); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_sw_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_sw_fp16.h index 3b4f0be7a9..fa59cf517c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_sw_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_sw_fp16.h @@ -26,7 +26,7 @@ class ConvolutionSWFP16CPUKernel : public ConvolutionBaseFP16CPUKernel { public: ConvolutionSWFP16CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~ConvolutionSWFP16CPUKernel() override { FreeTmpBuffer(); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_winograd_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_winograd_fp16.h index 77a725eefc..13fdf8de8f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_winograd_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_winograd_fp16.h @@ -31,7 +31,7 @@ class ConvolutionWinogradFP16CPUKernel : public ConvolutionBaseFP16CPUKernel { public: ConvolutionWinogradFP16CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive, int out_unit) + const mindspore::lite::PrimitiveC *primitive, int out_unit) : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive), output_unit_(out_unit) {} ~ConvolutionWinogradFP16CPUKernel() override { 
FreeTmpBuffer(); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc index 17f68ba03e..a2a394cb49 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc @@ -204,7 +204,8 @@ int DeconvolutionDepthwiseFp16CPUKernel::Run() { kernel::LiteKernel *CpuDeconvDwFp16KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_DeDepthwiseConv2D); auto kernel = new (std::nothrow) DeconvolutionDepthwiseFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.h index ae5c564960..cb7bc4b83d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.h @@ -38,7 +38,7 @@ class DeconvolutionDepthwiseFp16CPUKernel : public ConvolutionBaseFP16CPUKernel public: DeconvolutionDepthwiseFp16CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~DeconvolutionDepthwiseFp16CPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.cc index ff1f73466f..c8b94b38fd 100644 --- 
a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.cc @@ -186,7 +186,8 @@ int DeConvolutionFp16CPUKernel::Run() { kernel::LiteKernel *CpuDeConvFp16KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_DeConv2D); auto kernel = new (std::nothrow) DeConvolutionFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.h index b2778318ad..502b87a310 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.h @@ -34,7 +34,7 @@ class DeConvolutionFp16CPUKernel : public ConvolutionBaseFP16CPUKernel { public: DeConvolutionFp16CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive) { matmul_param_ = new MatMulParameter(); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/pooling_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/pooling_fp16.cc index deb6aff796..70fccd8cb3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/pooling_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/pooling_fp16.cc @@ -124,7 +124,8 @@ int PoolingFp16CPUKernel::Run() { kernel::LiteKernel *CpuPoolingFp16KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const 
lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/pooling_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/pooling_fp16.h index 7c78e0fee5..2424955aeb 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/pooling_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/pooling_fp16.h @@ -26,7 +26,7 @@ class PoolingFp16CPUKernel : public PoolingBaseCPUKernel { public: PoolingFp16CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : PoolingBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~PoolingFp16CPUKernel() override { if (fp16_input_ != nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc index ce2b517bca..64351b6d29 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc @@ -178,7 +178,8 @@ int ReduceFp16CPUKernel::MallocTmpBuffer() { kernel::LiteKernel *CpuReduceFp16KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Reduce); if (opParameter == nullptr) { @@ -207,7 +208,8 @@ kernel::LiteKernel *CpuReduceFp16KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const 
mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Mean); if (opParameter == nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.h index 92a144cd01..082fd6afff 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.h @@ -32,7 +32,7 @@ class ReduceFp16CPUKernel : public ReduceBaseCPUKernel { public: ReduceFp16CPUKernel(OpParameter *param, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ReduceBaseCPUKernel(param, inputs, outputs, ctx, primitive) {} ~ReduceFp16CPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/split_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/split_fp16.cc index 056a363e9a..6cc6137d91 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/split_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/split_fp16.cc @@ -118,7 +118,8 @@ int SplitFp16CPUKernel::Run() { kernel::LiteKernel *CpuSplitFp16KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/split_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/split_fp16.h index 93e960c9f8..9be678f747 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/split_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/split_fp16.h @@ -27,7 +27,7 @@ class SplitFp16CPUKernel : public SplitBaseCPUKernel { public: SplitFp16CPUKernel(OpParameter *parameter, const 
std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : SplitBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~SplitFp16CPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.cc index e5fc82ccb6..1532df884f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.cc @@ -183,7 +183,8 @@ int TransposeFp16CPUKernel::Run() { kernel::LiteKernel *CpuTransposeFp16KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(desc.type == schema::PrimitiveType_Transpose); if (opParameter == nullptr) { MS_LOG(ERROR) << "desc type is not Transpose"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.h index 49d3e2ee51..eab00dcc6a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.h @@ -28,7 +28,7 @@ class TransposeFp16CPUKernel : public LiteKernel { public: explicit TransposeFp16CPUKernel(OpParameter *param, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(param, inputs, outputs, ctx, primitive), thread_num_(ctx->thread_num_) {} ~TransposeFp16CPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/activation.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/activation.cc index eca2227763..41b76206a9 100644 --- 
a/mindspore/lite/src/runtime/kernel/arm/fp32/activation.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/activation.cc @@ -19,7 +19,6 @@ #include "src/kernel_registry.h" #include "src/runtime/runtime_api.h" #include "include/errorcode.h" -#include "src/ops/ops.h" using mindspore::kernel::KERNEL_ARCH::kCPU; using mindspore::lite::KernelRegistrar; @@ -95,7 +94,8 @@ int ActivationCPUKernel::Run() { kernel::LiteKernel *CpuActivationFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Activation); auto *kernel = new (std::nothrow) ActivationCPUKernel(opParameter, inputs, outputs, ctx, primitive); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/activation.h b/mindspore/lite/src/runtime/kernel/arm/fp32/activation.h index 3ecc6e9f62..8f995ce065 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/activation.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/activation.h @@ -26,7 +26,7 @@ class ActivationCPUKernel : public LiteKernel { public: ActivationCPUKernel(OpParameter *param, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(param, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) { type_ = (reinterpret_cast(param))->type_; alpha_ = (reinterpret_cast(param))->alpha_; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/addn.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/addn.cc index ce3b9a34b0..957e2227b9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/addn.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/addn.cc @@ -94,7 +94,8 @@ int AddNCPUKernel::Run() { kernel::LiteKernel 
*CpuAddNFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *op_parameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (op_parameter == nullptr) { MS_LOG(ERROR) << "Input op_parameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/addn.h b/mindspore/lite/src/runtime/kernel/arm/fp32/addn.h index 31a2fecfa0..51b25d4934 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/addn.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/addn.h @@ -26,7 +26,7 @@ class AddNCPUKernel : public LiteKernel { public: AddNCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~AddNCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/argminmax.h b/mindspore/lite/src/runtime/kernel/arm/fp32/argminmax.h index 611e28ea6c..be2e4a16a1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/argminmax.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/argminmax.h @@ -24,7 +24,7 @@ class ArgMinMaxCPUKernel : public ArgMinMaxBaseCPUKernel { public: ArgMinMaxCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ArgMinMaxBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~ArgMinMaxCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.cc index a5d0c10440..4ea008514b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.cc @@ -157,7 +157,8 @@ 
int ArithmeticCPUKernel::Run() { kernel::LiteKernel *CpuArithmeticFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *parameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(parameter != nullptr); auto kernel = new (std::nothrow) ArithmeticCPUKernel(parameter, inputs, outputs, ctx, primitive); if (kernel == nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.h b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.h index 2ee8fc1744..7bea43390b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.h @@ -51,7 +51,7 @@ class ArithmeticCPUKernel : public LiteKernel { public: ArithmeticCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) { arithmeticParameter_ = reinterpret_cast(parameter); switch (parameter->type_) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.cc index bee19095af..d2fb614130 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.cc @@ -92,7 +92,7 @@ kernel::LiteKernel *CpuArithmeticSelfFp32KernelCreator(const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, const kernel::KernelKey &desc, - const lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); if (opParameter == nullptr) { MS_LOG(ERROR) << "Creator failed, opParameter is nullptr!"; diff --git 
a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.h b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.h index 8e8b3ec81a..f9dafc1bf0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.h @@ -45,7 +45,7 @@ class ArithmeticSelfCPUKernel : public LiteKernel { public: explicit ArithmeticSelfCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { switch (parameter->type_) { case PrimitiveType_Abs: diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space.h b/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space.h index a8060726a6..938224ff5a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space.h @@ -23,7 +23,7 @@ class BatchToSpaceCPUKernel : public BatchToSpaceBaseCPUKernel { public: BatchToSpaceCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : BatchToSpaceBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~BatchToSpaceCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm.cc index 766a98de29..bdc7569f24 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm.cc @@ -125,7 +125,8 @@ int BatchnormCPUKernel::Run() { kernel::LiteKernel *CpuBatchnormKernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const 
lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_BatchNorm); auto *kernel = new (std::nothrow) BatchnormCPUKernel(opParameter, inputs, outputs, ctx, primitive); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm.h b/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm.h index 3cc451ba32..b20b9ed697 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm.h @@ -29,7 +29,7 @@ class BatchnormCPUKernel : public LiteKernel { public: BatchnormCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) { batchnorm_param_ = reinterpret_cast(parameter); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/bias.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/bias.cc index 34e456edc1..3a80e27d7f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/bias.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/bias.cc @@ -70,7 +70,7 @@ int BiasCPUKernel::Init() { kernel::LiteKernel *CpuBiasFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *parameter, const lite::Context *ctx, const kernel::KernelKey &desc, - const lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(parameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_BiasAdd); auto kernel = new (std::nothrow) BiasCPUKernel(parameter, inputs, outputs, ctx, primitive); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/bias.h b/mindspore/lite/src/runtime/kernel/arm/fp32/bias.h index 0282c668cb..97c39e684b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/bias.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/bias.h @@ 
-25,7 +25,7 @@ class BiasCPUKernel : public LiteKernel { public: BiasCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) { bias_param_ = reinterpret_cast(parameter); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/broadcast_to.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/broadcast_to.cc index d9a4af1479..afcac73026 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/broadcast_to.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/broadcast_to.cc @@ -63,7 +63,8 @@ int BroadcastToCPUKernel::Run() { kernel::LiteKernel *CpuBroadcastToFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *op_parameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (op_parameter == nullptr) { MS_LOG(ERROR) << "Input op_parameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/broadcast_to.h b/mindspore/lite/src/runtime/kernel/arm/fp32/broadcast_to.h index 9b813582ac..4f948d9d35 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/broadcast_to.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/broadcast_to.h @@ -26,7 +26,7 @@ class BroadcastToCPUKernel : public LiteKernel { public: BroadcastToCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~BroadcastToCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/caffeprelu.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/caffeprelu.cc index 15be696346..81639ab668 100644 --- 
a/mindspore/lite/src/runtime/kernel/arm/fp32/caffeprelu.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/caffeprelu.cc @@ -72,7 +72,8 @@ int CaffePReluCPUKernel::Run() { kernel::LiteKernel *CpuCaffePReluFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/caffeprelu.h b/mindspore/lite/src/runtime/kernel/arm/fp32/caffeprelu.h index b4c169d508..bbfa85b5fe 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/caffeprelu.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/caffeprelu.h @@ -30,7 +30,7 @@ class CaffePReluCPUKernel : public LiteKernel { public: CaffePReluCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { prelu_param_ = reinterpret_cast(op_parameter_); primitive_ = primitive; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/cast.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/cast.cc index bb8f45591f..14d870530d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/cast.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/cast.cc @@ -75,7 +75,7 @@ int CastCPUKernel::DoCast(int thread_id) { reinterpret_cast(output_data) + offset, data_num); } else if (input_data_type == kNumberTypeFloat32 && output_data_type == kNumberTypeFloat16) { Float32ToFp16(reinterpret_cast(input->Data()) + offset, - reinterpret_cast(output_data) + offset, data_num); + reinterpret_cast(output_data) + offset, data_num); } else { MS_LOG(ERROR) << "Unsupported 
datatype from " << input_data_type << " to " << output_data_type; return RET_ERROR; @@ -117,7 +117,8 @@ int CastCPUKernel::Run() { kernel::LiteKernel *CpuCastFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/cast.h b/mindspore/lite/src/runtime/kernel/arm/fp32/cast.h index 200dd40c26..ef5347420e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/cast.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/cast.h @@ -24,7 +24,7 @@ class CastCPUKernel : public LiteKernel { public: CastCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~CastCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/concat.h b/mindspore/lite/src/runtime/kernel/arm/fp32/concat.h index cafd6c84f7..b1ddaa333d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/concat.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/concat.h @@ -29,7 +29,7 @@ class ConcatCPUKernel : public ConcatBaseCPUKernel { public: ConcatCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ConcatBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~ConcatCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/constant_of_shape.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/constant_of_shape.cc index 0995320ee5..5e353cb7a8 100644 --- 
a/mindspore/lite/src/runtime/kernel/arm/fp32/constant_of_shape.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/constant_of_shape.cc @@ -80,7 +80,7 @@ kernel::LiteKernel *CpuConstantOfShapeFp32KernelCreator(const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, const kernel::KernelKey &desc, - const lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); if (opParameter == nullptr) { MS_LOG(ERROR) << "Create kernel failed, opParameter is nullptr, type: PrimitiveType_ConstantOfShape. "; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/constant_of_shape.h b/mindspore/lite/src/runtime/kernel/arm/fp32/constant_of_shape.h index d9303937c5..88194a10b2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/constant_of_shape.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/constant_of_shape.h @@ -28,7 +28,7 @@ class ConstantOfShapeCPUKernel : public LiteKernel { public: ConstantOfShapeCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) { param_ = reinterpret_cast(parameter); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution.cc index a117139f19..3990963957 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution.cc @@ -224,7 +224,8 @@ bool CheckIfUseSlideWindow(ConvParameter *conv_param) { kernel::LiteKernel *CpuConvFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *op_parameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(op_parameter != nullptr); 
MS_ASSERT(desc.type == schema::PrimitiveType_Conv2D); auto conv_param = reinterpret_cast(op_parameter); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution.h b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution.h index 3500f73176..d59c90dee0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution.h @@ -28,7 +28,7 @@ class ConvolutionCPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~ConvolutionCPUKernel() override { FreeTmpBuffer(); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_1x1.h b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_1x1.h index 46ed92a2df..4ec8d90ad5 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_1x1.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_1x1.h @@ -35,7 +35,7 @@ class Convolution1x1CPUKernel : public ConvolutionBaseCPUKernel { public: Convolution1x1CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) { matmul_param_ = new MatMulParameter(); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_3x3.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_3x3.cc index 644fcdafc7..386774d009 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_3x3.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_3x3.cc @@ -159,7 +159,7 @@ void Convolution3x3CPUKernel::ConfigInputOutput() { } int Convolution3x3CPUKernel::Init() { - if (!InferShapeDone()) { + 
if (!InferShapeDone()) { return RET_OK; } return ReSize(); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_3x3.h b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_3x3.h index 88f655c707..39937118c8 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_3x3.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_3x3.h @@ -27,7 +27,7 @@ class Convolution3x3CPUKernel : public ConvolutionBaseCPUKernel { public: Convolution3x3CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~Convolution3x3CPUKernel() override { FreeTmpBuffer(); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.cc index f6a8db4008..c0726fc2a3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.cc @@ -198,7 +198,8 @@ int ConvolutionDepthwiseCPUKernel::Run() { kernel::LiteKernel *CpuConvDwFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_DepthwiseConv2D); kernel::LiteKernel *kernel; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.h b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.h index e2863f1319..8fefa0dd08 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.h @@ -27,7 +27,7 @@ class 
ConvolutionDepthwiseCPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionDepthwiseCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~ConvolutionDepthwiseCPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3.cc index 52e647a742..5d8bcd73b4 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3.cc @@ -144,7 +144,7 @@ int ConvolutionDepthwise3x3CPUKernel::ReSize() { MS_LOG(ERROR) << "Depthwise3x3 fp32 initWeightBias error!ret: " << ret; return ret; } - // init threadNum; + // init threadNum; conv_param_->thread_num_ = MSMIN(thread_count_, UP_DIV(conv_param_->output_channel_, C4NUM)); ret = InitBuffer(); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3.h b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3.h index d952275736..0e04d764ec 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3.h @@ -27,7 +27,7 @@ class ConvolutionDepthwise3x3CPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionDepthwise3x3CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~ConvolutionDepthwise3x3CPUKernel() override { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_slidewindow.h 
b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_slidewindow.h index 6ed6170a95..82877c75de 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_slidewindow.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_slidewindow.h @@ -29,7 +29,7 @@ class ConvolutionSWCPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionSWCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~ConvolutionSWCPUKernel() override { FreeTmpBuffer(); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.cc index 1b548ab2c7..b910c6027d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.cc @@ -98,13 +98,13 @@ int ConvolutionWinogradCPUKernel::InitWeightBias() { int output_channel = conv_param_->output_channel_; int oc4 = UP_DIV(output_channel, C4NUM); int oc_block, oc_block_num; -// #ifdef ENABLE_ARM32 -// oc_block = C4NUM; -// oc_block_num = UP_DIV(output_channel, C4NUM); -// #else + // #ifdef ENABLE_ARM32 + // oc_block = C4NUM; + // oc_block_num = UP_DIV(output_channel, C4NUM); + // #else oc_block = C8NUM; oc_block_num = UP_DIV(output_channel, C8NUM); -// #endif + // #endif // init weight auto ret = MallocFilterMatrix(oc_block, oc_block_num); @@ -241,11 +241,11 @@ int ConvolutionWinogradCPUKernel::ConfigInputOutput() { MS_LOG(ERROR) << "Get output_trans_func_ failed."; return RET_ERROR; } -// #ifdef ENABLE_ARM32 -// gemm_func_ = IndirectGemmFp32_8x4; -// #else + // #ifdef ENABLE_ARM32 + // gemm_func_ = IndirectGemmFp32_8x4; + // #else gemm_func_ = IndirectGemmFp32_8x8; -// #endif + // #endif return RET_OK; } diff --git 
a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.h b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.h index 3cfd8f86e8..bd5373feaa 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.h @@ -29,7 +29,7 @@ class ConvolutionWinogradCPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionWinogradCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive, int output_unit) + const mindspore::lite::PrimitiveC *primitive, int output_unit) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive), output_unit_(output_unit), trans_weight_(nullptr) {} ~ConvolutionWinogradCPUKernel() override { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/crop.h b/mindspore/lite/src/runtime/kernel/arm/fp32/crop.h index 8165c84551..e15b20aaea 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/crop.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/crop.h @@ -25,7 +25,7 @@ class CropCPUKernel : public CropBaseCPUKernel { public: CropCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : CropBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~CropCPUKernel() = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.cc index 4f96c216ba..13819ba470 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.cc @@ -185,7 +185,8 @@ int DeConvolutionCPUKernel::Run() { kernel::LiteKernel *CpuDeConvFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context 
*ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_DeConv2D); auto kernel = new (std::nothrow) kernel::DeConvolutionCPUKernel(opParameter, inputs, outputs, ctx, primitive); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.h b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.h index 559ccb63b9..6b441d9cb6 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.h @@ -32,7 +32,7 @@ class DeConvolutionCPUKernel : public ConvolutionBaseCPUKernel { public: DeConvolutionCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) { matmul_param_ = new MatMulParameter(); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise.cc index 20c0b6f41b..3b83cd1d48 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise.cc @@ -211,7 +211,8 @@ int DeconvolutionDepthwiseCPUKernel::Run() { kernel::LiteKernel *CpuDeconvDwFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_DeDepthwiseConv2D); auto kernel = diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise.h 
b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise.h index 9c656be679..54fd68e9e4 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise.h @@ -27,7 +27,7 @@ class DeconvolutionDepthwiseCPUKernel : public ConvolutionBaseCPUKernel { public: DeconvolutionDepthwiseCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~DeconvolutionDepthwiseCPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space.h b/mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space.h index 706f094c37..0cd952ae5c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space.h @@ -24,7 +24,7 @@ class DepthToSpaceCPUKernel : public DepthToSpaceBaseCPUKernel { public: DepthToSpaceCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : DepthToSpaceBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~DepthToSpaceCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/elu.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/elu.cc index d8c90fefdd..c0e75f8a17 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/elu.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/elu.cc @@ -73,7 +73,7 @@ int EluCPUKernel::Run() { kernel::LiteKernel *CpuEluFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *parameter, const lite::Context *ctx, const KernelKey &desc, - const lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { if (parameter == 
nullptr || ctx == nullptr) { MS_LOG(ERROR) << "parameter or ctx is nullptr"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/elu.h b/mindspore/lite/src/runtime/kernel/arm/fp32/elu.h index f703f2b503..6cb7bc877e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/elu.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/elu.h @@ -26,7 +26,7 @@ class EluCPUKernel : public LiteKernel { public: explicit EluCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {} ~EluCPUKernel() override{}; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup.cc index c4688822a1..8c9e20850a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup.cc @@ -131,7 +131,8 @@ int EmbeddingLookupCPUKernel::Run() { kernel::LiteKernel *CpuEmbeddingLookupFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *parameter, const lite::Context *ctx, - const KernelKey &desc, const lite::Primitive *primitive) { + const KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (parameter == nullptr || ctx == nullptr) { MS_LOG(ERROR) << "parameter or ctx is nullptr"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup.h b/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup.h index 5a80550e82..70328a0ac4 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup.h @@ -26,7 +26,7 @@ class EmbeddingLookupCPUKernel : public LiteKernel { public: explicit EmbeddingLookupCPUKernel(OpParameter *parameter, const 
std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {} ~EmbeddingLookupCPUKernel() override { if (input_addr_ != nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.cc index 7239929a82..c4c7da7d65 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.cc @@ -85,7 +85,8 @@ int ExpandDimsCPUKernel::Run() { kernel::LiteKernel *CpuExpandsDimsFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_ExpandDims); auto *kernel = new (std::nothrow) ExpandDimsCPUKernel(opParameter, inputs, outputs, ctx, primitive); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.h b/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.h index 6d47f2ac52..308a90782a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.h @@ -31,7 +31,7 @@ class ExpandDimsCPUKernel : public LiteKernel { public: ExpandDimsCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {} ~ExpandDimsCPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/fill.cc 
b/mindspore/lite/src/runtime/kernel/arm/fp32/fill.cc index e620b8b2c7..359ecf7d2a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/fill.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/fill.cc @@ -94,7 +94,8 @@ int FillCPUKernel::Run() { kernel::LiteKernel *CpuFillFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); if (opParameter == nullptr) { MS_LOG(ERROR) << "Create kernel failed, opParameter is nullptr, type: PrimitiveType_Fill. "; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/fill.h b/mindspore/lite/src/runtime/kernel/arm/fp32/fill.h index 87e57bbb4e..3fd9a6e9fe 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/fill.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/fill.h @@ -29,7 +29,7 @@ class FillCPUKernel : public LiteKernel { public: FillCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {} ~FillCPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/flatten.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/flatten.cc index c88c187d97..c60ee8837c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/flatten.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/flatten.cc @@ -58,7 +58,8 @@ int FlattenCPUKernel::Run() { kernel::LiteKernel *CpuFlattenFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const 
mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); if (opParameter == nullptr) { MS_LOG(ERROR) << "Create kernel failed, opParameter is nullptr, type: PrimitiveType_Flatten. "; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/flatten.h b/mindspore/lite/src/runtime/kernel/arm/fp32/flatten.h index beb74b59e4..c0834ded20 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/flatten.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/flatten.h @@ -29,7 +29,7 @@ class FlattenCPUKernel : public LiteKernel { public: FlattenCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) { flatten_param_ = reinterpret_cast(parameter); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection.h b/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection.h index f10f163f95..b8b0d5defe 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection.h @@ -30,7 +30,7 @@ class FullconnectionCPUKernel : public FullconnectionBaseCPUKernel { public: FullconnectionCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : FullconnectionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~FullconnectionCPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm.cc index 9b5e28d870..e5c2f9801e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm.cc @@ -148,7 +148,8 @@ int FusedBatchnormCPUKernel::Run() { kernel::LiteKernel 
*CpuFusedBatchnormKernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_FusedBatchNorm); FusedBatchnormCPUKernel *kernel = diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm.h b/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm.h index e1c67e545f..1b7634a3cb 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm.h @@ -26,7 +26,7 @@ class FusedBatchnormCPUKernel : public LiteKernel { public: FusedBatchnormCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) { batchnorm_param_ = reinterpret_cast(parameter); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/gather.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/gather.cc index 9a7e1a9a0d..7332b68c8e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/gather.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/gather.cc @@ -121,21 +121,22 @@ int GatherCPUKernel::Run() { kernel::LiteKernel *CpuGatherFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, - OpParameter *parameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + OpParameter *opParameter, const lite::Context *ctx, + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(desc.type == schema::PrimitiveType_Gather); - if (parameter == nullptr) { + if (opParameter == nullptr) { MS_LOG(ERROR) << "input parameter is nullptr!"; return nullptr; } - 
auto *kernel = new (std::nothrow) GatherCPUKernel(parameter, inputs, outputs, ctx, primitive); + auto *kernel = new (std::nothrow) GatherCPUKernel(opParameter, inputs, outputs, ctx, primitive); if (kernel == nullptr) { return nullptr; } auto ret = kernel->Init(); if (ret != RET_OK) { - MS_LOG(ERROR) << "Init kernel failed, name: " << parameter->name_ << ", type: " - << schema::EnumNamePrimitiveType(static_cast(parameter->type_)); + MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " + << schema::EnumNamePrimitiveType(static_cast(opParameter->type_)); delete kernel; return nullptr; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/gather.h b/mindspore/lite/src/runtime/kernel/arm/fp32/gather.h index cddfb6f32e..5b954e0b64 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/gather.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/gather.h @@ -26,7 +26,7 @@ class GatherCPUKernel : public LiteKernel { public: GatherCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {} ~GatherCPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd.cc index 1f882ea6d6..518d74589e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd.cc @@ -134,7 +134,8 @@ int GatherNdCPUKernel::Run() { kernel::LiteKernel *CpuGatherNdFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == 
schema::PrimitiveType_GatherNd); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd.h b/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd.h index 8a043d4228..edaa5ba6cc 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd.h @@ -31,7 +31,7 @@ class GatherNdCPUKernel : public LiteKernel { public: GatherNdCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {} ~GatherNdCPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm.cc index 1695e3ae03..85cc36f414 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm.cc @@ -91,7 +91,7 @@ kernel::LiteKernel *CpuLocalResponseNormFp32KernelCreator(const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, const kernel::KernelKey &desc, - const lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_LocalResponseNormalization); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm.h b/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm.h index 90cdc8a66e..6a6bfaa75b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm.h @@ -26,7 +26,7 @@ class LocalResponseNormCPUKernel : public LiteKernel { public: LocalResponseNormCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive 
*primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {} ~LocalResponseNormCPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/lstm.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/lstm.cc index b4ec2c7978..dc39cb5936 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/lstm.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/lstm.cc @@ -159,7 +159,7 @@ int LstmCPUKernel::Run() { kernel::LiteKernel *CpuLstmKernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *parameter, const lite::Context *ctx, const kernel::KernelKey &desc, - const lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { if (parameter == nullptr) { MS_LOG(ERROR) << "Input parameter is nullptr!"; return nullptr; @@ -174,8 +174,8 @@ kernel::LiteKernel *CpuLstmKernelCreator(const std::vectorInit(); if (ret != RET_OK) { delete kernel; - MS_LOG(ERROR) << "Init kernel failed, name: " << parameter->name_ << ", type: " - << schema::EnumNamePrimitiveType(static_cast(parameter->type_)); + MS_LOG(ERROR) << "Init kernel failed, name: " << parameter->name_ + << ", type: " << schema::EnumNamePrimitiveType(static_cast(parameter->type_)); return nullptr; } return kernel; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/lstm.h b/mindspore/lite/src/runtime/kernel/arm/fp32/lstm.h index 957dc216c2..5bdba39fdb 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/lstm.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/lstm.h @@ -26,7 +26,7 @@ class LstmCPUKernel : public LiteKernel { public: LstmCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) { lstm_parm_ = reinterpret_cast(op_parameter_); } diff --git 
a/mindspore/lite/src/runtime/kernel/arm/fp32/matmul.h b/mindspore/lite/src/runtime/kernel/arm/fp32/matmul.h index 654950c9ff..4642c8ad6c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/matmul.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/matmul.h @@ -27,7 +27,7 @@ class MatmulCPUKernel : public MatmulBaseCPUKernel { public: explicit MatmulCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : MatmulBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~MatmulCPUKernel() override; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/nchw2nhwc.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/nchw2nhwc.cc index feb31753b3..f19c547893 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/nchw2nhwc.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/nchw2nhwc.cc @@ -53,7 +53,8 @@ int Nchw2NhwcCPUKernel::Run() { kernel::LiteKernel *CpuNchw2NhwcFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Nchw2Nhwc); auto *kernel = new (std::nothrow) Nchw2NhwcCPUKernel(opParameter, inputs, outputs, ctx, primitive); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/nchw2nhwc.h b/mindspore/lite/src/runtime/kernel/arm/fp32/nchw2nhwc.h index 99d3739c5d..d3070ea4fa 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/nchw2nhwc.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/nchw2nhwc.h @@ -30,7 +30,7 @@ class Nchw2NhwcCPUKernel : public LiteKernel { public: Nchw2NhwcCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const 
lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~Nchw2NhwcCPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/nhwc2nchw.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/nhwc2nchw.cc index 3f46ea9560..27b50108cb 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/nhwc2nchw.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/nhwc2nchw.cc @@ -53,7 +53,8 @@ int Nhwc2NchwCPUKernel::Run() { kernel::LiteKernel *CpuNhwc2NchwFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Nhwc2Nchw); auto *kernel = new (std::nothrow) Nhwc2NchwCPUKernel(opParameter, inputs, outputs, ctx, primitive); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/nhwc2nchw.h b/mindspore/lite/src/runtime/kernel/arm/fp32/nhwc2nchw.h index 209fae64ba..52558bbaf3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/nhwc2nchw.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/nhwc2nchw.h @@ -30,7 +30,7 @@ class Nhwc2NchwCPUKernel : public LiteKernel { public: Nhwc2NchwCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~Nhwc2NchwCPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot.cc index 2c3325ad56..488057751c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot.cc @@ -177,7 +177,8 @@ int 
OneHotCPUKernel::Run() { kernel::LiteKernel *CpuOneHotFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter != nullptr) { MS_LOG(ERROR) << "OneHot opParameter nullptr."; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot.h b/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot.h index 52bfdcff2f..930c300928 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot.h @@ -25,7 +25,7 @@ class OneHotCPUKernel : public LiteKernel { public: OneHotCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~OneHotCPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/pad.h b/mindspore/lite/src/runtime/kernel/arm/fp32/pad.h index f05a9fee54..ca0d2c2cd2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/pad.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/pad.h @@ -27,7 +27,7 @@ class PadCPUKernel : public LiteKernel { public: PadCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), context_(ctx) { pad_param_ = reinterpret_cast(parameter); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/pooling.h b/mindspore/lite/src/runtime/kernel/arm/fp32/pooling.h index 2448657d6c..13939086b6 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/pooling.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/pooling.h @@ 
-27,7 +27,7 @@ class PoolingCPUKernel : public PoolingBaseCPUKernel { public: PoolingCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : PoolingBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~PoolingCPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/power.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/power.cc index b708eb0901..469bdcc7f0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/power.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/power.cc @@ -79,7 +79,8 @@ int PowerCPUKernel::RunImpl(int task_id) { kernel::LiteKernel *CpuPowerFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Power); PowerCPUKernel *kernel = new (std::nothrow) PowerCPUKernel(opParameter, inputs, outputs, ctx, primitive); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/power.h b/mindspore/lite/src/runtime/kernel/arm/fp32/power.h index dda3561baf..3fa23b61b9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/power.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/power.h @@ -27,7 +27,7 @@ class PowerCPUKernel : public LiteKernel { public: PowerCPUKernel(OpParameter *param, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(param, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_), diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/prelu.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/prelu.cc index 
7682d903cd..1cf7682c09 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/prelu.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/prelu.cc @@ -67,7 +67,8 @@ int PReluCPUKernel::Run() { kernel::LiteKernel *CpuPReluFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/prelu.h b/mindspore/lite/src/runtime/kernel/arm/fp32/prelu.h index cf0a0c5180..791f82a808 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/prelu.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/prelu.h @@ -30,7 +30,7 @@ class PReluCPUKernel : public LiteKernel { public: PReluCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { prelu_param_ = (reinterpret_cast(op_parameter_)); primitive_ = primitive; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/range.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/range.cc index b4972e219a..2b2c0e8fea 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/range.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/range.cc @@ -54,7 +54,8 @@ int RangeCPUKernel::Run() { kernel::LiteKernel *CpuRangeFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == 
schema::PrimitiveType_Range); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/range.h b/mindspore/lite/src/runtime/kernel/arm/fp32/range.h index acaa63f47d..95bd779133 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/range.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/range.h @@ -26,7 +26,7 @@ class RangeCPUKernel : public LiteKernel { public: explicit RangeCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~RangeCPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/rank.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/rank.cc index 16212071aa..ae350e150c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/rank.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/rank.cc @@ -53,7 +53,8 @@ int RankCPUKernel::Run() { kernel::LiteKernel *CpuRankFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Rank); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/rank.h b/mindspore/lite/src/runtime/kernel/arm/fp32/rank.h index 9e23d2ddcb..4db3a00f1a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/rank.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/rank.h @@ -26,7 +26,7 @@ class RankCPUKernel : public LiteKernel { public: explicit RankCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~RankCPUKernel() 
override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reduce.h b/mindspore/lite/src/runtime/kernel/arm/fp32/reduce.h index c3f5bd8c8f..1f9326481e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/reduce.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reduce.h @@ -33,7 +33,7 @@ class ReduceCPUKernel : public ReduceBaseCPUKernel { public: ReduceCPUKernel(OpParameter *param, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ReduceBaseCPUKernel(param, inputs, outputs, ctx, primitive) {} ~ReduceCPUKernel() { for (auto i = 0; i < data_buffers_.size(); i++) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reshape.h b/mindspore/lite/src/runtime/kernel/arm/fp32/reshape.h index 65b9de20dc..cc00a80b28 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/reshape.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reshape.h @@ -30,7 +30,7 @@ class ReshapeCPUKernel : public ReshapeBaseCPUKernel { public: ReshapeCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ReshapeBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~ReshapeCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/resize.h b/mindspore/lite/src/runtime/kernel/arm/fp32/resize.h index 95ca4e3d0b..01e651ceee 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/resize.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/resize.h @@ -28,7 +28,7 @@ class ResizeCPUKernel : public ResizeBaseCPUKernel { public: ResizeCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ResizeBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} 
~ResizeCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.cc index f5307743c3..430222f5a1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.cc @@ -143,7 +143,8 @@ int ReverseCPUKernel::Run() { kernel::LiteKernel *CpuReverseFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "opParameter is NULL! "; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.h b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.h index 9b6168f6ed..70611f46b9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.h @@ -30,7 +30,7 @@ class ReverseCPUKernel : public LiteKernel { public: ReverseCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {} ~ReverseCPUKernel() { if (tmp_ != nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_sequence.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_sequence.cc index 56f8e185dc..e1aa4a3afb 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_sequence.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_sequence.cc @@ -103,7 +103,8 @@ int ReverseSequenceCPUKernel::Run() { kernel::LiteKernel *CpuReverseSequenceFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *parameter, const lite::Context *ctx, - const 
KernelKey &desc, const lite::Primitive *primitive) { + const KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(parameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_ReverseSequence); auto *kernel = new (std::nothrow) ReverseSequenceCPUKernel(parameter, inputs, outputs, ctx, primitive); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_sequence.h b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_sequence.h index 4d2d57dbd2..8a924232b4 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_sequence.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_sequence.h @@ -25,7 +25,7 @@ class ReverseSequenceCPUKernel : public LiteKernel { public: ReverseSequenceCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~ReverseSequenceCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling.cc index fd8cda5b8b..abc0fcff5d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling.cc @@ -102,7 +102,8 @@ int ROIPoolingCPUKernel::Run() { kernel::LiteKernel *CpuROIPoolingFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling.h b/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling.h index 3bcfe5a243..95ab84aea2 100644 --- 
a/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling.h @@ -25,7 +25,7 @@ class ROIPoolingCPUKernel : public LiteKernel { public: ROIPoolingCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) { param_ = reinterpret_cast(parameter); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/scale.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/scale.cc index b3d553b3f9..a4889528fb 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/scale.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/scale.cc @@ -182,7 +182,8 @@ int ScaleCPUKernel::Run() { kernel::LiteKernel *CpuScaleFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(desc.type == schema::PrimitiveType_Scale); if (opParameter == nullptr) { MS_LOG(ERROR) << "opParameter is nullptr"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/scale.h b/mindspore/lite/src/runtime/kernel/arm/fp32/scale.h index 2cfded9e08..b07fb52139 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/scale.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/scale.h @@ -27,7 +27,7 @@ class ScaleCPUKernel : public LiteKernel { public: ScaleCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) { scale_param_ = reinterpret_cast(op_parameter_); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.cc 
b/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.cc index 0cec34b191..3a7fadfb0a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.cc @@ -168,7 +168,8 @@ int ScatterNDCPUKernel::Run() { kernel::LiteKernel *CpuScatterNDFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(desc.type == schema::PrimitiveType_ScatterND); if (opParameter == nullptr) { MS_LOG(ERROR) << "desc type is not scatterND"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.h b/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.h index 937031b125..c1a3ed52f1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.h @@ -27,7 +27,7 @@ class ScatterNDCPUKernel : public LiteKernel { public: explicit ScatterNDCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~ScatterNDCPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/shape.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/shape.cc index 93c02053bd..4e9e37c320 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/shape.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/shape.cc @@ -61,7 +61,8 @@ int ShapeCPUKernel::Run() { kernel::LiteKernel *CpuShapeFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const 
mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(desc.type == schema::PrimitiveType_Shape); if (opParameter == nullptr) { MS_LOG(ERROR) << "desc type is not Shape"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/shape.h b/mindspore/lite/src/runtime/kernel/arm/fp32/shape.h index 86edc153f8..0f1fae6ce9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/shape.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/shape.h @@ -27,7 +27,7 @@ class ShapeCPUKernel : public LiteKernel { public: ShapeCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~ShapeCPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/slice.h b/mindspore/lite/src/runtime/kernel/arm/fp32/slice.h index 7fb862f6e1..22f99dde09 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/slice.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/slice.h @@ -25,7 +25,7 @@ class SliceCPUKernel : public SliceBaseCPUKernel { public: SliceCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : SliceBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~SliceCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/softmax.h b/mindspore/lite/src/runtime/kernel/arm/fp32/softmax.h index 4d69a660a9..91b4c9d346 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/softmax.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/softmax.h @@ -26,7 +26,7 @@ class SoftmaxCPUKernel : public SoftmaxBaseCPUKernel { public: SoftmaxCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const 
mindspore::lite::PrimitiveC *primitive) : SoftmaxBaseCPUKernel(parameter, inputs, outputs, ctx, primitive), sum_data_(nullptr) {} ~SoftmaxCPUKernel() override { if (sum_data_ != nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch.cc index aaff20c6f7..3cfdad473b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch.cc @@ -137,7 +137,8 @@ int SpaceToBatchCPUKernel::Run() { kernel::LiteKernel *CpuSpaceToBatchFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch.h b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch.h index 3aa1b7ead7..f93de6cc84 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch.h @@ -24,7 +24,7 @@ class SpaceToBatchCPUKernel : public LiteKernel { public: SpaceToBatchCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_num_(ctx->thread_num_) {} ~SpaceToBatchCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.cc index 710a8fb8b2..7b3a3de852 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.cc @@ -109,7 +109,8 
@@ int SpaceToDepthCPUKernel::Run() { kernel::LiteKernel *CpuSpaceToDepthFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.h b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.h index c1b57bcb7a..3db95679a8 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.h @@ -25,7 +25,7 @@ class SpaceToDepthCPUKernel : public LiteKernel { public: SpaceToDepthCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~SpaceToDepthCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense.cc index 87229b194e..4591e7e823 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense.cc @@ -81,7 +81,8 @@ int SparseToDenseCPUKernel::Run() { kernel::LiteKernel *CpuSparseToDenseFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense.h 
b/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense.h index 3bdb5379a2..86f8b69849 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense.h @@ -30,7 +30,7 @@ class SparseToDenseCPUKernel : public LiteKernel { public: SparseToDenseCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { s2d_param_ = (reinterpret_cast(op_parameter_)); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/split.h b/mindspore/lite/src/runtime/kernel/arm/fp32/split.h index 0796d7b135..90be466618 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/split.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/split.h @@ -26,7 +26,7 @@ class SplitCPUKernel : public SplitBaseCPUKernel { public: SplitCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : SplitBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~SplitCPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/squeeze.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/squeeze.cc index 486953c73f..a8baa45911 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/squeeze.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/squeeze.cc @@ -64,7 +64,8 @@ int SqueezeCPUKernel::Run() { kernel::LiteKernel *CpuSqueezeFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *parameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(desc.type == 
schema::PrimitiveType_Squeeze); if (parameter == nullptr) { MS_LOG(ERROR) << "desc type is not Squeeze"; @@ -78,8 +79,8 @@ kernel::LiteKernel *CpuSqueezeFp32KernelCreator(const std::vectorInit(); if (ret != RET_OK) { - MS_LOG(ERROR) << "Init kernel failed, name: " << parameter->name_ << ", type: " - << schema::EnumNamePrimitiveType(static_cast(parameter->type_)); + MS_LOG(ERROR) << "Init kernel failed, name: " << parameter->name_ + << ", type: " << schema::EnumNamePrimitiveType(static_cast(parameter->type_)); delete kernel; return nullptr; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/squeeze.h b/mindspore/lite/src/runtime/kernel/arm/fp32/squeeze.h index e9dc393f75..ce6ffaa74f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/squeeze.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/squeeze.h @@ -26,7 +26,7 @@ class SqueezeCPUKernel : public LiteKernel { public: explicit SqueezeCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~SqueezeCPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/stack.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/stack.cc index 3334983361..7beb89e83b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/stack.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/stack.cc @@ -61,7 +61,8 @@ int StackCPUKernel::Run() { kernel::LiteKernel *CpuStackFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *op_parameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (op_parameter == nullptr) { MS_LOG(ERROR) << "Input op_parameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/stack.h 
b/mindspore/lite/src/runtime/kernel/arm/fp32/stack.h index 735f50b73e..77a4f64f8a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/stack.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/stack.h @@ -24,7 +24,7 @@ class StackCPUKernel : public LiteKernel { public: StackCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~StackCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/tile.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/tile.cc index 398a747227..eb607b4d45 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/tile.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/tile.cc @@ -65,7 +65,7 @@ int TileCPUKernel::Run() { kernel::LiteKernel *CpuTileFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *parameter, const lite::Context *ctx, const KernelKey &desc, - const lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { if (parameter == nullptr || ctx == nullptr) { MS_LOG(ERROR) << "parameter or ctx is nullptr"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/tile.h b/mindspore/lite/src/runtime/kernel/arm/fp32/tile.h index 879e77d604..51d741f41f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/tile.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/tile.h @@ -25,7 +25,7 @@ class TileCPUKernel : public LiteKernel { public: explicit TileCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~TileCPUKernel() override {} diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/topk.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/topk.cc 
index 5f9ec21c3f..6c6be20aa1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/topk.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/topk.cc @@ -71,7 +71,7 @@ int TopKCPUKernel::Run() { kernel::LiteKernel *CpuTopKFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *parameter, const lite::Context *ctx, const KernelKey &desc, - const lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { if (parameter == nullptr) { MS_LOG(ERROR) << "input parameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/topk.h b/mindspore/lite/src/runtime/kernel/arm/fp32/topk.h index 9dd54a95af..012fae92ea 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/topk.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/topk.h @@ -25,7 +25,7 @@ class TopKCPUKernel : public LiteKernel { public: explicit TopKCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~TopKCPUKernel() override { TopkParameter *parameter = reinterpret_cast(op_parameter_); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.cc index 5d9452c68b..5ceb5f2cc4 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.cc @@ -116,7 +116,8 @@ int TransposeCPUKernel::Run() { kernel::LiteKernel *CpuTransposeFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(desc.type == schema::PrimitiveType_Transpose); if (opParameter == nullptr) { MS_LOG(ERROR) << "desc 
type is not Transpose"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.h b/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.h index 75460a09de..0fcd67f789 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.h @@ -28,8 +28,8 @@ class TransposeCPUKernel : public LiteKernel { public: explicit TransposeCPUKernel(OpParameter *param, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) - : LiteKernel(param, inputs, outputs, ctx, primitive), thread_num_(ctx->thread_num_) {} + const mindspore::lite::PrimitiveC *primitive) + : LiteKernel(param, inputs, outputs, ctx, primitive), thread_num_(ctx->thread_num_) {} ~TransposeCPUKernel() override = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/unique.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/unique.cc index c72ecd7b12..b947b522cf 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/unique.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/unique.cc @@ -49,7 +49,7 @@ int UniqueCPUKernel::Run() { kernel::LiteKernel *CpuUniqueFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *parameter, const lite::Context *ctx, const KernelKey &desc, - const lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(parameter); MS_ASSERT(desc.type == PrimitiveType_Unique); auto *kernel = new (std::nothrow) UniqueCPUKernel(parameter, inputs, outputs, ctx, primitive); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/unique.h b/mindspore/lite/src/runtime/kernel/arm/fp32/unique.h index 72f0ceb458..ab6558674e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/unique.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/unique.h @@ -25,7 +25,7 @@ class UniqueCPUKernel : public LiteKernel { public: UniqueCPUKernel(OpParameter *parameter, const std::vector &inputs, const 
std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~UniqueCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/unsqueeze.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/unsqueeze.cc index bfe611677a..57d545e24c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/unsqueeze.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/unsqueeze.cc @@ -84,7 +84,8 @@ int UnsqueezeCPUKernel::Run() { kernel::LiteKernel *CpuUnsqueezeFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *parameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(parameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Unsqueeze); auto *kernel = new (std::nothrow) UnsqueezeCPUKernel(parameter, inputs, outputs, ctx, primitive); @@ -94,8 +95,8 @@ kernel::LiteKernel *CpuUnsqueezeFp32KernelCreator(const std::vectorInit(); if (ret != RET_OK) { - MS_LOG(ERROR) << "Init kernel failed, name: " << parameter->name_ << ", type: " - << schema::EnumNamePrimitiveType(static_cast(parameter->type_)); + MS_LOG(ERROR) << "Init kernel failed, name: " << parameter->name_ + << ", type: " << schema::EnumNamePrimitiveType(static_cast(parameter->type_)); delete kernel; return nullptr; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/unsqueeze.h b/mindspore/lite/src/runtime/kernel/arm/fp32/unsqueeze.h index c27b554959..b488ec2042 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/unsqueeze.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/unsqueeze.h @@ -28,7 +28,7 @@ class UnsqueezeCPUKernel : public LiteKernel { public: UnsqueezeCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const 
lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~UnsqueezeCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/unstack.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/unstack.cc index b57b123e61..0dde6b7c5c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/unstack.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/unstack.cc @@ -81,7 +81,7 @@ int UnstackCPUKernel::Run() { kernel::LiteKernel *CpuUnstackFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *parameter, const lite::Context *ctx, const KernelKey &desc, - const lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(parameter != nullptr); MS_ASSERT(desc.type == PrimitiveType_Unstack); auto *kernel = new (std::nothrow) UnstackCPUKernel(parameter, inputs, outputs, ctx, primitive); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/unstack.h b/mindspore/lite/src/runtime/kernel/arm/fp32/unstack.h index 6b3bfe0028..2746c5a349 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/unstack.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/unstack.h @@ -25,7 +25,7 @@ class UnstackCPUKernel : public LiteKernel { public: UnstackCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~UnstackCPUKernel() { free(output_addr_array_); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/where.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/where.cc index d307db840e..a6f20753dd 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/where.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/where.cc @@ -90,7 +90,8 @@ int WhereCPUKernel::Run() { kernel::LiteKernel *CpuWhereFp32KernelCreator(const std::vector &inputs, const 
std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/where.h b/mindspore/lite/src/runtime/kernel/arm/fp32/where.h index 61d03bd000..7b2306d1dc 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/where.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/where.h @@ -30,7 +30,7 @@ class WhereCPUKernel : public LiteKernel { public: WhereCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { where_param_ = reinterpret_cast(op_parameter_); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike.cc index f0a65e07cc..e1536f4525 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike.cc @@ -48,7 +48,8 @@ int ZerosLikeCPUKernel::Run() { kernel::LiteKernel *CpuZerosLikeFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (opParameter == nullptr) { MS_LOG(ERROR) << "input opParameter is nullptr!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike.h b/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike.h index 46f56527fc..92ac4b8202 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike.h +++ 
b/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike.h @@ -24,7 +24,7 @@ class ZerosLikeCPUKernel : public LiteKernel { public: ZerosLikeCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~ZerosLikeCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc index 7ed31910cd..d877009b31 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc @@ -93,7 +93,7 @@ kernel::LiteKernel *CpuActivationGradFp32KernelCreator(const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, const kernel::KernelKey &desc, - const lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_ActivationGrad); auto *kernel = new (std::nothrow) ActivationGradCPUKernel(opParameter, inputs, outputs, ctx, primitive); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.h index 56ddf0f5fc..cc0d5126bb 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.h @@ -28,7 +28,7 @@ class ActivationGradCPUKernel : public LiteKernel { public: explicit ActivationGradCPUKernel(OpParameter *param, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(param, inputs, outputs, ctx, primitive) { ActivationGradParameter *param_act_grad = reinterpret_cast(param); type_ = 
param_act_grad->type_; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc index 0639433ef4..aaeca7c083 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc @@ -262,7 +262,7 @@ kernel::LiteKernel *CpuArithmeticGradFp32KernelCreator(const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, const kernel::KernelKey &desc, - const lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { MS_EXCEPTION_IF_NULL(opParameter); if (opParameter == nullptr) { return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.h index 0e919c7b7f..90ac24e197 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.h @@ -38,7 +38,7 @@ class ArithmeticGradCPUKernel : public LiteKernel { public: explicit ArithmeticGradCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), tile_data0(NULL), tile_data1(NULL), tile_data2(NULL) { switch (type()) { case PrimitiveType_MulGrad: diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc index e5e53b0974..d3a9649814 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc @@ -97,7 +97,8 @@ int BiasGradCPUKernel::Run() { kernel::LiteKernel *CpuBiasGradFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const 
lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_BiasGrad); auto *kernel = diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.h index ed652ab617..e2260e31f3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.h @@ -28,7 +28,7 @@ class BiasGradCPUKernel : public LiteKernel { public: explicit BiasGradCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) { bias_param = reinterpret_cast(parameter); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc index cc073ef539..e21ea12322 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc @@ -92,7 +92,8 @@ int BNGradInputCPUKernel::Run() { kernel::LiteKernel *CpuBNGradInputFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_BNGradInput); // parameter->name = opDef.name()->str().data(); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.h index 6476ceddbb..4a2409f067 100644 --- 
a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.h @@ -26,7 +26,7 @@ class BNGradInputCPUKernel : public LiteKernel { public: explicit BNGradInputCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~BNGradInputCPUKernel() override { delete workspace; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc index 844062e324..ee3f59972b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc @@ -136,7 +136,7 @@ kernel::LiteKernel *CpuConvGradFilterFp32KernelCreator(const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, const kernel::KernelKey &desc, - const lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Conv2DGradFilter); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.h index 7a9354be7e..75a345b6fb 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.h @@ -26,7 +26,7 @@ class ConvolutionGradFilterCPUKernel : public LiteKernel { public: explicit ConvolutionGradFilterCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) 
{} ~ConvolutionGradFilterCPUKernel() override { delete workspace; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc index 8563565bf6..0c10a11b9f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc @@ -115,7 +115,8 @@ int ConvolutionGradInputCPUKernel::Run() { kernel::LiteKernel *CpuConvGradInputFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Conv2DGradInput); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.h index 9653fe06ad..6bda66b3dd 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.h @@ -26,7 +26,7 @@ class ConvolutionGradInputCPUKernel : public LiteKernel { public: explicit ConvolutionGradInputCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~ConvolutionGradInputCPUKernel() override { delete workspace; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/opt_momentum.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/opt_momentum.cc index 98f2e4143c..5494dade61 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/opt_momentum.cc +++ 
b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/opt_momentum.cc @@ -64,7 +64,8 @@ int OptMomentumCPUKernel::Init() { return 0; } kernel::LiteKernel *CpuOptMomentumFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(desc.type == schema::PrimitiveType_OptMomentum); auto *kernel = new (std::nothrow) OptMomentumCPUKernel(opParameter, inputs, outputs, ctx, primitive); MS_ASSERT(kernel != nullptr); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/opt_momentum.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/opt_momentum.h index f9e0395ea3..8603363003 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/opt_momentum.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/opt_momentum.h @@ -26,7 +26,7 @@ class OptMomentumCPUKernel : public LiteKernel { public: explicit OptMomentumCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~OptMomentumCPUKernel() override {} diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc index c98b812515..91f7276396 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc @@ -175,7 +175,8 @@ int PoolingGradCPUKernel::Run() { kernel::LiteKernel *CpuPoolingGradFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const 
mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_PoolingGrad); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.h index 32c20f0abd..58c36a0ffb 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.h @@ -31,7 +31,7 @@ class PoolingGradCPUKernel : public LiteKernel { public: explicit PoolingGradCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~PoolingGradCPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc index 57209ab126..360acd905f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc @@ -50,7 +50,8 @@ int PowerGradCPUKernel::Run() { kernel::LiteKernel *CpuPowerGradFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_PowerGrad); auto *kernel = new (std::nothrow) PowerGradCPUKernel(opParameter, inputs, outputs, ctx, primitive); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.h index 737de8c2a0..10d80634e6 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.h +++ 
b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.h @@ -27,7 +27,7 @@ class PowerGradCPUKernel : public LiteKernel { public: PowerGradCPUKernel(OpParameter *param, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(param, inputs, outputs, ctx, primitive) { PowerParameter *power_param = reinterpret_cast(param); power_ = power_param->power_; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc index c9d4706bdb..6e07dc27d1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc @@ -131,7 +131,7 @@ kernel::LiteKernel *CpuSoftmaxCrossEntropyFp32KernelCreator(const std::vector
  • &outputs, OpParameter *opParameter, const lite::Context *ctx, const kernel::KernelKey &desc, - const lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_SoftmaxCrossEntropy); auto *kernel = diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.h index 4447d293ba..0c619692ed 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.h @@ -30,7 +30,8 @@ class SparseSoftmaxCrossEntropyWithLogitsCPUKernel : public LiteKernel { explicit SparseSoftmaxCrossEntropyWithLogitsCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, - const lite::Context *ctx, const lite::Primitive *primitive) + const lite::Context *ctx, + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) { param = reinterpret_cast(parameter); } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/activation.cc b/mindspore/lite/src/runtime/kernel/arm/int8/activation.cc index 3d9ba79303..3041732aa3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/activation.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/activation.cc @@ -33,7 +33,8 @@ namespace mindspore::kernel { kernel::LiteKernel *CpuActivationInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *parameter, const lite::Context *ctx, - const KernelKey &desc, const lite::Primitive *primitive) { + const KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (parameter == nullptr) { MS_LOG(ERROR) << "parameter is nullptr"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.cc 
b/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.cc index 8ded0494d7..be93ddb0a0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.cc @@ -131,7 +131,7 @@ int QuantizedAddCPUKernel::DoExecute(int tId) { kernel::LiteKernel *CpuAddInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *parameter, const lite::Context *ctx, const KernelKey &desc, - const lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { if (parameter == nullptr || ctx == nullptr) { MS_LOG(ERROR) << "parameter or ctx is nullptr"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.h index 79692449f7..065992ed65 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.h @@ -26,7 +26,7 @@ class QuantizedAddCPUKernel : public LiteKernel { public: explicit QuantizedAddCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx_->thread_num_) {} ~QuantizedAddCPUKernel() override {} diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.h index b195ce4c95..0d618103d1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.h @@ -25,7 +25,7 @@ class ArgMinMaxInt8CPUKernel : public ArgMinMaxBaseCPUKernel { public: ArgMinMaxInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : 
ArgMinMaxBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~ArgMinMaxInt8CPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.cc index 26228e9836..7a215fcd08 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.cc @@ -195,7 +195,8 @@ int ArithmeticInt8CPUKernel::Run() { kernel::LiteKernel *CpuArithmeticInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *parameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (parameter == nullptr) { MS_LOG(ERROR) << "Input parameter is null!"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.h index 186e062393..b9d1de8b37 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.h @@ -30,7 +30,7 @@ class ArithmeticInt8CPUKernel : public LiteKernel { public: ArithmeticInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~ArithmeticInt8CPUKernel(); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc index e364355ac1..3079e3637d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc @@ -116,7 +116,7 @@ kernel::LiteKernel *CpuArithmeticSelfInt8KernelCreator(const std::vector &outputs, 
OpParameter *opParameter, const lite::Context *ctx, const kernel::KernelKey &desc, - const lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); if (opParameter == nullptr) { MS_LOG(ERROR) << "Creator failed, opParameter is nullptr!"; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.h index b04e6615f9..2f8ebab69c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.h @@ -44,7 +44,7 @@ class ArithmeticSelfInt8CPUKernel : public LiteKernel { public: explicit ArithmeticSelfInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { switch (parameter->type_) { case PrimitiveType_Round: diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.h index 19b0ba4290..0f37d68903 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.h @@ -24,7 +24,7 @@ class BatchToSpaceInt8CPUKernel : public BatchToSpaceBaseCPUKernel { public: BatchToSpaceInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : BatchToSpaceBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~BatchToSpaceInt8CPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/bias_add_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/bias_add_int8.cc index f0691b6f0c..33ff4abc6f 
100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/bias_add_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/bias_add_int8.cc @@ -71,7 +71,7 @@ int BiasAddInt8CPUKernel::Run() { kernel::LiteKernel *CpuBiasAddInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *parameter, const lite::Context *ctx, const KernelKey &desc, - const lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { if (parameter == nullptr || ctx == nullptr) { MS_LOG(ERROR) << "parameter or context is nullptr"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/bias_add_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/bias_add_int8.h index d442bab0f3..71fd21f5e0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/bias_add_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/bias_add_int8.h @@ -26,7 +26,7 @@ class BiasAddInt8CPUKernel : public LiteKernel { public: BiasAddInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx) {} ~BiasAddInt8CPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.cc index da7ddc8718..5992267581 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.cc @@ -35,8 +35,7 @@ int ConcatInt8CPUKernel::Init() { MS_LOG(ERROR) << "Null pointer reference: inputs_array."; return RET_ERROR; } - concat_param_->quant_arg_.in_args_ = - reinterpret_cast(malloc(sizeof(QuantArg) * input_num)); + concat_param_->quant_arg_.in_args_ = reinterpret_cast(malloc(sizeof(QuantArg) * input_num)); if (concat_param_->quant_arg_.in_args_ == nullptr) { MS_LOG(ERROR) << "Null pointer reference: 
quant_concat_parm_->in_quant_args_."; return RET_ERROR; @@ -67,7 +66,7 @@ int ConcatInt8CPUKernel::ReSize() { return ret; } if (concat_param_->input_shapes_ != nullptr) { -// free(concat_param_->input_shapes_); + // free(concat_param_->input_shapes_); } auto input_num = in_tensors_.size(); concat_param_->input_num_ = input_num; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.h index 18fe2781da..7a677034db 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.h @@ -30,7 +30,7 @@ class ConcatInt8CPUKernel : public ConcatBaseCPUKernel { public: ConcatInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ConcatBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~ConcatInt8CPUKernel() override { if (input_data_ != nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.h index 3a1ac8f385..9ffa1096a1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.h @@ -28,7 +28,7 @@ class Convolution3x3Int8CPUKernel : public ConvolutionBaseCPUKernel { public: Convolution3x3Int8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~Convolution3x3Int8CPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.cc index 84adbae71f..7da4f72097 100644 --- 
a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.cc @@ -105,7 +105,7 @@ int ConvolutionDepthwiseInt8CPUKernel::InitBuffer() { } int ConvolutionDepthwiseInt8CPUKernel::Init() { -if (!InferShapeDone()) { + if (!InferShapeDone()) { return RET_OK; } return ReSize(); @@ -196,7 +196,8 @@ int ConvolutionDepthwiseInt8CPUKernel::Run() { kernel::LiteKernel *CpuConvDwInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_DepthwiseConv2D); auto kernel = diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.h index d8778c8337..57cafca3d7 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.h @@ -27,7 +27,7 @@ class ConvolutionDepthwiseInt8CPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionDepthwiseInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~ConvolutionDepthwiseInt8CPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.cc index f63d320f89..e90d6512f5 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.cc @@ -32,10 +32,10 @@ using 
mindspore::schema::PrimitiveType_Conv2D; namespace mindspore::kernel { void ConvolutionInt8CPUKernel::CheckSupportOptimize() { tile_num_ = 24; -// #ifdef ENABLE_ARM32 -// tile_num_ = 2; -// support_optimize_ = false; -// #endif + // #ifdef ENABLE_ARM32 + // tile_num_ = 2; + // support_optimize_ = false; + // #endif #ifdef ENABLE_ARM64 void *optimize_op_handler = OptimizeModule::GetInstance()->optimized_op_handler_; @@ -404,7 +404,8 @@ int ConvolutionInt8CPUKernel::Run() { kernel::LiteKernel *CpuConvInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Conv2D); auto conv_param = reinterpret_cast(opParameter); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.h index 3e0270bf26..c577833618 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.h @@ -28,7 +28,7 @@ class ConvolutionInt8CPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~ConvolutionInt8CPUKernel() override { FreeTmpBuffer(); } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.h index 2583c8708f..46aabf4354 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.h @@ -30,7 +30,7 @@ class CropInt8CPUKernel : public 
CropBaseCPUKernel { public: CropInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : CropBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) { crop_para_ = reinterpret_cast(op_parameter_); crop_para_->thread_count_ = op_parameter_->thread_num_; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.cc index 2be8389915..a0099e5b1e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.cc @@ -233,7 +233,8 @@ int DeconvolutionDepthwiseInt8CPUKernel::Run() { kernel::LiteKernel *CpuDeconvDwInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_DeDepthwiseConv2D); auto kernel = diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.h index da033e6124..ee07bad693 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.h @@ -27,7 +27,7 @@ class DeconvolutionDepthwiseInt8CPUKernel : public ConvolutionBaseCPUKernel { public: DeconvolutionDepthwiseInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, 
primitive) {} ~DeconvolutionDepthwiseInt8CPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.cc index 4fdf8b5374..f39293b313 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.cc @@ -236,7 +236,8 @@ int DeConvInt8CPUKernel::Run() { kernel::LiteKernel *CpuDeConvInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_DeConv2D); auto kernel = new (std::nothrow) kernel::DeConvInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.h index 4fb89293f5..d5f1cc989c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.h @@ -33,7 +33,7 @@ class DeConvInt8CPUKernel : public ConvolutionBaseCPUKernel { public: DeConvInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~DeConvInt8CPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.h index d0cf906600..9649c27dad 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.h @@ -25,7 +25,7 
@@ class DepthToSpaceInt8CPUKernel : public DepthToSpaceBaseCPUKernel { public: DepthToSpaceInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : DepthToSpaceBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~DepthToSpaceInt8CPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.cc index e5db6555d1..657a8379c3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.cc @@ -169,7 +169,7 @@ int DivInt8CPUKernel::Run() { kernel::LiteKernel *CpuDivInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *parameter, const lite::Context *ctx, const KernelKey &desc, - const lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { if (parameter == nullptr || ctx == nullptr) { MS_LOG(ERROR) << "parameter or ctx is nullptr"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.h index 2385b87d21..0e240bb054 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.h @@ -26,7 +26,7 @@ class DivInt8CPUKernel : public LiteKernel { public: explicit DivInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~DivInt8CPUKernel() override {} diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/fullconnection_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/fullconnection_int8.h index 8cb8eb5b81..361bedbf73 100644 --- 
a/mindspore/lite/src/runtime/kernel/arm/int8/fullconnection_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/fullconnection_int8.h @@ -29,7 +29,7 @@ class FullconnectionInt8CPUKernel : public FullconnectionBaseCPUKernel { public: FullconnectionInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : FullconnectionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~FullconnectionInt8CPUKernel() override { FreeTmpBuffer(); } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.h index 9fb8eb83d7..6f39a5e23f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.h @@ -27,7 +27,7 @@ class HswishInt8CPUKernel : public LiteKernel { public: HswishInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {} ~HswishInt8CPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.h index 48677eb2ed..193495163b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.h @@ -29,7 +29,7 @@ class MatmulInt8CPUKernel : public MatmulBaseCPUKernel { public: MatmulInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : MatmulBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~MatmulInt8CPUKernel() override; int Init() override; 
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.cc index ce3db4fa8a..d06a24926e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.cc @@ -118,7 +118,7 @@ int MulInt8CPUKernel::DoExecute(int task_id) { kernel::LiteKernel *CpuMulInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, const KernelKey &desc, - const lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Mul); auto *kernel = new (std::nothrow) MulInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.h index 79f84c4987..3b2917d2e8 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.h @@ -26,7 +26,7 @@ class MulInt8CPUKernel : public LiteKernel { public: explicit MulInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx_->thread_num_) {} ~MulInt8CPUKernel() override{}; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.h index 76bff714bc..98dc1b00d1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.h @@ -28,7 +28,7 @@ class PadInt8CPUKernel : public LiteKernel { public: explicit PadInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const 
lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) { op_parameter_->thread_num_ = ctx->thread_num_; pad_param_ = reinterpret_cast(op_parameter_); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.h index fafdf09c6d..201ebe688b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.h @@ -30,7 +30,7 @@ class PoolingInt8CPUKernel : public PoolingBaseCPUKernel { public: PoolingInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : PoolingBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~PoolingInt8CPUKernel() { FreeQuantParam(); } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/prelu_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/prelu_int8.h index a47244f5fb..318a1eff83 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/prelu_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/prelu_int8.h @@ -30,7 +30,7 @@ class PreluInt8CPUKernel : public PreluBaseCPUKernel { public: PreluInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : PreluBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) { quant_prelu_parm_ = reinterpret_cast(op_parameter_); } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.h index 895e9016d9..800c8d69b3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.h @@ -37,7 +37,7 @@ class ReduceInt8CPUKernel : public ReduceBaseCPUKernel { public: 
ReduceInt8CPUKernel(OpParameter *param, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ReduceBaseCPUKernel(param, inputs, outputs, ctx, primitive) {} ~ReduceInt8CPUKernel() { for (auto i = 0; i < data_buffers_.size(); i++) { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/relux_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/relux_int8.h index 72f2a80871..21d54e61bb 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/relux_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/relux_int8.h @@ -27,7 +27,7 @@ class ReluXInt8CPUKernel : public LiteKernel { public: ReluXInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) { type_ = (reinterpret_cast(parameter))->type_; } @@ -48,7 +48,7 @@ class ReluInt8CPUKernel : public ReluXInt8CPUKernel { public: ReluInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ReluXInt8CPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~ReluInt8CPUKernel() override = default; @@ -65,7 +65,7 @@ class Relu6Int8CPUKernel : public ReluXInt8CPUKernel { public: Relu6Int8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ReluXInt8CPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~Relu6Int8CPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/reshape_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/reshape_int8.h index 57121b4af2..13f2450342 100644 --- 
a/mindspore/lite/src/runtime/kernel/arm/int8/reshape_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/reshape_int8.h @@ -30,7 +30,7 @@ class ReshapeInt8CPUKernel : public ReshapeBaseCPUKernel { public: ReshapeInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ReshapeBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~ReshapeInt8CPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.h index 58e45ebf17..7386e58a43 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.h @@ -29,7 +29,7 @@ class ResizeInt8CPUKernel : public ResizeBaseCPUKernel { public: ResizeInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : ResizeBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~ResizeInt8CPUKernel() { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.h index a644af4681..0f243a1b59 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.h @@ -26,7 +26,7 @@ class SigmoidInt8CPUKernel : public LiteKernel { public: SigmoidInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~SigmoidInt8CPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.h 
b/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.h index 322579c201..ad9fa7741a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.h @@ -26,7 +26,7 @@ class SliceInt8CPUKernel : public SliceBaseCPUKernel { public: SliceInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : SliceBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~SliceInt8CPUKernel() { } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.h index 70268ce4ff..c0aaea1b14 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.h @@ -26,7 +26,7 @@ class SoftmaxInt8CPUKernel : public SoftmaxBaseCPUKernel { public: SoftmaxInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : SoftmaxBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~SoftmaxInt8CPUKernel() { FreeTmpBuffer(); } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.h index ad20fd4adc..48bfcbb527 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.h @@ -30,7 +30,7 @@ class SplitInt8CPUKernel : public SplitBaseCPUKernel { public: SplitInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : SplitBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~SplitInt8CPUKernel() = default; diff --git 
a/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.h index d4ae65561f..128e32425e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.h @@ -30,7 +30,7 @@ class SqueezeInt8CPUKernel : public SqueezeBaseCPUKernel { public: SqueezeInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : SqueezeBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} ~SqueezeInt8CPUKernel() override { delete quant_Squeeze_parm_; } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.cc index adbd58b53d..f52161cb4f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.cc @@ -180,7 +180,7 @@ int SubInt8CPUKernel::Run() { kernel::LiteKernel *CpuSubInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *parameter, const lite::Context *ctx, const KernelKey &desc, - const lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { if (parameter == nullptr || ctx == nullptr) { MS_LOG(ERROR) << "parameter or ctx is nullptr"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.h index 49ee856e35..d662cc2beb 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.h @@ -26,7 +26,7 @@ class SubInt8CPUKernel : public LiteKernel { public: explicit SubInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, 
inputs, outputs, ctx, primitive) {} ~SubInt8CPUKernel() override {} diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.cc index 38e63be704..9cc37e9d66 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.cc @@ -64,7 +64,7 @@ int TopKInt8CPUKernel::Run() { kernel::LiteKernel *CpuTopKInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *parameter, const lite::Context *ctx, const KernelKey &desc, - const lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(parameter != nullptr); TopKInt8CPUKernel *kernel = new (std::nothrow) TopKInt8CPUKernel(parameter, inputs, outputs, ctx, primitive); if (kernel == nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.h index 1fc8de9ae7..ef5e07378d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.h @@ -25,7 +25,7 @@ class TopKInt8CPUKernel : public LiteKernel { public: explicit TopKInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} ~TopKInt8CPUKernel() override { TopkParameter *parameter = reinterpret_cast(op_parameter_); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.cc index c9eb45742b..bcca5f32bb 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.cc @@ -102,7 +102,8 @@ int Unsqueezeint8CPUKernel::Run() { kernel::LiteKernel *CpuUnsqueezeInt8KernelCreator(const std::vector &inputs, 
const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_Unsqueeze); auto *kernel = new (std::nothrow) Unsqueezeint8CPUKernel(opParameter, inputs, outputs, ctx, primitive); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.h index ba785211e8..9570340082 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.h @@ -29,7 +29,7 @@ class Unsqueezeint8CPUKernel : public LiteKernel { public: Unsqueezeint8CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const Context *ctx, - const lite::Primitive *primitive) + const mindspore::lite::PrimitiveC *primitive) : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { Unsq_para_ = reinterpret_cast(op_parameter_); Unsq_para_->thread_count_ = op_parameter_->thread_num_; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.cc index 60d6d0f86a..94c86eb4c2 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.cc @@ -23,7 +23,7 @@ #include "src/kernel_registry.h" #include "src/runtime/runtime_api.h" #include "include/errorcode.h" -#include "src/ops/ops.h" + #include "src/runtime/kernel/opencl/cl/fp32/activation.cl.inc" using mindspore::kernel::KERNEL_ARCH::kGPU; @@ -121,7 +121,8 @@ int ActivationOpenClKernel::GetImageSize(size_t idx, std::vector *img_si kernel::LiteKernel *OpenClActivationFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter 
*opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (inputs.size() == 0) { MS_LOG(ERROR) << "Input data size must be greater than 0, but your size is " << inputs.size(); return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc index ffdc3b076e..4159785f80 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc @@ -161,7 +161,8 @@ int ArithmeticOpenCLKernel::Run() { kernel::LiteKernel *OpenCLArithmeticKernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { auto *kernel = new (std::nothrow) ArithmeticOpenCLKernel(reinterpret_cast(opParameter), inputs, outputs, ctx); if (kernel == nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/batchnorm.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/batchnorm.cc index 93fa46d11f..3fae3526a5 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/batchnorm.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/batchnorm.cc @@ -129,7 +129,8 @@ int BatchNormOpenCLKernel::Run() { kernel::LiteKernel *OpenCLBatchnormKernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { auto *kernel = new (std::nothrow) BatchNormOpenCLKernel(opParameter, inputs, outputs); if (kernel == nullptr) { MS_LOG(ERROR) << "new BatchnormOpenCLKernel failed"; diff --git 
a/mindspore/lite/src/runtime/kernel/opencl/kernel/caffe_prelu.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/caffe_prelu.cc index 8820fc4e61..1ffa18e5e7 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/caffe_prelu.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/caffe_prelu.cc @@ -121,7 +121,8 @@ int CaffePReluOpenCLKernel::GetImageSize(size_t idx, std::vector *img_si kernel::LiteKernel *OpenCLCaffePReluKernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { if (inputs.size() == 0) { MS_LOG(ERROR) << "Input data size must be greater than 0, but your size is " << inputs.size(); return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.cc index b7dc8f1a86..08764c306e 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/concat.cc @@ -203,7 +203,8 @@ int ConcatOpenCLKernel::Run() { kernel::LiteKernel *OpenCLConcatKernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { auto *kernel = new (std::nothrow) ConcatOpenCLKernel(opParameter, inputs, outputs); if (kernel == nullptr) { MS_LOG(ERROR) << "new ConcatOpenCLKernel failed"; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc index 7339b5bbae..1cc4c37770 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc @@ -184,7 
+184,7 @@ kernel::LiteKernel *OpenCLConv2dTransposeKernelCreator(const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, const kernel::KernelKey &desc, - const lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { auto *kernel = new (std::nothrow) Conv2dTransposeOpenCLKernel(reinterpret_cast(opParameter), inputs, outputs); if (kernel == nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/convolution.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/convolution.cc index 56e9b0330f..e6d4c9b0a8 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/convolution.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/convolution.cc @@ -671,7 +671,8 @@ int ConvolutionOpenCLKernel::SetGlobalLocalConv(std::vector *global, std kernel::LiteKernel *OpenCLConvolutionKernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { auto *kernel = new (std::nothrow) ConvolutionOpenCLKernel(reinterpret_cast(opParameter), inputs, outputs); if (kernel == nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc index 7194f2b775..128256a1ce 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc @@ -193,7 +193,7 @@ kernel::LiteKernel *OpenCLDepthwiseConv2dKernelCreator(const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, const kernel::KernelKey &desc, - const lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { auto *kernel = new (std::nothrow) DepthwiseConv2dOpenCLKernel(reinterpret_cast(opParameter), inputs, outputs); if (kernel == nullptr) { diff 
--git a/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc index 06e993dc45..aa5d48bf07 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc @@ -156,7 +156,8 @@ int MatMulOpenCLKernel::Run() { kernel::LiteKernel *OpenCLMatMulKernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { bool hasBias = false; if (opParameter->type_ == PrimitiveType_FullConnection) { hasBias = (reinterpret_cast(opParameter))->has_bias_; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.cc index fcee02ce94..8b1ef397fd 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.cc @@ -145,7 +145,8 @@ int PoolingOpenCLKernel::Run() { kernel::LiteKernel *OpenCLPooling2dKernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { auto *kernel = new (std::nothrow) PoolingOpenCLKernel(reinterpret_cast(opParameter), inputs, outputs); if (kernel == nullptr) { MS_LOG(ERROR) << "Create OpenCL Pooling kernel failed!"; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.cc index a0fc919a6b..ed93a2a52f 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.cc @@ -97,7 +97,8 @@ int ReshapeOpenCLKernel::Run() { 
kernel::LiteKernel *OpenCLReshapeKernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { auto *kernel = new (std::nothrow) ReshapeOpenCLKernel(reinterpret_cast(opParameter), inputs, outputs); if (kernel == nullptr) { MS_LOG(ERROR) << "kernel " << opParameter->name_ << " create failed."; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc index 0f27337159..6345a08e66 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc @@ -158,7 +158,8 @@ int SoftmaxOpenCLKernel::Run() { kernel::LiteKernel *OpenCLSoftMaxKernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { auto *kernel = new (std::nothrow) SoftmaxOpenCLKernel(reinterpret_cast(opParameter), inputs, outputs); if (kernel == nullptr) { MS_LOG(ERROR) << "kernel " << opParameter->name_ << "is nullptr."; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/to_format.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/to_format.cc index d4e9d79a1f..323bb5b296 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/to_format.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/to_format.cc @@ -149,7 +149,8 @@ int ToFormatOpenCLKernel::Run() { kernel::LiteKernel *OpenCLToFormatKernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const 
mindspore::lite::PrimitiveC *primitive) { auto *kernel = new (std::nothrow) ToFormatOpenCLKernel(reinterpret_cast(opParameter), inputs, outputs); if (kernel == nullptr) { MS_LOG(ERROR) << "kernel " << opParameter->name_ << " create failed."; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.cc index 9c222b8662..a2200d2c80 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.cc @@ -117,7 +117,8 @@ int TransposeOpenCLKernel::Run() { kernel::LiteKernel *OpenCLTransposeKernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc, const lite::Primitive *primitive) { + const kernel::KernelKey &desc, + const mindspore::lite::PrimitiveC *primitive) { auto *kernel = new (std::nothrow) TransposeOpenCLKernel(reinterpret_cast(opParameter), inputs, outputs); if (kernel == nullptr) { diff --git a/mindspore/lite/src/scheduler.cc b/mindspore/lite/src/scheduler.cc index bef46f9ee7..8419a1db7c 100644 --- a/mindspore/lite/src/scheduler.cc +++ b/mindspore/lite/src/scheduler.cc @@ -56,7 +56,7 @@ int Scheduler::ReSizeKernels(const std::vector &kernels) { MS_LOG(ERROR) << "input kernel is nullptr!"; return RET_ERROR; } - auto primitive = const_cast(kernels[i]->GetPrimitive()); + auto primitive = const_cast(kernels[i]->GetPrimitive()); if (primitive == nullptr) { MS_LOG(ERROR) << "kernel(" << kernels[i]->name() << ")'s primitive is nullptr!"; return RET_ERROR; @@ -243,11 +243,11 @@ kernel::LiteKernel *Scheduler::CreateSubKernel(const std::vector &in_tensors, const std::vector &out_tensors, - const lite::Primitive *primitive) { + const mindspore::lite::PrimitiveC *primitive) { // todo: support NPU, APU MS_ASSERT(nullptr != primitive); auto data_type = in_tensors.front()->data_type(); - kernel::KernelKey 
desc{kernel::KERNEL_ARCH::kCPU, data_type, primitive->Type()}; + kernel::KernelKey desc{kernel::KERNEL_ARCH::kCPU, data_type, static_cast(primitive->Type())}; if (context_->device_ctx_.type == DT_GPU) { desc.arch = kernel::KERNEL_ARCH::kGPU; auto *kernel = KernelRegistry::GetInstance()->GetKernel(in_tensors, out_tensors, primitive, context_, desc); diff --git a/mindspore/lite/src/scheduler.h b/mindspore/lite/src/scheduler.h index ef1991c42c..0815d9c6be 100644 --- a/mindspore/lite/src/scheduler.h +++ b/mindspore/lite/src/scheduler.h @@ -21,6 +21,7 @@ #include "src/lite_kernel.h" #include "include/context.h" #include "include/model.h" +#include "src/ops/primitive_c.h" namespace mindspore::lite { class Scheduler { @@ -33,7 +34,8 @@ class Scheduler { protected: kernel::LiteKernel *ScheduleNode(const std::vector &in_tensors, - const std::vector &out_tensors, const lite::Primitive *primitive); + const std::vector &out_tensors, + const mindspore::lite::PrimitiveC *primitive); private: int InitOp2Kernel(const lite::Model *model, std::vector *tensors, diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32_tests.cc index 97966621ab..38c25d694b 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32_tests.cc @@ -20,7 +20,6 @@ #include "src/common/file_utils.h" #include "mindspore/lite/src/runtime/kernel/arm/base/convolution_base.h" #include "mindspore/lite/src/kernel_registry.h" -#include "mindspore/lite/src/ops/ops.h" namespace mindspore { class TestConvolutionDwFp32 : public mindspore::CommonTest { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc index a51901ac51..e9e6fbbd74 100644 --- 
a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc @@ -18,7 +18,6 @@ #include "utils/log_adapter.h" #include "common/common_test.h" #include "src/common/file_utils.h" -#include "mindspore/lite/src/ops/ops.h" #include "mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.h" #include "mindspore/lite/src/runtime/kernel/arm/nnacl/fp32/deconv.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc index 2e54fe362d..47d1d6c0f7 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc @@ -19,7 +19,6 @@ #include "common/common_test.h" #include "mindspore/lite/src/runtime/kernel/arm/nnacl/fp32/lstm.h" #include "mindspore/lite/src/kernel_registry.h" -#include "mindspore/lite/src/ops/ops.h" namespace mindspore { class LstmFp32 : public mindspore::CommonTest { diff --git a/mindspore/lite/tools/anf_importer/anf_importer.cc b/mindspore/lite/tools/anf_importer/anf_importer.cc index 50a78e7fea..e51c160854 100644 --- a/mindspore/lite/tools/anf_importer/anf_importer.cc +++ b/mindspore/lite/tools/anf_importer/anf_importer.cc @@ -15,17 +15,9 @@ */ #include -#include -#include -#include #include "tools/anf_importer/anf_importer.h" #include "schema/model_generated.h" #include "ir/dtype.h" -#include "ir/primitive.h" -#include "src/param_value_lite.h" -#include "frontend/operator/ops.h" -#include "abstract/abstract_value.h" -#include "src/ir/primitive_value.h" #include "include/errorcode.h" #include "schema/inner/model_generated.h" namespace mindspore { diff --git a/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc b/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc index 1f22b39b35..44b67dca56 100644 --- 
a/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc +++ b/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc @@ -22,7 +22,7 @@ #include "import_from_meta_graphT.h" #include "utils/log_adapter.h" #include "include/errorcode.h" -#include "src/ops/ops.h" + namespace mindspore::lite { int AnfImporterFromMetaGraphT::ConverterConstTensor() { diff --git a/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc b/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc index 51f4de1cd2..595ba6d6a0 100644 --- a/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc +++ b/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc @@ -27,10 +27,11 @@ #include "src/lite_session.h" #include "src/ir/primitive_t_value.h" #include "src/populate_parameter.h" +#include "src/ops/primitive_c.h" using mindspore::lite::KernelRegistry; -using mindspore::lite::tensor::Tensor; using mindspore::lite::PrimitiveTValue; +using mindspore::lite::tensor::Tensor; namespace mindspore::opt { namespace { const std::vector GetCNodeInputTensors(const CNodePtr &CNode) { @@ -52,7 +53,7 @@ const std::vector GetCNodeInputTensors(const CNodePtr &CNode) { auto lite_tensor_size = tensorT->data.size() * sizeof(uint8_t); // when tensorT as graph input if (lite_tensor_size == 0) { - return input_tensors; + return input_tensors; } auto tensor_data = new(std::nothrow)char[lite_tensor_size / sizeof(char)]; if (tensor_data == nullptr) { @@ -69,8 +70,7 @@ const std::vector GetCNodeInputTensors(const CNodePtr &CNode) { return input_tensors; } schema::Primitive *PackPrimitiveT(const CNodePtr &cnode) { - auto primitiveT_value = - GetValueNode>(cnode->input(0)); + auto primitiveT_value = GetValueNode>(cnode->input(0)); if (primitiveT_value == nullptr) { MS_LOG(ERROR) << "PrimitiveT_value is nullptr"; return nullptr; @@ -120,15 +120,14 @@ const ParameterPtr CreateNewParamter(const FuncGraphPtr &func_graph, Tensor *ten return parameter; } kernel::LiteKernel 
*GetLiteKernel(std::vector inputs, std::vector outputs, - lite::Primitive *primitive) { + mindspore::lite::PrimitiveC *primitive) { MS_ASSERT(nullptr != lite_primitive); auto data_type = inputs.front()->data_type(); - kernel::KernelKey desc{kernel::KERNEL_ARCH::kCPU, data_type, primitive->Type()}; + kernel::KernelKey desc{kernel::KERNEL_ARCH::kCPU, data_type, (schema::PrimitiveType)primitive->Type()}; lite::Context context; auto parameter = kernel::PopulateParameter(primitive); if (parameter == nullptr) { - MS_LOG(ERROR) - << "PopulateParameter return nullptr, type: " << schema::EnumNamePrimitiveType(primitive->Type()); + MS_LOG(ERROR) << "PopulateParameter return nullptr, type: " << (schema::PrimitiveType)primitive->Type(); return nullptr; } auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); @@ -155,13 +154,13 @@ const AnfNodePtr ConstFoldPass::Process(const FuncGraphPtr &func_graph, const An auto input_cnode = input_node->cast(); auto input_tensors = GetCNodeInputTensors(input_cnode); if (input_tensors.empty() || input_tensors.size() != input_cnode->inputs().size() - 1) { - return any_node; + return any_node; } MS_LOG(INFO) << "Begin fold node:" << input_node->fullname_with_scope(); auto output_nums = GetOutputTensorNum(input_cnode); std::vector output_tensors{output_nums, new Tensor()}; auto scheam_primitive = PackPrimitiveT(input_cnode); - auto lite_primitive = lite::Primitive::CreatePrimitive(scheam_primitive); + auto lite_primitive = mindspore::lite::PrimitiveC::CreatePrimitive(scheam_primitive); if (lite_primitive == nullptr) { MS_LOG(DEBUG) << "constant_folding schedule node lite primitive nullptr"; return nullptr;