diff --git a/mindspore/lite/nnacl/fp32/prelu.c b/mindspore/lite/nnacl/fp32/prelu.c
index c69bf526d7..ec77c1c254 100644
--- a/mindspore/lite/nnacl/fp32/prelu.c
+++ b/mindspore/lite/nnacl/fp32/prelu.c
@@ -107,21 +107,18 @@ void PRelu(float *input, float *output, PReluParameter *prelu_param_, int task_i
 void PReluShareChannel(float *input, float *output, PReluParameter *prelu_param_, int task_id) {
   for (int j = task_id; j < prelu_param_->tile_block_; j += prelu_param_->op_parameter_.thread_num_) {
     int cal_index;
-    int cal_per_time;
 #ifdef ENABLE_NEON
     float32x4_t slope_value = vdupq_n_f32(prelu_param_->slope_[0]);
     float32x4_t zero_value = vdupq_n_f32(0);
 #endif
 #ifdef ENABLE_ARM64
     cal_index = j * 64;
-    cal_per_time = 64;
 #elif ENABLE_ARM32
     cal_index = j * 32;
-    cal_per_time = 32;
 #else
     cal_index = j * 32;
-    cal_per_time = 32;
+    int cal_per_time = 32;
 #endif
     float *input_ptr = input + cal_index;
     float *output_ptr = input + cal_index;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.cc
index 0280f75141..aa67e167c9 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.cc
@@ -52,7 +52,7 @@ int CastFp16CPUKernel::ReSize() {
   if (data_num_ == 0) {
     return RET_OK;
   }
-  op_parameter_->thread_num_ = MSMIN(op_parameter_->thread_num_, data_num_);
+  op_parameter_->thread_num_ = MSMIN(op_parameter_->thread_num_, static_cast<int>(data_num_));
   stride_ = UP_DIV(data_num_, op_parameter_->thread_num_);
   return RET_OK;
 }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.cc
index 5fa547e828..b95867c3d1 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.cc
@@ -69,7 +69,7 @@ int ConcatFp16CPUKernel::MallocTmpBuffer() {
 }
 
 void ConcatFp16CPUKernel::FreeTmpBuffer() {
-  for (auto i = 0; i < fp16_inputs_.size(); i++) {
+  for (size_t i = 0; i < fp16_inputs_.size(); i++) {
     auto &in_tensor = in_tensors_.at(i);
     auto in_ptr = fp16_inputs_.at(i);
     if (in_tensor->data_type() == kNumberTypeFloat32 || in_tensor->data_type() == kNumberTypeFloat) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_base_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_base_fp16.cc
index 22f7143046..d317d8c694 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_base_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_base_fp16.cc
@@ -66,7 +66,7 @@ int ConvolutionBaseFP16CPUKernel::GetExecuteFilter() {
     MS_LOG(ERROR) << "malloc fp16_weight_ failed.";
     return RET_ERROR;
   }
-  for (int i = 0; i < fp16_weight_size / sizeof(float16_t); ++i) {
+  for (size_t i = 0; i < fp16_weight_size / sizeof(float16_t); ++i) {
     fp16_weight_[i] = (float16_t)origin_weight[i];
   }
   execute_weight_ = fp16_weight_;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc
index 6e36b6e470..19a81125dd 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc
@@ -97,7 +97,7 @@ int ReduceFp16CPUKernel::Run() {
   }
 
   fp16_src_data_ = fp16_input_;
-  for (int i = 0; i < data_buffers_.size(); ++i) {
+  for (size_t i = 0; i < data_buffers_.size(); ++i) {
     fp16_dst_data_ = data_buffers_[i];
     outer_size_ = outer_sizes_[i];
     inner_size_ = inner_sizes_[i];