|
|
@@ -15,12 +15,23 @@
 */

#include "src/runtime/kernel/arm/int8/pad_int8.h"
#include <string>
#include "include/errorcode.h"
#include "nnacl/errorcode.h"
#include "nnacl/int8/pad.h"
#include "src/runtime/runtime_api.h"

using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_MEMORY_FAILED;
using mindspore::lite::RET_NULL_PTR;
using mindspore::lite::RET_OK;

namespace mindspore::kernel {
namespace {
constexpr size_t kMirrorPadInputSize = 2;
}  // namespace

void PadInt8CPUKernel::FreeQuantParam() {
  if (pad_param_->pad_quant_arg_.in_quant_args_ != nullptr) {
    free(pad_param_->pad_quant_arg_.in_quant_args_);

@@ -122,6 +133,123 @@ int PadInt8Impl(void *cdata, int task_id) {
  return RET_OK;
}
|
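// Prepares Reflect/Symmetric padding: reads paddings from the second input tensor,
// validates them against the input shape, and precomputes strides and the mirror offset.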
int PadInt8CPUKernel::HandleMirrorPad() {
  auto ret = CopyPaddingFromInput();
  if (ret != RET_OK) {
    return ret;
  }
  ret = CheckPaddings(pad_param_->paddings_, DEFAULT_PAD_NDIMS, in_dims_, pad_param_->pad_mode_);
  if (ret != RET_OK) {
    return ret;
  }
  CalculateStrides();
  pad_param_->mirror_offset_ = pad_param_->pad_mode_ == static_cast<int>(schema::PaddingMode_REFLECT) ? 1 : 0;
  return RET_OK;
}
|
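// Computes input/output strides and the padded output dimensions from in_dims_ and paddings_.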
void PadInt8CPUKernel::CalculateStrides() {
  pad_param_->in_strides[DEFAULT_PAD_NDIMS - 1] = 1;
  for (auto i = DEFAULT_PAD_NDIMS - 2; i >= 0; --i) {
    pad_param_->in_strides[i] = in_dims_[i + 1] * pad_param_->in_strides[i + 1];
  }
  for (auto i = 0; i < DEFAULT_PAD_NDIMS; ++i) {
    out_dims_[i] = in_dims_[i] + pad_param_->paddings_[i * 2] + pad_param_->paddings_[i * 2 + 1];
  }
  pad_param_->out_strides[DEFAULT_PAD_NDIMS - 1] = 1;
  for (auto i = DEFAULT_PAD_NDIMS - 2; i >= 0; --i) {
    pad_param_->out_strides[i] = out_dims_[i + 1] * pad_param_->out_strides[i + 1];
  }
}
|
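// Left-pads the paddings array with zeros so that a rank-N padding spec fills the
// fixed-size buffer expected by the kernel (leading dimensions get zero padding).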
int PadInt8CPUKernel::ExtendPaddings(int *paddings, int length, const int *ori_paddings, int ori_length) {
  if (paddings == nullptr || ori_paddings == nullptr) {
    return RET_NULL_PTR;
  }
  for (auto i = 0; i < length - ori_length; ++i) {
    paddings[i] = 0;
  }
  for (auto i = length - ori_length; i < length; ++i) {
    paddings[i] = ori_paddings[i - (length - ori_length)];
  }
  return RET_OK;
}
|
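// Mirror-pads one slice of the output: each task handles output elements in [begin, end).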
int PadInt8CPUKernel::RunMirrorPadImpl(int task_id) {
  auto input = in_tensors_.at(0);
  auto output = out_tensors_.at(0);
  auto input_data = reinterpret_cast<int8_t *>(input->MutableData());
  auto output_data = reinterpret_cast<int8_t *>(output->MutableData());

  int unit = UP_DIV(output->ElementsNum(), context_->thread_num_);
  int begin = unit * task_id;
  int end = MSMIN(begin + unit, output->ElementsNum());
  MirrorPadInt8(input_data, output_data, in_dims_, pad_param_, begin, end);
  return RET_OK;
}
|
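// ParallelLaunch callback: forwards each task to RunMirrorPadImpl on the kernel instance.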
int MirrorPadImplInt8(void *cdata, int task_id) {
  auto padKernel = reinterpret_cast<PadInt8CPUKernel *>(cdata);
  int error_code = padKernel->RunMirrorPadImpl(task_id);
  if (error_code != NNACL_OK) {
    MS_LOG(ERROR) << "Pad Run error task_id[" << task_id << "] error_code[" << error_code << "]";
    return RET_ERROR;
  }
  return RET_OK;
}
|
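// Checks that every padding value is valid for its dimension: at most dim for SYMMETRIC
// mode and at most dim - 1 for REFLECT mode.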
int PadInt8CPUKernel::CheckPaddings(int *paddings, int length, int *input_shape, int mode) {
  if (paddings == nullptr || input_shape == nullptr) {
    return RET_NULL_PTR;
  }
  std::string prefix;
  int offset;
  if (mode == static_cast<int>(schema::PaddingMode_SYMMETRIC)) {
    prefix = "For Pad SYMMETRIC ";
    offset = 0;
  } else {
    prefix = "For Pad REFLECT ";
    offset = 1;
  }
  for (auto i = 0; i < length; ++i) {
    int max_valid = input_shape[i] - offset;
    if (paddings[i * 2] > max_valid) {
      MS_LOG(ERROR) << prefix << "paddings " << paddings[i * 2] << " should be less than " << max_valid + 1;
      return RET_ERROR;
    }
    if (paddings[i * 2 + 1] > max_valid) {
      MS_LOG(ERROR) << prefix << "paddings " << paddings[i * 2 + 1] << " should be less than " << max_valid + 1;
      return RET_ERROR;
    }
  }
  return RET_OK;
}
|
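// Reads the padding values from the second input tensor, validates their count against the
// input rank, and extends them to MAX_PAD_SIZE entries in pad_param_->paddings_.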
int PadInt8CPUKernel::CopyPaddingFromInput() {
  if (in_tensors_.size() != kMirrorPadInputSize) {
    MS_LOG(ERROR) << "Pad Reflect or Symmetric mode needs 2 inputs, got " << in_tensors_.size();
    return RET_ERROR;
  }
  auto padding_tensor = in_tensors_.at(1);
  auto paddings = reinterpret_cast<int *>(padding_tensor->MutableData());
  if (paddings == nullptr) {
    MS_LOG(ERROR) << "Pad second input data nullptr";
    return RET_ERROR;
  }
  auto input_shape = in_tensors_.at(0)->shape();
  int rank = static_cast<int>(input_shape.size());
  if (padding_tensor->ElementsNum() != rank * 2) {
    MS_LOG(ERROR) << "Pad second input elements num " << padding_tensor->ElementsNum() << ", should be " << rank * 2;
    return RET_ERROR;
  }

  auto ret = ExtendPaddings(pad_param_->paddings_, MAX_PAD_SIZE, paddings, padding_tensor->ElementsNum());
  if (ret != RET_OK) {
    return ret;
  }
  pad_param_->padding_length = MAX_PAD_SIZE;
  return RET_OK;
}
|
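// Constant padding keeps the original path (memset the constant value, then PadInt8Impl);
// Reflect/Symmetric padding goes through HandleMirrorPad and the mirror-pad kernel.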
int PadInt8CPUKernel::Run() {
  auto ret = Prepare();
  if (ret != RET_OK) {

@@ -131,12 +259,26 @@ int PadInt8CPUKernel::Run() {
  in_data_ = reinterpret_cast<int8_t *>(in_tensors_[0]->MutableData());
  out_data_ = reinterpret_cast<int8_t *>(out_tensors_[0]->MutableData());

  int error_code;
  if (pad_param_->pad_mode_ == static_cast<int>(schema::PaddingMode_CONSTANT)) {
    memset(out_data_, pad_param_->pad_quant_arg_.constant_value_[0], out_tensors_[0]->ElementsNum() * sizeof(int8_t));
    error_code = ParallelLaunch(this->context_->thread_pool_, PadInt8Impl, this, context_->thread_num_);
    if (error_code != RET_OK) {
      MS_LOG(ERROR) << "Pad run error, error_code[" << error_code << "]";
      return RET_ERROR;
    }
  } else {
    // mirror pad case
    ret = HandleMirrorPad();
    if (ret != RET_OK) {
      return ret;
    }
    error_code = ParallelLaunch(this->context_->thread_pool_, MirrorPadImplInt8, this, context_->thread_num_);
    if (error_code != RET_OK) {
      MS_LOG(ERROR) << "Pad Reflect or Symmetric mode run error, error_code[" << error_code << "]";
      return RET_ERROR;
    }
  }
  return RET_OK;
}
}  // namespace mindspore::kernel