From 2ccb34d4a866cca42b0e149563701d5f848887e6 Mon Sep 17 00:00:00 2001
From: liuwenhao4
Date: Sat, 1 Aug 2020 15:36:37 +0800
Subject: [PATCH] Add new HMS ops: int8 split and int8 eltwise

---
 mindspore/lite/src/populate_parameter.cc      |   2 +-
 .../src/runtime/kernel/arm/base/split_base.cc | 136 ++++
 .../src/runtime/kernel/arm/base/split_base.h  |  50 ++
 .../lite/src/runtime/kernel/arm/fp32/split.cc |  59 +-
 .../lite/src/runtime/kernel/arm/fp32/split.h  |  12 +-
 .../kernel/arm/int8/arithmetic_self_int8.cc   |  34 +-
 .../kernel/arm/int8/arithmetic_self_int8.h    |  36 +-
 .../src/runtime/kernel/arm/int8/split_int8.cc |  92 +++
 .../src/runtime/kernel/arm/int8/split_int8.h  |  47 ++
 .../arm/opclib/int8/arithmetic_self_int8.cc   | 298 +++++++--
 .../arm/opclib/int8/arithmetic_self_int8.h    |  18 +-
 .../kernel/arm/opclib/int8/split_int8.cc      |  73 +++
 .../kernel/arm/opclib/int8/split_int8.h       |  25 +
 .../kernel/arm/opclib/quantization/quantize.h |  10 +
 .../src/runtime/kernel/arm/opclib/split.cc    |   1 +
 .../src/runtime/kernel/arm/opclib/split.h     |  11 +-
 .../kernel/arm/opclib/split_parameter.h       |  33 +
 .../arm/int8/arithmetic_self_int8_tests.cc    | 590 ++++++++++++++++++
 .../kernel/arm/int8/split_int8_tests.cc       | 305 +++++++++
 19 files changed, 1691 insertions(+), 141 deletions(-)
 create mode 100644 mindspore/lite/src/runtime/kernel/arm/base/split_base.cc
 create mode 100644 mindspore/lite/src/runtime/kernel/arm/base/split_base.h
 create mode 100644 mindspore/lite/src/runtime/kernel/arm/int8/split_int8.cc
 create mode 100644 mindspore/lite/src/runtime/kernel/arm/int8/split_int8.h
 create mode 100644 mindspore/lite/src/runtime/kernel/arm/opclib/int8/split_int8.cc
 create mode 100644 mindspore/lite/src/runtime/kernel/arm/opclib/int8/split_int8.h
 create mode 100644 mindspore/lite/src/runtime/kernel/arm/opclib/split_parameter.h
 create mode 100644 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/split_int8_tests.cc

diff --git a/mindspore/lite/src/populate_parameter.cc b/mindspore/lite/src/populate_parameter.cc
index 513323963d..0f2061d6b9 100644
--- a/mindspore/lite/src/populate_parameter.cc
+++ b/mindspore/lite/src/populate_parameter.cc
@@ -47,7 +47,7 @@
 #include "src/runtime/kernel/arm/opclib/pad_parameter.h"
 #include "src/runtime/kernel/arm/opclib/fp32/fill.h"
 #include "src/runtime/kernel/arm/opclib/transpose.h"
-#include "src/runtime/kernel/arm/opclib/split.h"
+#include "src/runtime/kernel/arm/opclib/split_parameter.h"
 #include "src/runtime/kernel/arm/opclib/squeeze.h"
 #include "src/runtime/kernel/arm/opclib/fp32/gather.h"
 #include "src/runtime/kernel/arm/opclib/fp32/reverse.h"
diff --git a/mindspore/lite/src/runtime/kernel/arm/base/split_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/split_base.cc
new file mode 100644
index 0000000000..7e04a8ab2d
--- /dev/null
+++ b/mindspore/lite/src/runtime/kernel/arm/base/split_base.cc
@@ -0,0 +1,136 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +#include "src/runtime/kernel/arm/base/split_base.h" +#include +#include "src/runtime/kernel/arm/int8/split_int8.h" +#include "src/runtime/kernel/arm/fp32/split.h" +#include "schema/model_generated.h" +#include "src/kernel_factory.h" +#include "include/errorcode.h" +#include "include/context.h" + +using mindspore::lite::KernelRegistrar; +using mindspore::lite::RET_ERROR; +using mindspore::lite::RET_OK; +using mindspore::schema::PrimitiveType_Split; + +namespace mindspore::kernel { +int SplitBaseCPUKernel::Init() { + auto in_tensor = inputs_.front(); + auto input_shape = in_tensor->shape(); + + param->strides_[input_shape.size() - 1] = 1; + for (int i = input_shape.size() - 2; i >= 0; i--) { + param->strides_[i] = param->strides_[i + 1] * input_shape[i + 1]; + } + + param->split_count_ = + param->strides_[0] * input_shape[0] / (input_shape[param->split_dim_] * param->strides_[param->split_dim_]); + param->n_dims_ = input_shape.size(); + + if (param->split_sizes_[0] == 0) { + if (input_shape[param->split_dim_] % param->num_split_ != 0) { + MS_LOG(ERROR) << "Default split size is not usable."; + return RET_ERROR; + } + int split_size = input_shape[param->split_dim_] / param->num_split_; + for (int i = 0; i < param->num_split_; i++) { + param->split_sizes_[i] = split_size; + } + } + + num_unit_ = param->split_count_ * param->num_split_; + thread_n_num_ = MSMIN(thread_count_, num_unit_); + thread_n_stride_ = UP_DIV(num_unit_, thread_n_num_); + return RET_OK; +} + +kernel::LiteKernel *CpuSplitInt8KernelCreator(const std::vector &inputs, + const std::vector &outputs, + OpParameter *opParameter, const Context *ctx, + const kernel::KernelKey &desc) { + if (opParameter == nullptr) { + MS_LOG(ERROR) << "Input opParameter is nullptr!"; + return nullptr; + } + MS_ASSERT(desc.type == schema::PrimitiveType_Split); + auto *kernel = new (std::nothrow) SplitInt8CPUKernel(opParameter, inputs, outputs, ctx); + if (kernel == nullptr) { + MS_LOG(ERROR) << "new SplitCPUKernel fail!"; + return nullptr; + } + auto ret = kernel->Init(); + if (ret != RET_OK) { + delete kernel; + MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " + << schema::EnumNamePrimitiveType(static_cast(opParameter->type_)); + return nullptr; + } + return kernel; +} + +kernel::LiteKernel *CpuSplitInt32KernelCreator(const std::vector &inputs, + const std::vector &outputs, + OpParameter *opParameter, const Context *ctx, + const kernel::KernelKey &desc) { + if (opParameter == nullptr) { + MS_LOG(ERROR) << "Input opParameter is nullptr!"; + return nullptr; + } + MS_ASSERT(desc.type == schema::PrimitiveType_Split); + auto *kernel = new (std::nothrow) SplitCPUKernel(opParameter, inputs, outputs, ctx); + if (kernel == nullptr) { + MS_LOG(ERROR) << "new SplitCPUKernel fail!"; + return nullptr; + } + auto ret = kernel->Init(); + if (ret != RET_OK) { + delete kernel; + MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " + << schema::EnumNamePrimitiveType(static_cast(opParameter->type_)); + return nullptr; + } + return kernel; +} + +kernel::LiteKernel *CpuSplitFp32KernelCreator(const std::vector &inputs, + const std::vector &outputs, + OpParameter *opParameter, const Context *ctx, + const kernel::KernelKey &desc) { + if (opParameter == nullptr) { + MS_LOG(ERROR) << "Input opParameter is nullptr!"; + return nullptr; + } + MS_ASSERT(desc.type == schema::PrimitiveType_Split); + auto *kernel = new (std::nothrow) SplitCPUKernel(opParameter, inputs, outputs, ctx); + if (kernel == nullptr) { + 
MS_LOG(ERROR) << "new SplitCPUKernel fail!"; + return nullptr; + } + auto ret = kernel->Init(); + if (ret != RET_OK) { + delete kernel; + MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " + << schema::EnumNamePrimitiveType(static_cast(opParameter->type_)); + return nullptr; + } + return kernel; +} + +REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Split, CpuSplitInt8KernelCreator) +REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Split, CpuSplitInt32KernelCreator) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Split, CpuSplitFp32KernelCreator) +} // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/base/split_base.h b/mindspore/lite/src/runtime/kernel/arm/base/split_base.h new file mode 100644 index 0000000000..d5ffffbd54 --- /dev/null +++ b/mindspore/lite/src/runtime/kernel/arm/base/split_base.h @@ -0,0 +1,50 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_SPLIT_BASE_H_ +#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_SPLIT_BASE_H_ + +#include +#include "src/lite_kernel.h" +#include "src/runtime/kernel/arm/opclib/split_parameter.h" + +using mindspore::lite::Context; + +namespace mindspore::kernel { +class SplitBaseCPUKernel : public LiteKernel { + public: + SplitBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, + const std::vector &outputs, const Context *ctx) + : LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->thread_num_) { + param = reinterpret_cast(opParameter); + } + ~SplitBaseCPUKernel() = default; + + int Init() override; + int ReSize() override { return 0; } + int Run() override { return 0; } + + protected: + int thread_count_; + const Context *ctx_; + int thread_n_stride_; + int thread_n_num_; + int num_unit_; + SplitParameter *param; +}; +} // namespace mindspore::kernel + +#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_SPLIT_BASE_H_ diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/split.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/split.cc index 3bbf34c436..eae295a8fc 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/split.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/split.cc @@ -14,11 +14,10 @@ * limitations under the License. 
*/ -#include -#include #include "src/runtime/kernel/arm/fp32/split.h" +#include "src/runtime/kernel/arm/base/split_base.h" #include "src/runtime/kernel/arm/opclib/split.h" -#include "schema/model_generated.h" +#include "src/runtime/kernel/arm/opclib/split_parameter.h" #include "src/kernel_registry.h" #include "include/errorcode.h" #include "src/runtime/runtime_api.h" @@ -32,38 +31,12 @@ using mindspore::schema::PrimitiveType_Split; namespace mindspore::kernel { int SplitCPUKernel::Init() { + SplitBaseCPUKernel::Init(); auto in_tensor = inputs_.front(); input_ptr_ = reinterpret_cast(in_tensor->Data()); - auto input_shape = in_tensor->shape(); - auto param = reinterpret_cast(opParameter); - - param->strides_[input_shape.size() - 1] = 1; - for (int i = input_shape.size() - 2; i >= 0; i--) { - param->strides_[i] = param->strides_[i + 1] * input_shape[i + 1]; - } - - param->split_count_ = - param->strides_[0] * input_shape[0] / (input_shape[param->split_dim_] * param->strides_[param->split_dim_]); for (int i = 0; i < param->num_split_; i++) { output_ptr_.push_back(reinterpret_cast(outputs_.at(i)->Data())); } - param->n_dims_ = input_shape.size(); - - if (param->split_sizes_[0] == 0) { - if (input_shape[param->split_dim_] % param->num_split_ != 0) { - MS_LOG(ERROR) << "Default split size is not usable."; - return RET_ERROR; - } - int split_size = input_shape[param->split_dim_] / param->num_split_; - for (int i = 0; i < param->num_split_; i++) { - param->split_sizes_[i] = split_size; - } - } - - num_unit_ = param->split_count_ * param->num_split_; - unit_size_ = param->strides_[param->split_dim_]; - thread_n_num_ = MSMIN(thread_num_, num_unit_); - thread_n_stride_ = UP_DIV(num_unit_, thread_n_num_); return RET_OK; } @@ -76,7 +49,7 @@ int SplitCPUKernel::Split(int task_id) { } int thread_offset = task_id * thread_n_stride_; auto ret = DoSplit(input_ptr_, output_ptr_.data(), inputs_.front()->shape().data(), thread_offset, num_unit_thread, - reinterpret_cast(opParameter)); + param); if (ret != RET_OK) { MS_LOG(ERROR) << "Split error task_id[" << task_id << "] error_code[" << ret << "]"; return RET_ERROR; @@ -103,28 +76,4 @@ int SplitCPUKernel::Run() { return RET_OK; } - -kernel::LiteKernel *CpuSplitFp32KernelCreator(const std::vector &inputs, - const std::vector &outputs, - OpParameter *opParameter, const lite::Context *ctx, - const kernel::KernelKey &desc) { - MS_ASSERT(opParameter != nullptr); - MS_ASSERT(desc.type == schema::PrimitiveType_Split); - auto *kernel = new (std::nothrow) SplitCPUKernel(opParameter, inputs, outputs, ctx); - if (kernel == nullptr) { - MS_LOG(ERROR) << "New kernel fails."; - return nullptr; - } - - auto ret = kernel->Init(); - if (ret != RET_OK) { - MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " - << schema::EnumNamePrimitiveType(static_cast(opParameter->type_)); - delete kernel; - return nullptr; - } - return kernel; -} - -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Split, CpuSplitFp32KernelCreator) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/split.h b/mindspore/lite/src/runtime/kernel/arm/fp32/split.h index 48e0350713..5761367abb 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/split.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/split.h @@ -18,15 +18,15 @@ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_SPLIT_H_ #include - +#include "src/runtime/kernel/arm/base/split_base.h" #include "src/lite_kernel.h" namespace mindspore::kernel { -class SplitCPUKernel : public 
LiteKernel {
+class SplitCPUKernel : public SplitBaseCPUKernel {
  public:
   SplitCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
                  const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
-      : LiteKernel(parameter, inputs, outputs), thread_num_(ctx->thread_num_) {}
+      : SplitBaseCPUKernel(parameter, inputs, outputs, ctx) {}
   ~SplitCPUKernel() override = default;
 
   int Init() override;
@@ -35,15 +35,9 @@ class SplitCPUKernel : public LiteKernel {
   int Split(int task_id);
 
  private:
-  int thread_num_;
-  int thread_n_stride_;
-  int thread_n_num_;
-  int num_unit_;
-  int unit_size_;
   float *input_ptr_;
   std::vector<float *> output_ptr_;
 };
 }  // namespace mindspore::kernel
 
 #endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_SPLIT_H_
-
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc
index aa821fd4e2..1688f27bb4 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc
@@ -31,16 +31,28 @@ int ArithmeticSelfInt8CPUKernel::Init() {
   int ret = ReSize();
   auto *input_tensor = inputs_.at(kInputIndex);
   auto in_quant_args = input_tensor->GetQuantParams();
-  arithmeticSelfParameter_->quant_arg_.in_args_.scale_ = in_quant_args.front().scale;
-  arithmeticSelfParameter_->quant_arg_.in_args_.zp_ = in_quant_args.front().zeroPoint;
+  para_->quant_arg_.in_args_.scale_ = in_quant_args.front().scale;
+  para_->quant_arg_.in_args_.zp_ = in_quant_args.front().zeroPoint * (-1);
 
   auto *out_tensor = outputs_.at(kOutputIndex);
   auto out_quant_args = out_tensor->GetQuantParams();
-  arithmeticSelfParameter_->quant_arg_.out_args_.scale_ = out_quant_args.front().scale;
-  arithmeticSelfParameter_->quant_arg_.out_args_.zp_ = out_quant_args.front().zeroPoint;
+  para_->quant_arg_.out_args_.scale_ = out_quant_args.front().scale;
+  para_->quant_arg_.out_args_.zp_ = out_quant_args.front().zeroPoint;
+
+  para_->quant_arg_.output_activation_max_ = std::numeric_limits<int8_t>::max();
+  para_->quant_arg_.output_activation_min_ = std::numeric_limits<int8_t>::min();
+
+  if (para_->op_parameter_.type_ == PrimitiveType_Square) {
+    const double real_multiplier =
+      (para_->quant_arg_.in_args_.scale_ * para_->quant_arg_.in_args_.scale_) / para_->quant_arg_.out_args_.scale_;
+
+    int right_shift = 0;
+    QuantizeMultiplierSmallerThanOne(real_multiplier, &para_->quant_arg_.output_multiplier_, &right_shift);
+
+    para_->quant_arg_.shift_left_ = right_shift < 0 ? -right_shift : 0;
+    para_->quant_arg_.shift_right_ = right_shift > 0 ? right_shift : 0;
+  }
 
-  arithmeticSelfParameter_->quant_arg_.output_activation_max_ = std::numeric_limits<int8_t>::max();
-  arithmeticSelfParameter_->quant_arg_.output_activation_min_ = std::numeric_limits<int8_t>::min();
   return ret;
 }
@@ -68,7 +80,7 @@ int ArithmeticSelfInt8CPUKernel::DoArithmeticSelf(int task_id) {
   }
   int offset = task_id * thread_sz_stride_;
   if (arithmeticSelf_run_) {
-    auto ret = arithmeticSelf_run_(in_ptr_ + offset, out_ptr_ + offset, size, arithmeticSelfParameter_->quant_arg_);
+    auto ret = arithmeticSelf_run_(in_ptr_ + offset, out_ptr_ + offset, size, para_->quant_arg_);
     if (ret != RET_OK) {
       MS_LOG(ERROR) << "Run failed, illegal input!
"; return ret; @@ -117,4 +129,12 @@ kernel::LiteKernel *CpuArithmeticSelfInt8KernelCreator(const std::vector(parameter); + para_ = reinterpret_cast(parameter); } ~ArithmeticSelfInt8CPUKernel() override = default; @@ -65,7 +97,7 @@ class ArithmeticSelfInt8CPUKernel : public LiteKernel { int thread_sz_count_; int thread_sz_stride_; size_t data_size_; - ArithmeticSelfParameter *arithmeticSelfParameter_; + ArithmeticSelfParameter *para_; ArithmeticSelfInt8Run arithmeticSelf_run_; const Context *ctx_; int8_t *in_ptr_; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.cc new file mode 100644 index 0000000000..63a621f0dc --- /dev/null +++ b/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.cc @@ -0,0 +1,92 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "src/runtime/kernel/arm/int8/split_int8.h" +#include +#include "src/runtime/kernel/arm/opclib/split_parameter.h" +#include "src/runtime/kernel/arm/opclib/int8/split_int8.h" +#include "include/errorcode.h" +#include "src/runtime/runtime_api.h" + +using mindspore::kernel::KERNEL_ARCH::kCPU; +using mindspore::lite::RET_ERROR; +using mindspore::lite::RET_OK; + +namespace mindspore::kernel { + +int SplitInt8CPUKernel::Init() { + SplitBaseCPUKernel::Init(); + auto in_tensor = inputs_.at(kInputIndex); + input_ptr_ = reinterpret_cast(in_tensor->Data()); + for (int i = 0; i < param->num_split_; i++) { + output_ptr_.push_back(reinterpret_cast(outputs_.at(i)->Data())); + } + + auto in_quant_args = in_tensor->GetQuantParams(); + param->quant_arg_.in_args_.scale_ = in_quant_args.front().scale; + param->quant_arg_.in_args_.zp_ = in_quant_args.front().zeroPoint; + + MS_ASSERT(param->num_split_ == outputs_.size()); + for (int i = 0; i < param->num_split_; i++) { + auto *out_tensor = outputs_.at(i); + auto out_quant_args = out_tensor->GetQuantParams(); + param->quant_arg_.out_args_[i].scale_ = out_quant_args.front().scale; + param->quant_arg_.out_args_[i].zp_ = out_quant_args.front().zeroPoint; + } + + param->quant_arg_.output_activation_max_ = std::numeric_limits::max(); + param->quant_arg_.output_activation_min_ = std::numeric_limits::min(); + + return RET_OK; +} + +int SplitInt8CPUKernel::ReSize() { return RET_OK; } + +int SplitInt8CPUKernel::Split(int task_id) { + int num_unit_thread = MSMIN(thread_n_stride_, num_unit_ - task_id * thread_n_stride_); + if (num_unit_thread <= 0) { + return RET_OK; + } + int thread_offset = task_id * thread_n_stride_; + auto ret = + DoSplit(input_ptr_, output_ptr_.data(), inputs_.front()->shape().data(), thread_offset, num_unit_thread, param); + if (ret != RET_OK) { + MS_LOG(ERROR) << "Split error task_id[" << task_id << "] error_code[" << ret << "]"; + return RET_ERROR; + } + return RET_OK; +} + +int SplitInt8Run(int task_id, LiteParallelGroupEnv *penv, void *cdata) { + auto g_kernel = reinterpret_cast(cdata); + auto ret = g_kernel->Split(task_id); + if (ret != RET_OK) { 
+    MS_LOG(ERROR) << "SplitRun error task_id[" << task_id << "] error_code[" << ret << "]";
+    return RET_ERROR;
+  }
+  return RET_OK;
+}
+
+int SplitInt8CPUKernel::Run() {
+  int ret = LiteBackendParallelLaunch(SplitInt8Run, this, thread_n_num_);
+  if (ret != RET_OK) {
+    MS_LOG(ERROR) << "Split error error_code[" << ret << "]";
+    return RET_ERROR;
+  }
+
+  return RET_OK;
+}
+}  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.h
new file mode 100644
index 0000000000..501d595634
--- /dev/null
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.h
@@ -0,0 +1,47 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_SPLIT_INT8_H_
+#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_SPLIT_INT8_H_
+
+#include <vector>
+#include "src/lite_kernel.h"
+#include "include/context.h"
+#include "src/runtime/kernel/arm/base/split_base.h"
+#include "src/runtime/runtime_api.h"
+
+using mindspore::lite::Context;
+
+namespace mindspore::kernel {
+class SplitInt8CPUKernel : public SplitBaseCPUKernel {
+ public:
+  SplitInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
+                     const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx)
+      : SplitBaseCPUKernel(parameter, inputs, outputs, ctx) {}
+  ~SplitInt8CPUKernel() = default;
+
+  int Init() override;
+  int ReSize() override;
+  int Run() override;
+  int Split(int task_id);
+
+ private:
+  int8_t *input_ptr_;
+  std::vector<int8_t *> output_ptr_;
+};
+}  // namespace mindspore::kernel
+
+#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_SPLIT_INT8_H_
diff --git a/mindspore/lite/src/runtime/kernel/arm/opclib/int8/arithmetic_self_int8.cc b/mindspore/lite/src/runtime/kernel/arm/opclib/int8/arithmetic_self_int8.cc
index 2994936865..d18da08e38 100644
--- a/mindspore/lite/src/runtime/kernel/arm/opclib/int8/arithmetic_self_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/opclib/int8/arithmetic_self_int8.cc
@@ -16,77 +16,263 @@
 #include <math.h>
 #include "src/runtime/kernel/arm/opclib/int8/arithmetic_self_int8.h"
+#ifdef ENABLE_NEON
+#include <arm_neon.h>
+#include "src/runtime/kernel/arm/opclib/add_int8.h"
+#endif
+#include "src/runtime/kernel/arm/opclib/quantization/fixed_point.h"
 
 int ElementFloor(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para) {
-  if (para.in_args_.scale_ == para.out_args_.scale_ && para.in_args_.zp_ == para.out_args_.zp_) {
-    for (int i = 0; i < element_size; i++) {
-      output[i] = floorf(input[i]);
-    }
-  } else {
-    float in_scale = para.in_args_.scale_;
-    int32_t in_zp = para.in_args_.zp_;
-    float out_scale = para.out_args_.scale_;
-    int32_t out_zp = para.out_args_.zp_;
-    float bias = -in_zp * in_scale;
-    for (int i = 0; i < element_size; i++) {
-      int32_t output_tmp = round(floorf(input[i] * in_scale + bias) / out_scale) + out_zp;
-      if (output_tmp > para.output_activation_max_) {
-        output[i] = para.output_activation_max_;
-      } else if (output_tmp < 
para.output_activation_min_) { - output[i] = para.output_activation_min_; - } else { - output[i] = static_cast(output_tmp); - } + float in_scale = para.in_args_.scale_; + int32_t in_zp = para.in_args_.zp_; + float out_scale = para.out_args_.scale_; + int32_t out_zp = para.out_args_.zp_; + float bias = in_zp * in_scale; + for (int i = 0; i < element_size; i++) { + int32_t output_tmp = round(floorf(input[i] * in_scale + bias) / out_scale) + out_zp; + if (output_tmp > para.output_activation_max_) { + output[i] = para.output_activation_max_; + } else if (output_tmp < para.output_activation_min_) { + output[i] = para.output_activation_min_; + } else { + output[i] = static_cast(output_tmp); } } return OPCLIB_OK; } int ElementRound(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para) { - if (para.in_args_.scale_ == para.out_args_.scale_ && para.in_args_.zp_ == para.out_args_.zp_) { - for (int i = 0; i < element_size; i++) { - output[i] = round(input[i]); - } - } else { - float in_scale = para.in_args_.scale_; - int32_t in_zp = para.in_args_.zp_; - float out_scale = para.out_args_.scale_; - int32_t out_zp = para.out_args_.zp_; - float bias = -in_zp * in_scale; - for (int i = 0; i < element_size; i++) { - int32_t output_tmp = round(round(input[i] * in_scale + bias) / out_scale) + out_zp; - if (output_tmp > para.output_activation_max_) { - output[i] = para.output_activation_max_; - } else if (output_tmp < para.output_activation_min_) { - output[i] = para.output_activation_min_; - } else { - output[i] = static_cast(output_tmp); - } + float in_scale = para.in_args_.scale_; + int32_t in_zp = para.in_args_.zp_; + float out_scale = para.out_args_.scale_; + int32_t out_zp = para.out_args_.zp_; + float bias = in_zp * in_scale; + for (int i = 0; i < element_size; i++) { + int32_t output_tmp = round(round(input[i] * in_scale + bias) / out_scale) + out_zp; + if (output_tmp > para.output_activation_max_) { + output[i] = para.output_activation_max_; + } else if (output_tmp < para.output_activation_min_) { + output[i] = para.output_activation_min_; + } else { + output[i] = static_cast(output_tmp); } } return OPCLIB_OK; } int ElementCeil(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para) { - if (para.in_args_.scale_ == para.out_args_.scale_ && para.in_args_.zp_ == para.out_args_.zp_) { - for (int i = 0; i < element_size; i++) { - output[i] = ceil(input[i]); + float in_scale = para.in_args_.scale_; + int32_t in_zp = para.in_args_.zp_; + float out_scale = para.out_args_.scale_; + int32_t out_zp = para.out_args_.zp_; + float bias = in_zp * in_scale; + for (int i = 0; i < element_size; i++) { + int32_t output_tmp = round(ceil(input[i] * in_scale + bias) / out_scale) + out_zp; + if (output_tmp > para.output_activation_max_) { + output[i] = para.output_activation_max_; + } else if (output_tmp < para.output_activation_min_) { + output[i] = para.output_activation_min_; + } else { + output[i] = static_cast(output_tmp); + } + } + return OPCLIB_OK; +} + +int ElementAbs(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para) { + float in_scale = para.in_args_.scale_; + int32_t in_zp = para.in_args_.zp_; + float out_scale = para.out_args_.scale_; + int32_t out_zp = para.out_args_.zp_; + float bias = in_zp * in_scale; + for (int i = 0; i < element_size; i++) { + int32_t output_tmp = round(fabsf(input[i] * in_scale + bias) / out_scale) + out_zp; + if (output_tmp > para.output_activation_max_) { + output[i] = para.output_activation_max_; + } else if (output_tmp < 
para.output_activation_min_) { + output[i] = para.output_activation_min_; + } else { + output[i] = static_cast(output_tmp); + } + } + return OPCLIB_OK; +} + +int ElementSin(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para) { + float in_scale = para.in_args_.scale_; + int32_t in_zp = para.in_args_.zp_; + float out_scale = para.out_args_.scale_; + int32_t out_zp = para.out_args_.zp_; + float bias = in_zp * in_scale; + for (int i = 0; i < element_size; i++) { + int32_t output_tmp = round(sinf(input[i] * in_scale + bias) / out_scale) + out_zp; + if (output_tmp > para.output_activation_max_) { + output[i] = para.output_activation_max_; + } else if (output_tmp < para.output_activation_min_) { + output[i] = para.output_activation_min_; + } else { + output[i] = static_cast(output_tmp); + } + } + return OPCLIB_OK; +} + +int ElementCos(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para) { + float in_scale = para.in_args_.scale_; + int32_t in_zp = para.in_args_.zp_; + float out_scale = para.out_args_.scale_; + int32_t out_zp = para.out_args_.zp_; + float bias = in_zp * in_scale; + for (int i = 0; i < element_size; i++) { + int32_t output_tmp = round(cosf(input[i] * in_scale + bias) / out_scale) + out_zp; + if (output_tmp > para.output_activation_max_) { + output[i] = para.output_activation_max_; + } else if (output_tmp < para.output_activation_min_) { + output[i] = para.output_activation_min_; + } else { + output[i] = static_cast(output_tmp); + } + } + return OPCLIB_OK; +} + +int ElementLog(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para) { + float in_scale = para.in_args_.scale_; + int32_t in_zp = para.in_args_.zp_; + float out_scale = para.out_args_.scale_; + int32_t out_zp = para.out_args_.zp_; + float bias = in_zp * in_scale; + for (int i = 0; i < element_size; i++) { + int32_t output_tmp = round(logf(input[i] * in_scale + bias) / out_scale) + out_zp; + if (output_tmp > para.output_activation_max_) { + output[i] = para.output_activation_max_; + } else if (output_tmp < para.output_activation_min_) { + output[i] = para.output_activation_min_; + } else { + output[i] = static_cast(output_tmp); + } + } + return OPCLIB_OK; +} + +int ElementSqrt(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para) { + float in_scale = para.in_args_.scale_; + int32_t in_zp = para.in_args_.zp_; + float out_scale = para.out_args_.scale_; + int32_t out_zp = para.out_args_.zp_; + float bias = in_zp * in_scale; + for (int i = 0; i < element_size; i++) { + float input_f32 = input[i] * in_scale + bias; + if (input_f32 < 0) { + return OPCLIB_ERRCODE_SQRT_NEGATIVE; } - } else { - float in_scale = para.in_args_.scale_; - int32_t in_zp = para.in_args_.zp_; - float out_scale = para.out_args_.scale_; - int32_t out_zp = para.out_args_.zp_; - float bias = -in_zp * in_scale; - for (int i = 0; i < element_size; i++) { - int32_t output_tmp = round(ceil(input[i] * in_scale + bias) / out_scale) + out_zp; - if (output_tmp > para.output_activation_max_) { - output[i] = para.output_activation_max_; - } else if (output_tmp < para.output_activation_min_) { - output[i] = para.output_activation_min_; - } else { - output[i] = static_cast(output_tmp); - } + int32_t output_tmp = round(sqrtf(input_f32) / out_scale) + out_zp; + if (output_tmp > para.output_activation_max_) { + output[i] = para.output_activation_max_; + } else if (output_tmp < para.output_activation_min_) { + output[i] = para.output_activation_min_; + } else { + output[i] = static_cast(output_tmp); + 
}
+  }
+  return OPCLIB_OK;
+}
+
+int ElementRsqrt(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para) {
+  float in_scale = para.in_args_.scale_;
+  int32_t in_zp = para.in_args_.zp_;
+  float out_scale = para.out_args_.scale_;
+  int32_t out_zp = para.out_args_.zp_;
+  float bias = in_zp * in_scale;
+  for (int i = 0; i < element_size; i++) {
+    float input_f32 = input[i] * in_scale + bias;
+    if (input_f32 <= 0) {
+      return OPCLIB_ERRCODE_RSQRT_NEGATIVE_OR_ZERO;
+    }
+    int32_t output_tmp = round(1.f / (sqrtf(input_f32) * out_scale)) + out_zp;
+    if (output_tmp > para.output_activation_max_) {
+      output[i] = para.output_activation_max_;
+    } else if (output_tmp < para.output_activation_min_) {
+      output[i] = para.output_activation_min_;
+    } else {
+      output[i] = static_cast<int8_t>(output_tmp);
+    }
+  }
+  return OPCLIB_OK;
+}
+
+#ifdef ENABLE_NEON
+
+int16x4_t ClacSumHalfWord(int32x4_t scaled_input, int32x4_t left_shift_out_vec, int32x4_t output_multiplier_vec,
+                          ArithSelfQuantArg para) {
+  int32x4_t input_scale = vmulq_s32(scaled_input, scaled_input);
+  int32x4_t raw_sum = RoundingDivideByPOTInt32x4(
+    SaturatingRoundingDoublingHighMulInt32x4(vmulq_s32(input_scale, left_shift_out_vec), output_multiplier_vec),
+    para.shift_right_);
+  raw_sum = vaddq_s32(raw_sum, vdupq_n_s32(para.out_args_.zp_));
+  raw_sum = vmaxq_s32(raw_sum, vdupq_n_s32(para.output_activation_min_));
+  raw_sum = vminq_s32(raw_sum, vdupq_n_s32(para.output_activation_max_));
+  return vqmovn_s32(raw_sum);
+}
+
+void SquareInt8NEON(int8_t *input_data, int8_t *output_data, int64_t element_size, ArithSelfQuantArg para,
+                    int *index) {
+  int32x4_t output_multiplier_vec = vdupq_n_s32(para.output_multiplier_);
+  int32x4_t left_shift_out_vec = vdupq_n_s32(1 << para.shift_left_);
+
+  for (; (*index) <= element_size - 8; (*index) += 8) {
+    int16x8_t input_val = LoadAndAddOffset(input_data, *index, para.in_args_.zp_);
+    int32x4_t input_low = vmovl_s16(vget_low_s16(input_val));
+    int32x4_t input_high = vmovl_s16(vget_high_s16(input_val));
+
+    int16x4_t sum_low = ClacSumHalfWord(input_low, left_shift_out_vec, output_multiplier_vec, para);
+    int16x4_t sum_high = ClacSumHalfWord(input_high, left_shift_out_vec, output_multiplier_vec, para);
+
+    int16x8_t res_s16 = vcombine_s16(sum_low, sum_high);
+    int8x8_t res_u8_n0 = vqmovn_s16(res_s16);
+    vst1_s8(output_data + (*index), res_u8_n0);
+  }
+}
+#endif
+
+int ElementSquare(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para) {
+  int32_t in_zp = para.in_args_.zp_;
+  int32_t out_zp = para.out_args_.zp_;
+
+  int index = 0;
+#ifdef ENABLE_NEON
+  SquareInt8NEON(input, output, element_size, para, &index);
+#endif
+  for (; index < element_size; index++) {
+    const int32_t input_val = input[index] + in_zp;
+    int32_t output_tmp = RoundingDivideByPOT(
+      SaturatingRoundingDoublingHighMul(input_val * input_val * (1 << para.shift_left_), para.output_multiplier_),
+      para.shift_right_);
+    output_tmp += out_zp;
+    if (output_tmp > para.output_activation_max_) {
+      output[index] = para.output_activation_max_;
+    } else if (output_tmp < para.output_activation_min_) {
+      output[index] = para.output_activation_min_;
+    } else {
+      output[index] = static_cast<int8_t>(output_tmp);
+    }
+  }
+  return OPCLIB_OK;
+}
+
+int ElementLogicalNot(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para) {
+  float in_scale = para.in_args_.scale_;
+  int32_t in_zp = para.in_args_.zp_;
+  float out_scale = para.out_args_.scale_;
+  int32_t out_zp = para.out_args_.zp_;
+  float bias = in_zp * in_scale;
+  for (int i = 0; i < 
element_size; i++) { + int32_t output_tmp = round(((float)(!(bool)(input[i] * in_scale + bias))) / out_scale) + out_zp; + if (output_tmp > para.output_activation_max_) { + output[i] = para.output_activation_max_; + } else if (output_tmp < para.output_activation_min_) { + output[i] = para.output_activation_min_; + } else { + output[i] = static_cast(output_tmp); } } return OPCLIB_OK; diff --git a/mindspore/lite/src/runtime/kernel/arm/opclib/int8/arithmetic_self_int8.h b/mindspore/lite/src/runtime/kernel/arm/opclib/int8/arithmetic_self_int8.h index bd75f83d07..a9880f82e0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/opclib/int8/arithmetic_self_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/opclib/int8/arithmetic_self_int8.h @@ -27,6 +27,22 @@ int ElementRound(int8_t *input, int8_t *output, int element_size, ArithSelfQuant int ElementFloor(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); -int ElementCeil(int8_t *input, int8_t *output, int number, ArithSelfQuantArg para); +int ElementCeil(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); + +int ElementAbs(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); + +int ElementSin(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); + +int ElementCos(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); + +int ElementLog(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); + +int ElementSqrt(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); + +int ElementRsqrt(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); + +int ElementSquare(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); + +int ElementLogicalNot(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_ARITHMETIC_SELF_INT8_H_ diff --git a/mindspore/lite/src/runtime/kernel/arm/opclib/int8/split_int8.cc b/mindspore/lite/src/runtime/kernel/arm/opclib/int8/split_int8.cc new file mode 100644 index 0000000000..5ba3db9a5f --- /dev/null +++ b/mindspore/lite/src/runtime/kernel/arm/opclib/int8/split_int8.cc @@ -0,0 +1,73 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "src/runtime/kernel/arm/opclib/int8/split_int8.h"
+#include "src/runtime/kernel/arm/opclib/split_parameter.h"
+#include <string.h>
+#include "src/runtime/kernel/arm/opclib/errorcode.h"
+
+int DoSplit(int8_t *in_data, int8_t **out_data, const int *input_shape, int offset, int num_unit,
+            SplitParameter *param) {
+  if (in_data == nullptr || out_data == nullptr) {
+    return OPCLIB_ERR;
+  }
+  int num_split = param->num_split_;
+  int *split_sizes = param->split_sizes_;
+  int *strides = param->strides_;
+  int split_dim = param->split_dim_;
+  int in_stride = strides[split_dim];
+
+  int stride_per_split = in_stride * input_shape[split_dim];
+  int split_which = offset % num_split;
+  int split_times = offset / num_split;
+  int8_t *src = in_data + split_times * stride_per_split;
+  for (int i = 0; i < split_which; i++) {
+    src += split_sizes[i] * in_stride;
+  }
+
+  QuantArg in_quant_arg = param->quant_arg_.in_args_;
+  float in_scale = in_quant_arg.scale_;
+  int32_t in_zp = in_quant_arg.zp_;
+  QuantArg *out_quant_arg = param->quant_arg_.out_args_;
+
+  for (int i = offset; i < offset + num_unit; i++) {
+    split_which = i % num_split;
+    split_times = i / num_split;
+    int copy_size = split_sizes[split_which] * in_stride;
+    int8_t *dst = out_data[split_which] + split_times * copy_size;
+    float out_scale = out_quant_arg[split_which].scale_;
+    int32_t out_zp = out_quant_arg[split_which].zp_;
+    if (in_scale == out_scale && in_zp == out_zp) {
+      (void)memcpy(dst, src, copy_size * sizeof(int8_t));
+    } else {
+      float scale = in_scale / out_scale;
+      float bias = -in_zp * scale;
+      for (int j = 0; j < copy_size; j++) {
+        int32_t output_tmp = round(src[j] * scale + bias) + out_zp;
+        if (output_tmp > param->quant_arg_.output_activation_max_) {
+          dst[j] = param->quant_arg_.output_activation_max_;
+        } else if (output_tmp < param->quant_arg_.output_activation_min_) {
+          dst[j] = param->quant_arg_.output_activation_min_;
+        } else {
+          dst[j] = static_cast<int8_t>(output_tmp);
+        }
+      }
+    }
+    src += copy_size;
+  }
+
+  return OPCLIB_OK;
+}
diff --git a/mindspore/lite/src/runtime/kernel/arm/opclib/int8/split_int8.h b/mindspore/lite/src/runtime/kernel/arm/opclib/int8/split_int8.h
new file mode 100644
index 0000000000..4357137c3b
--- /dev/null
+++ b/mindspore/lite/src/runtime/kernel/arm/opclib/int8/split_int8.h
@@ -0,0 +1,25 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_SPLIT_INT8_H_ +#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_SPLIT_INT8_H_ +#include "src/runtime/kernel/arm/opclib/op_base.h" +#include "src/runtime/kernel/arm/opclib/split_parameter.h" + +int DoSplit(int8_t *in_data, int8_t **out_data, const int *input_shape, int offset, int num_unit, + SplitParameter *split_param); + +#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_SPLIT_INT8_H_ diff --git a/mindspore/lite/src/runtime/kernel/arm/opclib/quantization/quantize.h b/mindspore/lite/src/runtime/kernel/arm/opclib/quantization/quantize.h index a0d78631f1..592c30ecbe 100644 --- a/mindspore/lite/src/runtime/kernel/arm/opclib/quantization/quantize.h +++ b/mindspore/lite/src/runtime/kernel/arm/opclib/quantization/quantize.h @@ -89,6 +89,16 @@ struct ArithSelfQuantArg { QuantArg out_args_; int output_activation_min_; int output_activation_max_; + int output_multiplier_; + int shift_left_; + int shift_right_; +}; + +struct SplitQuantArg { + QuantArg in_args_; + QuantArg out_args_[20]; + int output_activation_min_; + int output_activation_max_; }; void QuantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift); diff --git a/mindspore/lite/src/runtime/kernel/arm/opclib/split.cc b/mindspore/lite/src/runtime/kernel/arm/opclib/split.cc index 2a51702d41..de74f6f621 100644 --- a/mindspore/lite/src/runtime/kernel/arm/opclib/split.cc +++ b/mindspore/lite/src/runtime/kernel/arm/opclib/split.cc @@ -15,6 +15,7 @@ */ #include "src/runtime/kernel/arm/opclib/split.h" +#include "src/runtime/kernel/arm/opclib/split_parameter.h" #include #include "src/runtime/kernel/arm/opclib/errorcode.h" diff --git a/mindspore/lite/src/runtime/kernel/arm/opclib/split.h b/mindspore/lite/src/runtime/kernel/arm/opclib/split.h index 3297a6afae..464442c25a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/opclib/split.h +++ b/mindspore/lite/src/runtime/kernel/arm/opclib/split.h @@ -18,16 +18,7 @@ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_SPLIT_H_ #include "src/runtime/kernel/arm/opclib/op_base.h" - -struct SplitParameter { - OpParameter op_parameter_; - int num_split_; - int split_sizes_[20] = {0}; - int strides_[8]; - int split_dim_; - int n_dims_; - int split_count_; -}; +#include "src/runtime/kernel/arm/opclib/split_parameter.h" int DoSplit(float *in_data, float **out_data, const int *input_shape, int offset, int num_unit, SplitParameter *split_param); diff --git a/mindspore/lite/src/runtime/kernel/arm/opclib/split_parameter.h b/mindspore/lite/src/runtime/kernel/arm/opclib/split_parameter.h new file mode 100644 index 0000000000..ac22028916 --- /dev/null +++ b/mindspore/lite/src/runtime/kernel/arm/opclib/split_parameter.h @@ -0,0 +1,33 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_SPLIT_PARAMETER_H_ +#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_SPLIT_PARAMETER_H_ + +#include "src/runtime/kernel/arm/opclib/op_base.h" + +struct SplitParameter { + OpParameter op_parameter_; + SplitQuantArg quant_arg_; + int num_split_; + int split_sizes_[20] = {0}; + int strides_[20]; + int split_dim_; + int n_dims_; + int split_count_; +}; + +#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_SPLIT_PARAMETER_H_ diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc index d291a8e2d5..c64560513f 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc @@ -383,4 +383,594 @@ TEST_F(TestArithmeticSelfInt8, ceil_quant1_thread2) { delete ctx; } +TEST_F(TestArithmeticSelfInt8, abs_quant0_thread0) { + std::vector input1 = {-1, -2, -3, -4, -5, -6, 7, 8, 9, 10, 11, 12}; + std::vector shape1 = {2, 3, 2}; + std::vector input(1, nullptr); + input[0] = input1.data(); + + const int output_size = 12; + int8_t output[12]; + std::vector output_shape = {2, 3, 2}; + lite::tensor::QuantArg input_quant_arg; + input_quant_arg.scale = 1.0; + input_quant_arg.zeroPoint = 0; + lite::tensor::QuantArg output_quant_arg; + output_quant_arg.scale = 1.0; + output_quant_arg.zeroPoint = 0; + + TypeId tid_int8 = kNumberTypeInt8; + lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + input_tensor1->SetData(input1.data()); + input_tensor1->set_shape(shape1); + input_tensor1->AddQuantParam(input_quant_arg); + input_tensor1->set_data_type(tid_int8); + std::vector inputs_tensor(1); + inputs_tensor[0] = input_tensor1; + + lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + output0_tensor->SetData(output); + output0_tensor->set_shape(output_shape); + output0_tensor->AddQuantParam(output_quant_arg); + output0_tensor->set_data_type(tid_int8); + std::vector outputs_tensor(1); + outputs_tensor[0] = output0_tensor; + + ArithmeticSelfParameter op_param; + op_param.op_parameter_.type_ = schema::PrimitiveType_Abs; + lite::Context *ctx = new lite::Context; + ctx->thread_num_ = 1; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Abs}; + auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); + ASSERT_NE(creator, nullptr); + kernel::LiteKernel *kernel = + creator(inputs_tensor, outputs_tensor, reinterpret_cast(&op_param), ctx, desc); + ASSERT_NE(kernel, nullptr); + auto output_tensor_shape = output0_tensor->shape(); + ASSERT_EQ(output_tensor_shape, output_shape); + kernel->Run(); + + std::vector except_result = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; + PrintData("output data", output, output_size); + PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size()); + CompareOutputData(output, except_result.data(), output_size, 0.000001); + + input_tensor1->SetData(nullptr); + output0_tensor->SetData(nullptr); + delete input_tensor1; + delete output0_tensor; + delete ctx; +} + +TEST_F(TestArithmeticSelfInt8, abs_quant1_thread2) { + std::vector input1 = {-1, -2, -3, -4, -5, -6, 7, 8, 9, 10, 11, 12}; + std::vector shape1 = {2, 3, 2}; + std::vector input(1, nullptr); + input[0] = input1.data(); + + const int output_size = 12; + int8_t output[12]; + std::vector output_shape = {2, 3, 2}; + lite::tensor::QuantArg 
input_quant_arg; + input_quant_arg.scale = 0.8; + input_quant_arg.zeroPoint = 0; + lite::tensor::QuantArg output_quant_arg; + output_quant_arg.scale = 1.5; + output_quant_arg.zeroPoint = 0; + + TypeId tid_int8 = kNumberTypeInt8; + lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + input_tensor1->SetData(input1.data()); + input_tensor1->set_shape(shape1); + input_tensor1->AddQuantParam(input_quant_arg); + input_tensor1->set_data_type(tid_int8); + std::vector inputs_tensor(1); + inputs_tensor[0] = input_tensor1; + + lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + output0_tensor->SetData(output); + output0_tensor->set_shape(output_shape); + output0_tensor->AddQuantParam(output_quant_arg); + output0_tensor->set_data_type(tid_int8); + std::vector outputs_tensor(1); + outputs_tensor[0] = output0_tensor; + + ArithmeticSelfParameter op_param; + op_param.op_parameter_.type_ = schema::PrimitiveType_Abs; + lite::Context *ctx = new lite::Context; + ctx->thread_num_ = 2; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Abs}; + auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); + ASSERT_NE(creator, nullptr); + kernel::LiteKernel *kernel = + creator(inputs_tensor, outputs_tensor, reinterpret_cast(&op_param), ctx, desc); + ASSERT_NE(kernel, nullptr); + auto output_tensor_shape = output0_tensor->shape(); + ASSERT_EQ(output_tensor_shape, output_shape); + kernel->Run(); + + std::vector except_result = {1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6}; + PrintData("output data", output, output_size); + PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size()); + CompareOutputData(output, except_result.data(), output_size, 0.000001); + + input_tensor1->SetData(nullptr); + output0_tensor->SetData(nullptr); + delete input_tensor1; + delete output0_tensor; + delete ctx; +} + +TEST_F(TestArithmeticSelfInt8, sin_quant0_thread2) { + std::vector input1 = {1, 2, 3, 4}; + std::vector shape1 = {2, 2}; + std::vector input(1, nullptr); + input[0] = input1.data(); + + const int output_size = 4; + int8_t output[4]; + std::vector output_shape = {2, 2}; + lite::tensor::QuantArg input_quant_arg; + input_quant_arg.scale = 1.0; + input_quant_arg.zeroPoint = 0; + lite::tensor::QuantArg output_quant_arg; + output_quant_arg.scale = 1.0; + output_quant_arg.zeroPoint = 0; + + TypeId tid_int8 = kNumberTypeInt8; + lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + input_tensor1->SetData(input1.data()); + input_tensor1->set_shape(shape1); + input_tensor1->AddQuantParam(input_quant_arg); + input_tensor1->set_data_type(tid_int8); + std::vector inputs_tensor(1); + inputs_tensor[0] = input_tensor1; + + lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + output0_tensor->SetData(output); + output0_tensor->set_shape(output_shape); + output0_tensor->AddQuantParam(output_quant_arg); + output0_tensor->set_data_type(tid_int8); + std::vector outputs_tensor(1); + outputs_tensor[0] = output0_tensor; + + ArithmeticSelfParameter op_param; + op_param.op_parameter_.type_ = schema::PrimitiveType_Sin; + lite::Context *ctx = new lite::Context; + ctx->thread_num_ = 2; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Sin}; + auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); + ASSERT_NE(creator, nullptr); + kernel::LiteKernel *kernel = + creator(inputs_tensor, outputs_tensor, reinterpret_cast(&op_param), ctx, desc); + ASSERT_NE(kernel, nullptr); + auto 
output_tensor_shape = output0_tensor->shape(); + ASSERT_EQ(output_tensor_shape, output_shape); + kernel->Run(); + + std::vector except_result = {1, 1, 0, -1}; + PrintData("output data", output, output_size); + PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size()); + CompareOutputData(output, except_result.data(), output_size, 0.000001); + + input_tensor1->SetData(nullptr); + output0_tensor->SetData(nullptr); + delete input_tensor1; + delete output0_tensor; + delete ctx; +} + +TEST_F(TestArithmeticSelfInt8, cos_quant0_thread2) { + std::vector input1 = {1, 2, 3, 4}; + std::vector shape1 = {2, 2}; + std::vector input(1, nullptr); + input[0] = input1.data(); + + const int output_size = 4; + int8_t output[4]; + std::vector output_shape = {2, 2}; + lite::tensor::QuantArg input_quant_arg; + input_quant_arg.scale = 1.0; + input_quant_arg.zeroPoint = 0; + lite::tensor::QuantArg output_quant_arg; + output_quant_arg.scale = 1.0; + output_quant_arg.zeroPoint = 0; + + TypeId tid_int8 = kNumberTypeInt8; + lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + input_tensor1->SetData(input1.data()); + input_tensor1->set_shape(shape1); + input_tensor1->AddQuantParam(input_quant_arg); + input_tensor1->set_data_type(tid_int8); + std::vector inputs_tensor(1); + inputs_tensor[0] = input_tensor1; + + lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + output0_tensor->SetData(output); + output0_tensor->set_shape(output_shape); + output0_tensor->AddQuantParam(output_quant_arg); + output0_tensor->set_data_type(tid_int8); + std::vector outputs_tensor(1); + outputs_tensor[0] = output0_tensor; + + ArithmeticSelfParameter op_param; + op_param.op_parameter_.type_ = schema::PrimitiveType_Cos; + lite::Context *ctx = new lite::Context; + ctx->thread_num_ = 2; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Cos}; + auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); + ASSERT_NE(creator, nullptr); + kernel::LiteKernel *kernel = + creator(inputs_tensor, outputs_tensor, reinterpret_cast(&op_param), ctx, desc); + ASSERT_NE(kernel, nullptr); + auto output_tensor_shape = output0_tensor->shape(); + ASSERT_EQ(output_tensor_shape, output_shape); + kernel->Run(); + + std::vector except_result = {1, 0, -1, -1}; + PrintData("output data", output, output_size); + PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size()); + CompareOutputData(output, except_result.data(), output_size, 0.000001); + + input_tensor1->SetData(nullptr); + output0_tensor->SetData(nullptr); + delete input_tensor1; + delete output0_tensor; + delete ctx; +} + +TEST_F(TestArithmeticSelfInt8, log_quant0_thread2) { + std::vector input1 = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; + std::vector shape1 = {2, 3, 2}; + std::vector input(1, nullptr); + input[0] = input1.data(); + + const int output_size = 12; + int8_t output[12]; + std::vector output_shape = {2, 3, 2}; + lite::tensor::QuantArg input_quant_arg; + input_quant_arg.scale = 1.0; + input_quant_arg.zeroPoint = 0; + lite::tensor::QuantArg output_quant_arg; + output_quant_arg.scale = 1.0; + output_quant_arg.zeroPoint = 0; + + TypeId tid_int8 = kNumberTypeInt8; + lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + input_tensor1->SetData(input1.data()); + input_tensor1->set_shape(shape1); + input_tensor1->AddQuantParam(input_quant_arg); + input_tensor1->set_data_type(tid_int8); + std::vector inputs_tensor(1); + inputs_tensor[0] = input_tensor1; + + 
lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + output0_tensor->SetData(output); + output0_tensor->set_shape(output_shape); + output0_tensor->AddQuantParam(output_quant_arg); + output0_tensor->set_data_type(tid_int8); + std::vector outputs_tensor(1); + outputs_tensor[0] = output0_tensor; + + ArithmeticSelfParameter op_param; + op_param.op_parameter_.type_ = schema::PrimitiveType_Log; + lite::Context *ctx = new lite::Context; + ctx->thread_num_ = 2; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Log}; + auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); + ASSERT_NE(creator, nullptr); + kernel::LiteKernel *kernel = + creator(inputs_tensor, outputs_tensor, reinterpret_cast(&op_param), ctx, desc); + ASSERT_NE(kernel, nullptr); + auto output_tensor_shape = output0_tensor->shape(); + ASSERT_EQ(output_tensor_shape, output_shape); + kernel->Run(); + + std::vector except_result = {0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2}; + PrintData("output data", output, output_size); + PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size()); + CompareOutputData(output, except_result.data(), output_size, 0.000001); + + input_tensor1->SetData(nullptr); + output0_tensor->SetData(nullptr); + delete input_tensor1; + delete output0_tensor; + delete ctx; +} + +TEST_F(TestArithmeticSelfInt8, sqrt_quant0_thread2) { + std::vector input1 = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; + std::vector shape1 = {2, 3, 2}; + std::vector input(1, nullptr); + input[0] = input1.data(); + + const int output_size = 12; + int8_t output[12]; + std::vector output_shape = {2, 3, 2}; + lite::tensor::QuantArg input_quant_arg; + input_quant_arg.scale = 1.0; + input_quant_arg.zeroPoint = 0; + lite::tensor::QuantArg output_quant_arg; + output_quant_arg.scale = 1.0; + output_quant_arg.zeroPoint = 0; + + TypeId tid_int8 = kNumberTypeInt8; + lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor; + input_tensor1->SetData(input1.data()); + input_tensor1->set_shape(shape1); + input_tensor1->AddQuantParam(input_quant_arg); + input_tensor1->set_data_type(tid_int8); + std::vector inputs_tensor(1); + inputs_tensor[0] = input_tensor1; + + lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor; + output0_tensor->SetData(output); + output0_tensor->set_shape(output_shape); + output0_tensor->AddQuantParam(output_quant_arg); + output0_tensor->set_data_type(tid_int8); + std::vector outputs_tensor(1); + outputs_tensor[0] = output0_tensor; + + ArithmeticSelfParameter op_param; + op_param.op_parameter_.type_ = schema::PrimitiveType_Sqrt; + lite::Context *ctx = new lite::Context; + ctx->thread_num_ = 2; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Sqrt}; + auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); + ASSERT_NE(creator, nullptr); + kernel::LiteKernel *kernel = + creator(inputs_tensor, outputs_tensor, reinterpret_cast(&op_param), ctx, desc); + ASSERT_NE(kernel, nullptr); + auto output_tensor_shape = output0_tensor->shape(); + ASSERT_EQ(output_tensor_shape, output_shape); + kernel->Run(); + + std::vector except_result = {1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3}; + PrintData("output data", output, output_size); + PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size()); + CompareOutputData(output, except_result.data(), output_size, 0.000001); + + input_tensor1->SetData(nullptr); + output0_tensor->SetData(nullptr); + delete input_tensor1; + 
+  delete output0_tensor;
+  delete ctx;
+}
+
+TEST_F(TestArithmeticSelfInt8, rsqrt_quant0_thread2) {
+  std::vector<int8_t> input1 = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
+  std::vector<int> shape1 = {2, 3, 2};
+  std::vector<int8_t *> input(1, nullptr);
+  input[0] = input1.data();
+
+  const int output_size = 12;
+  int8_t output[12];
+  std::vector<int> output_shape = {2, 3, 2};
+  lite::tensor::QuantArg input_quant_arg;
+  input_quant_arg.scale = 1.0;
+  input_quant_arg.zeroPoint = 0;
+  lite::tensor::QuantArg output_quant_arg;
+  output_quant_arg.scale = 1.0;
+  output_quant_arg.zeroPoint = 0;
+
+  TypeId tid_int8 = kNumberTypeInt8;
+  lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor;
+  input_tensor1->SetData(input1.data());
+  input_tensor1->set_shape(shape1);
+  input_tensor1->AddQuantParam(input_quant_arg);
+  input_tensor1->set_data_type(tid_int8);
+  std::vector<lite::tensor::Tensor *> inputs_tensor(1);
+  inputs_tensor[0] = input_tensor1;
+
+  lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor;
+  output0_tensor->SetData(output);
+  output0_tensor->set_shape(output_shape);
+  output0_tensor->AddQuantParam(output_quant_arg);
+  output0_tensor->set_data_type(tid_int8);
+  std::vector<lite::tensor::Tensor *> outputs_tensor(1);
+  outputs_tensor[0] = output0_tensor;
+
+  ArithmeticSelfParameter op_param;
+  op_param.op_parameter_.type_ = schema::PrimitiveType_Rsqrt;
+  lite::Context *ctx = new lite::Context;
+  ctx->thread_num_ = 2;
+  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Rsqrt};
+  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
+  ASSERT_NE(creator, nullptr);
+  kernel::LiteKernel *kernel =
+      creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc);
+  ASSERT_NE(kernel, nullptr);
+  auto output_tensor_shape = output0_tensor->shape();
+  ASSERT_EQ(output_tensor_shape, output_shape);
+  kernel->Run();
+
+  std::vector<int8_t> except_result = {1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0};
+  PrintData("output data", output, output_size);
+  PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
+  CompareOutputData(output, except_result.data(), output_size, 0.000001);
+
+  input_tensor1->SetData(nullptr);
+  output0_tensor->SetData(nullptr);
+  delete input_tensor1;
+  delete output0_tensor;
+  delete ctx;
+}
+
+TEST_F(TestArithmeticSelfInt8, square_quant0_thread2) {
+  std::vector<int8_t> input1 = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
+  std::vector<int> shape1 = {2, 3, 2};
+  std::vector<int8_t *> input(1, nullptr);
+  input[0] = input1.data();
+
+  const int output_size = 12;
+  int8_t output[12];
+  std::vector<int> output_shape = {2, 3, 2};
+  lite::tensor::QuantArg input_quant_arg;
+  input_quant_arg.scale = 1.0;
+  input_quant_arg.zeroPoint = 0;
+  lite::tensor::QuantArg output_quant_arg;
+  output_quant_arg.scale = 1.0;
+  output_quant_arg.zeroPoint = 0;
+
+  TypeId tid_int8 = kNumberTypeInt8;
+  lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor;
+  input_tensor1->SetData(input1.data());
+  input_tensor1->set_shape(shape1);
+  input_tensor1->AddQuantParam(input_quant_arg);
+  input_tensor1->set_data_type(tid_int8);
+  std::vector<lite::tensor::Tensor *> inputs_tensor(1);
+  inputs_tensor[0] = input_tensor1;
+
+  lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor;
+  output0_tensor->SetData(output);
+  output0_tensor->set_shape(output_shape);
+  output0_tensor->AddQuantParam(output_quant_arg);
+  output0_tensor->set_data_type(tid_int8);
+  std::vector<lite::tensor::Tensor *> outputs_tensor(1);
+  outputs_tensor[0] = output0_tensor;
+
+  ArithmeticSelfParameter op_param;
+  op_param.op_parameter_.type_ = schema::PrimitiveType_Square;
+  lite::Context *ctx = new lite::Context;
+  ctx->thread_num_ = 2;
+  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Square};
+  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
+  ASSERT_NE(creator, nullptr);
+  kernel::LiteKernel *kernel =
+      creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc);
+  ASSERT_NE(kernel, nullptr);
+  auto output_tensor_shape = output0_tensor->shape();
+  ASSERT_EQ(output_tensor_shape, output_shape);
+  kernel->Run();
+
+  std::vector<int8_t> except_result = {1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, 127};
+  PrintData("output data", output, output_size);
+  PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
+  CompareOutputData(output, except_result.data(), output_size, 0.000001);
+
+  input_tensor1->SetData(nullptr);
+  output0_tensor->SetData(nullptr);
+  delete input_tensor1;
+  delete output0_tensor;
+  delete ctx;
+}
+
+TEST_F(TestArithmeticSelfInt8, square_quant1_thread2) {
+  std::vector<int8_t> input1 = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
+  std::vector<int> shape1 = {2, 3, 2};
+  std::vector<int8_t *> input(1, nullptr);
+  input[0] = input1.data();
+
+  const int output_size = 12;
+  int8_t output[12];
+  std::vector<int> output_shape = {2, 3, 2};
+  lite::tensor::QuantArg input_quant_arg;
+  input_quant_arg.scale = 0.8;
+  input_quant_arg.zeroPoint = 0;
+  lite::tensor::QuantArg output_quant_arg;
+  output_quant_arg.scale = 1.5;
+  output_quant_arg.zeroPoint = 0;
+
+  TypeId tid_int8 = kNumberTypeInt8;
+  lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor;
+  input_tensor1->SetData(input1.data());
+  input_tensor1->set_shape(shape1);
+  input_tensor1->AddQuantParam(input_quant_arg);
+  input_tensor1->set_data_type(tid_int8);
+  std::vector<lite::tensor::Tensor *> inputs_tensor(1);
+  inputs_tensor[0] = input_tensor1;
+
+  lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor;
+  output0_tensor->SetData(output);
+  output0_tensor->set_shape(output_shape);
+  output0_tensor->AddQuantParam(output_quant_arg);
+  output0_tensor->set_data_type(tid_int8);
+  std::vector<lite::tensor::Tensor *> outputs_tensor(1);
+  outputs_tensor[0] = output0_tensor;
+
+  ArithmeticSelfParameter op_param;
+  op_param.op_parameter_.type_ = schema::PrimitiveType_Square;
+  lite::Context *ctx = new lite::Context;
+  ctx->thread_num_ = 2;
+  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Square};
+  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
+  ASSERT_NE(creator, nullptr);
+  kernel::LiteKernel *kernel =
+      creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc);
+  ASSERT_NE(kernel, nullptr);
+  auto output_tensor_shape = output0_tensor->shape();
+  ASSERT_EQ(output_tensor_shape, output_shape);
+  kernel->Run();
+
+  std::vector<int8_t> except_result = {1, 2, 4, 7, 11, 16, 21, 28, 35, 43, 52, 62};
+  PrintData("output data", output, output_size);
+  PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
+  CompareOutputData(output, except_result.data(), output_size, 0.000001);
+
+  input_tensor1->SetData(nullptr);
+  output0_tensor->SetData(nullptr);
+  delete input_tensor1;
+  delete output0_tensor;
+  delete ctx;
+}
+
+TEST_F(TestArithmeticSelfInt8, logical_not_quant0_thread2) {
+  std::vector<int8_t> input1 = {1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0};
+  std::vector<int> shape1 = {2, 3, 2};
+  std::vector<int8_t *> input(1, nullptr);
+  input[0] = input1.data();
+
+  const int output_size = 12;
+  int8_t output[12];
+  std::vector<int> output_shape = {2, 3, 2};
+  lite::tensor::QuantArg input_quant_arg;
+  input_quant_arg.scale = 1.0;
+  input_quant_arg.zeroPoint = 0;
+  lite::tensor::QuantArg output_quant_arg;
+  output_quant_arg.scale = 1.0;
+  output_quant_arg.zeroPoint = 0;
+
+  TypeId tid_int8 = kNumberTypeInt8;
+  lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor;
+  input_tensor1->SetData(input1.data());
+  input_tensor1->set_shape(shape1);
+  input_tensor1->AddQuantParam(input_quant_arg);
+  input_tensor1->set_data_type(tid_int8);
+  std::vector<lite::tensor::Tensor *> inputs_tensor(1);
+  inputs_tensor[0] = input_tensor1;
+
+  lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor;
+  output0_tensor->SetData(output);
+  output0_tensor->set_shape(output_shape);
+  output0_tensor->AddQuantParam(output_quant_arg);
+  output0_tensor->set_data_type(tid_int8);
+  std::vector<lite::tensor::Tensor *> outputs_tensor(1);
+  outputs_tensor[0] = output0_tensor;
+
+  ArithmeticSelfParameter op_param;
+  op_param.op_parameter_.type_ = schema::PrimitiveType_LogicalNot;
+  lite::Context *ctx = new lite::Context;
+  ctx->thread_num_ = 2;
+  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_LogicalNot};
+  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
+  ASSERT_NE(creator, nullptr);
+  kernel::LiteKernel *kernel =
+      creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc);
+  ASSERT_NE(kernel, nullptr);
+  auto output_tensor_shape = output0_tensor->shape();
+  ASSERT_EQ(output_tensor_shape, output_shape);
+  kernel->Run();
+
+  std::vector<int8_t> except_result = {0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1};
+  PrintData("output data", output, output_size);
+  PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
+  CompareOutputData(output, except_result.data(), output_size, 0.000001);
+
+  input_tensor1->SetData(nullptr);
+  output0_tensor->SetData(nullptr);
+  delete input_tensor1;
+  delete output0_tensor;
+  delete ctx;
+}
+
+}  // namespace mindspore
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/split_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/split_int8_tests.cc
new file mode 100644
index 0000000000..493d5f709b
--- /dev/null
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/split_int8_tests.cc
@@ -0,0 +1,305 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+#include "utils/log_adapter.h"
+#include "common/common_test.h"
+#include "mindspore/lite/src/runtime/kernel/arm/opclib/split_parameter.h"
+#include "mindspore/lite/src/kernel_registry.h"
+#include "mindspore/lite/src/lite_kernel.h"
+#include "mindspore/lite/src/ir/tensor.h"
+
+namespace mindspore {
+
+class TestSplitInt8 : public mindspore::Common {
+ public:
+  TestSplitInt8() {}
+};
+
+TEST_F(TestSplitInt8, Split_quant0_thread2) {
+  std::vector<int8_t> input1 = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
+  std::vector<int> shape1 = {2, 3, 2};
+  std::vector<int8_t *> input(1, nullptr);
+  input[0] = input1.data();
+
+  const int output1_size = 4;
+  int8_t output1[4];
+  const int output2_size = 8;
+  int8_t output2[8];
+  std::vector<int> output1_shape = {2, 1, 2};
+  std::vector<int> output2_shape = {2, 2, 2};
+
+  lite::tensor::QuantArg input_quant_arg;
+  input_quant_arg.scale = 1.0;
+  input_quant_arg.zeroPoint = 0;
+  lite::tensor::QuantArg output_quant_arg;
+  output_quant_arg.scale = 1.0;
+  output_quant_arg.zeroPoint = 0;
+
+  TypeId tid_int8 = kNumberTypeInt8;
+  lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor;
+  input_tensor1->SetData(input1.data());
+  input_tensor1->set_shape(shape1);
+  input_tensor1->AddQuantParam(input_quant_arg);
+  input_tensor1->set_data_type(tid_int8);
+  std::vector<lite::tensor::Tensor *> inputs_tensor(1);
+  inputs_tensor[0] = input_tensor1;
+
+  lite::tensor::Tensor *output1_tensor = new lite::tensor::Tensor;
+  output1_tensor->SetData(output1);
+  output1_tensor->set_shape(output1_shape);
+  output1_tensor->AddQuantParam(output_quant_arg);
+  output1_tensor->set_data_type(tid_int8);
+  lite::tensor::Tensor *output2_tensor = new lite::tensor::Tensor;
+  output2_tensor->SetData(output2);
+  output2_tensor->set_shape(output2_shape);
+  output2_tensor->AddQuantParam(output_quant_arg);
+  output2_tensor->set_data_type(tid_int8);
+  std::vector<lite::tensor::Tensor *> outputs_tensor(2);
+  outputs_tensor[0] = output1_tensor;
+  outputs_tensor[1] = output2_tensor;
+
+  SplitParameter op_param;
+  op_param.op_parameter_.type_ = schema::PrimitiveType_Split;
+  op_param.num_split_ = 2;
+  op_param.split_dim_ = 1;
+  op_param.split_sizes_[0] = 1;
+  op_param.split_sizes_[1] = 2;
+  lite::Context *ctx = new lite::Context;
+  ctx->thread_num_ = 2;
+  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Split};
+  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
+  ASSERT_NE(creator, nullptr);
+  kernel::LiteKernel *kernel =
+      creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc);
+  ASSERT_NE(kernel, nullptr);
+  auto output1_tensor_shape = output1_tensor->shape();
+  auto output2_tensor_shape = output2_tensor->shape();
+  ASSERT_EQ(output1_tensor_shape, output1_shape);
+  ASSERT_EQ(output2_tensor_shape, output2_shape);
+  kernel->Run();
+
+  std::vector<int8_t> except_result1 = {1, 2, 7, 8};
+  std::vector<int8_t> except_result2 = {3, 4, 5, 6, 9, 10, 11, 12};
+  PrintData("output data", output1, output1_size);
+  PrintData("output data shape", output1_tensor_shape.data(), output1_tensor_shape.size());
+  PrintData("output data", output2, output2_size);
+  PrintData("output data shape", output2_tensor_shape.data(), output2_tensor_shape.size());
+  CompareOutputData(output1, except_result1.data(), output1_size, 0.000001);
+  CompareOutputData(output2, except_result2.data(), output2_size, 0.000001);
+
+  input_tensor1->SetData(nullptr);
+  output1_tensor->SetData(nullptr);
+  output2_tensor->SetData(nullptr);
+  delete input_tensor1;
+  delete output1_tensor;
+  delete output2_tensor;
+  delete ctx;
+}
+
+TEST_F(TestSplitInt8, Split_quant0_thread2_num) {
+  std::vector<int8_t> input1 = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
+  std::vector<int> shape1 = {2, 3, 2};
+  std::vector<int8_t *> input(1, nullptr);
+  input[0] = input1.data();
+
+  const int output1_size = 4;
+  int8_t output1[4];
+  const int output2_size = 4;
+  int8_t output2[4];
+  const int output3_size = 4;
+  int8_t output3[4];
+  std::vector<int> output1_shape = {2, 1, 2};
+  std::vector<int> output2_shape = {2, 1, 2};
+  std::vector<int> output3_shape = {2, 1, 2};
+
+  lite::tensor::QuantArg input_quant_arg;
+  input_quant_arg.scale = 1.0;
+  input_quant_arg.zeroPoint = 0;
+  lite::tensor::QuantArg output_quant_arg;
+  output_quant_arg.scale = 1.0;
+  output_quant_arg.zeroPoint = 0;
+
+  TypeId tid_int8 = kNumberTypeInt8;
+  lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor;
+  input_tensor1->SetData(input1.data());
+  input_tensor1->set_shape(shape1);
+  input_tensor1->AddQuantParam(input_quant_arg);
+  input_tensor1->set_data_type(tid_int8);
+  std::vector<lite::tensor::Tensor *> inputs_tensor(1);
+  inputs_tensor[0] = input_tensor1;
+
+  lite::tensor::Tensor *output1_tensor = new lite::tensor::Tensor;
+  output1_tensor->SetData(output1);
+  output1_tensor->set_shape(output1_shape);
+  output1_tensor->AddQuantParam(output_quant_arg);
+  output1_tensor->set_data_type(tid_int8);
+  lite::tensor::Tensor *output2_tensor = new lite::tensor::Tensor;
+  output2_tensor->SetData(output2);
+  output2_tensor->set_shape(output2_shape);
+  output2_tensor->AddQuantParam(output_quant_arg);
+  output2_tensor->set_data_type(tid_int8);
+  lite::tensor::Tensor *output3_tensor = new lite::tensor::Tensor;
+  output3_tensor->SetData(output3);
+  output3_tensor->set_shape(output3_shape);
+  output3_tensor->AddQuantParam(output_quant_arg);
+  output3_tensor->set_data_type(tid_int8);
+  std::vector<lite::tensor::Tensor *> outputs_tensor(3);
+  outputs_tensor[0] = output1_tensor;
+  outputs_tensor[1] = output2_tensor;
+  outputs_tensor[2] = output3_tensor;
+
+  SplitParameter op_param;
+  op_param.op_parameter_.type_ = schema::PrimitiveType_Split;
+  op_param.num_split_ = 3;
+  op_param.split_dim_ = 1;
+  lite::Context *ctx = new lite::Context;
+  ctx->thread_num_ = 2;
+  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Split};
+  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
+  ASSERT_NE(creator, nullptr);
+  kernel::LiteKernel *kernel =
+      creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc);
+  ASSERT_NE(kernel, nullptr);
+  auto output1_tensor_shape = output1_tensor->shape();
+  auto output2_tensor_shape = output2_tensor->shape();
+  auto output3_tensor_shape = output3_tensor->shape();
+  ASSERT_EQ(output1_tensor_shape, output1_shape);
+  ASSERT_EQ(output2_tensor_shape, output2_shape);
+  ASSERT_EQ(output3_tensor_shape, output3_shape);
+  kernel->Run();
+
+  std::vector<int8_t> except_result1 = {1, 2, 7, 8};
+  std::vector<int8_t> except_result2 = {3, 4, 9, 10};
+  std::vector<int8_t> except_result3 = {5, 6, 11, 12};
+  PrintData("output data", output1, output1_size);
+  PrintData("output data shape", output1_tensor_shape.data(), output1_tensor_shape.size());
+  PrintData("output data", output2, output2_size);
+  PrintData("output data shape", output2_tensor_shape.data(), output2_tensor_shape.size());
+  PrintData("output data", output3, output3_size);
+  PrintData("output data shape", output3_tensor_shape.data(), output3_tensor_shape.size());
+  CompareOutputData(output1, except_result1.data(), output1_size, 0.000001);
+  CompareOutputData(output2, except_result2.data(), output2_size, 0.000001);
+  CompareOutputData(output3, except_result3.data(), output3_size, 0.000001);
+
+  input_tensor1->SetData(nullptr);
+  output1_tensor->SetData(nullptr);
+  output2_tensor->SetData(nullptr);
+  output3_tensor->SetData(nullptr);
+  delete input_tensor1;
+  delete output1_tensor;
+  delete output2_tensor;
+  delete output3_tensor;
+  delete ctx;
+}
+
+TEST_F(TestSplitInt8, Split_quant1_thread2_num) {
+  std::vector<int8_t> input1 = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
+  std::vector<int> shape1 = {2, 3, 2};
+  std::vector<int8_t *> input(1, nullptr);
+  input[0] = input1.data();
+
+  const int output1_size = 4;
+  int8_t output1[4];
+  const int output2_size = 4;
+  int8_t output2[4];
+  const int output3_size = 4;
+  int8_t output3[4];
+  std::vector<int> output1_shape = {2, 1, 2};
+  std::vector<int> output2_shape = {2, 1, 2};
+  std::vector<int> output3_shape = {2, 1, 2};
+
+  lite::tensor::QuantArg input_quant_arg;
+  input_quant_arg.scale = 1.0;
+  input_quant_arg.zeroPoint = 0;
+  lite::tensor::QuantArg output_quant_arg;
+  output_quant_arg.scale = 2.0;
+  output_quant_arg.zeroPoint = 0;
+
+  TypeId tid_int8 = kNumberTypeInt8;
+  lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor;
+  input_tensor1->SetData(input1.data());
+  input_tensor1->set_shape(shape1);
+  input_tensor1->AddQuantParam(input_quant_arg);
+  input_tensor1->set_data_type(tid_int8);
+  std::vector<lite::tensor::Tensor *> inputs_tensor(1);
+  inputs_tensor[0] = input_tensor1;
+
+  lite::tensor::Tensor *output1_tensor = new lite::tensor::Tensor;
+  output1_tensor->SetData(output1);
+  output1_tensor->set_shape(output1_shape);
+  output1_tensor->AddQuantParam(output_quant_arg);
+  output1_tensor->set_data_type(tid_int8);
+  lite::tensor::Tensor *output2_tensor = new lite::tensor::Tensor;
+  output2_tensor->SetData(output2);
+  output2_tensor->set_shape(output2_shape);
+  output2_tensor->AddQuantParam(output_quant_arg);
+  output2_tensor->set_data_type(tid_int8);
+  lite::tensor::Tensor *output3_tensor = new lite::tensor::Tensor;
+  output3_tensor->SetData(output3);
+  output3_tensor->set_shape(output3_shape);
+  output3_tensor->AddQuantParam(output_quant_arg);
+  output3_tensor->set_data_type(tid_int8);
+  std::vector<lite::tensor::Tensor *> outputs_tensor(3);
+  outputs_tensor[0] = output1_tensor;
+  outputs_tensor[1] = output2_tensor;
+  outputs_tensor[2] = output3_tensor;
+
+  SplitParameter op_param;
+  op_param.op_parameter_.type_ = schema::PrimitiveType_Split;
+  op_param.num_split_ = 3;
+  op_param.split_dim_ = 1;
+  lite::Context *ctx = new lite::Context;
+  ctx->thread_num_ = 2;
+  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Split};
+  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
+  ASSERT_NE(creator, nullptr);
+  kernel::LiteKernel *kernel =
+      creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc);
+  ASSERT_NE(kernel, nullptr);
+  auto output1_tensor_shape = output1_tensor->shape();
+  auto output2_tensor_shape = output2_tensor->shape();
+  auto output3_tensor_shape = output3_tensor->shape();
+  ASSERT_EQ(output1_tensor_shape, output1_shape);
+  ASSERT_EQ(output2_tensor_shape, output2_shape);
+  ASSERT_EQ(output3_tensor_shape, output3_shape);
+  kernel->Run();
+
+  std::vector<int8_t> except_result1 = {1, 1, 4, 4};
+  std::vector<int8_t> except_result2 = {2, 2, 5, 5};
+  std::vector<int8_t> except_result3 = {3, 3, 6, 6};
+  PrintData("output data", output1, output1_size);
+  PrintData("output data shape", output1_tensor_shape.data(), output1_tensor_shape.size());
+  PrintData("output data", output2, output2_size);
+  PrintData("output data shape", output2_tensor_shape.data(), output2_tensor_shape.size());
+  PrintData("output data", output3, output3_size);
+  PrintData("output data shape", output3_tensor_shape.data(), output3_tensor_shape.size());
+  CompareOutputData(output1, except_result1.data(), output1_size, 0.000001);
+  CompareOutputData(output2, except_result2.data(), output2_size, 0.000001);
+  CompareOutputData(output3, except_result3.data(), output3_size, 0.000001);
+
+  input_tensor1->SetData(nullptr);
+  output1_tensor->SetData(nullptr);
+  output2_tensor->SetData(nullptr);
+  output3_tensor->SetData(nullptr);
+  delete input_tensor1;
+  delete output1_tensor;
+  delete output2_tensor;
+  delete output3_tensor;
+  delete ctx;
+}
+
+}  // namespace mindspore