
!4411 Add arm op Slice for int8 and testcases

Merge pull request !4411 from wangminggui/master
tags/v0.7.0-beta
mindspore-ci-bot (Gitee), 5 years ago
commit d93bde32f1
12 changed files with 581 additions and 41 deletions
  1. +114 -0   mindspore/lite/src/runtime/kernel/arm/base/slice_base.cc
  2. +44 -0    mindspore/lite/src/runtime/kernel/arm/base/slice_base.h
  3. +0 -28    mindspore/lite/src/runtime/kernel/arm/fp32/slice.cc
  4. +3 -2     mindspore/lite/src/runtime/kernel/arm/fp32/slice.h
  5. +101 -0   mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.cc
  6. +41 -0    mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.h
  7. +1 -11    mindspore/lite/src/runtime/kernel/arm/nnacl/fp32/slice.h
  8. +127 -0   mindspore/lite/src/runtime/kernel/arm/nnacl/int8/slice_int8.c
  9. +32 -0    mindspore/lite/src/runtime/kernel/arm/nnacl/int8/slice_int8.h
  10. +7 -0    mindspore/lite/src/runtime/kernel/arm/nnacl/quantization/quantize.h
  11. +36 -0   mindspore/lite/src/runtime/kernel/arm/nnacl/slice_parameter.h
  12. +75 -0   mindspore/lite/test/ut/src/runtime/kernel/arm/int8/slice_int8_tests.cc

+114 -0  mindspore/lite/src/runtime/kernel/arm/base/slice_base.cc

@@ -0,0 +1,114 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/base/slice_base.h"
#include <vector>
#include "src/runtime/kernel/arm/int8/slice_int8.h"
#include "src/runtime/kernel/arm/fp32/slice.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"

using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_Slice;

namespace mindspore::kernel {
int SliceBaseCPUKernel::Init() { return RET_OK; }

int SliceBaseCPUKernel::ReSize() {
auto input_shape = in_tensors_[0]->shape();
if (input_shape.size() > DIMENSION_4D) {
MS_LOG(ERROR) << "input dimension num should <= " << DIMENSION_4D;
return RET_ERROR;
}

for (size_t i = 0; i < input_shape.size(); ++i) {
param_->shape_[i] = input_shape[i];
}

if (param_->param_length_ < DIMENSION_4D) {
for (int i = param_->param_length_ - 1, j = 1; i >= 0; --i, ++j) {
param_->begin_[DIMENSION_4D - j] = param_->begin_[i];
param_->size_[DIMENSION_4D - j] = param_->size_[i];
}
for (size_t i = 0; i < DIMENSION_4D - param_->param_length_; i++) {
param_->begin_[i] = 0;
param_->size_[i] = 1;
}
}
param_->param_length_ = DIMENSION_4D;
for (int i = 0; i < DIMENSION_4D; ++i) {
if (param_->size_[i] < 0) {
param_->size_[i] = param_->shape_[i] - param_->begin_[i];
}
param_->end_[i] = param_->begin_[i] + param_->size_[i];
}

return RET_OK;
}

kernel::LiteKernel *CpuSliceInt8KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs,
OpParameter *opParameter, const lite::Context *ctx,
const kernel::KernelKey &desc, const lite::Primitive *primitive) {
if (opParameter == nullptr) {
MS_LOG(ERROR) << "Input opParameter is nullptr!";
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_Slice);
auto *kernel = new (std::nothrow) SliceInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new SliceInt8CPUKernel fail!";
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
delete kernel;
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
return nullptr;
}
return kernel;
}

kernel::LiteKernel *CpuSliceFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs,
OpParameter *opParameter, const lite::Context *ctx,
const kernel::KernelKey &desc, const lite::Primitive *primitive) {
if (opParameter == nullptr) {
MS_LOG(ERROR) << "Input opParameter is nullptr!";
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_Slice);
auto *kernel = new (std::nothrow) SliceCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new SliceCPUKernel fail!";
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
delete kernel;
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
return nullptr;
}
return kernel;
}

REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Slice, CpuSliceInt8KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Slice, CpuSliceFp32KernelCreator)
} // namespace mindspore::kernel
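
Not part of the patch: a minimal standalone sketch (with a hypothetical helper name, NormalizeSliceParams) of the normalization SliceBaseCPUKernel::ReSize performs, right-aligning begin_/size_ into four dimensions, padding the leading axes with begin 0 / size 1, and resolving size -1 to the remainder of the axis.

#include <stdio.h>

#define DIMENSION_4D 4

/* Hypothetical standalone version of the normalization in SliceBaseCPUKernel::ReSize
 * (assumes dims <= 4): right-align begin/size into 4-D, pad leading axes with
 * begin = 0 / size = 1, resolve size == -1 against the input shape, and fill end. */
static void NormalizeSliceParams(const int *shape, int dims, int *begin, int *size, int *end) {
  for (int i = dims - 1, j = 1; i >= 0; --i, ++j) {
    begin[DIMENSION_4D - j] = begin[i];
    size[DIMENSION_4D - j] = size[i];
  }
  for (int i = 0; i < DIMENSION_4D - dims; ++i) {
    begin[i] = 0;
    size[i] = 1;
  }
  for (int i = 0; i < DIMENSION_4D; ++i) {
    if (size[i] < 0) {
      size[i] = shape[i] - begin[i];
    }
    end[i] = begin[i] + size[i];
  }
}

int main(void) {
  /* The unit test's case: 1x3x2x3 input, 3-D params begin = {1, 0, 0}, size = {-1, -1, -1}. */
  int shape[DIMENSION_4D] = {1, 3, 2, 3};
  int begin[DIMENSION_4D] = {1, 0, 0, 0};
  int size[DIMENSION_4D] = {-1, -1, -1, 0};
  int end[DIMENSION_4D];
  NormalizeSliceParams(shape, 3, begin, size, end);
  for (int i = 0; i < DIMENSION_4D; ++i) {
    printf("axis %d: begin=%d size=%d end=%d\n", i, begin[i], size[i], end[i]);
  }
  return 0;  /* prints begin {0,1,0,0}, size {1,2,2,3}, end {1,3,2,3} */
}

On the test's inputs this yields the same right-aligned 4-D parameters the kernels below slice with.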

+44 -0  mindspore/lite/src/runtime/kernel/arm/base/slice_base.h

@@ -0,0 +1,44 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_SLICE_BASE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_SLICE_BASE_H_

#include <vector>
#include "src/lite_kernel.h"
#include "src/runtime/kernel/arm/nnacl/slice_parameter.h"

namespace mindspore::kernel {
class SliceBaseCPUKernel : public LiteKernel {
public:
SliceBaseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
const lite::Primitive *primitive)
: LiteKernel(parameter, inputs, outputs, ctx, primitive) {
param_ = reinterpret_cast<SliceParameter *>(op_parameter_);
}
~SliceBaseCPUKernel() = default;

int Init() override;
int ReSize() override;
int Run() override { return 0; }

protected:
SliceParameter *param_;
};
} // namespace mindspore::kernel

#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_SLICE_BASE_H_

+0 -28  mindspore/lite/src/runtime/kernel/arm/fp32/slice.cc

@@ -104,32 +104,4 @@ int SliceCPUKernel::Run() {
}
return RET_OK;
}

kernel::LiteKernel *CpuSliceFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs,
OpParameter *op_parameter, const lite::Context *ctx,
const kernel::KernelKey &desc, const lite::Primitive *primitive) {
if (op_parameter == nullptr) {
MS_LOG(ERROR) << "Input op_parameter is nullptr!";
return nullptr;
}

MS_ASSERT(desc.type == schema::PrimitiveType_Slice);
auto *kernel = new (std::nothrow) SliceCPUKernel(op_parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new SliceCPUKernel fail!";
return nullptr;
}

auto ret = kernel->Init();
if (ret != RET_OK) {
delete kernel;
MS_LOG(ERROR) << "Init kernel failed, name: " << op_parameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_));
return nullptr;
}
return kernel;
}

REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Slice, CpuSliceFp32KernelCreator)
} // namespace mindspore::kernel

+3 -2  mindspore/lite/src/runtime/kernel/arm/fp32/slice.h

@@ -18,14 +18,15 @@


#include <vector>
#include "src/lite_kernel.h"
#include "src/runtime/kernel/arm/base/slice_base.h"

namespace mindspore::kernel {
class SliceCPUKernel : public LiteKernel {
class SliceCPUKernel : public SliceBaseCPUKernel {
public:
SliceCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
const lite::Primitive *primitive)
: LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
: SliceBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
~SliceCPUKernel() = default;

int Init() override;


+101 -0  mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.cc

@@ -0,0 +1,101 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "src/runtime/kernel/arm/int8/slice_int8.h"
#include <limits>
#include "src/runtime/kernel/arm/nnacl/slice_parameter.h"
#include "src/runtime/kernel/arm/nnacl/int8/slice_int8.h"
#include "include/errorcode.h"
#include "src/runtime/runtime_api.h"

using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;

namespace mindspore::kernel {

int SliceInt8CPUKernel::Init() {
auto ret = SliceBaseCPUKernel::Init();
if (ret != RET_OK) {
return ret;
}

auto input = in_tensors_.at(0);
auto output = out_tensors_.at(0);
MS_ASSERT(input);
MS_ASSERT(output);

auto in_quant_args = input->GetQuantParams();
param_->quant_arg_.in_args_.scale_ = in_quant_args.front().scale;
param_->quant_arg_.in_args_.zp_ = in_quant_args.front().zeroPoint;

auto out_quant_args = output->GetQuantParams();
param_->quant_arg_.out_args_.scale_ = out_quant_args.front().scale;
param_->quant_arg_.out_args_.zp_ = out_quant_args.front().zeroPoint;

param_->quant_arg_.output_activation_max_ = std::numeric_limits<int8_t>::max();
param_->quant_arg_.output_activation_min_ = std::numeric_limits<int8_t>::min();
if (!InferShapeDone()) {
return RET_OK;
}
return ReSize();
}

int SliceInt8CPUKernel::ReSize() { return SliceBaseCPUKernel::ReSize(); }

int SliceInt8CPUKernel::DoSlice(int task_id) {
const int8_t *input_data = reinterpret_cast<const int8_t *>(in_tensors_[0]->Data());
int8_t *output_data = reinterpret_cast<int8_t *>(out_tensors_[0]->Data());

param_->thread_id_ = task_id;
auto ret = SliceInt8(input_data, output_data, param_);
if (ret != RET_OK) {
MS_LOG(ERROR) << "SliceInt8 error ,task_id[" << task_id << "] error_code[" << ret << "]";
}
return ret;
}

int SliceInt8Run(int task_id, LiteParallelGroupEnv *penv, void *cdata) {
auto slice_kernel = reinterpret_cast<SliceInt8CPUKernel *>(cdata);
auto ret = slice_kernel->DoSlice(task_id);
if (ret != RET_OK) {
MS_LOG(ERROR) << "DoSlice error, task_id[" << task_id << "] error_code[" << ret << "]";
}
return ret;
}

int SliceInt8CPUKernel::Run() {
auto ret = Prepare();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Prepare failed.";
return ret;
}

const int8_t *input_data = reinterpret_cast<const int8_t *>(in_tensors_[0]->Data());
int8_t *output_data = reinterpret_cast<int8_t *>(out_tensors_[0]->Data());

if (param_->size_[1] < param_->op_parameter_.thread_num_) {
ret = SliceInt8NoParallel(input_data, output_data, param_);
} else {
ret = LiteBackendParallelLaunch(SliceInt8Run, this, op_parameter_->thread_num_);
}

if (ret != RET_OK) {
MS_LOG(ERROR) << "SliceInt8Run error, error_code[" << ret << "]";
}
return ret;
}
} // namespace mindspore::kernel
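
For orientation: Run falls back to SliceInt8NoParallel when the second output dimension has fewer rows than there are threads; otherwise LiteBackendParallelLaunch invokes SliceInt8 once per task_id and each task handles a contiguous band of rows. A small standalone sketch of that partitioning (illustrative only, not code from the patch; UP_DIV is redefined locally):

#include <stdio.h>

#define UP_DIV(x, y) (((x) + (y) - 1) / (y))

int main(void) {
  /* Each task_id starts at task_id * count_per_thread rows and stops
   * once it runs past out_dim1, matching the bounds check in SliceInt8. */
  int out_dim1 = 5;
  int thread_num = 2;
  int count_per_thread = UP_DIV(out_dim1, thread_num);  /* 3 */
  for (int task_id = 0; task_id < thread_num; ++task_id) {
    int first = task_id * count_per_thread;
    int last = first + count_per_thread;
    if (last > out_dim1) {
      last = out_dim1;
    }
    printf("task %d handles rows %d..%d\n", task_id, first, last - 1);
  }
  return 0;
}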

+41 -0  mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.h

@@ -0,0 +1,41 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_SLICE_INT8_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_SLICE_INT8_H_

#include <vector>
#include "src/runtime/kernel/arm/base/slice_base.h"
#include "src/runtime/kernel/arm/nnacl/quantization/quantize.h"

namespace mindspore::kernel {
class SliceInt8CPUKernel : public SliceBaseCPUKernel {
public:
SliceInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
const lite::Primitive *primitive)
: SliceBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
~SliceInt8CPUKernel() {}

int Init() override;
int ReSize() override;
int Run() override;
int DoSlice(int task_id);
};
} // namespace mindspore::kernel

#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_SLICE_INT8_H_

+1 -11  mindspore/lite/src/runtime/kernel/arm/nnacl/fp32/slice.h

@@ -17,17 +17,7 @@
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_FP32_SLICE_H_


#include "nnacl/op_base.h" #include "nnacl/op_base.h"
#define SLICE_SHAPE_MAX_SIZE 4

typedef struct SliceParameter {
OpParameter op_parameter_;
int32_t begin_[SLICE_SHAPE_MAX_SIZE];
int32_t end_[SLICE_SHAPE_MAX_SIZE];
int32_t size_[SLICE_SHAPE_MAX_SIZE];
int32_t shape_[SLICE_SHAPE_MAX_SIZE];
int32_t param_length_;
int32_t thread_id_;
} SliceParameter;
#include "nnacl/slice_parameter.h"


#ifdef __cplusplus
extern "C" {


+127 -0  mindspore/lite/src/runtime/kernel/arm/nnacl/int8/slice_int8.c

@@ -0,0 +1,127 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "nnacl/int8/slice_int8.h"
#include <string.h>
#include "nnacl/quantization/fixed_point.h"
#include "nnacl/errorcode.h"

int SliceInt8NoParallel(const int8_t *input, int8_t *output, SliceParameter *param) {
float input_scale = param->quant_arg_.in_args_.scale_;
int input_zp = param->quant_arg_.in_args_.zp_;
float output_scale = param->quant_arg_.out_args_.scale_;
int output_zp = param->quant_arg_.out_args_.zp_;
int act_min = param->quant_arg_.output_activation_min_;
int act_max = param->quant_arg_.output_activation_max_;

int equal_quant = 0;
double multiplier = 0;
if (input_scale == output_scale && input_zp == output_zp) {
equal_quant = 1;
} else {
multiplier = input_scale / output_scale;
}

int32_t end_n = param->begin_[0] + param->size_[0];
int32_t end_h = param->begin_[1] + param->size_[1];
int32_t end_w = param->begin_[2] + param->size_[2];

int unit_count = param->size_[3];
int unit_size = unit_count * sizeof(int8_t);
int in_stride2 = param->shape_[3];
int in_stride1 = param->shape_[2] * in_stride2;
int in_stride0 = param->shape_[1] * in_stride1;
int out_offset = 0;
int n, h, w, c;

for (n = param->begin_[0]; n < end_n; ++n) {
size_t in_offset0 = n * in_stride0;
for (h = param->begin_[1]; h < end_h; ++h) {
size_t in_offset1 = h * in_stride1 + in_offset0;
for (w = param->begin_[2]; w < end_w; ++w) {
size_t in_offset = in_offset1 + w * in_stride2 + param->begin_[3];
if (equal_quant == 1) {
memcpy(output + out_offset, input + in_offset, unit_size);
} else {
for (c = 0; c < unit_count; ++c) {
int32_t output_val = (int32_t)round(multiplier * (input[in_offset + c] - input_zp)) + output_zp;
output[c + out_offset] = (int8_t)MSMAX(act_min, MSMIN(output_val, act_max));
}
}
out_offset += unit_count;
}
}
}
return 0;
}

int SliceInt8(const int8_t *input, int8_t *output, SliceParameter *param) {
float input_scale = param->quant_arg_.in_args_.scale_;
int input_zp = param->quant_arg_.in_args_.zp_;
float output_scale = param->quant_arg_.out_args_.scale_;
int output_zp = param->quant_arg_.out_args_.zp_;
int act_min = param->quant_arg_.output_activation_min_;
int act_max = param->quant_arg_.output_activation_max_;

int32_t out_dim1 = param->size_[1];
int32_t out_dim2 = param->size_[2];
int32_t out_dim3 = param->size_[3];
int out_stride2 = out_dim3;
int out_stride1 = out_stride2 * out_dim2;
int out_stride0 = out_stride1 * out_dim1;
int count_per_thread = UP_DIV(out_dim1, param->op_parameter_.thread_num_);
int thread_id = param->thread_id_;
int thread_stride = thread_id * count_per_thread;
int unit_size = param->size_[3] * sizeof(int8_t);
int in_stride2 = param->shape_[3];
int in_stride1 = param->shape_[2] * in_stride2;
int in_stride0 = param->shape_[1] * in_stride1;
int n, h, w, c;

int equal_quant = 0;
double multiplier = 0;
if (input_scale == output_scale && input_zp == output_zp) {
equal_quant = 1;
} else {
multiplier = input_scale / output_scale;
}

for (n = 0; n < param->size_[0]; ++n) {
size_t out_offset0 = n * out_stride0;
size_t in_offset0 = (n + param->begin_[0]) * in_stride0 + param->begin_[3];
for (h = 0; h < count_per_thread; ++h) {
size_t k = h + thread_stride;
if (k >= out_dim1) {
break;
}
size_t out_offset1 = k * out_stride1 + out_offset0;
size_t in_offset1 = (k + param->begin_[1]) * in_stride1 + in_offset0;
for (w = 0; w < out_dim2; ++w) {
size_t out_offset = out_offset1 + w * out_stride2;
size_t in_offset = in_offset1 + (w + param->begin_[2]) * in_stride2;
if (equal_quant == 1) {
memcpy(output + out_offset, input + in_offset, unit_size);
} else {
for (c = 0; c < out_dim3; ++c) {
int32_t output_val = (int32_t)round(multiplier * (input[in_offset + c] - input_zp)) + output_zp;
output[c + out_offset] = (int8_t)MSMAX(act_min, MSMIN(output_val, act_max));
}
}
}
}
}
return 0;
}
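
When the input and output tensors share scale and zero point, the inner loop degenerates to a memcpy; otherwise each element is requantized as q_out = clamp(round((s_in / s_out) * (q_in - zp_in)) + zp_out, act_min, act_max). A minimal standalone sketch of that per-element step (hypothetical helper, not part of the patch):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-element requantization used when the slice's input and
 * output quantization parameters differ:
 *   q_out = clamp(round((s_in / s_out) * (q_in - zp_in)) + zp_out, act_min, act_max) */
static int8_t RequantizeInt8(int8_t q_in, float s_in, int zp_in, float s_out, int zp_out,
                             int act_min, int act_max) {
  double multiplier = (double)s_in / s_out;
  int32_t val = (int32_t)round(multiplier * (q_in - zp_in)) + zp_out;
  if (val < act_min) val = act_min;
  if (val > act_max) val = act_max;
  return (int8_t)val;
}

int main(void) {
  /* Rescale 100 from scale 0.00784314 (zp 0) to scale 0.01568628 (zp 0): expect 50. */
  printf("%d\n", RequantizeInt8(100, 0.00784314f, 0, 0.01568628f, 0, -128, 127));
  return 0;
}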

+32 -0  mindspore/lite/src/runtime/kernel/arm/nnacl/int8/slice_int8.h

@@ -0,0 +1,32 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_INT8_SLICE_INT8_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_INT8_SLICE_INT8_H_

#include "nnacl/op_base.h"
#include "nnacl/slice_parameter.h"

#ifdef __cplusplus
extern "C" {
#endif
int SliceInt8NoParallel(const int8_t *input, int8_t *output, SliceParameter *param);
int SliceInt8(const int8_t *input, int8_t *output, SliceParameter *param);
#ifdef __cplusplus
}
#endif

#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_INT8_SLICE_INT8_H_


+7 -0  mindspore/lite/src/runtime/kernel/arm/nnacl/quantization/quantize.h

@@ -239,6 +239,13 @@ typedef struct ReduceQuantArg {
int sum_square_right_shift_;
} ReduceQuantArg;


typedef struct SliceQuantArg {
QuantArg in_args_;
QuantArg out_args_;
int output_activation_min_;
int output_activation_max_;
} SliceQuantArg;

#ifdef __cplusplus
extern "C" {
#endif


+36 -0  mindspore/lite/src/runtime/kernel/arm/nnacl/slice_parameter.h

@@ -0,0 +1,36 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_SLICE_PARAMETER_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_SLICE_PARAMETER_H_

#include "nnacl/op_base.h"
#include "nnacl/quantization/quantize.h"

#define SLICE_SHAPE_MAX_SIZE 4

typedef struct SliceParameter {
OpParameter op_parameter_;
SliceQuantArg quant_arg_;
int32_t begin_[SLICE_SHAPE_MAX_SIZE];
int32_t end_[SLICE_SHAPE_MAX_SIZE];
int32_t size_[SLICE_SHAPE_MAX_SIZE];
int32_t shape_[SLICE_SHAPE_MAX_SIZE];
int32_t param_length_;
int32_t thread_id_;
} SliceParameter;

#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_SLICE_PARAMETER_H_

+75 -0  mindspore/lite/test/ut/src/runtime/kernel/arm/int8/slice_int8_tests.cc

@@ -0,0 +1,75 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <iostream>
#include <memory>
#include "common/common_test.h"
#include "mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.h"
#include "mindspore/lite/src/kernel_registry.h"
#include "mindspore/lite/include/context.h"

namespace mindspore {
class TestSliceInt8 : public mindspore::CommonTest {
public:
TestSliceInt8() {}
};

TEST_F(TestSliceInt8, SliceInt8) {
lite::tensor::Tensor in_tensor(kNumberTypeInt8, {1, 3, 2, 3});
lite::tensor::Tensor out_tensor(kNumberTypeInt8, {1, 1, 2, 3});

int8_t input_data[] = {105, 35, -27, 0, -63, 99, 16, 45, 67, -49, -115, 106, -98, 119, 103, 81, -114, 68};
int8_t output_data[12];
in_tensor.SetData(input_data);
out_tensor.SetData(output_data);

const lite::tensor::QuantArg quant_in0 = {0.00784314f, 0};  // scale ~1/127.5: approx. -1.0..1.0 -> -128..127
const lite::tensor::QuantArg quant_out = {0.00784314f, 0};
in_tensor.AddQuantParam(quant_in0);
out_tensor.AddQuantParam(quant_out);

std::vector<lite::tensor::Tensor *> inputs = {&in_tensor};
std::vector<lite::tensor::Tensor *> outputs = {&out_tensor};

SliceParameter parameter;
parameter.begin_[0] = 1;
parameter.begin_[1] = 0;
parameter.begin_[2] = 0;
parameter.size_[0] = -1;
parameter.size_[1] = -1;
parameter.size_[2] = -1;
parameter.param_length_ = 3;

kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Slice};

auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
ASSERT_NE(creator, nullptr);

auto ctx = std::make_shared<lite::Context>();
auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr);
ASSERT_NE(kernel, nullptr);
auto ret = kernel->Run();
EXPECT_EQ(0, ret);

int8_t expect0[12] = {16, 45, 67, -49, -115, 106, -98, 119, 103, 81, -114, 68};
for (int i = 0; i < 12; ++i) {
EXPECT_EQ(output_data[i], expect0[i]);
}

in_tensor.SetData(nullptr);
out_tensor.SetData(nullptr);
}
} // namespace mindspore
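
For reference, the expected values follow directly from the layout: slicing a 1x3x2x3 int8 tensor from index 1 of the second axis with size -1 keeps the trailing 2 x 2 x 3 = 12 elements of input_data, which is exactly what expect0 lists. A standalone check (not part of the test file):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* Same data as the unit test, laid out as 1x3x2x3; slicing from index 1 of the
   * second axis keeps the last (3 - 1) * 2 * 3 = 12 values. */
  int8_t input_data[] = {105, 35, -27, 0, -63, 99, 16, 45, 67, -49, -115, 106, -98, 119, 103, 81, -114, 68};
  const int h_begin = 1, H = 3, W = 2, C = 3;
  const int8_t *slice = input_data + h_begin * W * C;
  for (int i = 0; i < (H - h_begin) * W * C; ++i) {
    printf("%d ", slice[i]);  /* 16 45 67 -49 -115 106 -98 119 103 81 -114 68 */
  }
  printf("\n");
  return 0;
}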
