Browse Source

fix issue: quant dtype cast does not support fp16 input

opParameter free in creator
tags/v1.1.0
zhaozhenlong 5 years ago
parent
commit
2219a9c80e
100 changed files with 472 additions and 15 deletions
  1. +48
    -0
      mindspore/lite/nnacl/fp16/quant_dtype_cast_fp16.c
  2. +35
    -0
      mindspore/lite/nnacl/fp16/quant_dtype_cast_fp16.h
  3. +0
    -1
      mindspore/lite/src/kernel_registry.cc
  4. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.cc
  5. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.cc
  6. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/base/concat_base.cc
  7. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/base/crop_base.cc
  8. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.cc
  9. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.cc
  10. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/base/leaky_relu_base.cc
  11. +2
    -0
      mindspore/lite/src/runtime/kernel/arm/base/matmul_base.cc
  12. +2
    -0
      mindspore/lite/src/runtime/kernel/arm/base/pad.cc
  13. +2
    -0
      mindspore/lite/src/runtime/kernel/arm/base/pooling_base.cc
  14. +2
    -0
      mindspore/lite/src/runtime/kernel/arm/base/power_base.cc
  15. +2
    -0
      mindspore/lite/src/runtime/kernel/arm/base/prior_box.cc
  16. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.cc
  17. +6
    -0
      mindspore/lite/src/runtime/kernel/arm/base/reduce_base.cc
  18. +3
    -0
      mindspore/lite/src/runtime/kernel/arm/base/reshape_base.cc
  19. +2
    -0
      mindspore/lite/src/runtime/kernel/arm/base/resize_base.cc
  20. +2
    -0
      mindspore/lite/src/runtime/kernel/arm/base/softmax_base.cc
  21. +3
    -0
      mindspore/lite/src/runtime/kernel/arm/base/split_base.cc
  22. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/base/squeeze_base.cc
  23. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/base/strided_slice.cc
  24. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp16/activation_fp16.cc
  25. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.cc
  26. +3
    -0
      mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_self_fp16.cc
  27. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp16/batchnorm_fp16.cc
  28. +3
    -0
      mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.cc
  29. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.cc
  30. +2
    -0
      mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc
  31. +2
    -0
      mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc
  32. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp16/crop_fp16.cc
  33. +2
    -0
      mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc
  34. +2
    -0
      mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.cc
  35. +2
    -0
      mindspore/lite/src/runtime/kernel/arm/fp16/fullconnection_fp16.cc
  36. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp16/fused_batchnorm_fp16.cc
  37. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp16/matmul_fp16.cc
  38. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp16/pad_fp16.cc
  39. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp16/pooling_fp16.cc
  40. +169
    -0
      mindspore/lite/src/runtime/kernel/arm/fp16/quant_dtype_cast_fp16.cc
  41. +49
    -0
      mindspore/lite/src/runtime/kernel/arm/fp16/quant_dtype_cast_fp16.h
  42. +2
    -0
      mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc
  43. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp16/reshape_fp16.cc
  44. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp16/scale_fp16.cc
  45. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp16/slice_fp16.cc
  46. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp16/softmax_fp16.cc
  47. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp16/split_fp16.cc
  48. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp16/stack_fp16.cc
  49. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.cc
  50. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/activation.cc
  51. +2
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/addn.cc
  52. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.cc
  53. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.cc
  54. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm.cc
  55. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/bias.cc
  56. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/broadcast_to.cc
  57. +3
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/cast.cc
  58. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/constant_of_shape.cc
  59. +2
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/convolution.cc
  60. +2
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.cc
  61. +2
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.cc
  62. +2
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise.cc
  63. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/detection_post_process.cc
  64. +8
    -2
      mindspore/lite/src/runtime/kernel/arm/fp32/elu.cc
  65. +8
    -2
      mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup.cc
  66. +8
    -2
      mindspore/lite/src/runtime/kernel/arm/fp32/exp.cc
  67. +5
    -4
      mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.cc
  68. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/fill.cc
  69. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/flatten.cc
  70. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm.cc
  71. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/gather.cc
  72. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd.cc
  73. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm.cc
  74. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm.cc
  75. +2
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/lsh_projection.cc
  76. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/lstm.cc
  77. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/nchw2nhwc.cc
  78. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/nhwc2nchw.cc
  79. +2
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/one_hot.cc
  80. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/prelu.cc
  81. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/range.cc
  82. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/rank.cc
  83. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/reverse.cc
  84. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/reverse_sequence.cc
  85. +3
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling.cc
  86. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/scale.cc
  87. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.cc
  88. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/shape.cc
  89. +8
    -2
      mindspore/lite/src/runtime/kernel/arm/fp32/skip_gram.cc
  90. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/slice.cc
  91. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch.cc
  92. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.cc
  93. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense.cc
  94. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/squeeze.cc
  95. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/stack.cc
  96. +8
    -2
      mindspore/lite/src/runtime/kernel/arm/fp32/tile.cc
  97. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/topk.cc
  98. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/transpose.cc
  99. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/unique.cc
  100. +1
    -0
      mindspore/lite/src/runtime/kernel/arm/fp32/unsqueeze.cc

+ 48
- 0
mindspore/lite/nnacl/fp16/quant_dtype_cast_fp16.c View File

@@ -0,0 +1,48 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <math.h>
#include "nnacl/fp16/quant_dtype_cast_fp16.h"
#include "nnacl/errorcode.h"

/* Dequantizes int8 values to fp16 reals: real[i] = (quant[i] - zp) * scale.
 * Returns NNACL_PARAM_INVALID when either buffer is NULL, NNACL_OK otherwise. */
int DoDequantizeInt8ToFp16(int8_t *quant_values, float16_t *real_values, float scale, int32_t zp, int size) {
if (real_values == NULL || quant_values == NULL) {
return NNACL_PARAM_INVALID;
}

int idx;
for (idx = 0; idx < size; ++idx) {
const float dequantized = (float)(quant_values[idx] - zp) * scale;
real_values[idx] = (float16_t)dequantized;
}
return NNACL_OK;
}

/* Quantizes fp16 real values to int8: q[i] = clamp(round(real[i] / scale + zp), -128, 127).
 * Returns NNACL_PARAM_INVALID when either buffer is NULL, NNACL_OK otherwise.
 * NOTE(review): assumes scale != 0 — callers are expected to validate quant params. */
int DoQuantizeToInt8FromFp16(float16_t *real_values, int8_t *quant_values, float scale, int32_t zp, int size) {
if (quant_values == NULL || real_values == NULL) {
return NNACL_PARAM_INVALID;
}

for (int i = 0; i < size; ++i) {
/* roundf keeps the computation in single precision; round() would force a
 * float -> double promotion on every loop iteration for an identical result,
 * since float values are exactly representable in double. */
float temp = roundf((float)real_values[i] / scale + (float)zp);
/* Saturate to the int8 range instead of letting the cast wrap. */
if (temp > 127) {
quant_values[i] = 127;
} else if (temp < -128) {
quant_values[i] = -128;
} else {
quant_values[i] = (int8_t)temp;
}
}
return NNACL_OK;
}

+ 35
- 0
mindspore/lite/nnacl/fp16/quant_dtype_cast_fp16.h View File

@@ -0,0 +1,35 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_NNACL_FP16_QUANTDTYPECAST_FP16_H_
#define MINDSPORE_LITE_NNACL_FP16_QUANTDTYPECAST_FP16_H_

#include "nnacl/op_base.h"

#ifdef ENABLE_NEON
#include <arm_neon.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif
/* Dequantize: real[i] = (quant[i] - zp) * scale. Returns NNACL_OK or NNACL_PARAM_INVALID. */
int DoDequantizeInt8ToFp16(int8_t *quant_values, float16_t *real_values, float scale, int32_t zp, int size);
/* Quantize: quant[i] = clamp(round(real[i] / scale + zp), -128, 127). */
int DoQuantizeToInt8FromFp16(float16_t *real_values, int8_t *quant_values, float scale, int32_t zp, int size);
#ifdef __cplusplus
}
#endif

/* Fixed: the closing-guard comment previously named the int8 header guard
 * (MINDSPORE_LITE_NNACL_INT8_QUANTDTYPECAST_H_) — a copy-paste leftover. */
#endif  // MINDSPORE_LITE_NNACL_FP16_QUANTDTYPECAST_FP16_H_

+ 0
- 1
mindspore/lite/src/kernel_registry.cc View File

@@ -120,7 +120,6 @@ kernel::LiteKernel *KernelRegistry::GetKernel(const std::vector<Tensor *> &in_te
return kernel;
}
}
free(parameter);
return nullptr;
}



+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/base/arg_min_max_base.cc View File

@@ -124,6 +124,7 @@ kernel::LiteKernel *CpuArgMinMaxFp32KernelCreator(const std::vector<lite::Tensor
auto kernel = new (std::nothrow) ArgMinMaxCPUKernel(op_parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ArgMinMaxCPUKernel fail!";
free(op_parameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.cc View File

@@ -90,6 +90,7 @@ kernel::LiteKernel *CpuBatchToSpaceFp32KernelCreator(const std::vector<lite::Ten
auto *kernel = new (std::nothrow) BatchToSpaceCPUKernel(op_parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new BatchToSpaceCPUKernel fail!";
free(op_parameter);
return nullptr;
}



+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/base/concat_base.cc View File

@@ -96,6 +96,7 @@ kernel::LiteKernel *CpuConcatFp32KernelCreator(const std::vector<lite::Tensor *>
auto *kernel = new (std::nothrow) ConcatCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ConcatCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/base/crop_base.cc View File

@@ -142,6 +142,7 @@ kernel::LiteKernel *CpuCropFp32KernelCreator(const std::vector<lite::Tensor *> &
auto *kernel = new (std::nothrow) CropCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new CropCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.cc View File

@@ -100,6 +100,7 @@ kernel::LiteKernel *CpuDepthToSpaceFp32KernelCreator(const std::vector<lite::Ten
auto *kernel = new (std::nothrow) DepthToSpaceCPUKernel(op_parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new DepthToSpaceCPUKernel fail!";
free(op_parameter);
return nullptr;
}



+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.cc View File

@@ -78,6 +78,7 @@ kernel::LiteKernel *CpuFullConnectionFp32KernelCreator(const std::vector<lite::T
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
}
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/base/leaky_relu_base.cc View File

@@ -41,6 +41,7 @@ kernel::LiteKernel *CpuLeakyReluInt8KernelCreator(const std::vector<lite::Tensor
auto *kernel = new (std::nothrow) LeakyReluInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new LeakyReluInt8CPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 2
- 0
mindspore/lite/src/runtime/kernel/arm/base/matmul_base.cc View File

@@ -40,6 +40,7 @@ kernel::LiteKernel *CpuMatmulKernelCreator(const std::vector<lite::Tensor *> &in
auto *dequant_weight = kernel::LiteKernelUtil::DequantWeight(weight_tensor);
if (dequant_weight == nullptr) {
MS_LOG(ERROR) << "dequant data is nullptr.";
free(opParameter);
return nullptr;
}
weight_tensor->SetData(dequant_weight);
@@ -59,6 +60,7 @@ kernel::LiteKernel *CpuMatmulKernelCreator(const std::vector<lite::Tensor *> &in
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
}
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 2
- 0
mindspore/lite/src/runtime/kernel/arm/base/pad.cc View File

@@ -37,6 +37,7 @@ kernel::LiteKernel *CpuPadInt8KernelCreator(const std::vector<lite::Tensor *> &i
auto *kernel = new (std::nothrow) PadInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new PadCPUKernel failed.";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
@@ -58,6 +59,7 @@ kernel::LiteKernel *CpuPadFp32KernelCreator(const std::vector<lite::Tensor *> &i
auto *kernel = new (std::nothrow) PadCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new PadCPUKernel failed.";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 2
- 0
mindspore/lite/src/runtime/kernel/arm/base/pooling_base.cc View File

@@ -115,6 +115,7 @@ kernel::LiteKernel *CpuPoolingInt8KernelCreator(const std::vector<lite::Tensor *
auto *kernel = new (std::nothrow) PoolingInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new PoolingInt8CPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
@@ -139,6 +140,7 @@ kernel::LiteKernel *CpuPoolingFp32KernelCreator(const std::vector<lite::Tensor *
auto *kernel = new (std::nothrow) PoolingCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new PoolingCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 2
- 0
mindspore/lite/src/runtime/kernel/arm/base/power_base.cc View File

@@ -43,6 +43,7 @@ kernel::LiteKernel *CpuPowerInt8KernelCreator(const std::vector<lite::Tensor *>
auto *kernel = new (std::nothrow) PowerInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new PowerInt8CPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
@@ -64,6 +65,7 @@ kernel::LiteKernel *CpuPowerFp32KernelCreator(const std::vector<lite::Tensor *>
PowerCPUKernel *kernel = new (std::nothrow) PowerCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new PowerCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 2
- 0
mindspore/lite/src/runtime/kernel/arm/base/prior_box.cc View File

@@ -186,11 +186,13 @@ kernel::LiteKernel *CpuPriorBoxKernelCreator(const std::vector<lite::Tensor *> &
}
if (desc.type != schema::PrimitiveType_PriorBox) {
MS_LOG(ERROR) << "PriorBox invalid desc type " << desc.type;
free(op_parameter);
return nullptr;
}
auto *kernel = new (std::nothrow) PriorBoxCPUKernel(op_parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new PriorBoxCPUKernel fail!";
free(op_parameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.cc View File

@@ -177,6 +177,7 @@ kernel::LiteKernel *CpuQuantDTypeCastFp32KernelCreator(const std::vector<lite::T
auto *kernel = new (std::nothrow) QuantDTypeCastCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new QuantDTypeCastCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 6
- 0
mindspore/lite/src/runtime/kernel/arm/base/reduce_base.cc View File

@@ -181,11 +181,13 @@ kernel::LiteKernel *CpuReduceFp32KernelCreator(const std::vector<lite::Tensor *>
}
if (desc.type != schema::PrimitiveType_Reduce) {
MS_LOG(ERROR) << "Reduce op desc.type should be PrimitiveType_Reduce, got " << desc.type;
free(opParameter);
return nullptr;
}
auto *kernel = new (std::nothrow) ReduceCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "Reduce new ReduceCPUKernel failed.";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
@@ -210,11 +212,13 @@ kernel::LiteKernel *CpuMeanFp32KernelCreator(const std::vector<lite::Tensor *> &
}
if (desc.type != schema::PrimitiveType_Mean) {
MS_LOG(ERROR) << "Reduce op desc.type should be PrimitiveType_Mean, got " << desc.type;
free(opParameter);
return nullptr;
}
auto *kernel = new (std::nothrow) ReduceCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "Reduce new ReduceCPUKernel failed.";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
@@ -239,11 +243,13 @@ kernel::LiteKernel *CpuReduceInt8KernelCreator(const std::vector<lite::Tensor *>
}
if (desc.type != schema::PrimitiveType_Reduce) {
MS_LOG(ERROR) << "Reduce op desc.type should be PrimitiveType_Reduce, got " << desc.type;
free(opParameter);
return nullptr;
}
auto *kernel = new (std::nothrow) ReduceInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "Reduce new ReduceCPUKernel failed.";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 3
- 0
mindspore/lite/src/runtime/kernel/arm/base/reshape_base.cc View File

@@ -42,6 +42,7 @@ kernel::LiteKernel *CpuReshapeInt8KernelCreator(const std::vector<lite::Tensor *
auto *kernel = new (std::nothrow) ReshapeInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ReshapeInt8CPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
@@ -66,6 +67,7 @@ kernel::LiteKernel *CpuReshapeInt32KernelCreator(const std::vector<lite::Tensor
auto *kernel = new (std::nothrow) ReshapeCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ReshapeCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
@@ -90,6 +92,7 @@ kernel::LiteKernel *CpuReshapeFp32KernelCreator(const std::vector<lite::Tensor *
auto *kernel = new (std::nothrow) ReshapeCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ReshapeCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 2
- 0
mindspore/lite/src/runtime/kernel/arm/base/resize_base.cc View File

@@ -140,6 +140,7 @@ kernel::LiteKernel *CpuResizeFp32KernelCreator(const std::vector<lite::Tensor *>
auto *kernel = new (std::nothrow) ResizeCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ResizeCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
@@ -165,6 +166,7 @@ kernel::LiteKernel *CpuResizeInt8KernelCreator(const std::vector<lite::Tensor *>
auto *kernel = new (std::nothrow) ResizeInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ResizeCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 2
- 0
mindspore/lite/src/runtime/kernel/arm/base/softmax_base.cc View File

@@ -68,6 +68,7 @@ kernel::LiteKernel *CpuSoftmaxInt8KernelCreator(const std::vector<lite::Tensor *
auto *kernel = new (std::nothrow) SoftmaxInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new SoftmaxCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
@@ -92,6 +93,7 @@ kernel::LiteKernel *CpuSoftmaxFp32KernelCreator(const std::vector<lite::Tensor *
auto *kernel = new (std::nothrow) SoftmaxCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new SoftmaxCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 3
- 0
mindspore/lite/src/runtime/kernel/arm/base/split_base.cc View File

@@ -84,6 +84,7 @@ kernel::LiteKernel *CpuSplitInt8KernelCreator(const std::vector<lite::Tensor *>
auto *kernel = new (std::nothrow) SplitInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new SplitCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
@@ -108,6 +109,7 @@ kernel::LiteKernel *CpuSplitInt32KernelCreator(const std::vector<lite::Tensor *>
auto *kernel = new (std::nothrow) SplitCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new SplitCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
@@ -132,6 +134,7 @@ kernel::LiteKernel *CpuSplitFp32KernelCreator(const std::vector<lite::Tensor *>
auto *kernel = new (std::nothrow) SplitCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new SplitCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/base/squeeze_base.cc View File

@@ -41,6 +41,7 @@ kernel::LiteKernel *CpuSqueezeInt8KernelCreator(const std::vector<lite::Tensor *
auto *kernel = new (std::nothrow) SqueezeInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new SqueezeCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/base/strided_slice.cc View File

@@ -128,6 +128,7 @@ kernel::LiteKernel *CpuStridedSliceKernelCreator(const std::vector<lite::Tensor
auto *kernel = new (std::nothrow) StridedSliceCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "New kernel fails.";
free(opParameter);
return nullptr;
}



+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp16/activation_fp16.cc View File

@@ -139,6 +139,7 @@ kernel::LiteKernel *CpuActivationFp16KernelCreator(const std::vector<lite::Tenso
auto *kernel = new (std::nothrow) ActivationFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "kernel is nullptr.";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.cc View File

@@ -241,6 +241,7 @@ kernel::LiteKernel *CpuArithmeticFp16KernelCreator(const std::vector<lite::Tenso
auto kernel = new (std::nothrow) ArithmeticFP16CPUKernel(parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "Create kernel failed, name: " << parameter->name_;
free(parameter);
return nullptr;
}
auto ret = kernel->Init();


+ 3
- 0
mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_self_fp16.cc View File

@@ -115,6 +115,9 @@ kernel::LiteKernel *CpuArithmeticSelfFp16KernelCreator(const std::vector<lite::T
auto *kernel = new (std::nothrow) ArithmeticSelfFp16CPUKernel(parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ArithmeticSelfFp16CPUKernel fail!";
if (parameter != nullptr) {
free(parameter);
}
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp16/batchnorm_fp16.cc View File

@@ -97,6 +97,7 @@ kernel::LiteKernel *CpuBatchnormFp16KernelCreator(const std::vector<lite::Tensor
auto *kernel = new (std::nothrow) BatchnormFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new BatchnormFp16CPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 3
- 0
mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.cc View File

@@ -104,15 +104,18 @@ kernel::LiteKernel *CpuCastFp16KernelCreator(const std::vector<lite::Tensor *> &
}
if (ctx == nullptr) {
MS_LOG(ERROR) << "Input context is nullptr!";
free(opParameter);
return nullptr;
}
if (ctx->thread_num_ == 0) {
MS_LOG(ERROR) << "context thread num is 0!";
free(opParameter);
return nullptr;
}
auto *kernel = new (std::nothrow) CastFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new CastFp16CPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.cc View File

@@ -151,6 +151,7 @@ kernel::LiteKernel *CpuConcatFp16KernelCreator(const std::vector<lite::Tensor *>
kernel::LiteKernel *kernel = new (std::nothrow) ConcatFp16CPUKernel(parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ConcatCPUKernel fail!";
free(parameter);
return nullptr;
}
auto ret = kernel->Init();


+ 2
- 0
mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc View File

@@ -150,6 +150,7 @@ kernel::LiteKernel *CpuConvDwFp16KernelCreator(const std::vector<lite::Tensor *>
auto *dequant_weight = kernel::LiteKernelUtil::DequantWeight(weight_tensor);
if (dequant_weight == nullptr) {
MS_LOG(ERROR) << "dequant data is nullptr.";
free(opParameter);
return nullptr;
}
weight_tensor->set_data_type(kNumberTypeFloat32);
@@ -171,6 +172,7 @@ kernel::LiteKernel *CpuConvDwFp16KernelCreator(const std::vector<lite::Tensor *>
weight_tensor->set_data_type(kNumberTypeInt8);
weight_tensor->SetData(restore_data);
}
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 2
- 0
mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc View File

@@ -188,6 +188,7 @@ kernel::LiteKernel *CpuConvFp16KernelCreator(const std::vector<lite::Tensor *> &
auto *dequant_weight = kernel::LiteKernelUtil::DequantWeight(weight_tensor);
if (dequant_weight == nullptr) {
MS_LOG(ERROR) << "dequant data is nullptr.";
free(opParameter);
return nullptr;
}
weight_tensor->set_data_type(kNumberTypeFloat32);
@@ -226,6 +227,7 @@ kernel::LiteKernel *CpuConvFp16KernelCreator(const std::vector<lite::Tensor *> &
weight_tensor->set_data_type(kNumberTypeInt8);
weight_tensor->SetData(restore_data);
}
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp16/crop_fp16.cc View File

@@ -115,6 +115,7 @@ kernel::LiteKernel *CpuCropFp16KernelCreator(const std::vector<lite::Tensor *> &
auto *kernel = new (std::nothrow) CropFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new CropFp16CPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 2
- 0
mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc View File

@@ -213,6 +213,7 @@ kernel::LiteKernel *CpuDeconvDwFp16KernelCreator(const std::vector<lite::Tensor
auto *dequant_weight = kernel::LiteKernelUtil::DequantWeight(weight_tensor);
if (dequant_weight == nullptr) {
MS_LOG(ERROR) << "dequant data is nullptr.";
free(opParameter);
return nullptr;
}
weight_tensor->set_data_type(kNumberTypeFloat32);
@@ -227,6 +228,7 @@ kernel::LiteKernel *CpuDeconvDwFp16KernelCreator(const std::vector<lite::Tensor
weight_tensor->set_data_type(kNumberTypeInt8);
weight_tensor->SetData(restore_data);
}
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 2
- 0
mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.cc View File

@@ -220,6 +220,7 @@ kernel::LiteKernel *CpuDeConvFp16KernelCreator(const std::vector<lite::Tensor *>
auto *dequant_weight = kernel::LiteKernelUtil::DequantWeight(weight_tensor);
if (dequant_weight == nullptr) {
MS_LOG(ERROR) << "dequant data is nullptr.";
free(opParameter);
return nullptr;
}
weight_tensor->set_data_type(kNumberTypeFloat32);
@@ -234,6 +235,7 @@ kernel::LiteKernel *CpuDeConvFp16KernelCreator(const std::vector<lite::Tensor *>
weight_tensor->set_data_type(kNumberTypeInt8);
weight_tensor->SetData(restore_data);
}
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 2
- 0
mindspore/lite/src/runtime/kernel/arm/fp16/fullconnection_fp16.cc View File

@@ -196,6 +196,7 @@ kernel::LiteKernel *CpuFullConnectionFp16KernelCreator(const std::vector<lite::T
auto *dequant_weight = kernel::LiteKernelUtil::DequantWeight(weight_tensor);
if (dequant_weight == nullptr) {
MS_LOG(ERROR) << "dequant data is nullptr.";
free(opParameter);
return nullptr;
}
weight_tensor->set_data_type(kNumberTypeFloat32);
@@ -209,6 +210,7 @@ kernel::LiteKernel *CpuFullConnectionFp16KernelCreator(const std::vector<lite::T
weight_tensor->set_data_type(kNumberTypeInt8);
weight_tensor->SetData(restore_data);
}
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp16/fused_batchnorm_fp16.cc View File

@@ -86,6 +86,7 @@ kernel::LiteKernel *CpuFusedBatchnormFp16KernelCreator(const std::vector<lite::T
new (std::nothrow) FusedBatchnormFp16CPUKernel(op_parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new FusedBatchnormFp16CPUKernel fail!";
free(op_parameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp16/matmul_fp16.cc View File

@@ -272,6 +272,7 @@ kernel::LiteKernel *CpuMatmulFp16KernelCreator(const std::vector<lite::Tensor *>
weight_tensor->set_data_type(kNumberTypeInt8);
weight_tensor->SetData(restore_data);
}
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp16/pad_fp16.cc View File

@@ -87,6 +87,7 @@ kernel::LiteKernel *CpuPadFp16KernelCreator(const std::vector<lite::Tensor *> &i
auto *kernel = new (std::nothrow) PadFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new PadFp16CPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp16/pooling_fp16.cc View File

@@ -130,6 +130,7 @@ kernel::LiteKernel *CpuPoolingFp16KernelCreator(const std::vector<lite::Tensor *
auto *kernel = new (std::nothrow) PoolingFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new PoolingCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 169
- 0
mindspore/lite/src/runtime/kernel/arm/fp16/quant_dtype_cast_fp16.cc View File

@@ -0,0 +1,169 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/fp16/quant_dtype_cast_fp16.h"
#include <vector>
#include "nnacl/int8/quant_dtype_cast.h"
#include "nnacl/fp16/quant_dtype_cast_fp16.h"
#include "src/runtime/runtime_api.h"
#include "src/kernel_registry.h"
#include "schema/model_generated.h"
#include "include/errorcode.h"

using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::lite::RET_PARAM_INVALID;
using mindspore::schema::PrimitiveType_QuantDTypeCast;

namespace mindspore::kernel {
// Validates tensor counts and dtypes, and derives the cast direction
// (inverse_) from the parameter's src/dst types.
int QuantDTypeCastFp16CPUKernel::Init() {
  // This kernel casts exactly one tensor.
  if (in_tensors_.size() != 1) {
    MS_LOG(ERROR) << "inputs number should be 1, but " << in_tensors_.size() << " is given.";
    return RET_PARAM_INVALID;
  }
  if (out_tensors_.size() != 1) {
    MS_LOG(ERROR) << "outputs number should be 1, but " << out_tensors_.size() << " is given.";
    return RET_PARAM_INVALID;
  }
  auto input = in_tensors_.front();
  auto output = out_tensors_.front();
  auto cast_param = reinterpret_cast<QuantDTypeCastParameter *>(op_parameter_);
  if (cast_param->dstT == kNumberTypeInt8) {
    // Quantize path: fp16 in, int8 out.
    if (!(input->data_type() == kNumberTypeFloat16 && output->data_type() == kNumberTypeInt8)) {
      MS_LOG(ERROR) << "param data type and tensor data type do not match.";
      return RET_ERROR;
    }
    inverse_ = false;
  } else if (cast_param->srcT == kNumberTypeInt8) {
    // Dequantize path: int8 in, fp16 out.
    if (!(input->data_type() == kNumberTypeInt8 && output->data_type() == kNumberTypeFloat16)) {
      MS_LOG(ERROR) << "param data type and tensor data type do not match.";
      return RET_ERROR;
    }
    inverse_ = true;
  } else {
    MS_LOG(ERROR) << "param data type not supported:"
                  << " src: " << cast_param->srcT << " dst: " << cast_param->dstT;
    return RET_PARAM_INVALID;
  }

  // Defer work partitioning until shapes are known.
  return InferShapeDone() ? ReSize() : RET_OK;
}

int QuantDTypeCastFp16CPUKernel::ReSize() {
auto in_tensor = in_tensors_.front();
num_unit_ = static_cast<int>(in_tensor->ElementsNum());
thread_n_num_ = MSMIN(thread_num_, num_unit_);
thread_n_stride_ = UP_DIV(num_unit_, thread_n_num_);
return RET_OK;
}

// Casts one parallel slice of the tensor, starting at task_id * thread_n_stride_.
// Direction is chosen by inverse_ (set in Init): int8 -> fp16 when true,
// fp16 -> int8 when false. Buffers int8_ptr_/float16_ptr_ are bound in Run().
int QuantDTypeCastFp16CPUKernel::QuantDTypeCast(int task_id) {
  // The last slice may be shorter than the stride; non-positive means no work.
  int num_unit_thread = MSMIN(thread_n_stride_, num_unit_ - task_id * thread_n_stride_);
  if (num_unit_thread <= 0) {
    return RET_OK;
  }
  int thread_offset = task_id * thread_n_stride_;
  // At least one side must carry quantization parameters (scale / zero point).
  if (in_tensors_.front()->GetQuantParams().empty() && out_tensors_.front()->GetQuantParams().empty()) {
    MS_LOG(ERROR) << "QuantDTypeCast need quantization parameters which is not found.";
    return RET_ERROR;
  }
  // Prefer the output tensor's quant params; fall back to the input's.
  auto quant_arg = !out_tensors_.front()->GetQuantParams().empty() ? out_tensors_.front()->GetQuantParams().front()
                                                                   : in_tensors_.front()->GetQuantParams().front();
  int ret;
  if (inverse_) {
    // Dequantize: int8 -> fp16.
    ret = DoDequantizeInt8ToFp16(int8_ptr_ + thread_offset, float16_ptr_ + thread_offset, quant_arg.scale,
                                 quant_arg.zeroPoint, num_unit_thread);
  } else {
    // Quantize: fp16 -> int8.
    ret = DoQuantizeToInt8FromFp16(float16_ptr_ + thread_offset, int8_ptr_ + thread_offset, quant_arg.scale,
                                   quant_arg.zeroPoint, num_unit_thread);
  }

  if (ret != RET_OK) {
    MS_LOG(ERROR) << "QuantDTypeCastFp16 error task_id[" << task_id << "] error_code[" << ret << "]";
    return RET_ERROR;
  }
  return RET_OK;
}

int QuantDTypeCastRun(void *cdata, int task_id) {
auto g_kernel = reinterpret_cast<QuantDTypeCastFp16CPUKernel *>(cdata);
auto ret = g_kernel->QuantDTypeCast(task_id);
if (ret != RET_OK) {
MS_LOG(ERROR) << "QuantDTypeCastRun error task_id[" << task_id << "] error_code[" << ret << "]";
return RET_ERROR;
}
return RET_OK;
}

// Binds the int8/fp16 tensor buffers according to the cast direction and
// launches the per-slice cast (QuantDTypeCast) across thread_n_num_ workers.
int QuantDTypeCastFp16CPUKernel::Run() {
  auto prepare_ret = Prepare();
  if (prepare_ret != RET_OK) {
    MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret;
    return prepare_ret;
  }

  // Bind raw buffers; only int8<->fp16 pairs are supported.
  if (in_tensors_[0]->data_type() == TypeId::kNumberTypeInt8 &&
      out_tensors_[0]->data_type() == TypeId::kNumberTypeFloat16) {
    int8_ptr_ = reinterpret_cast<int8_t *>(in_tensors_[0]->data_c());
    float16_ptr_ = reinterpret_cast<float16_t *>(out_tensors_[0]->data_c());
  } else if (in_tensors_[0]->data_type() == TypeId::kNumberTypeFloat16 &&
             out_tensors_[0]->data_type() == TypeId::kNumberTypeInt8) {
    float16_ptr_ = reinterpret_cast<float16_t *>(in_tensors_[0]->data_c());
    int8_ptr_ = reinterpret_cast<int8_t *>(out_tensors_[0]->data_c());
  } else {
    MS_LOG(ERROR) << "QuantDTypeCastFp16 not support input or output type";
    return RET_ERROR;
  }
  // Guard against unallocated tensor data before handing pointers to workers.
  if (int8_ptr_ == nullptr || float16_ptr_ == nullptr) {
    MS_LOG(ERROR) << "QuantDTypeCastFp16 input or output data is nullptr";
    return RET_ERROR;
  }

  auto ret = ParallelLaunch(this->context_->thread_pool_, QuantDTypeCastRun, this, thread_n_num_);
  if (ret != RET_OK) {
    // Fixed copy-pasted "Scale error" message inherited from the scale kernel.
    MS_LOG(ERROR) << "QuantDTypeCastFp16 error error_code[" << ret << "]";
    return RET_ERROR;
  }

  return RET_OK;
}

// Factory registered for kCPU / Float16 / QuantDTypeCast. Frees opParameter
// on every failure path that occurs before the kernel takes ownership.
kernel::LiteKernel *CpuQuantDTypeCastFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                       const std::vector<lite::Tensor *> &outputs,
                                                       OpParameter *opParameter, const lite::InnerContext *ctx,
                                                       const kernel::KernelKey &desc,
                                                       const mindspore::lite::PrimitiveC *primitive) {
  if (opParameter == nullptr) {
    MS_LOG(ERROR) << "Input opParameter is nullptr!";
    return nullptr;
  }
  auto *cast_kernel = new (std::nothrow) QuantDTypeCastFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
  if (cast_kernel == nullptr) {
    MS_LOG(ERROR) << "new QuantDTypeCastFp16CPUKernel fail!";
    // The kernel never took ownership of the parameter; release it here.
    free(opParameter);
    return nullptr;
  }
  auto init_ret = cast_kernel->Init();
  if (init_ret != RET_OK) {
    MS_LOG(ERROR) << "Init kernel failed! name: " << opParameter->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
    delete cast_kernel;
    return nullptr;
  }
  return cast_kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_QuantDTypeCast, CpuQuantDTypeCastFp16KernelCreator)
} // namespace mindspore::kernel

+ 49
- 0
mindspore/lite/src/runtime/kernel/arm/fp16/quant_dtype_cast_fp16.h View File

@@ -0,0 +1,49 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_QUANTDTYPECAST_FP16_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_QUANTDTYPECAST_FP16_H_

#include <arm_neon.h>
#include <vector>
#include "src/lite_kernel.h"

namespace mindspore::kernel {
// CPU kernel casting between int8 and float16 tensors using the tensors'
// quantization parameters. Direction is decided in Init() from the
// QuantDTypeCastParameter src/dst types.
class QuantDTypeCastFp16CPUKernel : public LiteKernel {
 public:
  QuantDTypeCastFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
                              const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
                              const mindspore::lite::PrimitiveC *primitive)
      : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_num_(ctx->thread_num_) {}
  ~QuantDTypeCastFp16CPUKernel() override = default;

  int Init() override;
  int ReSize() override;
  int Run() override;
  // Processes one parallel slice; task_id selects the slice offset.
  int QuantDTypeCast(int task_id);

 private:
  // Members are default-initialized so a Run() before Init()/ReSize() cannot
  // read indeterminate values (the originals were left uninitialized).
  int thread_num_;                     // requested worker count (from ctx)
  int thread_n_num_ = 0;               // actual worker count (<= thread_num_)
  int thread_n_stride_ = 0;            // elements per worker
  int num_unit_ = 0;                   // total element count
  int8_t *int8_ptr_ = nullptr;         // int8-side buffer, bound in Run()
  float16_t *float16_ptr_ = nullptr;   // fp16-side buffer, bound in Run()
  bool inverse_ = false;               // true: int8 -> fp16; false: fp16 -> int8
};
} // namespace mindspore::kernel

#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_QUANTDTYPECAST_FP16_H_

+ 2
- 0
mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc View File

@@ -205,11 +205,13 @@ kernel::LiteKernel *CpuMeanFp16KernelCreator(const std::vector<lite::Tensor *> &
}
if (desc.type != schema::PrimitiveType_Mean) {
MS_LOG(ERROR) << "Reduce op desc.type should be PrimitiveType_Mean, got " << desc.type;
free(opParameter);
return nullptr;
}
auto *kernel = new (std::nothrow) ReduceFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "Reduce new ReduceCPUKernel failed.";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp16/reshape_fp16.cc View File

@@ -86,6 +86,7 @@ kernel::LiteKernel *CpuReshapeFp16KernelCreator(const std::vector<lite::Tensor *
auto *kernel = new (std::nothrow) ReshapeFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ReshapeFp16CPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp16/scale_fp16.cc View File

@@ -198,6 +198,7 @@ kernel::LiteKernel *CpuScaleFp16KernelCreator(const std::vector<lite::Tensor *>
auto *kernel = new (std::nothrow) ScaleFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "New kernel fails.";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp16/slice_fp16.cc View File

@@ -75,6 +75,7 @@ kernel::LiteKernel *CpuSliceFp16KernelCreator(const std::vector<lite::Tensor *>
auto *kernel = new (std::nothrow) SliceFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new SliceFp16CPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp16/softmax_fp16.cc View File

@@ -136,6 +136,7 @@ kernel::LiteKernel *CpuSoftmaxFp16KernelCreator(const std::vector<lite::Tensor *
auto *kernel = new (std::nothrow) SoftmaxFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new SoftmaxFp16CPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp16/split_fp16.cc View File

@@ -133,6 +133,7 @@ kernel::LiteKernel *CpuSplitFp16KernelCreator(const std::vector<lite::Tensor *>
auto *kernel = new (std::nothrow) SplitFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new SplitFp16CPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp16/stack_fp16.cc View File

@@ -117,6 +117,7 @@ kernel::LiteKernel *CpuStackFp16KernelCreator(const std::vector<lite::Tensor *>
auto *kernel = new (std::nothrow) StackFp16CPUKernel(op_parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new StackFp16CPUKernel fail!";
free(op_parameter);
return nullptr;
}



+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.cc View File

@@ -190,6 +190,7 @@ kernel::LiteKernel *CpuTransposeFp16KernelCreator(const std::vector<lite::Tensor
auto *kernel = new (std::nothrow) TransposeFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "New kernel fails.";
free(opParameter);
return nullptr;
}



+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/activation.cc View File

@@ -102,6 +102,7 @@ kernel::LiteKernel *CpuActivationFp32KernelCreator(const std::vector<lite::Tenso
auto *kernel = new (std::nothrow) ActivationCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "kernel is nullptr.";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 2
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/addn.cc View File

@@ -101,6 +101,7 @@ kernel::LiteKernel *CpuAddNFp32KernelCreator(const std::vector<lite::Tensor *> &
}
if (ctx == nullptr) {
MS_LOG(ERROR) << "Input context is nullptr!";
free(op_parameter);
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_AddN);
@@ -108,6 +109,7 @@ kernel::LiteKernel *CpuAddNFp32KernelCreator(const std::vector<lite::Tensor *> &
auto *kernel = new (std::nothrow) AddNCPUKernel(op_parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new AddNCPUKernel fail!";
free(op_parameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.cc View File

@@ -285,6 +285,7 @@ kernel::LiteKernel *CpuArithmeticFp32KernelCreator(const std::vector<lite::Tenso
auto kernel = new (std::nothrow) ArithmeticCPUKernel(parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "Create kernel failed, name: " << parameter->name_;
free(parameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.cc View File

@@ -108,6 +108,7 @@ kernel::LiteKernel *CpuArithmeticSelfFp32KernelCreator(const std::vector<lite::T
auto *kernel = new (std::nothrow) ArithmeticSelfCPUKernel(parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ArithmeticSelfCPUKernel fail!";
free(parameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm.cc View File

@@ -105,6 +105,7 @@ kernel::LiteKernel *CpuBatchnormKernelCreator(const std::vector<lite::Tensor *>
auto *kernel = new (std::nothrow) BatchnormCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new BatchNormCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/bias.cc View File

@@ -83,6 +83,7 @@ kernel::LiteKernel *CpuBiasFp32KernelCreator(const std::vector<lite::Tensor *> &
auto kernel = new (std::nothrow) BiasCPUKernel(parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "Create kernel failed, name: " << parameter->name_;
free(parameter);
return nullptr;
}



+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/broadcast_to.cc View File

@@ -73,6 +73,7 @@ kernel::LiteKernel *CpuBroadcastToFp32KernelCreator(const std::vector<lite::Tens
auto *kernel = new (std::nothrow) BroadcastToCPUKernel(op_parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new BroadcastToCPUKernel fail!";
free(op_parameter);
return nullptr;
}



+ 3
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/cast.cc View File

@@ -127,15 +127,18 @@ kernel::LiteKernel *CpuCastFp32KernelCreator(const std::vector<lite::Tensor *> &
}
if (ctx == nullptr) {
MS_LOG(ERROR) << "Input context is nullptr!";
free(opParameter);
return nullptr;
}
if (ctx->thread_num_ == 0) {
MS_LOG(ERROR) << "context thread num is 0!";
free(opParameter);
return nullptr;
}
auto *kernel = new (std::nothrow) CastCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new CastCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/constant_of_shape.cc View File

@@ -84,6 +84,7 @@ kernel::LiteKernel *CpuConstantOfShapeFp32KernelCreator(const std::vector<lite::
auto *kernel = new (std::nothrow) ConstantOfShapeCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ConstantOfShapeCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 2
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/convolution.cc View File

@@ -190,6 +190,7 @@ kernel::LiteKernel *CpuConvFp32KernelCreator(const std::vector<lite::Tensor *> &
auto *dequant_weight = kernel::LiteKernelUtil::DequantWeight(weight_tensor);
if (dequant_weight == nullptr) {
MS_LOG(ERROR) << "dequant data is nullptr.";
free(op_parameter);
return nullptr;
}
weight_tensor->SetData(dequant_weight);
@@ -210,6 +211,7 @@ kernel::LiteKernel *CpuConvFp32KernelCreator(const std::vector<lite::Tensor *> &
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
}
free(op_parameter);
return nullptr;
}
auto ret = kernel->Init();


+ 2
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.cc View File

@@ -137,6 +137,7 @@ kernel::LiteKernel *CpuConvDwFp32KernelCreator(const std::vector<lite::Tensor *>
auto *dequant_weight = kernel::LiteKernelUtil::DequantWeight(weight_tensor);
if (dequant_weight == nullptr) {
MS_LOG(ERROR) << "dequant data is nullptr.";
free(opParameter);
return nullptr;
}
weight_tensor->SetData(dequant_weight);
@@ -155,6 +156,7 @@ kernel::LiteKernel *CpuConvDwFp32KernelCreator(const std::vector<lite::Tensor *>
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
}
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 2
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.cc View File

@@ -241,6 +241,7 @@ kernel::LiteKernel *CpuDeConvFp32KernelCreator(const std::vector<lite::Tensor *>
auto *dequant_weight = kernel::LiteKernelUtil::DequantWeight(weight_tensor);
if (dequant_weight == nullptr) {
MS_LOG(ERROR) << "dequant data is nullptr.";
free(opParameter);
return nullptr;
}
weight_tensor->SetData(dequant_weight);
@@ -252,6 +253,7 @@ kernel::LiteKernel *CpuDeConvFp32KernelCreator(const std::vector<lite::Tensor *>
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
}
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 2
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise.cc View File

@@ -205,6 +205,7 @@ kernel::LiteKernel *CpuDeconvDwFp32KernelCreator(const std::vector<lite::Tensor
auto *dequant_weight = kernel::LiteKernelUtil::DequantWeight(weight_tensor);
if (dequant_weight == nullptr) {
MS_LOG(ERROR) << "dequant data is nullptr.";
free(opParameter);
return nullptr;
}
weight_tensor->SetData(dequant_weight);
@@ -217,6 +218,7 @@ kernel::LiteKernel *CpuDeconvDwFp32KernelCreator(const std::vector<lite::Tensor
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
}
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/detection_post_process.cc View File

@@ -136,6 +136,7 @@ kernel::LiteKernel *CpuDetectionPostProcessFp32KernelCreator(const std::vector<l
auto *kernel = new (std::nothrow) DetectionPostProcessCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new DetectionPostProcessCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 8
- 2
mindspore/lite/src/runtime/kernel/arm/fp32/elu.cc View File

@@ -77,14 +77,20 @@ kernel::LiteKernel *CpuEluFp32KernelCreator(const std::vector<lite::Tensor *> &i
const std::vector<lite::Tensor *> &outputs, OpParameter *parameter,
const lite::InnerContext *ctx, const KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (parameter == nullptr || ctx == nullptr) {
MS_LOG(ERROR) << "parameter or ctx is nullptr";
if (parameter == nullptr) {
MS_LOG(ERROR) << "parameter is nullptr";
return nullptr;
}
if (ctx == nullptr) {
MS_LOG(ERROR) << "ctx is nullptr";
free(parameter);
return nullptr;
}
MS_ASSERT(desc.type == PrimitiveType_Elu);
auto *kernel = new (std::nothrow) EluCPUKernel(parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "Create Kernel failed, name: " << parameter->name_;
free(parameter);
return nullptr;
}



+ 8
- 2
mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup.cc View File

@@ -117,14 +117,20 @@ kernel::LiteKernel *CpuEmbeddingLookupFp32KernelCreator(const std::vector<lite::
OpParameter *parameter, const lite::InnerContext *ctx,
const KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (parameter == nullptr || ctx == nullptr) {
MS_LOG(ERROR) << "parameter or ctx is nullptr";
if (parameter == nullptr) {
MS_LOG(ERROR) << "parameter is nullptr";
return nullptr;
}
if (ctx == nullptr) {
MS_LOG(ERROR) << "ctx is nullptr";
free(parameter);
return nullptr;
}
MS_ASSERT(desc.type == PrimitiveType_EmbeddingLookup);
auto *kernel = new (std::nothrow) EmbeddingLookupCPUKernel(parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "Create Kernel failed, name: " << parameter->name_;
free(parameter);
return nullptr;
}



+ 8
- 2
mindspore/lite/src/runtime/kernel/arm/fp32/exp.cc View File

@@ -90,14 +90,20 @@ kernel::LiteKernel *CpuExpFp32KernelCreator(const std::vector<lite::Tensor *> &i
const std::vector<lite::Tensor *> &outputs, OpParameter *parameter,
const lite::InnerContext *ctx, const KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (parameter == nullptr || ctx == nullptr) {
MS_LOG(ERROR) << "parameter or ctx is nullptr";
if (parameter == nullptr) {
MS_LOG(ERROR) << "parameter is nullptr";
return nullptr;
}
if (ctx == nullptr) {
MS_LOG(ERROR) << "ctx is nullptr";
free(parameter);
return nullptr;
}
MS_ASSERT(desc.type == PrimitiveType_Exp);
auto *kernel = new (std::nothrow) ExpCPUKernel(parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "Create Kernel failed, name: " << parameter->name_;
free(parameter);
return nullptr;
}



+ 5
- 4
mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.cc View File

@@ -49,15 +49,15 @@ int ExpandDimsCPUKernel::DoExpandDims(int task_id) {
}
int offset = task_id * thread_sz_stride_;
if (this->in_tensors_[0]->data_type() == kNumberTypeFloat32) {
int ret = ExpandDims(reinterpret_cast<float *>(in_ptr_) + offset,
reinterpret_cast<float *>(out_ptr_) + offset, size * sizeof(float));
int ret = ExpandDims(reinterpret_cast<float *>(in_ptr_) + offset, reinterpret_cast<float *>(out_ptr_) + offset,
size * sizeof(float));
if (ret != RET_OK) {
MS_LOG(ERROR) << "ExpandDimsRun error task_id[" << task_id << "] error_code[" << ret << "]";
return ret;
}
} else if (this->in_tensors_[0]->data_type() == kNumberTypeInt8) {
int ret = ExpandDims(reinterpret_cast<int8_t *>(in_ptr_) + offset,
reinterpret_cast<int8_t *>(out_ptr_) + offset, size * sizeof(int8_t));
int ret = ExpandDims(reinterpret_cast<int8_t *>(in_ptr_) + offset, reinterpret_cast<int8_t *>(out_ptr_) + offset,
size * sizeof(int8_t));
if (ret != RET_OK) {
MS_LOG(ERROR) << "ExpandDimsRun error task_id[" << task_id << "] error_code[" << ret << "]";
return ret;
@@ -102,6 +102,7 @@ kernel::LiteKernel *CpuExpandsDimsFp32KernelCreator(const std::vector<lite::Tens
auto *kernel = new (std::nothrow) ExpandDimsCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ExpandDimsCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/fill.cc View File

@@ -98,6 +98,7 @@ kernel::LiteKernel *CpuFillFp32KernelCreator(const std::vector<lite::Tensor *> &
auto *kernel = new (std::nothrow) FillCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new FillCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/flatten.cc View File

@@ -68,6 +68,7 @@ kernel::LiteKernel *CpuFlattenFp32KernelCreator(const std::vector<lite::Tensor *
auto *kernel = new (std::nothrow) FlattenCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new FlattenCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm.cc View File

@@ -149,6 +149,7 @@ kernel::LiteKernel *CpuFusedBatchnormKernelCreator(const std::vector<lite::Tenso
new (std::nothrow) FusedBatchnormCPUKernel(op_parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new FusedBatchnormCPUKernel fail!";
free(op_parameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/gather.cc View File

@@ -150,6 +150,7 @@ kernel::LiteKernel *CpuGatherFp32KernelCreator(const std::vector<lite::Tensor *>
}
auto *kernel = new (std::nothrow) GatherCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd.cc View File

@@ -140,6 +140,7 @@ kernel::LiteKernel *CpuGatherNdFp32KernelCreator(const std::vector<lite::Tensor

auto *kernel = new (std::nothrow) GatherNdCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm.cc View File

@@ -191,6 +191,7 @@ kernel::LiteKernel *CpuL2NormFp32KernelCreator(const std::vector<lite::Tensor *>
auto *kernel = new (std::nothrow) L2NormCPUKernel(param, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new L2NormCPUKernel fail!";
free(param);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm.cc View File

@@ -98,6 +98,7 @@ kernel::LiteKernel *CpuLocalResponseNormFp32KernelCreator(const std::vector<lite
auto *kernel = new (std::nothrow) LocalResponseNormCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new LocalResponseNormCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 2
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/lsh_projection.cc View File

@@ -161,12 +161,14 @@ kernel::LiteKernel *CpuLshProjectionFp32KernelCreator(const std::vector<lite::Te
}
if (ctx == nullptr) {
MS_LOG(ERROR) << "Input context is nullptr!";
free(op_parameter);
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_LshProjection);
auto *kernel = new (std::nothrow) LshProjectionCPUKernel(op_parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new LshProjectionCPUKernel fail!";
free(op_parameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/lstm.cc View File

@@ -188,6 +188,7 @@ kernel::LiteKernel *CpuLstmKernelCreator(const std::vector<lite::Tensor *> &inpu
auto *kernel = new (std::nothrow) LstmCPUKernel(parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "kernel is nullptr.";
free(parameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/nchw2nhwc.cc View File

@@ -59,6 +59,7 @@ kernel::LiteKernel *CpuNchw2NhwcFp32KernelCreator(const std::vector<lite::Tensor
auto *kernel = new (std::nothrow) Nchw2NhwcCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new Nchw2NhwcCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/nhwc2nchw.cc View File

@@ -59,6 +59,7 @@ kernel::LiteKernel *CpuNhwc2NchwFp32KernelCreator(const std::vector<lite::Tensor
auto *kernel = new (std::nothrow) Nhwc2NchwCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new Nhwc2NchwCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 2
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/one_hot.cc View File

@@ -184,11 +184,13 @@ kernel::LiteKernel *CpuOneHotFp32KernelCreator(const std::vector<lite::Tensor *>
}
if (desc.type != schema::PrimitiveType_OneHot) {
MS_LOG(ERROR) << "OneHot desc type should be " << schema::PrimitiveType_OneHot << " got " << desc.type;
free(opParameter);
return nullptr;
}
auto *kernel = new (std::nothrow) OneHotCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "OneHot new kernel failed.";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/prelu.cc View File

@@ -159,6 +159,7 @@ kernel::LiteKernel *CpuPReluFp32KernelCreator(const std::vector<lite::Tensor *>
auto *kernel = new (std::nothrow) PReluCPUKernel(param, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new PReluCPUKernel fail!";
free(param);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/range.cc View File

@@ -55,6 +55,7 @@ kernel::LiteKernel *CpuRangeFp32KernelCreator(const std::vector<lite::Tensor *>
auto *kernel = new (std::nothrow) RangeCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new RangeCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/rank.cc View File

@@ -54,6 +54,7 @@ kernel::LiteKernel *CpuRankFp32KernelCreator(const std::vector<lite::Tensor *> &
auto *kernel = new (std::nothrow) RankCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new RankCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/reverse.cc View File

@@ -153,6 +153,7 @@ kernel::LiteKernel *CpuReverseFp32KernelCreator(const std::vector<lite::Tensor *
if (kernel == nullptr) {
MS_LOG(ERROR) << "Kernel is NULL! name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
free(opParameter);
return nullptr;
}



+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/reverse_sequence.cc View File

@@ -111,6 +111,7 @@ kernel::LiteKernel *CpuReverseSequenceFp32KernelCreator(const std::vector<lite::
auto *kernel = new (std::nothrow) ReverseSequenceCPUKernel(parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "Create kernel failed, name: " << parameter->name_;
free(parameter);
return nullptr;
}
auto ret = kernel->Init();


+ 3
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling.cc View File

@@ -119,15 +119,18 @@ kernel::LiteKernel *CpuROIPoolingFp32KernelCreator(const std::vector<lite::Tenso
}
if (ctx == nullptr) {
MS_LOG(ERROR) << "Input context is nullptr!";
free(opParameter);
return nullptr;
}
if (ctx->thread_num_ == 0) {
MS_LOG(ERROR) << "context thread num is 0!";
free(opParameter);
return nullptr;
}
auto *kernel = new (std::nothrow) ROIPoolingCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ROIPoolingCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/scale.cc View File

@@ -213,6 +213,7 @@ kernel::LiteKernel *CpuScaleFp32KernelCreator(const std::vector<lite::Tensor *>
auto *kernel = new (std::nothrow) ScaleCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "New kernel fails.";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.cc View File

@@ -174,6 +174,7 @@ kernel::LiteKernel *CpuScatterNDFp32KernelCreator(const std::vector<lite::Tensor
auto *kernel = new (std::nothrow) ScatterNDCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "New kernel fails.";
free(opParameter);
return nullptr;
}



+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/shape.cc View File

@@ -66,6 +66,7 @@ kernel::LiteKernel *CpuShapeFp32KernelCreator(const std::vector<lite::Tensor *>
auto *kernel = new (std::nothrow) ShapeCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "New kernel fails.";
free(opParameter);
return nullptr;
}



+ 8
- 2
mindspore/lite/src/runtime/kernel/arm/fp32/skip_gram.cc View File

@@ -108,14 +108,20 @@ kernel::LiteKernel *CpuSkipGramFp32KernelCreator(const std::vector<lite::Tensor
const std::vector<lite::Tensor *> &outputs, OpParameter *parameter,
const lite::InnerContext *ctx, const KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (parameter == nullptr || ctx == nullptr) {
MS_LOG(ERROR) << "parameter or ctx is nullptr";
if (parameter == nullptr) {
MS_LOG(ERROR) << "parameter is nullptr";
return nullptr;
}
if (ctx == nullptr) {
MS_LOG(ERROR) << "ctx is nullptr";
free(parameter);
return nullptr;
}
MS_ASSERT(desc.type == PrimitiveType_SkipGram);
auto *kernel = new (std::nothrow) SkipGramCPUKernel(parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "Create Kernel failed, name: " << parameter->name_;
free(parameter);
return nullptr;
}



+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/slice.cc View File

@@ -94,6 +94,7 @@ kernel::LiteKernel *CpuSliceFp32KernelCreator(const std::vector<lite::Tensor *>
auto *kernel = new (std::nothrow) SliceCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new SliceCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch.cc View File

@@ -110,6 +110,7 @@ kernel::LiteKernel *CpuSpaceToBatchFp32KernelCreator(const std::vector<lite::Ten
auto *kernel = new (std::nothrow) SpaceToBatchCPUKernel(param, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new SpaceToBatchCPUKernel fail!";
free(param);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.cc View File

@@ -118,6 +118,7 @@ kernel::LiteKernel *CpuSpaceToDepthFp32KernelCreator(const std::vector<lite::Ten
auto *kernel = new (std::nothrow) SpaceToDepthCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new SpaceToDepthCPUKernel fail!";
free(opParameter);
return nullptr;
}



+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense.cc View File

@@ -202,6 +202,7 @@ kernel::LiteKernel *CpuSparseToDenseFp32KernelCreator(const std::vector<lite::Te
auto *kernel = new (std::nothrow) SparseToDenseCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new SparseToDenseCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/squeeze.cc View File

@@ -68,6 +68,7 @@ kernel::LiteKernel *CpuSqueezeFp32KernelCreator(const std::vector<lite::Tensor *
auto *kernel = new (std::nothrow) SqueezeCPUKernel(parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "New kernel fails.";
free(parameter);
return nullptr;
}



+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/stack.cc View File

@@ -86,6 +86,7 @@ kernel::LiteKernel *CpuStackFp32KernelCreator(const std::vector<lite::Tensor *>
auto *kernel = new (std::nothrow) StackCPUKernel(op_parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new StackCPUKernel fail!";
free(op_parameter);
return nullptr;
}



+ 8
- 2
mindspore/lite/src/runtime/kernel/arm/fp32/tile.cc View File

@@ -67,14 +67,20 @@ kernel::LiteKernel *CpuTileFp32KernelCreator(const std::vector<lite::Tensor *> &
const std::vector<lite::Tensor *> &outputs, OpParameter *parameter,
const lite::InnerContext *ctx, const KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (parameter == nullptr || ctx == nullptr) {
MS_LOG(ERROR) << "parameter or ctx is nullptr";
if (parameter == nullptr) {
MS_LOG(ERROR) << "parameter is nullptr";
return nullptr;
}
if (ctx == nullptr) {
MS_LOG(ERROR) << "ctx is nullptr";
free(parameter);
return nullptr;
}
MS_ASSERT(desc.type == PrimitiveType_Tile);
auto *kernel = new (std::nothrow) TileCPUKernel(parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "Create kernel failed, name: " << parameter->name_;
free(parameter);
return nullptr;
}



+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/topk.cc View File

@@ -78,6 +78,7 @@ kernel::LiteKernel *CpuTopKFp32KernelCreator(const std::vector<lite::Tensor *> &
auto *kernel = new (std::nothrow) TopKCPUKernel(parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new TopKCPUKernel fail!";
free(parameter);
return nullptr;
}



+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/transpose.cc View File

@@ -146,6 +146,7 @@ kernel::LiteKernel *CpuTransposeFp32KernelCreator(const std::vector<lite::Tensor
auto *kernel = new (std::nothrow) TransposeCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "New kernel fails.";
free(opParameter);
return nullptr;
}



+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/unique.cc View File

@@ -55,6 +55,7 @@ kernel::LiteKernel *CpuUniqueFp32KernelCreator(const std::vector<lite::Tensor *>
auto *kernel = new (std::nothrow) UniqueCPUKernel(parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "Create kernel failed, name: " << parameter->name_;
free(parameter);
return nullptr;
}
auto ret = kernel->Init();


+ 1
- 0
mindspore/lite/src/runtime/kernel/arm/fp32/unsqueeze.cc View File

@@ -90,6 +90,7 @@ kernel::LiteKernel *CpuUnsqueezeFp32KernelCreator(const std::vector<lite::Tensor
auto *kernel = new (std::nothrow) UnsqueezeCPUKernel(parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new UnsqueezeCPUKernel fail!";
free(parameter);
return nullptr;
}
auto ret = kernel->Init();


Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save