
support scalar

tags/v1.1.0
hangq committed 5 years ago
commit 1efc6d4394
100 changed files with 1088 additions and 1096 deletions
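In short: this commit adds scalar weight support to the lite Tensor. Tensor::Category::CONST splits into CONST_TENSOR and CONST_SCALAR, ElementsNum()/ElementsC4Num() learn to treat a shapeless scalar as one element, new IsConst()/IsScalar() helpers replace ad-hoc category checks at call sites, and the accessors move to snake_case (SetData/RefCount/SetRefCount/decRefCount become set_data/ref_count/set_ref_count/DecRefCount). Most of the churn in the test files below is the mechanical rename to set_data.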
  1. +0 -31 mindspore/lite/src/common/op_utils.h
  2. +2 -2 mindspore/lite/src/executor.cc
  3. +4 -5 mindspore/lite/src/lite_kernel.cc
  4. +9 -14 mindspore/lite/src/lite_session.cc
  5. +4 -4 mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.cc
  6. +4 -4 mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc
  7. +4 -4 mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc
  8. +4 -4 mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc
  9. +4 -4 mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.cc
  10. +4 -4 mindspore/lite/src/runtime/kernel/arm/fp16/fullconnection_fp16.cc
  11. +4 -4 mindspore/lite/src/runtime/kernel/arm/fp16/matmul_fp16.cc
  12. +7 -7 mindspore/lite/src/runtime/kernel/arm/fp32/convolution.cc
  13. +4 -4 mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.cc
  14. +4 -4 mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.cc
  15. +4 -4 mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise.cc
  16. +4 -4 mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.cc
  17. +1 -1 mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc
  18. +1 -1 mindspore/lite/src/runtime/kernel/opencl/kernel/scale.cc
  19. +1 -1 mindspore/lite/src/runtime/kernel/opencl/subgraph_opencl_kernel.cc
  20. +1 -1 mindspore/lite/src/runtime/opencl/opencl_executor.cc
  21. +1 -1 mindspore/lite/src/scheduler.cc
  22. +47 -14 mindspore/lite/src/tensor.cc
  23. +65 -75 mindspore/lite/src/tensor.h
  24. +8 -8 mindspore/lite/test/ut/src/runtime/kernel/arm/common/strided_slice_tests.cc
  25. +4 -4 mindspore/lite/test/ut/src/runtime/kernel/arm/fp16/reduce_fp16_tests.cc
  26. +12 -12 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/activation_fp32_test.cc
  27. +54 -54 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/arithmetic_fp32_tests.cc
  28. +28 -28 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batchnorm_fp32_tests.cc
  29. +2 -2 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/constant_of_shape_fp32_test.cc
  30. +10 -16 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/conv1x1_fp32_tests.cc
  31. +2 -2 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc
  32. +18 -16 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc
  33. +3 -2 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/elu_fp32_test.cc
  34. +7 -4 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/embedding_lookup_fp32_test.cc
  35. +12 -11 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/fullconnection_fp32_tests.cc
  36. +16 -16 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/instance_norm_fp32_tests.cc
  37. +4 -4 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/l2norm_fp32_test.cc
  38. +20 -20 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lsh_projection_fp32_tests.cc
  39. +10 -7 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc
  40. +10 -10 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/non_max_suppression_fp32_tests.cc
  41. +6 -6 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/pad_fp32_test.cc
  42. +6 -5 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/power_fp32_tests.cc
  43. +4 -4 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reduce_fp32_tests.cc
  44. +4 -4 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_bilinear_fp32_tests.cc
  45. +4 -4 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_nearest_neighbor_fp32_tests.cc
  46. +18 -18 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reverse_sequence_fp32_tests.cc
  47. +3 -3 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/roi_pooling_fp32_tests.cc
  48. +8 -8 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/scale_fp32_tests.cc
  49. +2 -2 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/skip_gram_fp32.cc
  50. +2 -2 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc
  51. +50 -50 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32_tests.cc
  52. +31 -31 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/strided_slice_fp32_tests.cc
  53. +4 -4 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/tile_fp32_tests.cc
  54. +6 -6 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/topk_fp32_tests.cc
  55. +4 -4 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/transpose_fp32_tests.cc
  56. +6 -6 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unique_fp32_tests.cc
  57. +18 -18 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unstack_fp32_tests.cc
  58. +18 -18 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/arithmetic_grad_fp32_tests.cc
  59. +4 -4 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/bias_grad_fp32_tests.cc
  60. +3 -3 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/bn_grad_fp32_test.cc
  61. +54 -54 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/convolution_grad_fp32_tests.cc
  62. +36 -36 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_fp32_tests.cc
  63. +36 -36 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/pooling_grad_fp32_tests.cc
  64. +8 -8 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_crossentropy_fp32_tests.cc
  65. +6 -6 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/add_int8_tests.cc
  66. +64 -64 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc
  67. +20 -20 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/batchnorm_int8_test.cc
  68. +6 -6 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/bias_add_int8_tests.cc
  69. +18 -18 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/concat_int8_tests.cc
  70. +13 -10 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/conv_1x1_int8_tests.cc
  71. +40 -40 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/crop_int8_tests.cc
  72. +3 -3 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc
  73. +6 -6 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/div_int8_test.cc
  74. +1 -1 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/fullconnection_int8_tests.cc
  75. +6 -6 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gatherNd_int8_test.cc
  76. +6 -6 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gather_int8_test.cc
  77. +4 -4 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/hswish_int8_tests.cc
  78. +1 -1 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc
  79. +30 -30 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/mul_int8_tests.cc
  80. +6 -8 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/pad_int8_tests.cc
  81. +9 -9 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/power_int8_tests.cc
  82. +4 -4 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/prelu_int8_tests.cc
  83. +4 -4 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc
  84. +4 -4 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reduce_int8_tests.cc
  85. +8 -8 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/relux_int8_tests.cc
  86. +8 -8 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reshape_int8_tests.cc
  87. +4 -4 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_bilinear_int8_tests.cc
  88. +4 -4 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_nearest_neighbor_int8_tests.cc
  89. +8 -8 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/scale_int8.cc
  90. +4 -4 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sigmoid_int8_tests.cc
  91. +4 -4 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/slice_int8_tests.cc
  92. +4 -4 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/softmax_int8_tests.cc
  93. +4 -4 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/space_to_batch_int8_tests.cc
  94. +22 -22 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/split_int8_tests.cc
  95. +4 -4 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/squeeze_int8_tests.cc
  96. +6 -6 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sub_int_tests.cc
  97. +6 -6 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/topk_int8_tests.cc
  98. +4 -4 mindspore/lite/test/ut/src/runtime/kernel/arm/int8/unsqueeze_int8_tests.cc
  99. +2 -2 mindspore/lite/test/ut/src/runtime/kernel/arm/string/normalize.cc
  100. +7 -7 mindspore/lite/test/ut/src/runtime/kernel/opencl/activation_tests.cc

+0 -31  mindspore/lite/src/common/op_utils.h

@@ -1,31 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_COMMON_OP_UTILS_H_
#define MINDSPORE_LITE_COMMON_OP_UTILS_H_

#include <functional>
#include <string>
#include "schema/model_generated.h"

namespace mindspore {
namespace lite {
inline schema::PrimitiveType GetOpType(const schema::CNode &opDef) { return opDef.primitive()->value_type(); }
inline std::string GetOpTypeName(const schema::CNode &opDef) { return schema::EnumNamePrimitiveType(GetOpType(opDef)); }
} // namespace lite
} // namespace mindspore

#endif // MINDSPORE_LITE_COMMON_OP_UTILS_H_

+2 -2  mindspore/lite/src/executor.cc

@@ -44,7 +44,7 @@ int Executor::Run(std::vector<Tensor *> &in_tensors, std::vector<Tensor *> &out_
}
kernel::LiteKernelUtil::InitTensorRefCount(kernels);
for (auto out_tensor : out_tensors) { // increase RefCount of output tensors, such that Run will not free them
out_tensor->SetRefCount(out_tensor->RefCount() + 1);
out_tensor->set_ref_count(out_tensor->ref_count() + 1);
}

for (auto *kernel : kernels) {
@@ -101,7 +101,7 @@ int Executor::TransformTensorLayoutFp32(Tensor *tensor, schema::Format dst_forma
return RET_ERROR;
}
PackNC4HW4ToNHWCFp32(src_data, dst_data, tensor->Batch(), tensor->Height() * tensor->Width(), tensor->Channel());
tensor->SetData(dst_data);
tensor->set_data(dst_data);
tensor->SetFormat(dst_format);
allocator->Free(src_data);
return RET_OK;
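
The rename above is mechanical, but the ref-counting contract it touches is worth restating: Executor::Run pins every output tensor by bumping its count before the kernels execute, so the per-kernel release of intermediates can never free caller-visible data. A minimal standalone sketch of that contract (the Tensor below is a stand-in, not the real lite::Tensor API):

#include <cstddef>
#include <cstdlib>
#include <vector>

// Stand-in for the ref-counted tensor the executor manages.
struct Tensor {
  void *data = nullptr;
  std::size_t ref_count = 0;
};

// Pin outputs, then release intermediates: mirrors the guard in Executor::Run.
void RunSketch(std::vector<Tensor *> &outputs, std::vector<Tensor *> &intermediates) {
  for (auto *out : outputs) {
    out->ref_count += 1;  // pinned: later releases cannot drop the count to zero
  }
  for (auto *t : intermediates) {
    if (t->ref_count > 0 && --t->ref_count == 0) {
      std::free(t->data);  // only truly unreferenced buffers are freed
      t->data = nullptr;
    }
  }
}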


+4 -5  mindspore/lite/src/lite_kernel.cc

@@ -39,14 +39,14 @@ void LiteKernel::FreeWorkspace() {

void LiteKernel::InitOutTensorRefCount() {
for (auto *tensor : this->out_tensors_) {
tensor->SetRefCount(this->out_kernels_.size());
tensor->set_ref_count(this->out_kernels_.size());
}
}

int LiteKernel::DecOutTensorRefCount() {
for (auto *tensor : this->out_tensors_) {
tensor->decRefCount();
if (0 >= tensor->RefCount()) {
tensor->DecRefCount();
if (0 >= tensor->ref_count()) {
auto ret = tensor->FreeData();
if (0 != ret) {
MS_LOG(ERROR) << "Free tensor data failed";
@@ -190,8 +190,7 @@ std::vector<lite::Tensor *> LiteKernelUtil::SubgraphInputTensors(const std::vect
for (const auto &kernel : input_kernels) {
for (const auto &tensor : kernel->in_tensors()) {
auto iter = std::find(all_output_tensors.begin(), all_output_tensors.end(), tensor);
if (iter == all_output_tensors.end() &&
!(tensor->category() == mindspore::lite::Tensor::CONST && tensor->data_c() != nullptr)) {
if (iter == all_output_tensors.end() && !tensor->IsConst()) {
input_tensors.emplace_back(tensor);
}
}
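
The subgraph-input hunk above folds the inline category() == CONST && data_c() != nullptr test into the new Tensor::IsConst(), which also accepts the scalar category introduced by this commit. A compilable sketch of the predicate change, using a stand-in struct rather than the real class:

#include <cassert>

// Stand-ins for the reworked Tensor::Category and the new helper.
enum Category { CONST_TENSOR, CONST_SCALAR, VAR };

struct TensorStub {
  Category category = VAR;
  void *data = nullptr;
  // New helper: const tensors *and* const scalars with backing data count as const.
  bool IsConst() const { return (category == CONST_TENSOR || category == CONST_SCALAR) && data != nullptr; }
};

int main() {
  int value = 7;
  TensorStub scalar;
  scalar.category = CONST_SCALAR;
  scalar.data = &value;
  TensorStub activation;         // VAR, no backing data
  assert(scalar.IsConst());      // the old CONST-only check would have missed this
  assert(!activation.IsConst()); // activations still become subgraph inputs
  return 0;
}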


+9 -14  mindspore/lite/src/lite_session.cc

@@ -61,11 +61,12 @@ int LiteSession::ConvertTensors(const lite::Model *model) {
MS_LOG(ERROR) << i << "th tensor in model is nullptr";
return RET_NULL_PTR;
}
auto src_category = TensorCategory(srcTensor);
std::vector<int> shape;
if (srcTensor->dims() == nullptr) {
MS_LOG(DEBUG) << "Dims of " << i << "th tensor is nullptr";
} else {
if (TensorCategory(srcTensor) == Tensor::Category::CONST) {
if (src_category == Tensor::Category::CONST_TENSOR) {
if (srcTensor->dataType() == kObjectTypeString && srcTensor->data() != nullptr) {
shape.push_back(srcTensor->data()->size());
} else {
@@ -76,18 +77,13 @@ int LiteSession::ConvertTensors(const lite::Model *model) {
}
}
int dataType = srcTensor->dataType();
auto *dstTensor =
new (std::nothrow) Tensor(TypeId(dataType), shape, srcTensor->format(), TensorCategory(srcTensor));
auto *dstTensor = new (std::nothrow) Tensor(TypeId(dataType), shape, srcTensor->format(), src_category);
if (dstTensor == nullptr) {
MS_LOG(ERROR) << "new " << i << "th tensor failed";
return RET_NULL_PTR;
}
if (TensorCategory(srcTensor) == Tensor::Category::CONST && srcTensor->data() != nullptr &&
srcTensor->data()->size() > 0) {
if (shape.empty()) {
shape.push_back(1);
dstTensor->set_shape(shape);
}
if ((src_category == Tensor::Category::CONST_TENSOR || src_category == Tensor::Category::CONST_SCALAR) &&
srcTensor->data() != nullptr && srcTensor->data()->size() > 0) {
MS_ASSERT(dstTensor->Size() == srcTensor->data()->size());
if (WeightTensorNeedCopy(model, i)) {
auto dst_data = dstTensor->MutableData();
@@ -99,7 +95,7 @@ int LiteSession::ConvertTensors(const lite::Model *model) {
memcpy(dst_data, srcTensor->data()->data(), dstTensor->Size());
copyed_tensor_idxes_.emplace_back(i);
} else {
dstTensor->SetData(const_cast<unsigned char *>(srcTensor->data()->data()));
dstTensor->set_data(const_cast<unsigned char *>(srcTensor->data()->data()));
}
}
auto quant_params = srcTensor->quantParams();
@@ -395,7 +391,7 @@ void LiteSession::BindThread(bool if_bind) {
MS_LOG(ERROR) << "Device list is empty.";
return;
}
if (this->context_->IsCpuEnabled()) {
if (!this->context_->IsCpuEnabled()) {
return;
}
auto cpu_device_info = this->context_->GetCpuInfo();
@@ -415,9 +411,8 @@ LiteSession::~LiteSession() {
auto *tensor = tensors_.at(i);
MS_ASSERT(tensor != nullptr);
// data of weight tensor of node in packed_op can not be to free, we will free weight data when freeing meta_graph
if (tensor->category() == Tensor::Category::CONST && !IsContain(this->inputs_, tensor) &&
!IsContain(copyed_tensor_idxes_, i)) {
tensor->SetData(nullptr);
if (tensor->IsConst() && !IsContain(this->inputs_, tensor) && !IsContain(copyed_tensor_idxes_, i)) {
tensor->set_data(nullptr);
}
delete tensor;
}
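
Note the deleted block that used to patch an empty shape to {1} for const weights. With CONST_SCALAR the shape stays empty and the size math still works out, so MS_ASSERT(dstTensor->Size() == srcTensor->data()->size()) holds for scalars too. A toy check of that arithmetic, assuming a float32 scalar weight:

#include <cstddef>
#include <cstdio>

int main() {
  const std::size_t data_type_size = sizeof(float);  // DataTypeSize(kNumberTypeFloat32) == 4
  const std::size_t elements_num = 1;                // ElementsNum() for CONST_SCALAR (empty shape)
  // Size() == 4 bytes, matching the flatbuffer's data()->size() for one float.
  std::printf("Size() = %zu bytes\n", data_type_size * elements_num);
  return 0;
}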


+4 -4  mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.cc

@@ -50,14 +50,14 @@ kernel::LiteKernel *CpuFullConnectionFp32KernelCreator(const std::vector<lite::T
MS_LOG(ERROR) << "dequant data is nullptr.";
return nullptr;
}
weight_tensor->SetData(dequant_weight);
weight_tensor->set_data(dequant_weight);
}
auto kernel = new (std::nothrow) FullconnectionCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (!kernel) {
MS_LOG(ERROR) << "kernel is nullptr.";
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
free(opParameter);
return nullptr;
@@ -69,13 +69,13 @@ kernel::LiteKernel *CpuFullConnectionFp32KernelCreator(const std::vector<lite::T
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
return nullptr;
}
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
return kernel;
}
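
The same restore choreography recurs in every kernel creator touched by this commit: swap in the dequantized weights, try to build the kernel, and on every exit path free the temporary buffer and put the original quantized data back. A condensed sketch of the pattern (names and signature are illustrative, not the real creator API):

#include <cstdlib>
#include <new>

struct WeightStub {
  void *data = nullptr;
  void set_data(void *d) { data = d; }
  void FreeData() { std::free(data); data = nullptr; }
};

struct Kernel {};

// Every path, success or failure, restores the quantized weights.
Kernel *CreateWithDequant(WeightStub *weight, bool dequant_flag, void *dequant_weight, void *restore_data) {
  if (dequant_flag) weight->set_data(dequant_weight);  // kernel init reads fp32 weights
  Kernel *kernel = new (std::nothrow) Kernel();
  if (dequant_flag) {
    weight->FreeData();              // frees the temporary dequantized buffer
    weight->set_data(restore_data);  // the tensor owns its original data again
  }
  return kernel;  // nullptr propagates to the caller on allocation failure
}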


+4 -4  mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc

@@ -150,7 +150,7 @@ kernel::LiteKernel *CpuConvDwFp16KernelCreator(const std::vector<lite::Tensor *>
return nullptr;
}
weight_tensor->set_data_type(kNumberTypeFloat32);
weight_tensor->SetData(dequant_weight);
weight_tensor->set_data(dequant_weight);
}

auto conv_param = reinterpret_cast<ConvParameter *>(opParameter);
@@ -165,7 +165,7 @@ kernel::LiteKernel *CpuConvDwFp16KernelCreator(const std::vector<lite::Tensor *>
MS_LOG(ERROR) << "kernel is nullptr.";
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
free(opParameter);
return nullptr;
@@ -177,13 +177,13 @@ kernel::LiteKernel *CpuConvDwFp16KernelCreator(const std::vector<lite::Tensor *>
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
return nullptr;
}
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
return kernel;
}


+4 -4  mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc

@@ -192,7 +192,7 @@ kernel::LiteKernel *CpuConvFp16KernelCreator(const std::vector<lite::Tensor *> &
return nullptr;
}
weight_tensor->set_data_type(kNumberTypeFloat32);
weight_tensor->SetData(dequant_weight);
weight_tensor->set_data(dequant_weight);
}

auto conv_param = reinterpret_cast<ConvParameter *>(opParameter);
@@ -224,7 +224,7 @@ kernel::LiteKernel *CpuConvFp16KernelCreator(const std::vector<lite::Tensor *> &
MS_LOG(DEBUG) << "Create conv fp16 kernel failed.";
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
free(opParameter);
return nullptr;
@@ -236,13 +236,13 @@ kernel::LiteKernel *CpuConvFp16KernelCreator(const std::vector<lite::Tensor *> &
<< ", type: " << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
return nullptr;
}
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
return kernel;
}


+4 -4  mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc

@@ -214,7 +214,7 @@ kernel::LiteKernel *CpuDeconvDwFp16KernelCreator(const std::vector<lite::Tensor
return nullptr;
}
weight_tensor->set_data_type(kNumberTypeFloat32);
weight_tensor->SetData(dequant_weight);
weight_tensor->set_data(dequant_weight);
}

auto kernel = new (std::nothrow) DeconvolutionDepthwiseFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
@@ -222,7 +222,7 @@ kernel::LiteKernel *CpuDeconvDwFp16KernelCreator(const std::vector<lite::Tensor
MS_LOG(ERROR) << "kernel is nullptr.";
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
free(opParameter);
return nullptr;
@@ -234,13 +234,13 @@ kernel::LiteKernel *CpuDeconvDwFp16KernelCreator(const std::vector<lite::Tensor
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
return nullptr;
}
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
return kernel;
}


+4 -4  mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.cc

@@ -226,7 +226,7 @@ kernel::LiteKernel *CpuDeConvFp16KernelCreator(const std::vector<lite::Tensor *>
return nullptr;
}
weight_tensor->set_data_type(kNumberTypeFloat32);
weight_tensor->SetData(dequant_weight);
weight_tensor->set_data(dequant_weight);
}

kernel::LiteKernel *kernel;
@@ -242,7 +242,7 @@ kernel::LiteKernel *CpuDeConvFp16KernelCreator(const std::vector<lite::Tensor *>
MS_LOG(ERROR) << "kernel is nullptr.";
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
free(opParameter);
return nullptr;
@@ -254,13 +254,13 @@ kernel::LiteKernel *CpuDeConvFp16KernelCreator(const std::vector<lite::Tensor *>
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
return nullptr;
}
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
return kernel;
}


+4 -4  mindspore/lite/src/runtime/kernel/arm/fp16/fullconnection_fp16.cc

@@ -247,14 +247,14 @@ kernel::LiteKernel *CpuFullConnectionFp16KernelCreator(const std::vector<lite::T
return nullptr;
}
weight_tensor->set_data_type(kNumberTypeFloat32);
weight_tensor->SetData(dequant_weight);
weight_tensor->set_data(dequant_weight);
}
auto *kernel = new (std::nothrow) FullconnectionFP16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "kernel is nullptr.";
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
free(opParameter);
return nullptr;
@@ -266,13 +266,13 @@ kernel::LiteKernel *CpuFullConnectionFp16KernelCreator(const std::vector<lite::T
delete kernel;
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
return nullptr;
}
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
return kernel;
}


+4 -4  mindspore/lite/src/runtime/kernel/arm/fp16/matmul_fp16.cc

@@ -260,14 +260,14 @@ kernel::LiteKernel *CpuMatmulFp16KernelCreator(const std::vector<lite::Tensor *>
return nullptr;
}
weight_tensor->set_data_type(kNumberTypeFloat32);
weight_tensor->SetData(dequant_weight);
weight_tensor->set_data(dequant_weight);
}
auto *kernel = new (std::nothrow) MatmulFP16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "kernel is nullptr.";
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
free(opParameter);
return nullptr;
@@ -279,13 +279,13 @@ kernel::LiteKernel *CpuMatmulFp16KernelCreator(const std::vector<lite::Tensor *>
delete kernel;
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
return nullptr;
}
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
return kernel;
}


+7 -7  mindspore/lite/src/runtime/kernel/arm/fp32/convolution.cc

@@ -225,8 +225,8 @@ kernel::LiteKernel *CpuGroupConvFp32KernelCreator(const std::vector<lite::Tensor
new_inputs.emplace_back(in_tensor);

// nwe weight
auto filter_tensor = new (std::nothrow)
lite::Tensor(inputs.at(kWeightIndex)->data_type(), filter_shape, Format_NHWC, lite::Tensor::Category::CONST);
auto filter_tensor = new (std::nothrow) lite::Tensor(inputs.at(kWeightIndex)->data_type(), filter_shape,
Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
filter_tensor->MallocData();
int copy_length = kernel_h * kernel_w * new_in_channel * new_out_channel;
memcpy(filter_tensor->data_c(), origin_weight + i * copy_length, copy_length * sizeof(float));
@@ -235,7 +235,7 @@ kernel::LiteKernel *CpuGroupConvFp32KernelCreator(const std::vector<lite::Tensor
// if has bias, set new bias
if (has_bias) {
auto bias_tensor = new (std::nothrow)
lite::Tensor(inputs.at(kBiasIndex)->data_type(), bias_shape, Format_NHWC, lite::Tensor::Category::CONST);
lite::Tensor(inputs.at(kBiasIndex)->data_type(), bias_shape, Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
bias_tensor->MallocData();
memcpy(bias_tensor->data_c(), origin_bias + i * new_out_channel, new_out_channel * sizeof(float));
new_inputs.emplace_back(bias_tensor);
@@ -293,7 +293,7 @@ kernel::LiteKernel *CpuConvFp32KernelCreator(const std::vector<lite::Tensor *> &
free(op_parameter);
return nullptr;
}
weight_tensor->SetData(dequant_weight);
weight_tensor->set_data(dequant_weight);
}

kernel::LiteKernel *kernel;
@@ -307,7 +307,7 @@ kernel::LiteKernel *CpuConvFp32KernelCreator(const std::vector<lite::Tensor *> &
MS_LOG(ERROR) << "kernel is nullptr.";
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
free(op_parameter);
return nullptr;
@@ -319,14 +319,14 @@ kernel::LiteKernel *CpuConvFp32KernelCreator(const std::vector<lite::Tensor *> &
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_));
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
return nullptr;
}

if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}

return kernel;


+4 -4  mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise.cc

@@ -132,7 +132,7 @@ kernel::LiteKernel *CpuConvDwFp32KernelCreator(const std::vector<lite::Tensor *>
free(opParameter);
return nullptr;
}
weight_tensor->SetData(dequant_weight);
weight_tensor->set_data(dequant_weight);
}

auto conv_param = reinterpret_cast<ConvParameter *>(opParameter);
@@ -146,7 +146,7 @@ kernel::LiteKernel *CpuConvDwFp32KernelCreator(const std::vector<lite::Tensor *>
MS_LOG(ERROR) << "kernel is nullptr.";
if (weight_tensor->data_type() == kNumberTypeInt8 || weight_tensor->data_type() == kNumberTypeInt16) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
free(opParameter);
return nullptr;
@@ -158,14 +158,14 @@ kernel::LiteKernel *CpuConvDwFp32KernelCreator(const std::vector<lite::Tensor *>
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
if (weight_tensor->data_type() == kNumberTypeInt8 || weight_tensor->data_type() == kNumberTypeInt16) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
return nullptr;
}

if (weight_tensor->data_type() == kNumberTypeInt8 || weight_tensor->data_type() == kNumberTypeInt16) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}

return kernel;


+4 -4  mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.cc

@@ -243,7 +243,7 @@ kernel::LiteKernel *CpuDeConvFp32KernelCreator(const std::vector<lite::Tensor *>
free(opParameter);
return nullptr;
}
weight_tensor->SetData(dequant_weight);
weight_tensor->set_data(dequant_weight);
}

kernel::LiteKernel *kernel;
@@ -259,7 +259,7 @@ kernel::LiteKernel *CpuDeConvFp32KernelCreator(const std::vector<lite::Tensor *>
MS_LOG(ERROR) << "kernel is nullptr.";
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
free(opParameter);
return nullptr;
@@ -271,14 +271,14 @@ kernel::LiteKernel *CpuDeConvFp32KernelCreator(const std::vector<lite::Tensor *>
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
return nullptr;
}

if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}

return kernel;


+4 -4  mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise.cc

@@ -205,7 +205,7 @@ kernel::LiteKernel *CpuDeconvDwFp32KernelCreator(const std::vector<lite::Tensor
free(opParameter);
return nullptr;
}
weight_tensor->SetData(dequant_weight);
weight_tensor->set_data(dequant_weight);
}
auto kernel =
new (std::nothrow) kernel::DeconvolutionDepthwiseCPUKernel(opParameter, inputs, outputs, ctx, primitive);
@@ -213,7 +213,7 @@ kernel::LiteKernel *CpuDeconvDwFp32KernelCreator(const std::vector<lite::Tensor
MS_LOG(ERROR) << "kernel is nullptr.";
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
free(opParameter);
return nullptr;
@@ -225,13 +225,13 @@ kernel::LiteKernel *CpuDeconvDwFp32KernelCreator(const std::vector<lite::Tensor
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
return nullptr;
}
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
return kernel;
}


+4 -4  mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.cc

@@ -217,7 +217,7 @@ kernel::LiteKernel *CpuMatmulInt8KernelCreator(const std::vector<lite::Tensor *>
free(opParameter);
return nullptr;
}
weight_tensor->SetData(dequant_weight);
weight_tensor->set_data(dequant_weight);
}

auto input_tensor = inputs.at(kInputIndex);
@@ -230,7 +230,7 @@ kernel::LiteKernel *CpuMatmulInt8KernelCreator(const std::vector<lite::Tensor *>
MS_LOG(ERROR) << "kernel is nullptr.";
if (is_const_quant_weight) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
free(opParameter);
return nullptr;
@@ -242,14 +242,14 @@ kernel::LiteKernel *CpuMatmulInt8KernelCreator(const std::vector<lite::Tensor *>
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
if (is_const_quant_weight) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}
return nullptr;
}

if (is_const_quant_weight) {
weight_tensor->FreeData();
weight_tensor->SetData(restore_data);
weight_tensor->set_data(restore_data);
}

return kernel;


+1 -1  mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc

@@ -71,7 +71,7 @@ int ArithmeticOpenCLKernel::InitBuffer() {
for (auto in_tensor_ : in_tensors_) {
auto nhwc_shape = GetNHWCShape(in_tensor_->shape());
inputs_nhwc_shapes_.push_back(nhwc_shape);
if (in_tensor_->category() != lite::Tensor::Category::CONST || in_tensor_->data_c() == nullptr) {
if (!in_tensor_->IsConst()) {
inputs_weight_ptrs_.push_back(nullptr);
} else {
auto allocator = ocl_runtime_->GetAllocator();


+1 -1  mindspore/lite/src/runtime/kernel/opencl/kernel/scale.cc

@@ -63,7 +63,7 @@ int ScaleOpenCLKernel::InitBuffer() {
if (!element_flag_) {
return RET_OK;
}
if (in_tensors_[1]->category() == lite::Tensor::Category::CONST && in_tensors_[1]->data_c() != nullptr) {
if (in_tensors_[1]->IsConst()) {
auto allocator = ocl_runtime_->GetAllocator();
std::vector<size_t> img_size;
GetImageSize(0, &img_size);


+1 -1  mindspore/lite/src/runtime/kernel/opencl/subgraph_opencl_kernel.cc

@@ -209,7 +209,7 @@ int SubGraphOpenCLKernel::MallocTensorWithReuse() {
std::vector<size_t> img_size;
op_kernel->GetImageSize(i, &img_size);
auto data_ptr = allocator_->Malloc(output->Size(), img_size);
output->SetData(data_ptr);
output->set_data(data_ptr);
} else {
output->MallocData(allocator_);
}


+1 -1  mindspore/lite/src/runtime/opencl/opencl_executor.cc

@@ -46,7 +46,7 @@ int OpenCLExecutor::Run(std::vector<Tensor *> &inputs, std::vector<Tensor *> &ou
std::vector<size_t> img_size;
op_kernel->GetImageSize(i, &img_size);
auto data_ptr = allocator_->Malloc(output->Size(), img_size);
output->SetData(data_ptr);
output->set_data(data_ptr);
} else {
output->MallocData(allocator_);
}


+1 -1  mindspore/lite/src/scheduler.cc

@@ -315,7 +315,7 @@ void Scheduler::SetKernelTensorDataType(kernel::LiteKernel *kernel) {
}
} else if (kernel->desc().data_type == kNumberTypeFloat32) {
for (auto tensor : kernel->in_tensors()) {
if (tensor->category() != Tensor::Category::CONST && tensor->data_type() == kNumberTypeFloat16) {
if (!tensor->IsConst() && tensor->data_type() == kNumberTypeFloat16) {
tensor->set_data_type(kNumberTypeFloat32);
}
}


+47 -14  mindspore/lite/src/tensor.cc

@@ -54,7 +54,7 @@ int Tensor::CopyTensorData(const Tensor &srcTensor) {
}
}
memcpy(this->data_, srcTensor.data_, data_size);
return 0;
return RET_OK;
}

int Tensor::CopyTensor(const Tensor &srcTensor, bool copyData) {
@@ -69,7 +69,7 @@ int Tensor::CopyTensor(const Tensor &srcTensor, bool copyData) {
return RET_ERROR;
}
}
return 0;
return RET_OK;
}

Tensor::~Tensor() {
@@ -102,7 +102,7 @@ bool Tensor::operator==(const Tensor &tensor) {
int32_t Tensor::Batch() const {
if (this->shape_.size() != 4 && this->shape_.size() != 2) {
MS_LOG(ERROR) << "Unsupported tensor shape: " << this->shape().size();
return -1;
return RET_ERROR;
}
switch (this->format_) {
case schema::Format::Format_NHWC:
@@ -123,14 +123,14 @@ int32_t Tensor::Batch() const {
return this->shape_[1];
default:
MS_LOG(ERROR) << "Unsupported format: " << EnumNameFormat(this->format_);
return -1;
return RET_ERROR;
}
}

int32_t Tensor::Channel() const {
if (this->shape_.size() != 4 && this->shape_.size() != 2) {
MS_LOG(ERROR) << "Unsupported tensor shape: " << this->shape().size();
return -1;
return RET_ERROR;
}
switch (this->format_) {
case schema::Format::Format_NCHW:
@@ -150,14 +150,14 @@ int32_t Tensor::Channel() const {
case schema::Format::Format_CHWK:
return this->shape_[0];
default:
return -1;
return RET_ERROR;
}
}

int32_t Tensor::Height() const {
if (this->shape_.size() != 4 && this->shape_.size() != 2) {
MS_LOG(ERROR) << "Unsupported tensor shape: " << this->shape().size();
return -1;
return RET_ERROR;
}
switch (this->format_) {
case schema::Format::Format_NCHW:
@@ -177,7 +177,7 @@ int32_t Tensor::Height() const {
return this->shape_[0];
default:
MS_LOG(ERROR) << "Unsupported format: " << EnumNameFormat(this->format_);
return -1;
return RET_ERROR;
}
}

@@ -203,11 +203,28 @@ int32_t Tensor::Width() const {
case schema::Format::Format_HW4:
return this->shape_[1];
default:
return -1;
return RET_ERROR;
}
}

size_t Tensor::Size() const {
size_t size = DataTypeSize(this->data_type_);
size *= (format_ == schema::Format::Format_NC4HW4 || format_ == schema::Format::Format_NHWC4) ? ElementsC4Num()
: ElementsNum();
return size;
}

int Tensor::ElementsNum() const {
if (this->category_ == CONST_SCALAR) {
return 1;
}
return std::accumulate(shape_.begin(), shape_.end(), 1LL, std::multiplies<int>());
}

int32_t Tensor::ElementsC4Num() const {
if (this->category_ == CONST_SCALAR) {
return 1;
}
int32_t result = 0;
if (this->shape_.size() == 4) {
result = Batch() * Height() * Width() * ((Channel() + 3) / 4 * 4);
@@ -217,6 +234,16 @@ int32_t Tensor::ElementsC4Num() const {
return result;
}

int Tensor::DimensionSize(size_t index) const {
int dim_size = -1;
if (index < shape_.size()) {
dim_size = shape_[index];
} else {
MS_LOG(ERROR) << "Dimension index is wrong: " << index;
}
return dim_size;
}

std::string Tensor::ToString() const {
std::ostringstream oss;
oss << "schema::Format: " << EnumNameFormat(this->format_);
@@ -287,7 +314,7 @@ std::string Tensor::ToString() const {

int Tensor::MallocData(mindspore::lite::Allocator *allocator) {
if (nullptr != this->data_) {
return 0;
return RET_OK;
}
if (allocator != nullptr) {
allocator_ = allocator;
@@ -299,15 +326,15 @@ int Tensor::MallocData(mindspore::lite::Allocator *allocator) {
}
if (nullptr == this->data_) {
MS_LOG(ERROR) << "Malloc tensor data failed, size=" << this->Size();
return -1;
return RET_ERROR;
}

return 0;
return RET_OK;
}

int Tensor::FreeData() {
if (nullptr == this->data_) {
return 0;
return RET_OK;
}
if (nullptr == allocator_) {
free(this->data_);
@@ -316,7 +343,7 @@ int Tensor::FreeData() {
allocator_->Free(this->data_);
this->data_ = nullptr;
}
return 0;
return RET_OK;
}

void *Tensor::MutableData() {
@@ -330,6 +357,12 @@ void *Tensor::MutableData() {
return this->data_;
}

bool Tensor::IsConst() {
return (this->category_ == CONST_TENSOR || this->category_ == CONST_SCALAR) && this->data_ != nullptr;
}

bool Tensor::IsScalar() { return this->category_ == CONST_SCALAR && this->data_ != nullptr; }

void Tensor::AddQuantParam(const QuantArg &quant_arg) { this->quant_params_.push_back(quant_arg); }

std::vector<QuantArg> Tensor::GetQuantParams() const { return this->quant_params_; }
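
This is where scalar support actually lands: ElementsNum() and ElementsC4Num() now short-circuit to 1 for CONST_SCALAR, so Size() stays meaningful for a weight with no dims. A standalone sketch of the element-count rule (stand-in enum, not the real class):

#include <cassert>
#include <functional>
#include <numeric>
#include <vector>

enum Category { CONST_TENSOR, CONST_SCALAR, VAR };

// Mirrors the new Tensor::ElementsNum(): a scalar has one element despite an empty shape.
int ElementsNum(Category category, const std::vector<int> &shape) {
  if (category == CONST_SCALAR) return 1;
  return std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int>());
}

int main() {
  assert(ElementsNum(CONST_SCALAR, {}) == 1);      // scalar weight: Size() = 1 * DataTypeSize(type)
  assert(ElementsNum(CONST_TENSOR, {2, 3}) == 6);  // ordinary weight tensor
  return 0;
}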


+65 -75  mindspore/lite/src/tensor.h

@@ -42,8 +42,9 @@ struct QuantArg {
class Tensor : public mindspore::tensor::MSTensor {
public:
enum Category {
CONST, // weight tensor
VAR // activation tensor
CONST_TENSOR, // weight tensor
CONST_SCALAR, // weight scalar
VAR // activation tensor
};
Tensor() = default;

@@ -70,19 +71,9 @@ class Tensor : public mindspore::tensor::MSTensor {

void set_shape(const std::vector<int> &shape) { shape_ = shape; }

int DimensionSize(size_t index) const override {
int dim_size = -1;
if (index < shape_.size()) {
dim_size = shape_[index];
} else {
MS_LOG(ERROR) << "Dimension index is wrong: " << index;
}
return dim_size;
}
int DimensionSize(size_t index) const override;

int ElementsNum() const override {
return std::accumulate(shape_.begin(), shape_.end(), 1LL, std::multiplies<int>());
}
int ElementsNum() const override;

int32_t Batch() const;

@@ -94,58 +85,7 @@ class Tensor : public mindspore::tensor::MSTensor {

int32_t ElementsC4Num() const;

size_t Size() const override {
size_t size = 0;
switch (this->data_type_) {
case kNumberTypeFloat64:
size = sizeof(double);
break;
case kNumberTypeFloat:
case kNumberTypeFloat32:
size = sizeof(float);
break;
case kNumberTypeInt8:
size = sizeof(int8_t);
break;
case kNumberTypeUInt8:
size = sizeof(uint8_t);
break;
case kNumberTypeFloat16:
size = sizeof(int16_t);
break;
case kNumberTypeInt16:
size = sizeof(int16_t);
break;
case kNumberTypeInt32:
size = sizeof(int32_t);
break;
case kNumberTypeInt64:
size = sizeof(int64_t);
break;
case kNumberTypeUInt16:
size = sizeof(uint16_t);
break;
case kNumberTypeUInt32:
size = sizeof(uint32_t);
break;
case kNumberTypeUInt64:
size = sizeof(uint64_t);
break;
case kNumberTypeBool:
size = sizeof(bool);
break;
case kObjectTypeString:
size = sizeof(char);
break;
default:
MS_LOG(ERROR) << "Not support the type: " << this->data_type_;
return 0;
}
size *= (format_ == schema::Format::Format_NC4HW4 || format_ == schema::Format::Format_NHWC4) ? ElementsC4Num()
: ElementsNum();

return size;
}
size_t Size() const override;

void set_allocator(mindspore::lite::Allocator *allocator) { allocator_ = allocator; }

@@ -157,7 +97,7 @@ class Tensor : public mindspore::tensor::MSTensor {

void *data_c() const { return data_; }

void SetData(void *data) { this->data_ = data; }
void set_data(void *data) { this->data_ = data; }

Category category() { return this->category_; }

@@ -165,11 +105,11 @@ class Tensor : public mindspore::tensor::MSTensor {

schema::Format GetFormat() { return this->format_; }

size_t RefCount() { return this->refCount; }
size_t ref_count() { return this->ref_count_; }

void SetRefCount(size_t refCount) { this->refCount = refCount; }
void set_ref_count(size_t ref_count) { this->ref_count_ = ref_count; }

void decRefCount() { this->refCount--; }
void DecRefCount() { this->ref_count_--; }

std::string ToString() const;

@@ -177,6 +117,10 @@ class Tensor : public mindspore::tensor::MSTensor {

std::vector<QuantArg> GetQuantParams() const;

bool IsConst();

bool IsScalar();

void Prepare() {
if (allocator_ != nullptr) {
data_ = allocator_->Prepare(data_);
@@ -190,17 +134,63 @@ class Tensor : public mindspore::tensor::MSTensor {
std::vector<int> shape_;
schema::Format format_;
Category category_;
size_t refCount = 0;
size_t ref_count_ = 0;
std::vector<QuantArg> quant_params_;
mindspore::lite::Allocator *allocator_ = nullptr;
};

inline Tensor::Category TensorCategory(const schema::Tensor *tensor) {
return (tensor->nodeType() == schema::NodeType::NodeType_ValueNode) ? Tensor::Category::CONST : Tensor::Category::VAR;
inline size_t DataTypeSize(const TypeId type) {
switch (type) {
case kNumberTypeFloat64:
return sizeof(double);
case kNumberTypeFloat:
case kNumberTypeFloat32:
return sizeof(float);
case kNumberTypeInt8:
return sizeof(int8_t);
case kNumberTypeUInt8:
return sizeof(uint8_t);
case kNumberTypeFloat16:
case kNumberTypeInt16:
return sizeof(int16_t);
case kNumberTypeInt32:
return sizeof(int32_t);
case kNumberTypeInt64:
return sizeof(int64_t);
case kNumberTypeUInt16:
return sizeof(uint16_t);
case kNumberTypeUInt32:
return sizeof(uint32_t);
case kNumberTypeUInt64:
return sizeof(uint64_t);
case kNumberTypeBool:
return sizeof(bool);
case kObjectTypeString:
return sizeof(char);
default:
MS_LOG(ERROR) << "Not support the type: " << type;
return 0;
}
}

inline Tensor::Category TensorCategory(const schema::NodeType node_type, const size_t shape_num, const TypeId data_type,
const size_t data_size) {
return (node_type == schema::NodeType::NodeType_ValueNode)
? (shape_num == 0 && data_size == DataTypeSize(data_type) ? Tensor::Category::CONST_SCALAR
: Tensor::Category::CONST_TENSOR)
: Tensor::Category::VAR;
}
inline Tensor::Category TensorCategory(const schema::NodeType type) {
return (type == schema::NodeType::NodeType_ValueNode) ? Tensor::Category::CONST : Tensor::Category::VAR;

inline Tensor::Category TensorCategory(const schema::Tensor *tensor) {
if (tensor == nullptr) {
MS_LOG(ERROR) << "tensor is nullptr";
return Tensor::VAR;
}
auto shape_num = tensor->dims() == nullptr ? 0 : tensor->dims()->size();
auto data_size = tensor->data() == nullptr ? 0 : tensor->data()->size();
return TensorCategory(tensor->nodeType(), shape_num, TypeId(tensor->dataType()), data_size);
}

std::vector<tensor::MSTensor *> TensorVectorCast(const std::vector<Tensor *> &src);
} // namespace lite
} // namespace mindspore
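
The new two-step classification deserves spelling out: a ValueNode tensor with zero dims whose payload is exactly one element's worth of bytes becomes CONST_SCALAR, any other ValueNode is CONST_TENSOR, and everything else is VAR. A self-contained sketch of that decision (the enums are stand-ins for the schema types):

#include <cassert>
#include <cstddef>

enum Category { CONST_TENSOR, CONST_SCALAR, VAR };
enum NodeType { ValueNode, Parameter };  // stand-in for schema::NodeType

// Mirrors the new TensorCategory overload: scalar = no dims + one element of data.
Category Categorize(NodeType node_type, std::size_t shape_num, std::size_t data_type_size,
                    std::size_t data_size) {
  if (node_type != ValueNode) return VAR;
  return (shape_num == 0 && data_size == data_type_size) ? CONST_SCALAR : CONST_TENSOR;
}

int main() {
  assert(Categorize(ValueNode, 0, sizeof(float), 4) == CONST_SCALAR);   // float32 scalar weight
  assert(Categorize(ValueNode, 2, sizeof(float), 24) == CONST_TENSOR);  // 2x3 float32 weight
  assert(Categorize(Parameter, 1, sizeof(float), 0) == VAR);            // activation tensor
  return 0;
}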


+8 -8  mindspore/lite/test/ut/src/runtime/kernel/arm/common/strided_slice_tests.cc

@@ -49,8 +49,8 @@ TEST_F(TestStridedSlice, StridedSlice) {
lite::Tensor out_tensor(kNumberTypeFloat32, {1, 1, 2});
float input_data[] = {0.2390374, 0.92039955, 0.05051243, 0.49574447, 0.8355223, 0.02647042, 0.08811307, 0.4566604};
float output_data[2] = {0};
in_tensor.SetData(input_data);
out_tensor.SetData(output_data);
in_tensor.set_data(input_data);
out_tensor.set_data(output_data);
std::vector<lite::Tensor *> inputs = {&in_tensor};
std::vector<lite::Tensor *> outputs = {&out_tensor};

@@ -73,8 +73,8 @@ TEST_F(TestStridedSlice, StridedSlice) {
float expect[2] = {0.2390374, 0.05051243};
CompareOutputData(output_data, expect, 2, 0.000001);

in_tensor.SetData(nullptr);
out_tensor.SetData(nullptr);
in_tensor.set_data(nullptr);
out_tensor.set_data(nullptr);
}

TEST_F(TestStridedSlice, StridedSliceInt8) {
@@ -82,8 +82,8 @@ TEST_F(TestStridedSlice, StridedSliceInt8) {
lite::Tensor out_tensor(kNumberTypeInt8, {2, 3, 4});
int8_t input_data[] = {-12, -11, -10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
int8_t output_data[4] = {0};
in_tensor.SetData(input_data);
out_tensor.SetData(output_data);
in_tensor.set_data(input_data);
out_tensor.set_data(output_data);
std::vector<lite::Tensor *> inputs = {&in_tensor};
std::vector<lite::Tensor *> outputs = {&out_tensor};

@@ -121,7 +121,7 @@ TEST_F(TestStridedSlice, StridedSliceInt8) {
EXPECT_EQ(output_data[i], expect[i]);
}

in_tensor.SetData(nullptr);
out_tensor.SetData(nullptr);
in_tensor.set_data(nullptr);
out_tensor.set_data(nullptr);
}
} // namespace mindspore
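
One discipline the renamed tests keep intact: the tensors borrow stack-allocated arrays via set_data(), so every test nulls the pointer before the Tensor destructor runs; otherwise FreeData() would be called on stack memory. A minimal sketch of that borrow/release pattern (stand-in type):

#include <cstdlib>

struct TensorStub {
  void *data = nullptr;
  void set_data(void *d) { data = d; }
  ~TensorStub() {
    if (data != nullptr) std::free(data);  // the destructor frees whatever it still points at
  }
};

int main() {
  float input[4] = {1.0f, 2.0f, 3.0f, 4.0f};
  TensorStub tensor;
  tensor.set_data(input);  // borrow a stack buffer for the kernel under test
  // ... run the kernel and compare outputs ...
  tensor.set_data(nullptr);  // hand the buffer back before ~TensorStub: no free() on stack memory
  return 0;
}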

+4 -4  mindspore/lite/test/ut/src/runtime/kernel/arm/fp16/reduce_fp16_tests.cc

@@ -43,8 +43,8 @@ class TestReduceFp16 : public mindspore::CommonTest {
};

void TestReduceFp16::TearDown() {
in_tensor_.SetData(nullptr);
out_tensor_.SetData(nullptr);
in_tensor_.set_data(nullptr);
out_tensor_.set_data(nullptr);
}

void TestReduceFp16::Prepare(const std::vector<int> &input_shape, const std::vector<int> &output_shape,
@@ -54,8 +54,8 @@ void TestReduceFp16::Prepare(const std::vector<int> &input_shape, const std::vec
in_tensor_.set_shape(input_shape);
out_tensor_.set_data_type(kNumberTypeFloat32);
out_tensor_.set_shape(output_shape);
in_tensor_.SetData(input_data);
out_tensor_.SetData(output_data);
in_tensor_.set_data(input_data);
out_tensor_.set_data(output_data);

bool keep_axis = false;



+12 -12  mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/activation_fp32_test.cc

@@ -98,7 +98,7 @@ TEST_F(TestActivationFp32, HSwishFp32) {

lite::Tensor input0_tensor;
inputs_tensor.push_back(&input0_tensor);
input0_tensor.SetData(input.data());
input0_tensor.set_data(input.data());
input0_tensor.set_shape(in_shape);

std::vector<float> output(8);
@@ -106,7 +106,7 @@ TEST_F(TestActivationFp32, HSwishFp32) {

lite::Tensor output0_tensor;
outputs_tensor.push_back(&output0_tensor);
output0_tensor.SetData(output.data());
output0_tensor.set_data(output.data());

kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Activation};
auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
@@ -123,8 +123,8 @@ TEST_F(TestActivationFp32, HSwishFp32) {
std::vector<float> expect_output = {-0, -0.33333334, -0.33333334, 0, 0.6666667, 5, 6, 7};
CompareOutputData(output.data(), expect_output.data(), 8, 0.00001);

input0_tensor.SetData(nullptr);
output0_tensor.SetData(nullptr);
input0_tensor.set_data(nullptr);
output0_tensor.set_data(nullptr);
}

TEST_F(TestActivationFp32, HardTanh1) {
@@ -142,7 +142,7 @@ TEST_F(TestActivationFp32, HardTanh1) {

lite::Tensor input0_tensor;
inputs_tensor.push_back(&input0_tensor);
input0_tensor.SetData(input.data());
input0_tensor.set_data(input.data());
input0_tensor.set_shape(in_shape);

std::vector<float> output(8);
@@ -150,7 +150,7 @@ TEST_F(TestActivationFp32, HardTanh1) {

lite::Tensor output0_tensor;
outputs_tensor.push_back(&output0_tensor);
output0_tensor.SetData(output.data());
output0_tensor.set_data(output.data());

kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Activation};
auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
@@ -167,8 +167,8 @@ TEST_F(TestActivationFp32, HardTanh1) {
std::vector<float> expect_output = {-1.0, -1.0, -0.5, 0.0, 0.5, 1.0, 1.0, 1.0};
CompareOutputData(output.data(), expect_output.data(), 8, 0.00001);

input0_tensor.SetData(nullptr);
output0_tensor.SetData(nullptr);
input0_tensor.set_data(nullptr);
output0_tensor.set_data(nullptr);
}

TEST_F(TestActivationFp32, HardTanh2) {
@@ -186,7 +186,7 @@ TEST_F(TestActivationFp32, HardTanh2) {

lite::Tensor input0_tensor;
inputs_tensor.push_back(&input0_tensor);
input0_tensor.SetData(input.data());
input0_tensor.set_data(input.data());
input0_tensor.set_shape(in_shape);

std::vector<float> output(8);
@@ -194,7 +194,7 @@ TEST_F(TestActivationFp32, HardTanh2) {

lite::Tensor output0_tensor;
outputs_tensor.push_back(&output0_tensor);
output0_tensor.SetData(output.data());
output0_tensor.set_data(output.data());

kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Activation};
auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
@@ -211,8 +211,8 @@ TEST_F(TestActivationFp32, HardTanh2) {
std::vector<float> expect_output = {-2.0, -2.0, -1.0, 0.0, 1.0, 2.0, 2.0, 2.0};
CompareOutputData(output.data(), expect_output.data(), 8, 0.00001);

input0_tensor.SetData(nullptr);
output0_tensor.SetData(nullptr);
input0_tensor.set_data(nullptr);
output0_tensor.set_data(nullptr);
}

} // namespace mindspore

+54 -54  mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/arithmetic_fp32_tests.cc

@@ -67,11 +67,11 @@ void TestArithmeticTestFp32::PrepareInt(const std::vector<int> &input0_shape, co
}

in_tensor_0_.set_data_type(kNumberTypeInt);
in_tensor_0_.SetData(input0_data);
in_tensor_0_.set_data(input0_data);
in_tensor_0_.set_shape(input0_shape);
in_tensor_1_.SetData(input1_data);
in_tensor_1_.set_data(input1_data);
in_tensor_1_.set_shape(input1_shape);
out_tensor_.SetData(output_data);
out_tensor_.set_data(output_data);
out_tensor_.set_shape(output_shape);

auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc_);
@@ -83,9 +83,9 @@ void TestArithmeticTestFp32::PrepareInt(const std::vector<int> &input0_shape, co
}

void TestArithmeticTestFp32::TearDown() {
in_tensor_0_.SetData(nullptr);
in_tensor_1_.SetData(nullptr);
out_tensor_.SetData(nullptr);
in_tensor_0_.set_data(nullptr);
in_tensor_1_.set_data(nullptr);
out_tensor_.set_data(nullptr);
}

TEST_F(TestArithmeticTestFp32, AddTest) {
@@ -548,8 +548,8 @@ TEST_F(TestArithmeticTestFp32, MulFp32) {
lite::Tensor input0_tensor;
lite::Tensor input1_tensor;
input0_tensor.set_data_type(kNumberTypeFloat32);
input0_tensor.SetData(input0.data());
input1_tensor.SetData(input1.data());
input0_tensor.set_data(input0.data());
input1_tensor.set_data(input1.data());
input0_tensor.set_shape(input0_shape);
input1_tensor.set_shape(input1_shape);
inputs_tensor.push_back(&input0_tensor);
@@ -560,7 +560,7 @@ TEST_F(TestArithmeticTestFp32, MulFp32) {

lite::Tensor output0_tensor;
outputs_tensor.push_back(&output0_tensor);
output0_tensor.SetData(output.data());
output0_tensor.set_data(output.data());
output0_tensor.set_shape(output_shape);

kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Eltwise};
@@ -583,9 +583,9 @@ TEST_F(TestArithmeticTestFp32, MulFp32) {

CompareOutputData(output.data(), correct_out_ptr, 24, 0.00001);

input0_tensor.SetData(nullptr);
input1_tensor.SetData(nullptr);
output0_tensor.SetData(nullptr);
input0_tensor.set_data(nullptr);
input1_tensor.set_data(nullptr);
output0_tensor.set_data(nullptr);
}

TEST_F(TestArithmeticTestFp32, MulReluFp32) {
@@ -622,8 +622,8 @@ TEST_F(TestArithmeticTestFp32, MulReluFp32) {
lite::Tensor input0_tensor;
lite::Tensor input1_tensor;
input0_tensor.set_data_type(kNumberTypeFloat32);
input0_tensor.SetData(input0.data());
input1_tensor.SetData(input1.data());
input0_tensor.set_data(input0.data());
input1_tensor.set_data(input1.data());
input0_tensor.set_shape(input0_shape);
input1_tensor.set_shape(input1_shape);
inputs_tensor.push_back(&input0_tensor);
@@ -634,7 +634,7 @@ TEST_F(TestArithmeticTestFp32, MulReluFp32) {

lite::Tensor output0_tensor;
outputs_tensor.push_back(&output0_tensor);
output0_tensor.SetData(output.data());
output0_tensor.set_data(output.data());
output0_tensor.set_shape(output_shape);

kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Eltwise};
@@ -657,9 +657,9 @@ TEST_F(TestArithmeticTestFp32, MulReluFp32) {

CompareOutputData(output.data(), correct_out_ptr, 24, 0.00001);

input0_tensor.SetData(nullptr);
input1_tensor.SetData(nullptr);
output0_tensor.SetData(nullptr);
input0_tensor.set_data(nullptr);
input1_tensor.set_data(nullptr);
output0_tensor.set_data(nullptr);
}

TEST_F(TestArithmeticTestFp32, MulRelu6Fp32) {
@@ -696,8 +696,8 @@ TEST_F(TestArithmeticTestFp32, MulRelu6Fp32) {
lite::Tensor input0_tensor;
lite::Tensor input1_tensor;
input0_tensor.set_data_type(kNumberTypeFloat32);
input0_tensor.SetData(input0.data());
input1_tensor.SetData(input1.data());
input0_tensor.set_data(input0.data());
input1_tensor.set_data(input1.data());
input0_tensor.set_shape(input0_shape);
input1_tensor.set_shape(input1_shape);
inputs_tensor.push_back(&input0_tensor);
@@ -708,7 +708,7 @@ TEST_F(TestArithmeticTestFp32, MulRelu6Fp32) {

lite::Tensor output0_tensor;
outputs_tensor.push_back(&output0_tensor);
output0_tensor.SetData(output.data());
output0_tensor.set_data(output.data());
output0_tensor.set_shape(output_shape);

kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Eltwise};
@@ -730,9 +730,9 @@ TEST_F(TestArithmeticTestFp32, MulRelu6Fp32) {

CompareOutputData(output.data(), correct_out_ptr, 24, 0.00001);

input0_tensor.SetData(nullptr);
input1_tensor.SetData(nullptr);
output0_tensor.SetData(nullptr);
input0_tensor.set_data(nullptr);
input1_tensor.set_data(nullptr);
output0_tensor.set_data(nullptr);
}

TEST_F(TestArithmeticTestFp32, MulInt0) {
@@ -1021,8 +1021,8 @@ TEST_F(TestArithmeticTestFp32, AddReluFp32) {
lite::Tensor input0_tensor;
lite::Tensor input1_tensor;
input0_tensor.set_data_type(kNumberTypeFloat32);
input0_tensor.SetData(input0.data());
input1_tensor.SetData(input1.data());
input0_tensor.set_data(input0.data());
input1_tensor.set_data(input1.data());
input0_tensor.set_shape(input0_shape);
input1_tensor.set_shape(input1_shape);
inputs_tensor.push_back(&input0_tensor);
@@ -1033,7 +1033,7 @@ TEST_F(TestArithmeticTestFp32, AddReluFp32) {

lite::Tensor output0_tensor;
outputs_tensor.push_back(&output0_tensor);
output0_tensor.SetData(output.data());
output0_tensor.set_data(output.data());
output0_tensor.set_shape(output_shape);

kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Eltwise};
@@ -1055,9 +1055,9 @@ TEST_F(TestArithmeticTestFp32, AddReluFp32) {

CompareOutputData(output.data(), correct_out_ptr, 24, 0.00001);

input0_tensor.SetData(nullptr);
input1_tensor.SetData(nullptr);
output0_tensor.SetData(nullptr);
input0_tensor.set_data(nullptr);
input1_tensor.set_data(nullptr);
output0_tensor.set_data(nullptr);
}

TEST_F(TestArithmeticTestFp32, AddRelu6Fp32) {
@@ -1094,8 +1094,8 @@ TEST_F(TestArithmeticTestFp32, AddRelu6Fp32) {
lite::Tensor input0_tensor;
lite::Tensor input1_tensor;
input0_tensor.set_data_type(kNumberTypeFloat32);
input0_tensor.SetData(input0.data());
input1_tensor.SetData(input1.data());
input0_tensor.set_data(input0.data());
input1_tensor.set_data(input1.data());
input0_tensor.set_shape(input0_shape);
input1_tensor.set_shape(input1_shape);
inputs_tensor.push_back(&input0_tensor);
@@ -1106,7 +1106,7 @@ TEST_F(TestArithmeticTestFp32, AddRelu6Fp32) {

lite::Tensor output0_tensor;
outputs_tensor.push_back(&output0_tensor);
output0_tensor.SetData(output.data());
output0_tensor.set_data(output.data());
output0_tensor.set_shape(output_shape);

kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Eltwise};
@@ -1127,9 +1127,9 @@ TEST_F(TestArithmeticTestFp32, AddRelu6Fp32) {

CompareOutputData(output.data(), correct_out_ptr, 24, 0.00001);

input0_tensor.SetData(nullptr);
input1_tensor.SetData(nullptr);
output0_tensor.SetData(nullptr);
input0_tensor.set_data(nullptr);
input1_tensor.set_data(nullptr);
output0_tensor.set_data(nullptr);
}

TEST_F(TestArithmeticTestFp32, DivReluFp32) {
@@ -1166,8 +1166,8 @@ TEST_F(TestArithmeticTestFp32, DivReluFp32) {
lite::Tensor input0_tensor;
lite::Tensor input1_tensor;
input0_tensor.set_data_type(kNumberTypeFloat32);
input0_tensor.SetData(input0.data());
input1_tensor.SetData(input1.data());
input0_tensor.set_data(input0.data());
input1_tensor.set_data(input1.data());
input0_tensor.set_shape(input0_shape);
input1_tensor.set_shape(input1_shape);
inputs_tensor.push_back(&input0_tensor);
@@ -1178,7 +1178,7 @@ TEST_F(TestArithmeticTestFp32, DivReluFp32) {

lite::Tensor output0_tensor;
outputs_tensor.push_back(&output0_tensor);
output0_tensor.SetData(output.data());
output0_tensor.set_data(output.data());
output0_tensor.set_shape(output_shape);

kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Eltwise};
@@ -1201,9 +1201,9 @@ TEST_F(TestArithmeticTestFp32, DivReluFp32) {

CompareOutputData(output.data(), correct_out_ptr, 24, 0.00001);

input0_tensor.SetData(nullptr);
input1_tensor.SetData(nullptr);
output0_tensor.SetData(nullptr);
input0_tensor.set_data(nullptr);
input1_tensor.set_data(nullptr);
output0_tensor.set_data(nullptr);
}

TEST_F(TestArithmeticTestFp32, DivRelu6Fp32) {
@@ -1240,8 +1240,8 @@ TEST_F(TestArithmeticTestFp32, DivRelu6Fp32) {
lite::Tensor input0_tensor;
lite::Tensor input1_tensor;
input0_tensor.set_data_type(kNumberTypeFloat32);
input0_tensor.SetData(input0.data());
input1_tensor.SetData(input1.data());
input0_tensor.set_data(input0.data());
input1_tensor.set_data(input1.data());
input0_tensor.set_shape(input0_shape);
input1_tensor.set_shape(input1_shape);
inputs_tensor.push_back(&input0_tensor);
@@ -1252,7 +1252,7 @@ TEST_F(TestArithmeticTestFp32, DivRelu6Fp32) {

lite::Tensor output0_tensor;
outputs_tensor.push_back(&output0_tensor);
output0_tensor.SetData(output.data());
output0_tensor.set_data(output.data());
output0_tensor.set_shape(output_shape);

kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Eltwise};
@@ -1273,9 +1273,9 @@ TEST_F(TestArithmeticTestFp32, DivRelu6Fp32) {

CompareOutputData(output.data(), correct_out_ptr, 24, 0.00001);

input0_tensor.SetData(nullptr);
input1_tensor.SetData(nullptr);
output0_tensor.SetData(nullptr);
input0_tensor.set_data(nullptr);
input1_tensor.set_data(nullptr);
output0_tensor.set_data(nullptr);
}

TEST_F(TestArithmeticTestFp32, EqualFp32) {
@@ -1311,8 +1311,8 @@ TEST_F(TestArithmeticTestFp32, EqualFp32) {
lite::Tensor input0_tensor;
lite::Tensor input1_tensor;
input0_tensor.set_data_type(kNumberTypeFloat32);
input0_tensor.SetData(input0.data());
input1_tensor.SetData(input1.data());
input0_tensor.set_data(input0.data());
input1_tensor.set_data(input1.data());
input0_tensor.set_shape(input0_shape);
input1_tensor.set_shape(input1_shape);
inputs_tensor.push_back(&input0_tensor);
@@ -1323,7 +1323,7 @@ TEST_F(TestArithmeticTestFp32, EqualFp32) {

lite::Tensor output0_tensor;
outputs_tensor.push_back(&output0_tensor);
output0_tensor.SetData(output.data());
output0_tensor.set_data(output.data());
output0_tensor.set_shape(output_shape);

kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Eltwise};
@@ -1343,8 +1343,8 @@ TEST_F(TestArithmeticTestFp32, EqualFp32) {

CompareOutputData(output.data(), correct_out_ptr, 24, 0.00001);

input0_tensor.SetData(nullptr);
input1_tensor.SetData(nullptr);
output0_tensor.SetData(nullptr);
input0_tensor.set_data(nullptr);
input1_tensor.set_data(nullptr);
output0_tensor.set_data(nullptr);
}
} // namespace mindspore
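
// Note on the pattern in the hunks above: these tests attach caller-owned buffers
// (stack arrays or std::vector storage) to tensors with set_data(), so each test
// detaches them again before the tensors are destroyed. A minimal sketch of the
// idiom — the helper name is illustrative, and it assumes lite::Tensor's
// destructor frees whatever data pointer it still holds, with the test's usual
// includes for lite::Tensor and kNumberTypeFloat32 in scope:

#include <vector>

void BorrowedBufferExample() {
  std::vector<float> buffer(24, 0.0f);  // owned by the test, not the tensor
  lite::Tensor tensor;
  tensor.set_data_type(kNumberTypeFloat32);
  tensor.set_shape({1, 2, 3, 4});
  tensor.set_data(buffer.data());       // the tensor borrows the pointer, no copy
  // ... run a kernel against &tensor ...
  tensor.set_data(nullptr);             // detach before ~Tensor runs, avoiding a double free
}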

+28 -28 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batchnorm_fp32_tests.cc

@@ -39,9 +39,9 @@ TEST_F(TestBatchnormFp32, BNTest) {
lite::Tensor input0_tensor(kNumberTypeFloat32, {1, 2, 2, 3});
lite::Tensor input1_tensor(kNumberTypeFloat32, {3});
lite::Tensor input2_tensor(kNumberTypeFloat32, {3});
input0_tensor.SetData(in_data.data());
input1_tensor.SetData(in_data1.data());
input2_tensor.SetData(in_data2.data());
input0_tensor.set_data(in_data.data());
input1_tensor.set_data(in_data1.data());
input2_tensor.set_data(in_data2.data());
std::vector<lite::Tensor *> inputs_tensor = {&input0_tensor, &input1_tensor, &input2_tensor};

std::vector<float> output(12);
@@ -49,7 +49,7 @@ TEST_F(TestBatchnormFp32, BNTest) {
-3.5422924, -14.005781, -2.3525476, -6.7113695, -16.396551, -1.4275324};

lite::Tensor output0_tensor(kNumberTypeFloat32, {1, 2, 2, 3});
output0_tensor.SetData(output.data());
output0_tensor.set_data(output.data());
std::vector<lite::Tensor *> outputs_tensor = {&output0_tensor};

kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_BatchNorm};
@@ -71,10 +71,10 @@ TEST_F(TestBatchnormFp32, BNTest) {
std::cout << std::endl;
CompareOutputData(output.data(), corr_out.data(), output0_tensor.ElementsNum(), 0.001);

input0_tensor.SetData(nullptr);
input1_tensor.SetData(nullptr);
input2_tensor.SetData(nullptr);
output0_tensor.SetData(nullptr);
input0_tensor.set_data(nullptr);
input1_tensor.set_data(nullptr);
input2_tensor.set_data(nullptr);
output0_tensor.set_data(nullptr);
}

TEST_F(TestBatchnormFp32, FusedBNTest) {
@@ -94,11 +94,11 @@ TEST_F(TestBatchnormFp32, FusedBNTest) {
lite::Tensor input2(kNumberTypeFloat32, {3});
lite::Tensor input3(kNumberTypeFloat32, {3});
lite::Tensor input4(kNumberTypeFloat32, {3});
input0.SetData(in_data.data());
input1.SetData(scale.data());
input2.SetData(offset.data());
input3.SetData(mean.data());
input4.SetData(var.data());
input0.set_data(in_data.data());
input1.set_data(scale.data());
input2.set_data(offset.data());
input3.set_data(mean.data());
input4.set_data(var.data());
std::vector<lite::Tensor *> inputs_tensor = {&input0, &input1, &input2, &input3, &input4};

std::vector<float> output(12);
@@ -106,7 +106,7 @@ TEST_F(TestBatchnormFp32, FusedBNTest) {
5.1857452, 56.60399, -77.215096, -181.18402, 49.81066, -59.204563};

lite::Tensor output0(kNumberTypeFloat32, {1, 2, 2, 3});
output0.SetData(output.data());
output0.set_data(output.data());
std::vector<lite::Tensor *> outputs_tensor = {&output0};

kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_FusedBatchNorm};
@@ -127,12 +127,12 @@ TEST_F(TestBatchnormFp32, FusedBNTest) {
std::cout << std::endl;
CompareOutputData(output.data(), corr_out.data(), output0.ElementsNum(), 0.001);

input0.SetData(nullptr);
input1.SetData(nullptr);
input2.SetData(nullptr);
input3.SetData(nullptr);
input4.SetData(nullptr);
output0.SetData(nullptr);
input0.set_data(nullptr);
input1.set_data(nullptr);
input2.set_data(nullptr);
input3.set_data(nullptr);
input4.set_data(nullptr);
output0.set_data(nullptr);
}

TEST_F(TestBatchnormFp32, easyTest) {
@@ -147,9 +147,9 @@ TEST_F(TestBatchnormFp32, easyTest) {
lite::Tensor input0(kNumberTypeFloat32, {1, 1, 6, 2});
lite::Tensor input1(kNumberTypeFloat32, {2});
lite::Tensor input2(kNumberTypeFloat32, {2});
input0.SetData(in_data.data());
input1.SetData(in_data1.data());
input2.SetData(in_data2.data());
input0.set_data(in_data.data());
input1.set_data(in_data1.data());
input2.set_data(in_data2.data());
std::vector<lite::Tensor *> inputs_tensor = {&input0, &input1, &input2};

std::vector<float> output(12);
@@ -157,7 +157,7 @@ TEST_F(TestBatchnormFp32, easyTest) {
-0.63498, -2.29971, -1.21223, -2.79965, -1.78949, -3.29959};

lite::Tensor output0(kNumberTypeFloat32, {1, 1, 6, 2});
output0.SetData(output.data());
output0.set_data(output.data());
std::vector<lite::Tensor *> outputs_tensor = {&output0};

kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_BatchNorm};
@@ -178,10 +178,10 @@ TEST_F(TestBatchnormFp32, easyTest) {
std::cout << std::endl;
CompareOutputData(output.data(), corr_out.data(), output0.ElementsNum(), 0.001);

input0.SetData(nullptr);
input1.SetData(nullptr);
input2.SetData(nullptr);
output0.SetData(nullptr);
input0.set_data(nullptr);
input1.set_data(nullptr);
input2.set_data(nullptr);
output0.set_data(nullptr);
}

} // namespace mindspore

+2 -2 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/constant_of_shape_fp32_test.cc

@@ -27,7 +27,7 @@ class TestConstantOfShapeFp32 : public mindspore::CommonTest {

int ConstantOfShapeTestInit(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_, float *a_ptr,
std::vector<int> a_shape) {
auto in_t = new lite::Tensor(kNumberTypeInt32, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto in_t = new lite::Tensor(kNumberTypeInt32, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum());
inputs_->push_back(in_t);
@@ -36,7 +36,7 @@ int ConstantOfShapeTestInit(std::vector<lite::Tensor *> *inputs_, std::vector<li
for (int i = 0; i < c_shape.size(); ++i) {
c_shape[i] = a_ptr[i];
}
auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
outputs_->push_back(out_t);
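
// For reference, the category enumerator renamed throughout this commit:
// lite::Tensor::Category::CONST becomes CONST_TENSOR, and variable tensors are
// spelled VAR elsewhere in the diff. A minimal construction sketch under that
// assumption (function name is illustrative):

void CategoryRenameExample() {
  auto in_t = new lite::Tensor(kNumberTypeInt32, {4}, schema::Format_NHWC,
                               lite::Tensor::Category::CONST_TENSOR);  // formerly Category::CONST
  in_t->MallocData();
  // ... fill through in_t->MutableData(), run the kernel, check outputs ...
  delete in_t;
}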



+10 -16 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/conv1x1_fp32_tests.cc

@@ -172,8 +172,7 @@ TEST_F(TestConv1x1Fp32, Conv1x1WeightTest1) {

int Conv1x1TestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
ConvParameter *conv_param, float **correct) {
lite::Tensor *in_t = new lite::Tensor(kNumberTypeFloat, {1, 2, 3, 4}, schema::Format_NHWC,
lite::TensorCategory(static_cast<schema::NodeType>(1)));
lite::Tensor *in_t = new lite::Tensor(kNumberTypeFloat, {1, 2, 3, 4}, schema::Format_NHWC, lite::Tensor::VAR);
in_t->MallocData();
float in[] = {12.216284, 3.3466918, 15.327419, 5.234958, 0.804376, 9.952188, 14.727955, -8.080715,
13.71383, 8.055829, 6.5845337, -9.25232, -4.24519, 11.550042, 9.262012, 1.2780352,
@@ -181,23 +180,21 @@ int Conv1x1TestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Ten
memcpy(in_t->MutableData(), in, sizeof(float) * 24);
inputs_->push_back(in_t);

lite::Tensor *weight_t = new lite::Tensor(kNumberTypeFloat, {3, 1, 1, 4}, schema::Format_NHWC,
lite::TensorCategory(static_cast<schema::NodeType>(1)));
lite::Tensor *weight_t =
new lite::Tensor(kNumberTypeFloat, {3, 1, 1, 4}, schema::Format_NHWC, lite::Tensor::CONST_TENSOR);
weight_t->MallocData();
float weight[] = {-0.7308652, 0.5257509, -0.87825793, -1.123181, -1.2206168, 0.562695,
1.5382664, -0.5020635, 0.8591602, -0.26410004, 1.1262615, 0.073132955}; /* nhwc */
memcpy(weight_t->MutableData(), weight, sizeof(float) * 12);
inputs_->push_back(weight_t);

lite::Tensor *bias_t = new lite::Tensor(kNumberTypeFloat, {3}, schema::Format_NHWC,
lite::TensorCategory(static_cast<schema::NodeType>(1)));
lite::Tensor *bias_t = new lite::Tensor(kNumberTypeFloat, {3}, schema::Format_NHWC, lite::Tensor::CONST_TENSOR);
bias_t->MallocData();
float bias[] = {2, 2, 2};
memcpy(bias_t->MutableData(), bias, sizeof(float) * 3);
inputs_->push_back(bias_t);

lite::Tensor *out_t = new lite::Tensor(kNumberTypeFloat, {1, 2, 3, 3}, schema::Format_NHWC,
lite::TensorCategory(static_cast<schema::NodeType>(1)));
lite::Tensor *out_t = new lite::Tensor(kNumberTypeFloat, {1, 2, 3, 3}, schema::Format_NHWC, lite::Tensor::VAR);
out_t->MallocData();
outputs_->push_back(out_t);

@@ -239,32 +236,29 @@ TEST_F(TestConv1x1Fp32, Conv1x1Test1) {
int Conv1x1TestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
ConvParameter *conv_param, float **correct) {
size_t buffer_size;
lite::Tensor *in_t = new lite::Tensor(kNumberTypeFloat, {1, 300, 300, 24}, schema::Format_NHWC,
lite::TensorCategory(static_cast<schema::NodeType>(1)));
lite::Tensor *in_t = new lite::Tensor(kNumberTypeFloat, {1, 300, 300, 24}, schema::Format_NHWC, lite::Tensor::VAR);
in_t->MallocData();
std::string input_path = "./conv/conv1x1fp32_input1_nhwc.bin";
auto in = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &buffer_size));
memcpy(in_t->MutableData(), in, buffer_size);
inputs_->push_back(in_t);

lite::Tensor *weight_t = new lite::Tensor(kNumberTypeFloat, {40, 1, 1, 24}, schema::Format_NHWC,
lite::TensorCategory(static_cast<schema::NodeType>(1)));
lite::Tensor *weight_t =
new lite::Tensor(kNumberTypeFloat, {40, 1, 1, 24}, schema::Format_NHWC, lite::Tensor::CONST_TENSOR);
weight_t->MallocData();
std::string weight_path = "./conv/conv1x1fp32_weight1_nhwc.bin";
auto weight = reinterpret_cast<float *>(mindspore::lite::ReadFile(weight_path.c_str(), &buffer_size));
memcpy(weight_t->MutableData(), weight, buffer_size);
inputs_->push_back(weight_t);

lite::Tensor *bias_t = new lite::Tensor(kNumberTypeFloat, {40}, schema::Format_NHWC,
lite::TensorCategory(static_cast<schema::NodeType>(1)));
lite::Tensor *bias_t = new lite::Tensor(kNumberTypeFloat, {40}, schema::Format_NHWC, lite::Tensor::CONST_TENSOR);
bias_t->MallocData();
std::string bias_path = "./conv/conv1x1fp32_bias1_nhwc.bin";
auto bias = mindspore::lite::ReadFile(bias_path.c_str(), &buffer_size);
memcpy(bias_t->MutableData(), bias, buffer_size);
inputs_->push_back(bias_t);

lite::Tensor *out_t = new lite::Tensor(kNumberTypeFloat, {1, 300, 300, 40}, schema::Format_NHWC,
lite::TensorCategory(static_cast<schema::NodeType>(1)));
lite::Tensor *out_t = new lite::Tensor(kNumberTypeFloat, {1, 300, 300, 40}, schema::Format_NHWC, lite::Tensor::VAR);
out_t->MallocData();
outputs_->push_back(out_t);
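
// The conv1x1 hunks above also replace the indirect
// lite::TensorCategory(static_cast<schema::NodeType>(1)) lookup with explicit
// enumerators. A side-by-side sketch, using only the two forms visible in the
// diff (the old helper mapped a schema::NodeType onto a category):

void CategoryStyleExample() {
  // before: category derived indirectly from a schema::NodeType
  auto t_old = new lite::Tensor(kNumberTypeFloat, {1, 2, 3, 4}, schema::Format_NHWC,
                                lite::TensorCategory(static_cast<schema::NodeType>(1)));
  // after: category stated directly with the enumerator
  auto t_new = new lite::Tensor(kNumberTypeFloat, {1, 2, 3, 4}, schema::Format_NHWC,
                                lite::Tensor::VAR);
  delete t_old;
  delete t_new;
}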



+2 -2 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc

@@ -257,12 +257,12 @@ TEST_F(CropTestFp32, CropTest11) {
std::vector<int> out_shape = {1, 4, 2, 2};
std::vector<lite::Tensor *> inputs;
std::vector<lite::Tensor *> outputs;
auto in_t = new lite::Tensor(kNumberTypeFloat, in_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto in_t = new lite::Tensor(kNumberTypeFloat, in_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
memcpy(in_t->MutableData(), input, sizeof(float) * in_t->ElementsNum());
inputs.push_back(in_t);

auto out_t = new lite::Tensor(kNumberTypeFloat, out_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto out_t = new lite::Tensor(kNumberTypeFloat, out_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
outputs.push_back(out_t);



+18 -16 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc

@@ -324,7 +324,7 @@ int DeConvTestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tens
ConvParameter *conv_param, float **correct) {
std::vector<int> in_dims_nhwc = {1, 5, 7, 2};
lite::Tensor *in_t =
new lite::Tensor(kNumberTypeFloat, in_dims_nhwc, schema::Format_NHWC, lite::Tensor::Category::CONST);
new lite::Tensor(kNumberTypeFloat, in_dims_nhwc, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
float in_nchw[] = {
0.39451003, 0.15045597, 0.5367726, 0.62690735, 0.113554195, 0.5402554, 0.5522764, 0.044319753, 0.25721782,
@@ -340,7 +340,7 @@ int DeConvTestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tens

std::vector<int> weight_dims_nhwc = {2, 3, 3, 6};
lite::Tensor *weight_t =
new lite::Tensor(kNumberTypeFloat, weight_dims_nhwc, schema::Format_NHWC, lite::Tensor::Category::CONST);
new lite::Tensor(kNumberTypeFloat, weight_dims_nhwc, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
float weight_nchw[] = {
0.061163727, -0.06261389, 0.07708351, -0.019354159, -0.3859104, -0.082844816, -0.21268463, -0.15746808,
@@ -361,7 +361,8 @@ int DeConvTestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tens
weight_t->Channel());
inputs_->push_back(weight_t);

lite::Tensor *bias_t = new lite::Tensor(kNumberTypeFloat, {6}, schema::Format_NHWC, lite::Tensor::Category::CONST);
lite::Tensor *bias_t =
new lite::Tensor(kNumberTypeFloat, {6}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
bias_t->MallocData();
float bias[] = {-0.19064677, -0.0034778118, 0.63741624, -1.0311537, -1.0288948, 0.71384084};
memcpy(bias_t->MutableData(), bias, sizeof(float) * 6);
@@ -369,7 +370,7 @@ int DeConvTestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tens

std::vector<int> output_nhwc_dims = {1, 9, 13, 6};
lite::Tensor *out_t =
new lite::Tensor(kNumberTypeFloat, output_nhwc_dims, schema::Format_NHWC, lite::Tensor::Category::CONST);
new lite::Tensor(kNumberTypeFloat, output_nhwc_dims, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
outputs_->push_back(out_t);

@@ -497,7 +498,7 @@ TEST_F(TestDeConvolutionFp32, DeConvTest1) {

int DeConvTestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
ConvParameter *conv_param, float **correct) {
auto *in_t = new lite::Tensor(kNumberTypeFloat, {1, 4, 2, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto *in_t = new lite::Tensor(kNumberTypeFloat, {1, 4, 2, 3}, schema::Format_NHWC, lite::Tensor::Category::VAR);
in_t->MallocData();
float in[] = {7.7566547, 19.250782, 17.923292, 13.584222, 3.3293908, 9.734102, 18.83455, -1.5142503,
-0.29382008, 18.686155, 0.087307654, 4.2010098, -2.2539594, 4.1795673, 13.142356, -3.5939367,
@@ -505,7 +506,8 @@ int DeConvTestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tens
memcpy(in_t->MutableData(), in, sizeof(float) * in_t->ElementsNum());
inputs_->push_back(in_t);

auto *weight_t = new lite::Tensor(kNumberTypeFloat, {3, 3, 3, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto *weight_t =
new lite::Tensor(kNumberTypeFloat, {3, 3, 3, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
float weight[] = {-0.39557076, 0.15087655, 0.35216075, -0.20893791, 0.28683448, 0.08006268, 0.9830812,
0.27212173, 0.5171944, -0.0014505, 0.78694165, 0.25425306, 0.16605458, -0.06127124,
@@ -519,7 +521,7 @@ int DeConvTestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tens
inputs_->push_back(weight_t);

std::vector<int> out_nhwc_dims = {1, 7, 3, 2};
auto *out_t = new lite::Tensor(kNumberTypeFloat, out_nhwc_dims, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto *out_t = new lite::Tensor(kNumberTypeFloat, out_nhwc_dims, schema::Format_NHWC, lite::Tensor::Category::VAR);
out_t->MallocData();
outputs_->push_back(out_t);

@@ -564,7 +566,7 @@ TEST_F(TestDeConvolutionFp32, DeConvTest2) {
int DeConvTestInit3(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
ConvParameter *conv_param, float **correct) {
std::vector<int> in_dims_nhwc = {1, 3, 3, 2};
auto *in_t = new lite::Tensor(kNumberTypeFloat, in_dims_nhwc, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto *in_t = new lite::Tensor(kNumberTypeFloat, in_dims_nhwc, schema::Format_NHWC, lite::Tensor::Category::VAR);
in_t->MallocData();
float in_nchw[] = {0.10411751, 0.24034509, 0.71456534, 0.75286126, 0.9778457, 0.21043599,
0.26498786, 0.6701024, 0.9744634, 0.49075702, 0.03877404, 0.48646277,
@@ -574,8 +576,8 @@ int DeConvTestInit3(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tens
inputs_->push_back(in_t);

std::vector<int> w_dims_nhwc = {2, 2, 2, 2};
auto *weight_t = new lite::Tensor(kNumberTypeFloat, w_dims_nhwc, schema::Format_NHWC,
lite::TensorCategory(schema::NodeType_Parameter));
auto *weight_t =
new lite::Tensor(kNumberTypeFloat, w_dims_nhwc, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
float w_nchw[] = {-0.108016446, -0.44254777, 0.29249913, 0.18764605, 1.1250675, 0.29441583,
-0.34362152, 0.7557833, 0.16503833, 0.2418737, -0.26612744, 0.5072577,
@@ -585,8 +587,7 @@ int DeConvTestInit3(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tens
inputs_->push_back(weight_t);

std::vector<int> out_dims_nhwc = {1, 9, 9, 2};
auto *out_t = new lite::Tensor(kNumberTypeFloat, out_dims_nhwc, schema::Format_NC4HW4,
lite::TensorCategory(schema::NodeType_Parameter));
auto *out_t = new lite::Tensor(kNumberTypeFloat, out_dims_nhwc, schema::Format_NC4HW4, lite::Tensor::Category::VAR);
out_t->MallocData();
outputs_->push_back(out_t);

@@ -644,7 +645,7 @@ int DeConvTestInit4(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tens
ConvParameter *conv_param, float **correct) {
size_t buffer_size;
std::vector<int> in_nhwc_dims = {1, 300, 300, 30};
auto *in_t = new lite::Tensor(kNumberTypeFloat, in_nhwc_dims, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto *in_t = new lite::Tensor(kNumberTypeFloat, in_nhwc_dims, schema::Format_NHWC, lite::Tensor::Category::VAR);
in_t->MallocData();
std::string in_nhwc_path = "./deconv/deconv_fp32_nhwc_input1.bin";
auto in_nhwc = reinterpret_cast<float *>(mindspore::lite::ReadFile(in_nhwc_path.c_str(), &buffer_size));
@@ -652,7 +653,8 @@ int DeConvTestInit4(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tens
inputs_->push_back(in_t);

std::vector<int> w_nhwc_dims = {30, 3, 3, 40};
auto *weight_t = new lite::Tensor(kNumberTypeFloat, w_nhwc_dims, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto *weight_t =
new lite::Tensor(kNumberTypeFloat, w_nhwc_dims, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
std::string weight_path = "./deconv/deconv_fp32_nchw_weight1.bin";
auto weight_nchw = reinterpret_cast<float *>(mindspore::lite::ReadFile(weight_path.c_str(), &buffer_size));
@@ -660,7 +662,7 @@ int DeConvTestInit4(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tens
weight_t->Channel());
inputs_->push_back(weight_t);

auto *bias_t = new lite::Tensor(kNumberTypeFloat, {40}, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto *bias_t = new lite::Tensor(kNumberTypeFloat, {40}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
bias_t->MallocData();
std::string bias_path = "./deconv/deconv_fp32_nchw_bias1.bin";
auto bias = mindspore::lite::ReadFile(bias_path.c_str(), &buffer_size);
@@ -668,7 +670,7 @@ int DeConvTestInit4(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tens
inputs_->push_back(bias_t);

std::vector<int> out_nhwc_dims = {1, 302, 302, 40};
auto *out_t = new lite::Tensor(kNumberTypeFloat, out_nhwc_dims, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto *out_t = new lite::Tensor(kNumberTypeFloat, out_nhwc_dims, schema::Format_NHWC, lite::Tensor::Category::VAR);
out_t->MallocData();
outputs_->push_back(out_t);



+3 -2 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/elu_fp32_test.cc

@@ -30,13 +30,14 @@ class TestEluFp32 : public mindspore::CommonTest {
};

void EluTestInit(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outputs_, EluParameter *elu_param) {
Tensor *in_t_first = new Tensor(kNumberTypeFloat32, {6, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST);
Tensor *in_t_first =
new Tensor(kNumberTypeFloat32, {6, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t_first->MallocData();
float in_first[] = {-1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 0};
memcpy(in_t_first->MutableData(), in_first, sizeof(float) * in_t_first->ElementsNum());
inputs_->push_back(in_t_first);

Tensor *outputs_t = new Tensor(kNumberTypeFloat32, {6, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST);
Tensor *outputs_t = new Tensor(kNumberTypeFloat32, {6, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
outputs_t->MallocData();
outputs_->push_back(outputs_t);



+7 -4 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/embedding_lookup_fp32_test.cc

@@ -31,25 +31,28 @@ class TestEmbeddingLookupFp32 : public mindspore::CommonTest {

void ElTestInit(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outputs_,
EmbeddingLookupParameter *embedding_lookup_param) {
Tensor *in_t_first = new Tensor(kNumberTypeFloat32, {6, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST);
Tensor *in_t_first =
new Tensor(kNumberTypeFloat32, {6, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t_first->MallocData();
float in_first[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
memcpy(in_t_first->MutableData(), in_first, sizeof(float) * in_t_first->ElementsNum());
inputs_->push_back(in_t_first);

Tensor *in_t_second = new Tensor(kNumberTypeFloat32, {4, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST);
Tensor *in_t_second =
new Tensor(kNumberTypeFloat32, {4, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t_second->MallocData();
float in_second[] = {1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8};
memcpy(in_t_second->MutableData(), in_second, sizeof(float) * in_t_second->ElementsNum());
inputs_->push_back(in_t_second);

Tensor *ids_t = new Tensor(kNumberTypeFloat32, {2, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST);
Tensor *ids_t = new Tensor(kNumberTypeFloat32, {2, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
ids_t->MallocData();
int ids[] = {1, 9, 2, 4, 6, 7};
memcpy(ids_t->MutableData(), ids, sizeof(int) * ids_t->ElementsNum());
inputs_->push_back(ids_t);

Tensor *outputs_t = new Tensor(kNumberTypeInt32, {2, 3, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST);
Tensor *outputs_t =
new Tensor(kNumberTypeInt32, {2, 3, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
outputs_t->MallocData();
outputs_->push_back(outputs_t);



+12 -11 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/fullconnection_fp32_tests.cc

@@ -33,14 +33,14 @@ class TestFcFp32 : public mindspore::CommonTest {

int FcTestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
MatMulParameter *matmal_param, float **correct) {
Tensor *in_t = new Tensor(kNumberTypeFloat, {2, 2, 2, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST);
Tensor *in_t = new Tensor(kNumberTypeFloat, {2, 2, 2, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
float in[] = {-3.2366564, -4.7733846, -7.8329225, 16.146885, 5.060793, -6.1471, -1.7680453, -6.5721383,
17.87506, -5.1192183, 10.742863, 1.4536934, 19.693445, 19.45783, 5.063163, 0.5234792};
memcpy(in_t->MutableData(), in, sizeof(float) * in_t->ElementsNum());
inputs_->push_back(in_t);

Tensor *weight_t = new Tensor(kNumberTypeFloat, {3, 8}, schema::Format_NHWC, lite::Tensor::Category::CONST);
Tensor *weight_t = new Tensor(kNumberTypeFloat, {3, 8}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
float weight[] = {-0.0024438887, 0.0006738146, -0.008169129, 0.0021510671, -0.012470592, -0.0053063435,
0.006050155, 0.008656233, 0.012911413, -0.0028635843, -0.00034080597, -0.0010622552,
@@ -49,13 +49,13 @@ int FcTestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *
memcpy(weight_t->MutableData(), weight, sizeof(float) * weight_t->ElementsNum());
inputs_->push_back(weight_t);

Tensor *bias_t = new Tensor(kNumberTypeFloat, {3}, schema::Format_NHWC, lite::Tensor::Category::CONST);
Tensor *bias_t = new Tensor(kNumberTypeFloat, {3}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
bias_t->MallocData();
float bias[] = {1.6103756, -0.9872417, 0.546849};
memcpy(bias_t->MutableData(), bias, sizeof(float) * bias_t->ElementsNum());
inputs_->push_back(bias_t);

Tensor *out_t = new Tensor(kNumberTypeFloat, {2, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST);
Tensor *out_t = new Tensor(kNumberTypeFloat, {2, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
outputs_->push_back(out_t);

@@ -91,28 +91,29 @@ int FcTestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *
MatMulParameter *matmal_param, float **correct) {
size_t buffer_size;

Tensor *in_t = new Tensor(kNumberTypeFloat, {20, 4, 2, 10}, schema::Format_NCHW, lite::Tensor::Category::CONST);
Tensor *in_t =
new Tensor(kNumberTypeFloat, {20, 4, 2, 10}, schema::Format_NCHW, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
std::string in_path = "./matmul/FcFp32_input1.bin";
auto in_data = mindspore::lite::ReadFile(in_path.c_str(), &buffer_size);
memcpy(in_t->MutableData(), in_data, buffer_size);
inputs_->push_back(in_t);

Tensor *weight_t = new Tensor(kNumberTypeFloat, {30, 80}, schema::Format_NCHW, lite::Tensor::Category::CONST);
Tensor *weight_t = new Tensor(kNumberTypeFloat, {30, 80}, schema::Format_NCHW, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
std::string weight_path = "./matmul/FcFp32_weight1.bin";
auto w_data = mindspore::lite::ReadFile(weight_path.c_str(), &buffer_size);
memcpy(weight_t->MutableData(), w_data, buffer_size);
inputs_->push_back(weight_t);

Tensor *bias_t = new Tensor(kNumberTypeFloat, {30}, schema::Format_NCHW, lite::Tensor::Category::CONST);
Tensor *bias_t = new Tensor(kNumberTypeFloat, {30}, schema::Format_NCHW, lite::Tensor::Category::CONST_TENSOR);
bias_t->MallocData();
std::string bias_path = "./matmul/FcFp32_bias1.bin";
auto bias_data = mindspore::lite::ReadFile(bias_path.c_str(), &buffer_size);
memcpy(bias_t->MutableData(), bias_data, buffer_size);
inputs_->push_back(bias_t);

Tensor *out_t = new Tensor(kNumberTypeFloat, {20, 30}, schema::Format_NCHW, lite::Tensor::Category::CONST);
Tensor *out_t = new Tensor(kNumberTypeFloat, {20, 30}, schema::Format_NCHW, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
outputs_->push_back(out_t);

@@ -147,13 +148,13 @@ TEST_F(TestFcFp32, FcTest2) {

int FcTestInit3(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
MatMulParameter *matmal_param, float **correct) {
Tensor *in_t = new Tensor(kNumberTypeFloat, {1, 1, 1, 20}, schema::Format_NHWC, lite::Tensor::Category::CONST);
Tensor *in_t = new Tensor(kNumberTypeFloat, {1, 1, 1, 20}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
float in[] = {1, 0, 3, 0, 4, 5, 2, 5, 2, 5, 1, 5, 0, 1, 2, 0, 2, 1, 0, 5};
memcpy(in_t->MutableData(), in, sizeof(float) * in_t->ElementsNum());
inputs_->push_back(in_t);

Tensor *weight_t = new Tensor(kNumberTypeFloat, {16, 20}, schema::Format_NHWC, lite::Tensor::Category::CONST);
Tensor *weight_t = new Tensor(kNumberTypeFloat, {16, 20}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
float weight[] = {0, 5, 5, 3, 0, 5, 3, 1, 0, 1, 3, 0, 5, 5, 2, 4, 0, 1, 1, 2, 3, 0, 5, 5, 4, 4, 1, 4, 1, 1, 5, 3,
3, 1, 0, 3, 1, 2, 4, 5, 3, 4, 4, 0, 3, 5, 0, 3, 4, 1, 0, 1, 3, 4, 0, 5, 2, 5, 0, 4, 2, 2, 2, 2,
@@ -168,7 +169,7 @@ int FcTestInit3(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *
memcpy(weight_t->MutableData(), weight, sizeof(float) * weight_t->ElementsNum());
inputs_->push_back(weight_t);

Tensor *out_t = new Tensor(kNumberTypeFloat, {1, 16}, schema::Format_NHWC, lite::Tensor::Category::CONST);
Tensor *out_t = new Tensor(kNumberTypeFloat, {1, 16}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
outputs_->push_back(out_t);
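
// Two ownership styles coexist in these test initializers: tensor-owned storage
// via MallocData()/MutableData() plus a memcpy, and borrowed storage via
// set_data(). A sketch contrasting both (illustrative names; assumes
// MallocData() allocates memory the tensor releases itself):

#include <cstring>

void OwnershipStylesExample() {
  float src[] = {1, 2, 3, 4, 5, 6};
  // 1) tensor-owned: allocate inside the tensor, copy the test data in;
  //    deleting the tensor releases that buffer (assumption).
  auto owned = new lite::Tensor(kNumberTypeFloat, {2, 3}, schema::Format_NHWC,
                                lite::Tensor::Category::CONST_TENSOR);
  owned->MallocData();
  memcpy(owned->MutableData(), src, sizeof(src));
  delete owned;
  // 2) caller-owned: borrow an external pointer and detach it again
  //    before the tensor goes out of scope.
  lite::Tensor borrowed(kNumberTypeFloat32, {2, 3});
  borrowed.set_data(src);
  // ... use &borrowed as a kernel input ...
  borrowed.set_data(nullptr);
}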



+16 -16 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/instance_norm_fp32_tests.cc

@@ -39,9 +39,9 @@ TEST_F(TestInstanceNormFp32, INTest1) {
lite::Tensor input0_tensor(kNumberTypeFloat32, {1, 2, 2, 3});
lite::Tensor input1_tensor(kNumberTypeFloat32, {3});
lite::Tensor input2_tensor(kNumberTypeFloat32, {3});
input0_tensor.SetData(in_data.data());
input1_tensor.SetData(in_data1.data());
input2_tensor.SetData(in_data2.data());
input0_tensor.set_data(in_data.data());
input1_tensor.set_data(in_data1.data());
input2_tensor.set_data(in_data2.data());
std::vector<lite::Tensor *> inputs_tensor = {&input0_tensor, &input1_tensor, &input2_tensor};

std::vector<float> output(12);
@@ -49,7 +49,7 @@ TEST_F(TestInstanceNormFp32, INTest1) {
-3.5422924, -14.005781, -2.3525476, -6.7113695, -16.396551, -1.4275324};

lite::Tensor output0_tensor(kNumberTypeFloat32, {1, 2, 2, 3});
output0_tensor.SetData(output.data());
output0_tensor.set_data(output.data());
std::vector<lite::Tensor *> outputs_tensor = {&output0_tensor};

kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_InstanceNorm};
@@ -71,10 +71,10 @@ TEST_F(TestInstanceNormFp32, INTest1) {
std::cout << std::endl;
CompareOutputData(output.data(), corr_out.data(), output0_tensor.ElementsNum(), 0.001);

input0_tensor.SetData(nullptr);
input1_tensor.SetData(nullptr);
input2_tensor.SetData(nullptr);
output0_tensor.SetData(nullptr);
input0_tensor.set_data(nullptr);
input1_tensor.set_data(nullptr);
input2_tensor.set_data(nullptr);
output0_tensor.set_data(nullptr);
}

TEST_F(TestInstanceNormFp32, INTest2) {
@@ -92,9 +92,9 @@ TEST_F(TestInstanceNormFp32, INTest2) {
lite::Tensor input0_tensor(kNumberTypeFloat32, {2, 2, 2, 3});
lite::Tensor input1_tensor(kNumberTypeFloat32, {6});
lite::Tensor input2_tensor(kNumberTypeFloat32, {6});
input0_tensor.SetData(in_data.data());
input1_tensor.SetData(in_data1.data());
input2_tensor.SetData(in_data2.data());
input0_tensor.set_data(in_data.data());
input1_tensor.set_data(in_data1.data());
input2_tensor.set_data(in_data2.data());
std::vector<lite::Tensor *> inputs_tensor = {&input0_tensor, &input1_tensor, &input2_tensor};

std::vector<float> output(24);
@@ -104,7 +104,7 @@ TEST_F(TestInstanceNormFp32, INTest2) {
-3.5422924, -14.005781, -2.3525476, -6.7113695, -16.396551, -1.4275324};

lite::Tensor output0_tensor(kNumberTypeFloat32, {2, 2, 2, 3});
output0_tensor.SetData(output.data());
output0_tensor.set_data(output.data());
std::vector<lite::Tensor *> outputs_tensor = {&output0_tensor};

kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_InstanceNorm};
@@ -126,9 +126,9 @@ TEST_F(TestInstanceNormFp32, INTest2) {
std::cout << std::endl;
CompareOutputData(output.data(), corr_out.data(), output0_tensor.ElementsNum(), 0.001);

input0_tensor.SetData(nullptr);
input1_tensor.SetData(nullptr);
input2_tensor.SetData(nullptr);
output0_tensor.SetData(nullptr);
input0_tensor.set_data(nullptr);
input1_tensor.set_data(nullptr);
input2_tensor.set_data(nullptr);
output0_tensor.set_data(nullptr);
}
} // namespace mindspore

+4 -4 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/l2norm_fp32_test.cc

@@ -42,8 +42,8 @@ class TestL2NormFp32 : public mindspore::CommonTest {
};

void TestL2NormFp32::TearDown() {
in_tensor_.SetData(nullptr);
out_tensor_.SetData(nullptr);
in_tensor_.set_data(nullptr);
out_tensor_.set_data(nullptr);
}

void TestL2NormFp32::Init(const std::vector<int> &input_shape, const std::vector<int> &output_shape, float *input_data,
@@ -53,8 +53,8 @@ void TestL2NormFp32::Init(const std::vector<int> &input_shape, const std::vector
in_tensor_.set_shape(input_shape);
out_tensor_.set_data_type(kNumberTypeFloat32);
out_tensor_.set_shape(output_shape);
in_tensor_.SetData(input_data);
out_tensor_.SetData(output_data);
in_tensor_.set_data(input_data);
out_tensor_.set_data(output_data);

param_.axis_num_ = axis_num;
if (axis_num == 1) {


+20 -20 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lsh_projection_fp32_tests.cc

@@ -45,10 +45,10 @@ TEST_F(TestLshProjectionFp32, Dense1DInputs) {
int32_t input_data1[] = {12345, 54321, 67890, 9876, -12345678};
float input_data2[] = {1.0, 1.0, 1.0, 1.0, 1.0};
int32_t output_data[6] = {0};
in_tensor0.SetData(input_data0);
in_tensor1.SetData(input_data1);
in_tensor2.SetData(input_data2);
out_tensor.SetData(output_data);
in_tensor0.set_data(input_data0);
in_tensor1.set_data(input_data1);
in_tensor2.set_data(input_data2);
out_tensor.set_data(output_data);

std::vector<lite::Tensor *> inputs = {&in_tensor0, &in_tensor1, &in_tensor2};
std::vector<lite::Tensor *> outputs = {&out_tensor};
@@ -73,9 +73,9 @@ TEST_F(TestLshProjectionFp32, Dense1DInputs) {
PrintData("output data", output_data, 6);
CompareOutputData(output_data, except_result.data(), 6, 0.000001);

in_tensor0.SetData(nullptr);
in_tensor1.SetData(nullptr);
out_tensor.SetData(nullptr);
in_tensor0.set_data(nullptr);
in_tensor1.set_data(nullptr);
out_tensor.set_data(nullptr);
}

TEST_F(TestLshProjectionFp32, Sparse1DInputs) {
@@ -86,9 +86,9 @@ TEST_F(TestLshProjectionFp32, Sparse1DInputs) {
float input_data0[] = {0.123, 0.456, -0.321, 1.234, 5.678, -4.321};
int32_t input_data1[] = {12345, 54321, 67890, 9876, -12345678};
int32_t output_data[3] = {0};
in_tensor0.SetData(input_data0);
in_tensor1.SetData(input_data1);
out_tensor.SetData(output_data);
in_tensor0.set_data(input_data0);
in_tensor1.set_data(input_data1);
out_tensor.set_data(output_data);

std::vector<lite::Tensor *> inputs = {&in_tensor0, &in_tensor1};
std::vector<lite::Tensor *> outputs = {&out_tensor};
@@ -113,9 +113,9 @@ TEST_F(TestLshProjectionFp32, Sparse1DInputs) {
PrintData("output data", output_data, 3);
CompareOutputData(output_data, except_result.data(), 3, 0.000001);

in_tensor0.SetData(nullptr);
in_tensor1.SetData(nullptr);
out_tensor.SetData(nullptr);
in_tensor0.set_data(nullptr);
in_tensor1.set_data(nullptr);
out_tensor.set_data(nullptr);
}

TEST_F(TestLshProjectionFp32, Sparse3DInputs) {
@@ -129,10 +129,10 @@ TEST_F(TestLshProjectionFp32, Sparse3DInputs) {
9123, 7890, -987, -876, -765, -987, -543, -432, -321, -543};
float input_data2[] = {0.12, 0.34, 0.56, 0.67, 0.78};
int32_t output_data[3] = {0};
in_tensor0.SetData(input_data0);
in_tensor1.SetData(input_data1);
in_tensor2.SetData(input_data2);
out_tensor.SetData(output_data);
in_tensor0.set_data(input_data0);
in_tensor1.set_data(input_data1);
in_tensor2.set_data(input_data2);
out_tensor.set_data(output_data);

std::vector<lite::Tensor *> inputs = {&in_tensor0, &in_tensor1, &in_tensor2};
std::vector<lite::Tensor *> outputs = {&out_tensor};
@@ -157,8 +157,8 @@ TEST_F(TestLshProjectionFp32, Sparse3DInputs) {
PrintData("output data", output_data, 3);
CompareOutputData(output_data, except_result.data(), 3, 0.000001);

in_tensor0.SetData(nullptr);
in_tensor1.SetData(nullptr);
out_tensor.SetData(nullptr);
in_tensor0.set_data(nullptr);
in_tensor1.set_data(nullptr);
out_tensor.set_data(nullptr);
}
} // namespace mindspore

+10 -7 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc

@@ -70,17 +70,18 @@ TEST_F(TestMatMulFp32, Row2Col8Test2) {

int MMTestInit(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_, float *a_ptr, float *b_ptr,
std::vector<int> a_shape, std::vector<int> b_shape, std::vector<int> c_shape) {
auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum());
inputs_->push_back(in_t);

auto weight_t = new lite::Tensor(kNumberTypeFloat, b_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto weight_t =
new lite::Tensor(kNumberTypeFloat, b_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
memcpy(weight_t->MutableData(), b_ptr, sizeof(float) * weight_t->ElementsNum());
inputs_->push_back(weight_t);

auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
outputs_->push_back(out_t);

@@ -90,22 +91,24 @@ int MMTestInit(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *>
int MMTestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_, float *a_ptr, float *b_ptr,
float *bias_ptr, std::vector<int> a_shape, std::vector<int> b_shape, std::vector<int> bias_shape,
std::vector<int> c_shape) {
auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum());
inputs_->push_back(in_t);

auto weight_t = new lite::Tensor(kNumberTypeFloat, b_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto weight_t =
new lite::Tensor(kNumberTypeFloat, b_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
memcpy(weight_t->MutableData(), b_ptr, sizeof(float) * weight_t->ElementsNum());
inputs_->push_back(weight_t);

auto bias_t = new lite::Tensor(kNumberTypeFloat, bias_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto bias_t =
new lite::Tensor(kNumberTypeFloat, bias_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
bias_t->MallocData();
memcpy(bias_t->MutableData(), bias_ptr, sizeof(float) * bias_t->ElementsNum());
inputs_->push_back(bias_t);

auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
outputs_->push_back(out_t);



+10 -10 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/non_max_suppression_fp32_tests.cc

@@ -51,11 +51,11 @@ class TestNMSFp32 : public mindspore::CommonTest {
};

void TestNMSFp32::TearDown() {
box_tensor_.SetData(nullptr);
score_tensor_.SetData(nullptr);
max_output_box_per_class_tensor_.SetData(nullptr);
iou_threshold_tensor_.SetData(nullptr);
score_threshold_tensor_.SetData(nullptr);
box_tensor_.set_data(nullptr);
score_tensor_.set_data(nullptr);
max_output_box_per_class_tensor_.set_data(nullptr);
iou_threshold_tensor_.set_data(nullptr);
score_threshold_tensor_.set_data(nullptr);
out_tensor_.FreeData();
}

@@ -65,19 +65,19 @@ void TestNMSFp32::Init(const std::vector<int> &box_tensor_shape, float *box_data
box_tensor_.set_data_type(kNumberTypeFloat32);
box_tensor_.SetFormat(Format_NHWC);
box_tensor_.set_shape(box_tensor_shape);
box_tensor_.SetData(box_data);
box_tensor_.set_data(box_data);

score_tensor_.set_data_type(kNumberTypeFloat32);
score_tensor_.SetFormat(Format_NHWC);
score_tensor_.set_shape(score_tensor_shape);
score_tensor_.SetData(score_data);
score_tensor_.set_data(score_data);

max_output_ = max_output;
max_output_box_per_class_tensor_.SetData(&max_output_);
max_output_box_per_class_tensor_.set_data(&max_output_);
iou_threshold_ = iou_threshold;
iou_threshold_tensor_.SetData(&iou_threshold_);
iou_threshold_tensor_.set_data(&iou_threshold_);
score_threshold_ = score_threshold;
score_threshold_tensor_.SetData(&score_threshold_);
score_threshold_tensor_.set_data(&score_threshold_);

out_tensor_.set_data_type(kNumberTypeInt32);
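
// The NMS initializer above also exercises the scalar support this commit adds:
// single-value parameters are passed by pointing a tensor at the address of one
// local, as set_data(&max_output_) does. A sketch of the same idea — the
// function name is illustrative, and it assumes an empty shape denotes a
// rank-0 (one-element) tensor:

void ScalarParamExample() {
  float iou_threshold = 0.5f;
  lite::Tensor iou_tensor;
  iou_tensor.set_data_type(kNumberTypeFloat32);
  iou_tensor.set_shape({});             // rank-0 scalar: a single element (assumption)
  iou_tensor.set_data(&iou_threshold);  // point at one local value, as the NMS test does
  // ... pass &iou_tensor among the kernel inputs ...
  iou_tensor.set_data(nullptr);         // detach before destruction
}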



+6 -6 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/pad_fp32_test.cc

@@ -52,9 +52,9 @@ class TestPadFp32 : public mindspore::CommonTest {
};

void TestPadFp32::TearDown() {
paddings_tensor_.SetData(nullptr);
in_tensor_.SetData(nullptr);
out_tensor_.SetData(nullptr);
paddings_tensor_.set_data(nullptr);
in_tensor_.set_data(nullptr);
out_tensor_.set_data(nullptr);
}

void TestPadFp32::Prepare(const std::vector<int> &input_shape, const std::vector<int> &output_shape, float *input_data,
@@ -65,8 +65,8 @@ void TestPadFp32::Prepare(const std::vector<int> &input_shape, const std::vector
in_tensor_.set_shape(input_shape);
out_tensor_.set_data_type(kNumberTypeFloat32);
out_tensor_.set_shape(output_shape);
in_tensor_.SetData(input_data);
out_tensor_.SetData(output_data);
in_tensor_.set_data(input_data);
out_tensor_.set_data(output_data);

param_.pad_mode_ = static_cast<int>(mode);
if (mode == PaddingMode_CONSTANT) {
@@ -78,7 +78,7 @@ void TestPadFp32::Prepare(const std::vector<int> &input_shape, const std::vector
} else {
paddings_tensor_.set_data_type(kNumberTypeInt32);
paddings_tensor_.set_shape({4, 2});
paddings_tensor_.SetData(paddings);
paddings_tensor_.set_data(paddings);
inputs_.emplace_back(&paddings_tensor_);
}



+6 -5 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/power_fp32_tests.cc

@@ -27,17 +27,18 @@ class TestPowerFp32 : public mindspore::CommonTest {

int PowerTestInit(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_, float *a_ptr,
float *b_ptr, std::vector<int> a_shape, std::vector<int> b_shape, std::vector<int> c_shape) {
auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum());
inputs_->push_back(in_t);

auto weight_t = new lite::Tensor(kNumberTypeFloat, b_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto weight_t =
new lite::Tensor(kNumberTypeFloat, b_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
memcpy(weight_t->MutableData(), b_ptr, sizeof(float) * weight_t->ElementsNum());
inputs_->push_back(weight_t);

auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
outputs_->push_back(out_t);

@@ -46,12 +47,12 @@ int PowerTestInit(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor

int PowerTestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_, float *a_ptr,
std::vector<int> a_shape, std::vector<int> c_shape) {
auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum());
inputs_->push_back(in_t);

auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
outputs_->push_back(out_t);



+4 -4 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reduce_fp32_tests.cc

@@ -61,8 +61,8 @@ class TestReduceFp32 : public mindspore::CommonTest {

void TestReduceFp32::TearDown() {
delete ctx_;
in_tensor_.SetData(nullptr);
out_tensor_.SetData(nullptr);
in_tensor_.set_data(nullptr);
out_tensor_.set_data(nullptr);
}

void TestReduceFp32::Prepare(const std::vector<int> &in_shape, const std::vector<int> &out_shape, float *input_data,
@@ -70,11 +70,11 @@ void TestReduceFp32::Prepare(const std::vector<int> &in_shape, const std::vector
bool reduce_to_end, float coeff) {
in_tensor_.set_data_type(kNumberTypeFloat32);
in_tensor_.set_shape(in_shape);
in_tensor_.SetData(input_data);
in_tensor_.set_data(input_data);

out_tensor_.set_data_type(kNumberTypeFloat32);
out_tensor_.set_shape(out_shape);
out_tensor_.SetData(output_data);
out_tensor_.set_data(output_data);

param_.mode_ = static_cast<int>(mode);
param_.num_axes_ = num_axes;


+4 -4 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_bilinear_fp32_tests.cc

@@ -46,8 +46,8 @@ class TestResizeBilinearFp32 : public mindspore::CommonTest {
};

void TestResizeBilinearFp32::TearDown() {
in_tensor_.SetData(nullptr);
out_tensor_.SetData(nullptr);
in_tensor_.set_data(nullptr);
out_tensor_.set_data(nullptr);
}

void TestResizeBilinearFp32::Prepare(const std::vector<int> &input_shape, const std::vector<int> &output_shape,
@@ -58,8 +58,8 @@ void TestResizeBilinearFp32::Prepare(const std::vector<int> &input_shape, const
in_tensor_.set_shape(input_shape);
out_tensor_.set_data_type(kNumberTypeFloat32);
out_tensor_.set_shape(output_shape);
in_tensor_.SetData(input_data);
out_tensor_.SetData(output_data);
in_tensor_.set_data(input_data);
out_tensor_.set_data(output_data);

ResizeParameter param_ = {
{}, static_cast<int>(schema::ResizeMethod_LINEAR), output_shape[1], output_shape[2], align_corners};


+4 -4 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_nearest_neighbor_fp32_tests.cc

@@ -42,8 +42,8 @@ class TestResizeNearestNeighborFp32 : public mindspore::CommonTest {
};

void TestResizeNearestNeighborFp32::TearDown() {
in_tensor_.SetData(nullptr);
out_tensor_.SetData(nullptr);
in_tensor_.set_data(nullptr);
out_tensor_.set_data(nullptr);
}

void TestResizeNearestNeighborFp32::Prepare(const std::vector<int> &input_shape, const std::vector<int> &output_shape,
@@ -53,8 +53,8 @@ void TestResizeNearestNeighborFp32::Prepare(const std::vector<int> &input_shape,
in_tensor_.set_shape(input_shape);
out_tensor_.set_data_type(kNumberTypeFloat32);
out_tensor_.set_shape(output_shape);
in_tensor_.SetData(input_data);
out_tensor_.SetData(output_data);
in_tensor_.set_data(input_data);
out_tensor_.set_data(output_data);

ResizeParameter param_ = {
{}, static_cast<int>(schema::ResizeMethod_NEAREST), output_shape[1], output_shape[2], align_corners};


+18 -18 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reverse_sequence_fp32_tests.cc

@@ -35,9 +35,9 @@ TEST_F(TestReverseSequenceFp32, BatchLessSeq) {
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47};
int input_data1[] = {2, 3, 4};
float output_data[2 * 3 * 4 * 2] = {0};
in_tensor0.SetData(input_data0);
in_tensor1.SetData(input_data1);
out_tensor.SetData(output_data);
in_tensor0.set_data(input_data0);
in_tensor1.set_data(input_data1);
out_tensor.set_data(output_data);
std::vector<lite::Tensor *> inputs = {&in_tensor0, &in_tensor1};
std::vector<lite::Tensor *> outputs = {&out_tensor};

@@ -65,9 +65,9 @@ TEST_F(TestReverseSequenceFp32, BatchLessSeq) {
EXPECT_EQ(output_data[i], expect[i]);
}

in_tensor0.SetData(nullptr);
in_tensor1.SetData(nullptr);
out_tensor.SetData(nullptr);
in_tensor0.set_data(nullptr);
in_tensor1.set_data(nullptr);
out_tensor.set_data(nullptr);
}

TEST_F(TestReverseSequenceFp32, BatchGreaterSeq) {
@@ -79,9 +79,9 @@ TEST_F(TestReverseSequenceFp32, BatchGreaterSeq) {
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47};
int input_data1[] = {2, 3, 3, 2};
float output_data[2 * 3 * 4 * 2] = {0};
in_tensor0.SetData(input_data0);
in_tensor1.SetData(input_data1);
out_tensor.SetData(output_data);
in_tensor0.set_data(input_data0);
in_tensor1.set_data(input_data1);
out_tensor.set_data(output_data);
std::vector<lite::Tensor *> inputs = {&in_tensor0, &in_tensor1};
std::vector<lite::Tensor *> outputs = {&out_tensor};

@@ -109,9 +109,9 @@ TEST_F(TestReverseSequenceFp32, BatchGreaterSeq) {
EXPECT_EQ(output_data[i], expect[i]);
}

in_tensor0.SetData(nullptr);
in_tensor1.SetData(nullptr);
out_tensor.SetData(nullptr);
in_tensor0.set_data(nullptr);
in_tensor1.set_data(nullptr);
out_tensor.set_data(nullptr);
}

TEST_F(TestReverseSequenceFp32, BatchSeqNotAdjacent) {
@@ -123,9 +123,9 @@ TEST_F(TestReverseSequenceFp32, BatchSeqNotAdjacent) {
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47};
int input_data1[] = {2, 4};
float output_data[2 * 3 * 4 * 2] = {0};
in_tensor0.SetData(input_data0);
in_tensor1.SetData(input_data1);
out_tensor.SetData(output_data);
in_tensor0.set_data(input_data0);
in_tensor1.set_data(input_data1);
out_tensor.set_data(output_data);
std::vector<lite::Tensor *> inputs = {&in_tensor0, &in_tensor1};
std::vector<lite::Tensor *> outputs = {&out_tensor};

@@ -153,8 +153,8 @@ TEST_F(TestReverseSequenceFp32, BatchSeqNotAdjacent) {
EXPECT_EQ(output_data[i], expect[i]);
}

in_tensor0.SetData(nullptr);
in_tensor1.SetData(nullptr);
out_tensor.SetData(nullptr);
in_tensor0.set_data(nullptr);
in_tensor1.set_data(nullptr);
out_tensor.set_data(nullptr);
}
} // namespace mindspore

+3 -3 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/roi_pooling_fp32_tests.cc

@@ -27,17 +27,17 @@ class TestROIPoolingFp32 : public mindspore::CommonTest {

int ROIPoolingTestInit(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_, float *a_ptr,
float *b_ptr, std::vector<int> a_shape, std::vector<int> b_shape, std::vector<int> c_shape) {
auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum());
inputs_->push_back(in_t);

auto roi_t = new lite::Tensor(kNumberTypeFloat, b_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto roi_t = new lite::Tensor(kNumberTypeFloat, b_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
roi_t->MallocData();
memcpy(roi_t->MutableData(), b_ptr, sizeof(float) * roi_t->ElementsNum());
inputs_->push_back(roi_t);

auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
outputs_->push_back(out_t);



+8 -8 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/scale_fp32_tests.cc

@@ -55,10 +55,10 @@ class TestScaleFp32 : public mindspore::CommonTest {
};

void TestScaleFp32::TearDown() {
in_tensor_.SetData(nullptr);
scale_tensor_.SetData(nullptr);
offset_tensor_.SetData(nullptr);
out_tensor_.SetData(nullptr);
in_tensor_.set_data(nullptr);
scale_tensor_.set_data(nullptr);
offset_tensor_.set_data(nullptr);
out_tensor_.set_data(nullptr);
}

void TestScaleFp32::Prepare(const std::vector<int> &input_shape, const std::vector<int> &scale_shape,
@@ -77,10 +77,10 @@ void TestScaleFp32::Prepare(const std::vector<int> &input_shape, const std::vect
out_tensor_.set_data_type(kNumberTypeFloat32);
out_tensor_.set_shape(output_shape);

in_tensor_.SetData(input_data);
scale_tensor_.SetData(scale_data);
offset_tensor_.SetData(offset_data);
out_tensor_.SetData(output_data);
in_tensor_.set_data(input_data);
scale_tensor_.set_data(scale_data);
offset_tensor_.set_data(offset_data);
out_tensor_.set_data(output_data);

param_.activation_type_ = act_type;
param_.axis_ = axis;


+2 -2 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/skip_gram_fp32.cc

@@ -33,14 +33,14 @@ class TestSkipGramFp32 : public mindspore::CommonTest {

void SkipGramTestInit(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outputs_,
SkipGramParameter *skip_gram_param) {
Tensor *in_t_first = new Tensor(kObjectTypeString, {}, schema::Format_NHWC, lite::Tensor::Category::CONST);
Tensor *in_t_first = new Tensor(kObjectTypeString, {}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
char sentence[] = "The quick brown fox jumps over the lazy dog";
std::vector<StringPack> str;
str.push_back({43, sentence});
mindspore::lite::WriteStringsToTensor(in_t_first, str);
inputs_->push_back(in_t_first);

Tensor *output = new Tensor(kObjectTypeString, {}, schema::Format_NHWC, lite::Tensor::Category::CONST);
Tensor *output = new Tensor(kObjectTypeString, {}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
outputs_->push_back(output);

skip_gram_param->ngram_size = 3;


+2 -2 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc

@@ -51,7 +51,7 @@ TEST_F(SpaceToDepthTestFp32, SpaceToDepthTest2) {
std::vector<float> input = {1, 2, 5, 6, 10, 20, 3, 8, 18, 10, 3, 4, 11, 55, 15, 25};
std::vector<int> in_shape = {1, 4, 4, 1};
lite::Tensor input_tensor;
input_tensor.SetData(input.data());
input_tensor.set_data(input.data());
input_tensor.set_shape(in_shape);
input_tensor.SetFormat(schema::Format_NHWC);
input_tensor.set_data_type(kNumberTypeFloat32);
@@ -63,7 +63,7 @@ TEST_F(SpaceToDepthTestFp32, SpaceToDepthTest2) {
std::vector<float> output(16);
std::vector<int> out_shape = {1, 2, 2, 4};
lite::Tensor output_tensor;
output_tensor.SetData(output.data());
output_tensor.set_data(output.data());
output_tensor.set_shape(out_shape);
output_tensor.SetFormat(schema::Format_NHWC);
output_tensor.set_data_type(kNumberTypeFloat32);


+50 -50 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32_tests.cc

@@ -42,22 +42,22 @@ TEST_F(TestSparseToDenseFp32, SparseToDense_test1) {

TypeId tid = kNumberTypeFloat32;
lite::Tensor *input_tensor1 = new lite::Tensor;
input_tensor1->SetData(input1.data());
input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->set_data_type(tid);

lite::Tensor *input_tensor2 = new lite::Tensor;
input_tensor2->SetData(input2.data());
input_tensor2->set_data(input2.data());
input_tensor2->set_shape(shape2);
input_tensor2->set_data_type(tid);

lite::Tensor *input_tensor3 = new lite::Tensor;
input_tensor3->SetData(input3.data());
input_tensor3->set_data(input3.data());
input_tensor3->set_shape(shape3);
input_tensor3->set_data_type(tid);

lite::Tensor *input_tensor4 = new lite::Tensor;
input_tensor4->SetData(input4.data());
input_tensor4->set_data(input4.data());
input_tensor4->set_shape(shape4);
input_tensor4->set_data_type(tid);

@@ -72,7 +72,7 @@ TEST_F(TestSparseToDenseFp32, SparseToDense_test1) {
std::vector<int> output_shape = {6, 10};

lite::Tensor *output0_tensor = new lite::Tensor;
output0_tensor->SetData(output);
output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->set_data_type(tid);
std::vector<lite::Tensor *> outputs_tensor(1);
@@ -101,11 +101,11 @@ TEST_F(TestSparseToDenseFp32, SparseToDense_test1) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

input_tensor1->SetData(nullptr);
input_tensor2->SetData(nullptr);
input_tensor3->SetData(nullptr);
input_tensor4->SetData(nullptr);
output0_tensor->SetData(nullptr);
input_tensor1->set_data(nullptr);
input_tensor2->set_data(nullptr);
input_tensor3->set_data(nullptr);
input_tensor4->set_data(nullptr);
output0_tensor->set_data(nullptr);
delete input_tensor1;
delete input_tensor2;
delete input_tensor3;
@@ -126,22 +126,22 @@ TEST_F(TestSparseToDenseFp32, SparseToDense_test2) {

TypeId tid = kNumberTypeFloat32;
lite::Tensor *input_tensor1 = new lite::Tensor;
input_tensor1->SetData(input1.data());
input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->set_data_type(tid);

lite::Tensor *input_tensor2 = new lite::Tensor;
input_tensor2->SetData(input2.data());
input_tensor2->set_data(input2.data());
input_tensor2->set_shape(shape2);
input_tensor2->set_data_type(tid);

lite::Tensor *input_tensor3 = new lite::Tensor;
input_tensor3->SetData(input3.data());
input_tensor3->set_data(input3.data());
input_tensor3->set_shape(shape3);
input_tensor3->set_data_type(tid);

lite::Tensor *input_tensor4 = new lite::Tensor;
input_tensor4->SetData(input4.data());
input_tensor4->set_data(input4.data());
input_tensor4->set_shape(shape4);
input_tensor4->set_data_type(tid);

@@ -156,7 +156,7 @@ TEST_F(TestSparseToDenseFp32, SparseToDense_test2) {
std::vector<int> output_shape = {6, 10};

lite::Tensor *output0_tensor = new lite::Tensor;
output0_tensor->SetData(output);
output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->set_data_type(tid);
std::vector<lite::Tensor *> outputs_tensor(1);
@@ -185,11 +185,11 @@ TEST_F(TestSparseToDenseFp32, SparseToDense_test2) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

input_tensor1->SetData(nullptr);
input_tensor2->SetData(nullptr);
input_tensor3->SetData(nullptr);
input_tensor4->SetData(nullptr);
output0_tensor->SetData(nullptr);
input_tensor1->set_data(nullptr);
input_tensor2->set_data(nullptr);
input_tensor3->set_data(nullptr);
input_tensor4->set_data(nullptr);
output0_tensor->set_data(nullptr);
delete input_tensor1;
delete input_tensor2;
delete input_tensor3;
@@ -210,22 +210,22 @@ TEST_F(TestSparseToDenseFp32, SparseToDense_test3) {

TypeId tid = kNumberTypeFloat32;
lite::Tensor *input_tensor1 = new lite::Tensor;
input_tensor1->SetData(input1.data());
input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->set_data_type(tid);

lite::Tensor *input_tensor2 = new lite::Tensor;
input_tensor2->SetData(input2.data());
input_tensor2->set_data(input2.data());
input_tensor2->set_shape(shape2);
input_tensor2->set_data_type(tid);

lite::Tensor *input_tensor3 = new lite::Tensor;
input_tensor3->SetData(input3.data());
input_tensor3->set_data(input3.data());
input_tensor3->set_shape(shape3);
input_tensor3->set_data_type(tid);

lite::Tensor *input_tensor4 = new lite::Tensor;
input_tensor4->SetData(input4.data());
input_tensor4->set_data(input4.data());
input_tensor4->set_shape(shape4);
input_tensor4->set_data_type(tid);

@@ -240,7 +240,7 @@ TEST_F(TestSparseToDenseFp32, SparseToDense_test3) {
std::vector<int> output_shape = {1, 10};

lite::Tensor *output0_tensor = new lite::Tensor;
output0_tensor->SetData(output);
output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->set_data_type(tid);
std::vector<lite::Tensor *> outputs_tensor(1);
@@ -267,11 +267,11 @@ TEST_F(TestSparseToDenseFp32, SparseToDense_test3) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

input_tensor1->SetData(nullptr);
input_tensor2->SetData(nullptr);
input_tensor3->SetData(nullptr);
input_tensor4->SetData(nullptr);
output0_tensor->SetData(nullptr);
input_tensor1->set_data(nullptr);
input_tensor2->set_data(nullptr);
input_tensor3->set_data(nullptr);
input_tensor4->set_data(nullptr);
output0_tensor->set_data(nullptr);
delete input_tensor1;
delete input_tensor2;
delete input_tensor3;
@@ -292,22 +292,22 @@ TEST_F(TestSparseToDenseFp32, SparseToDense_test4) {

TypeId tid = kNumberTypeFloat32;
lite::Tensor *input_tensor1 = new lite::Tensor;
input_tensor1->SetData(input1.data());
input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->set_data_type(tid);

lite::Tensor *input_tensor2 = new lite::Tensor;
input_tensor2->SetData(input2.data());
input_tensor2->set_data(input2.data());
input_tensor2->set_shape(shape2);
input_tensor2->set_data_type(tid);

lite::Tensor *input_tensor3 = new lite::Tensor;
input_tensor3->SetData(input3.data());
input_tensor3->set_data(input3.data());
input_tensor3->set_shape(shape3);
input_tensor3->set_data_type(tid);

lite::Tensor *input_tensor4 = new lite::Tensor;
input_tensor4->SetData(input4.data());
input_tensor4->set_data(input4.data());
input_tensor4->set_shape(shape4);
input_tensor4->set_data_type(tid);

@@ -322,7 +322,7 @@ TEST_F(TestSparseToDenseFp32, SparseToDense_test4) {
std::vector<int> output_shape = {1, 10};

lite::Tensor *output0_tensor = new lite::Tensor;
output0_tensor->SetData(output);
output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->set_data_type(tid);
std::vector<lite::Tensor *> outputs_tensor(1);
@@ -349,11 +349,11 @@ TEST_F(TestSparseToDenseFp32, SparseToDense_test4) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

input_tensor1->SetData(nullptr);
input_tensor2->SetData(nullptr);
input_tensor3->SetData(nullptr);
input_tensor4->SetData(nullptr);
output0_tensor->SetData(nullptr);
input_tensor1->set_data(nullptr);
input_tensor2->set_data(nullptr);
input_tensor3->set_data(nullptr);
input_tensor4->set_data(nullptr);
output0_tensor->set_data(nullptr);
delete input_tensor1;
delete input_tensor2;
delete input_tensor3;
@@ -374,22 +374,22 @@ TEST_F(TestSparseToDenseFp32, SparseToDense_test5) {

TypeId tid = kNumberTypeFloat32;
lite::Tensor *input_tensor1 = new lite::Tensor;
input_tensor1->SetData(input1.data());
input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->set_data_type(tid);

lite::Tensor *input_tensor2 = new lite::Tensor;
input_tensor2->SetData(input2.data());
input_tensor2->set_data(input2.data());
input_tensor2->set_shape(shape2);
input_tensor2->set_data_type(tid);

lite::Tensor *input_tensor3 = new lite::Tensor;
input_tensor3->SetData(input3.data());
input_tensor3->set_data(input3.data());
input_tensor3->set_shape(shape3);
input_tensor3->set_data_type(tid);

lite::Tensor *input_tensor4 = new lite::Tensor;
input_tensor4->SetData(input4.data());
input_tensor4->set_data(input4.data());
input_tensor4->set_shape(shape4);
input_tensor4->set_data_type(tid);

@@ -404,7 +404,7 @@ TEST_F(TestSparseToDenseFp32, SparseToDense_test5) {
std::vector<int> output_shape = {6, 10};

lite::Tensor *output0_tensor = new lite::Tensor;
output0_tensor->SetData(output);
output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->set_data_type(tid);
std::vector<lite::Tensor *> outputs_tensor(1);
@@ -433,11 +433,11 @@ TEST_F(TestSparseToDenseFp32, SparseToDense_test5) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

input_tensor1->SetData(nullptr);
input_tensor2->SetData(nullptr);
input_tensor3->SetData(nullptr);
input_tensor4->SetData(nullptr);
output0_tensor->SetData(nullptr);
input_tensor1->set_data(nullptr);
input_tensor2->set_data(nullptr);
input_tensor3->set_data(nullptr);
input_tensor4->set_data(nullptr);
output0_tensor->set_data(nullptr);
delete input_tensor1;
delete input_tensor2;
delete input_tensor3;
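Every SparseToDense case above ends with the same teardown: each heap-allocated tensor is detached with set_data(nullptr) before delete, because the tensors merely borrow vector-owned buffers. A condensed sketch of the idiom (names and values are illustrative):

// Sketch of the detach-before-delete idiom repeated in the tests above.
std::vector<float> input = {1.0f, 2.0f, 3.0f};
lite::Tensor *tensor = new lite::Tensor;
tensor->set_data(input.data());    // borrows the vector's storage
tensor->set_shape({3});
tensor->set_data_type(kNumberTypeFloat32);
// ... run the kernel and compare outputs ...
tensor->set_data(nullptr);         // detach: the destructor must not free memory it never allocated
delete tensor;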


+31 -31  mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/strided_slice_fp32_tests.cc

@@ -137,7 +137,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice3) {
std::vector<int> output_shape = {1, 1, 2};

lite::Tensor input_tensor;
input_tensor.SetData(input_data);
input_tensor.set_data(input_data);
input_tensor.set_shape(input_shape);
std::vector<lite::Tensor *> inputs_tensor(1);
inputs_tensor[0] = &input_tensor;
@@ -145,7 +145,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice3) {
std::vector<lite::Tensor *> outputs_tensor;
lite::Tensor output_tensor;
outputs_tensor.push_back(&output_tensor);
output_tensor.SetData(output_data);
output_tensor.set_data(output_data);
output_tensor.set_data_type(input_tensor.data_type());
output_tensor.set_shape(output_shape);

@@ -163,8 +163,8 @@ TEST_F(TestStridedSliceFp32, StridedSlice3) {
delete ctx;

CompareOutputData(output_data, correct, 2, 0.000001);
input_tensor.SetData(nullptr);
output_tensor.SetData(nullptr);
input_tensor.set_data(nullptr);
output_tensor.set_data(nullptr);
}

TEST_F(TestStridedSliceFp32, StridedSlice4) {
@@ -187,7 +187,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice4) {
std::vector<int> output_shape = {2, 2};

lite::Tensor input_tensor;
input_tensor.SetData(input_data);
input_tensor.set_data(input_data);
input_tensor.set_shape(input_shape);
std::vector<lite::Tensor *> inputs_tensor(1);
inputs_tensor[0] = &input_tensor;
@@ -195,7 +195,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice4) {
std::vector<lite::Tensor *> outputs_tensor;
lite::Tensor output_tensor;
outputs_tensor.push_back(&output_tensor);
output_tensor.SetData(output_data);
output_tensor.set_data(output_data);
output_tensor.set_data_type(input_tensor.data_type());
output_tensor.set_shape(output_shape);

@@ -213,8 +213,8 @@ TEST_F(TestStridedSliceFp32, StridedSlice4) {
delete ctx;

CompareOutputData(output_data, correct, 4, 0.000001);
input_tensor.SetData(nullptr);
output_tensor.SetData(nullptr);
input_tensor.set_data(nullptr);
output_tensor.set_data(nullptr);
}

TEST_F(TestStridedSliceFp32, StridedSlice5) {
@@ -244,7 +244,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice5) {
std::vector<int> output_shape = {3, 2, 2};

lite::Tensor input_tensor;
input_tensor.SetData(input_data);
input_tensor.set_data(input_data);
input_tensor.set_shape(input_shape);
std::vector<lite::Tensor *> inputs_tensor(1);
inputs_tensor[0] = &input_tensor;
@@ -252,7 +252,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice5) {
std::vector<lite::Tensor *> outputs_tensor;
lite::Tensor output_tensor;
outputs_tensor.push_back(&output_tensor);
output_tensor.SetData(output_data);
output_tensor.set_data(output_data);
output_tensor.set_data_type(input_tensor.data_type());
output_tensor.set_shape(output_shape);

@@ -270,8 +270,8 @@ TEST_F(TestStridedSliceFp32, StridedSlice5) {
delete ctx;

CompareOutputData(output_data, correct, 12, 0.000001);
input_tensor.SetData(nullptr);
output_tensor.SetData(nullptr);
input_tensor.set_data(nullptr);
output_tensor.set_data(nullptr);
}

TEST_F(TestStridedSliceFp32, StridedSlice6) {
@@ -301,7 +301,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice6) {
std::vector<int> output_shape = {2, 2, 2};

lite::Tensor input_tensor;
input_tensor.SetData(input_data);
input_tensor.set_data(input_data);
input_tensor.set_shape(input_shape);
std::vector<lite::Tensor *> inputs_tensor(1);
inputs_tensor[0] = &input_tensor;
@@ -309,7 +309,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice6) {
std::vector<lite::Tensor *> outputs_tensor;
lite::Tensor output_tensor;
outputs_tensor.push_back(&output_tensor);
output_tensor.SetData(output_data);
output_tensor.set_data(output_data);
output_tensor.set_data_type(input_tensor.data_type());
output_tensor.set_shape(output_shape);

@@ -327,8 +327,8 @@ TEST_F(TestStridedSliceFp32, StridedSlice6) {
delete ctx;

CompareOutputData(output_data, correct, 8, 0.000001);
input_tensor.SetData(nullptr);
output_tensor.SetData(nullptr);
input_tensor.set_data(nullptr);
output_tensor.set_data(nullptr);
}

TEST_F(TestStridedSliceFp32, StridedSlice7) {
@@ -350,7 +350,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice7) {
std::vector<int> output_shape = {1, 1};

lite::Tensor input_tensor;
input_tensor.SetData(input_data);
input_tensor.set_data(input_data);
input_tensor.set_shape(input_shape);
std::vector<lite::Tensor *> inputs_tensor(1);
inputs_tensor[0] = &input_tensor;
@@ -358,7 +358,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice7) {
std::vector<lite::Tensor *> outputs_tensor;
lite::Tensor output_tensor;
outputs_tensor.push_back(&output_tensor);
output_tensor.SetData(output_data);
output_tensor.set_data(output_data);
output_tensor.set_data_type(input_tensor.data_type());
output_tensor.set_shape(output_shape);

@@ -376,8 +376,8 @@ TEST_F(TestStridedSliceFp32, StridedSlice7) {
delete ctx;

CompareOutputData(output_data, correct, 1, 0.000001);
input_tensor.SetData(nullptr);
output_tensor.SetData(nullptr);
input_tensor.set_data(nullptr);
output_tensor.set_data(nullptr);
}

TEST_F(TestStridedSliceFp32, StridedSlice8) {
@@ -407,7 +407,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice8) {
std::vector<int> output_shape = {1, 1, 5};

lite::Tensor input_tensor;
input_tensor.SetData(input_data);
input_tensor.set_data(input_data);
input_tensor.set_shape(input_shape);
std::vector<lite::Tensor *> inputs_tensor(1);
inputs_tensor[0] = &input_tensor;
@@ -415,7 +415,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice8) {
std::vector<lite::Tensor *> outputs_tensor;
lite::Tensor output_tensor;
outputs_tensor.push_back(&output_tensor);
output_tensor.SetData(output_data);
output_tensor.set_data(output_data);
output_tensor.set_data_type(input_tensor.data_type());
output_tensor.set_shape(output_shape);

@@ -433,8 +433,8 @@ TEST_F(TestStridedSliceFp32, StridedSlice8) {
delete ctx;

CompareOutputData(output_data, correct, 5, 0.000001);
input_tensor.SetData(nullptr);
output_tensor.SetData(nullptr);
input_tensor.set_data(nullptr);
output_tensor.set_data(nullptr);
}

// 5d input, multi inputs
@@ -549,16 +549,16 @@ TEST_F(TestStridedSliceFp32, StridedSlice9) {
std::vector<int> output_shape = {1, 1, 10, 7, 7};

lite::Tensor input_tensor;
input_tensor.SetData(input_data);
input_tensor.set_data(input_data);
input_tensor.set_shape(in_shape);
lite::Tensor begins_tensor;
begins_tensor.SetData(begins.data());
begins_tensor.set_data(begins.data());
begins_tensor.set_shape({1});
lite::Tensor ends_tensor;
ends_tensor.SetData(ends.data());
ends_tensor.set_data(ends.data());
ends_tensor.set_shape({1});
lite::Tensor strides_tensor;
strides_tensor.SetData(strides.data());
strides_tensor.set_data(strides.data());
strides_tensor.set_shape({1});

std::vector<lite::Tensor *> inputs_tensor{&input_tensor, &begins_tensor, &ends_tensor, &strides_tensor};
@@ -566,7 +566,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice9) {
std::vector<lite::Tensor *> outputs_tensor;
lite::Tensor output_tensor;
outputs_tensor.push_back(&output_tensor);
output_tensor.SetData(output_data);
output_tensor.set_data(output_data);
output_tensor.set_data_type(input_tensor.data_type());
output_tensor.set_shape(output_shape);

@@ -583,8 +583,8 @@ TEST_F(TestStridedSliceFp32, StridedSlice9) {
delete ctx;

CompareOutputData(output_data, correct, 490, 0.000001);
input_tensor.SetData(nullptr);
output_tensor.SetData(nullptr);
input_tensor.set_data(nullptr);
output_tensor.set_data(nullptr);
}

} // namespace mindspore
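StridedSlice9 above exercises the multi-input form of the op, where begins, ends, and strides arrive as extra input tensors rather than as attributes. A sketch of that wiring, with illustrative slice parameters:

// Sketch: StridedSlice with begins/ends/strides supplied as input tensors.
lite::Tensor input_tensor;                                   // data and shape set as in the hunk above
std::vector<int> begins = {0}, ends = {1}, strides = {1};    // illustrative values
lite::Tensor begins_tensor, ends_tensor, strides_tensor;
begins_tensor.set_data(begins.data());
begins_tensor.set_shape({1});
ends_tensor.set_data(ends.data());
ends_tensor.set_shape({1});
strides_tensor.set_data(strides.data());
strides_tensor.set_shape({1});
std::vector<lite::Tensor *> inputs_tensor{&input_tensor, &begins_tensor, &ends_tensor, &strides_tensor};
// After the run, every tensor that borrows a buffer is detached with set_data(nullptr).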

+4 -4  mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/tile_fp32_tests.cc

@@ -31,8 +31,8 @@ TEST_F(TestTileFp32, Tile) {
lite::Tensor out_tensor(kNumberTypeFloat32, {4, 6});
float input_data[] = {1, 2, 3, 4};
float output_data[24] = {0};
in_tensor.SetData(input_data);
out_tensor.SetData(output_data);
in_tensor.set_data(input_data);
out_tensor.set_data(output_data);
std::vector<lite::Tensor *> inputs = {&in_tensor};
std::vector<lite::Tensor *> outputs = {&out_tensor};

@@ -65,7 +65,7 @@ TEST_F(TestTileFp32, Tile) {
EXPECT_EQ(output_data[i], expect[i]);
}

in_tensor.SetData(nullptr);
out_tensor.SetData(nullptr);
in_tensor.set_data(nullptr);
out_tensor.set_data(nullptr);
}
} // namespace mindspore

+6 -6  mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/topk_fp32_tests.cc

@@ -33,9 +33,9 @@ TEST_F(TestTopKFp32, TopK) {
float input_data[] = {1, 2, 3, 6, 5, 4, 9, 8, 7, 10, 12, 11};
float output_data0[8] = {0};
int32_t output_data1[8] = {0};
in_tensor.SetData(input_data);
out_tensor0.SetData(output_data0);
out_tensor1.SetData(output_data1);
in_tensor.set_data(input_data);
out_tensor0.set_data(output_data0);
out_tensor1.set_data(output_data1);
std::vector<lite::Tensor *> inputs = {&in_tensor};
std::vector<lite::Tensor *> outputs = {&out_tensor0, &out_tensor1};

@@ -60,8 +60,8 @@ TEST_F(TestTopKFp32, TopK) {
EXPECT_EQ(output_data1[i], expect1[i]);
}

in_tensor.SetData(nullptr);
out_tensor0.SetData(nullptr);
out_tensor1.SetData(nullptr);
in_tensor.set_data(nullptr);
out_tensor0.set_data(nullptr);
out_tensor1.set_data(nullptr);
}
} // namespace mindspore

+4 -4  mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/transpose_fp32_tests.cc

@@ -183,7 +183,7 @@ TEST_F(TestTransposeFp32, TransposeFp32_test5) {
}

lite::Tensor input_tensor;
input_tensor.SetData(input.data());
input_tensor.set_data(input.data());
input_tensor.set_shape(input_shape);
input_tensor.SetFormat(schema::Format_NHWC);
input_tensor.set_data_type(kNumberTypeFloat32);
@@ -191,7 +191,7 @@ TEST_F(TestTransposeFp32, TransposeFp32_test5) {
inputs_tensor.emplace_back(&input_tensor);

lite::Tensor output_tensor;
output_tensor.SetData(output.data());
output_tensor.set_data(output.data());
output_tensor.set_shape(output_shape);
output_tensor.SetFormat(schema::Format_NHWC);
output_tensor.set_data_type(kNumberTypeFloat32);
@@ -214,8 +214,8 @@ TEST_F(TestTransposeFp32, TransposeFp32_test5) {
}
std::cout << "\n";
CompareOutputData(output.data(), correct, 24, 0.000001);
input_tensor.SetData(nullptr);
output_tensor.SetData(nullptr);
input_tensor.set_data(nullptr);
output_tensor.set_data(nullptr);
}

} // namespace mindspore

+6 -6  mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unique_fp32_tests.cc

@@ -33,9 +33,9 @@ TEST_F(TestUniqueFp32, Unique) {
float input_data[] = {1, 1, 2, 4, 4, 4, 7, 8, 8};
float output_data0[9] = {0};
int output_data1[9] = {0};
in_tensor.SetData(input_data);
out_tensor0.SetData(output_data0);
out_tensor1.SetData(output_data1);
in_tensor.set_data(input_data);
out_tensor0.set_data(output_data0);
out_tensor1.set_data(output_data1);
std::vector<lite::Tensor *> inputs = {&in_tensor};
std::vector<lite::Tensor *> outputs = {&out_tensor0, &out_tensor1};

@@ -64,8 +64,8 @@ TEST_F(TestUniqueFp32, Unique) {
EXPECT_EQ(output_data1[i], expect1[i]);
}

in_tensor.SetData(nullptr);
out_tensor0.SetData(nullptr);
out_tensor1.SetData(nullptr);
in_tensor.set_data(nullptr);
out_tensor0.set_data(nullptr);
out_tensor1.set_data(nullptr);
}
} // namespace mindspore

+18 -18  mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unstack_fp32_tests.cc

@@ -37,11 +37,11 @@ TEST_F(TestUnstackFp32, Unstack) {
float output_data1[6] = {0};
float output_data2[6] = {0};
float output_data3[6] = {0};
in_tensor.SetData(input_data);
out_tensor0.SetData(output_data0);
out_tensor1.SetData(output_data1);
out_tensor2.SetData(output_data2);
out_tensor3.SetData(output_data3);
in_tensor.set_data(input_data);
out_tensor0.set_data(output_data0);
out_tensor1.set_data(output_data1);
out_tensor2.set_data(output_data2);
out_tensor3.set_data(output_data3);
std::vector<lite::Tensor *> inputs = {&in_tensor};
std::vector<lite::Tensor *> outputs = {&out_tensor0, &out_tensor1, &out_tensor2, &out_tensor3};

@@ -70,11 +70,11 @@ TEST_F(TestUnstackFp32, Unstack) {
EXPECT_EQ(output_data3[i], expect3[i]);
}

in_tensor.SetData(nullptr);
out_tensor0.SetData(nullptr);
out_tensor1.SetData(nullptr);
out_tensor2.SetData(nullptr);
out_tensor3.SetData(nullptr);
in_tensor.set_data(nullptr);
out_tensor0.set_data(nullptr);
out_tensor1.set_data(nullptr);
out_tensor2.set_data(nullptr);
out_tensor3.set_data(nullptr);
}

TEST_F(TestUnstackFp32, Unstack2) {
@@ -86,10 +86,10 @@ TEST_F(TestUnstackFp32, Unstack2) {
float output_data0[8] = {0};
float output_data1[8] = {0};
float output_data2[8] = {0};
in_tensor.SetData(input_data);
out_tensor0.SetData(output_data0);
out_tensor1.SetData(output_data1);
out_tensor2.SetData(output_data2);
in_tensor.set_data(input_data);
out_tensor0.set_data(output_data0);
out_tensor1.set_data(output_data1);
out_tensor2.set_data(output_data2);
std::vector<lite::Tensor *> inputs = {&in_tensor};
std::vector<lite::Tensor *> outputs = {&out_tensor0, &out_tensor1, &out_tensor2};

@@ -116,9 +116,9 @@ TEST_F(TestUnstackFp32, Unstack2) {
EXPECT_EQ(output_data2[i], expect2[i]);
}

in_tensor.SetData(nullptr);
out_tensor0.SetData(nullptr);
out_tensor1.SetData(nullptr);
out_tensor2.SetData(nullptr);
in_tensor.set_data(nullptr);
out_tensor0.set_data(nullptr);
out_tensor1.set_data(nullptr);
out_tensor2.set_data(nullptr);
}
} // namespace mindspore

+18 -18  mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/arithmetic_grad_fp32_tests.cc

@@ -82,23 +82,23 @@ std::vector<lite::Tensor *> GenerateTensorsForTest(const char *test, int test_id

auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(test, &input_size));
lite::Tensor *dy_tensor = new lite::Tensor(TypeId::kNumberTypeFloat32, large_dim);
dy_tensor->SetData(dy_data);
dy_tensor->set_data(dy_data);

auto x1_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dx1_file, &input_size));
lite::Tensor *x1_tensor = new lite::Tensor(TypeId::kNumberTypeFloat32, large_dim);
x1_tensor->SetData(x1_data);
x1_tensor->set_data(x1_data);

auto x2_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dx2_file, &input_size));
lite::Tensor *x2_tensor = new lite::Tensor(TypeId::kNumberTypeFloat32, small_dim);
x2_tensor->SetData(x2_data);
x2_tensor->set_data(x2_data);

auto dx1_data = new float[large_size];
lite::Tensor *dx1_tensor = new lite::Tensor(TypeId::kNumberTypeFloat32, large_dim);
dx1_tensor->SetData(dx1_data);
dx1_tensor->set_data(dx1_data);

auto dx2_data = new float[small_size];
lite::Tensor *dx2_tensor = new lite::Tensor(TypeId::kNumberTypeFloat32, small_dim);
dx2_tensor->SetData(dx2_data);
dx2_tensor->set_data(dx2_data);

std::vector<lite::Tensor *> ret_vector = {dy_tensor, x1_tensor, x2_tensor, dx1_tensor, dx2_tensor};
return ret_vector;
@@ -135,7 +135,7 @@ TEST_F(TestArithmeticGradFp32, TestAddGradFp32) {
EXPECT_EQ(0, lite::CompareRelativeOutput(output_ptr, dx2_path));
for (auto tensor : all_tensors) {
delete[] reinterpret_cast<float *>(tensor->MutableData());
tensor->SetData(nullptr);
tensor->set_data(nullptr);
delete tensor;
}
delete kernel_obj;
@@ -173,7 +173,7 @@ TEST_F(TestArithmeticGradFp32, TestAddGrad2Fp32) {
EXPECT_EQ(0, lite::CompareRelativeOutput(output_ptr, dx2_path));
for (auto tensor : all_tensors) {
delete[] reinterpret_cast<float *>(tensor->MutableData());
tensor->SetData(nullptr);
tensor->set_data(nullptr);
delete tensor;
}
// for (int i = 0; i < 5; i++) delete all_tensors[i]; //TODO tensor data is unique pointer
@@ -214,7 +214,7 @@ TEST_F(TestArithmeticGradFp32, TestAddGrad3Fp32) {

for (auto tensor : all_tensors) {
delete[] reinterpret_cast<float *>(tensor->MutableData());
tensor->SetData(nullptr);
tensor->set_data(nullptr);
delete tensor;
}
// for (int i = 0; i < 5; i++) delete all_tensors[i];
@@ -255,7 +255,7 @@ TEST_F(TestArithmeticGradFp32, TestSubGradFp32) {

for (auto tensor : all_tensors) {
delete[] reinterpret_cast<float *>(tensor->MutableData());
tensor->SetData(nullptr);
tensor->set_data(nullptr);
delete tensor;
}
// for (int i = 0; i < 5; i++) delete all_tensors[i];
@@ -296,7 +296,7 @@ TEST_F(TestArithmeticGradFp32, TestSubGrad2Fp32) {

for (auto tensor : all_tensors) {
delete[] reinterpret_cast<float *>(tensor->MutableData());
tensor->SetData(nullptr);
tensor->set_data(nullptr);
delete tensor;
}
delete kernel_obj;
@@ -344,7 +344,7 @@ TEST_F(TestArithmeticGradFp32, TestMulGradFp32) {
EXPECT_EQ(0, lite::CompareRelativeOutput(output_ptr, dx2_path));
for (auto tensor : all_tensors) {
delete[] reinterpret_cast<float *>(tensor->MutableData());
tensor->SetData(nullptr);
tensor->set_data(nullptr);
delete tensor;
}
delete kernel_obj;
@@ -383,7 +383,7 @@ TEST_F(TestArithmeticGradFp32, TestMulGrad2Fp32) {
EXPECT_EQ(0, lite::CompareRelativeOutput(output_ptr, dx2_path));
for (auto tensor : all_tensors) {
delete[] reinterpret_cast<float *>(tensor->MutableData());
tensor->SetData(nullptr);
tensor->set_data(nullptr);
delete tensor;
}
// for (int i = 0; i < 5; i++) delete all_tensors[i];
@@ -423,7 +423,7 @@ TEST_F(TestArithmeticGradFp32, TestMulGrad3Fp32) {
EXPECT_EQ(0, lite::CompareRelativeOutput(output_ptr, dx2_path));
for (auto tensor : all_tensors) {
delete[] reinterpret_cast<float *>(tensor->MutableData());
tensor->SetData(nullptr);
tensor->set_data(nullptr);
delete tensor;
}
// for (int i = 0; i < 5; i++) delete all_tensors[i];
@@ -463,7 +463,7 @@ TEST_F(TestArithmeticGradFp32, TestMulGrad4Fp32) {
EXPECT_EQ(0, lite::CompareRelativeOutput(output_ptr, dx2_path));
for (auto tensor : all_tensors) {
delete[] reinterpret_cast<float *>(tensor->MutableData());
tensor->SetData(nullptr);
tensor->set_data(nullptr);
delete tensor;
}
// for (int i = 0; i < 5; i++) delete all_tensors[i];
@@ -503,7 +503,7 @@ TEST_F(TestArithmeticGradFp32, TestDivGradFp32) {
EXPECT_EQ(0, lite::CompareRelativeOutput(output_ptr, dx2_path));
for (auto tensor : all_tensors) {
delete[] reinterpret_cast<float *>(tensor->MutableData());
tensor->SetData(nullptr);
tensor->set_data(nullptr);
delete tensor;
}
// for (int i = 0; i < 5; i++) delete all_tensors[i];
@@ -544,7 +544,7 @@ TEST_F(TestArithmeticGradFp32, TestDivGrad2Fp32) {

for (auto tensor : all_tensors) {
delete[] reinterpret_cast<float *>(tensor->MutableData());
tensor->SetData(nullptr);
tensor->set_data(nullptr);
delete tensor;
}
// for (int i = 0; i < 5; i++) delete all_tensors[i];
@@ -584,7 +584,7 @@ TEST_F(TestArithmeticGradFp32, TestDivGrad3Fp32) {
EXPECT_EQ(0, lite::CompareRelativeOutput(output_ptr, output_path));
for (auto tensor : all_tensors) {
delete[] reinterpret_cast<float *>(tensor->MutableData());
tensor->SetData(nullptr);
tensor->set_data(nullptr);
delete tensor;
}
// for (int i = 0; i < 5; i++) delete all_tensors[i];
@@ -624,7 +624,7 @@ TEST_F(TestArithmeticGradFp32, Test3DDivGrad2Fp32) {
EXPECT_EQ(0, lite::CompareRelativeOutput(output_ptr, output_path));
for (auto tensor : all_tensors) {
delete[] reinterpret_cast<float *>(tensor->MutableData());
tensor->SetData(nullptr);
tensor->set_data(nullptr);
delete tensor;
}
delete kernel_obj;
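The gradient tests above differ from the plain fp32 ones in one ownership detail: input buffers come from mindspore::lite::ReadFile and scratch outputs from new float[...], so the cleanup loop frees each buffer through MutableData() and only then detaches the tensor. A sketch of that lifecycle (path and dims are illustrative):

// Sketch of the buffer lifecycle used by the *_grad tests above.
size_t input_size = 0;
auto data = reinterpret_cast<float *>(
    mindspore::lite::ReadFile("./test_data/example.bin", &input_size));  // illustrative path
std::vector<int> dims = {1, 28, 28, 3};
lite::Tensor *tensor = new lite::Tensor(TypeId::kNumberTypeFloat32, dims);
tensor->set_data(data);                                      // the tensor borrows the heap buffer
// ... run the grad kernel ...
delete[] reinterpret_cast<float *>(tensor->MutableData());   // free through the tensor's view
tensor->set_data(nullptr);                                   // then detach
delete tensor;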


+4 -4  mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/bias_grad_fp32_tests.cc

@@ -36,13 +36,13 @@ TEST_F(TestBiasGradFp32, BiasGradFp32) {
auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
std::vector<int> dim_dy({10, 28, 28, 7});
lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
dy_tensor.SetData(input_data);
dy_tensor.set_data(input_data);

std::vector<lite::Tensor *> inputs = {&dy_tensor};
auto output_data = new float[7];
std::vector<int> dim_dw = {7};
lite::Tensor dw_tensor(TypeId::kNumberTypeFloat32, dim_dw);
dw_tensor.SetData(output_data);
dw_tensor.set_data(output_data);
std::vector<lite::Tensor *> outputs = {&dw_tensor};

lite::InnerContext ctx;
@@ -66,8 +66,8 @@ TEST_F(TestBiasGradFp32, BiasGradFp32) {
delete[] input_data;
delete[] output_data;
// delete bias_param;
dy_tensor.SetData(nullptr);
dw_tensor.SetData(nullptr);
dy_tensor.set_data(nullptr);
dw_tensor.set_data(nullptr);
delete kernel_obj;
MS_LOG(INFO) << "BiasGradFp32 passed";
}


+3 -3  mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/bn_grad_fp32_test.cc

@@ -36,7 +36,7 @@ lite::Tensor *TestBNGradFp32::CreateInTensor(std::string file_name, std::vector<
size_t input_size = 0;
auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(file_name.c_str(), &input_size));
auto tensor = new lite::Tensor(TypeId::kNumberTypeFloat32, dim);
tensor->SetData(input_data);
tensor->set_data(input_data);
EXPECT_EQ(input_size, tensor->Size());
return tensor;
}
@@ -108,7 +108,7 @@ TEST_F(TestBNGradFp32, BNGradFp32) {
EXPECT_EQ(res, 0);
for (auto v : inputs) {
delete[] reinterpret_cast<float *>(v->MutableData());
v->SetData(nullptr);
v->set_data(nullptr);
delete v;
}
mindspore::kernel::LiteKernel::FreeWorkspace();
@@ -197,7 +197,7 @@ TEST_F(TestBNGradFp32, BNTtrainFp32) {
res = mindspore::lite::CompareRelativeOutput(save_var, "./test_data/bngrad/running_var_3.bin");
EXPECT_EQ(res, 0);

x_tensor->SetData(nullptr);
x_tensor->set_data(nullptr);
delete x_tensor;
mindspore::kernel::LiteKernel::FreeWorkspace();
delete kernel_obj;
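CreateInTensor above also sanity-checks the bytes read against the tensor's expected size, which turns a shape/file mismatch into a test failure instead of silent garbage. A sketch of the helper's shape (the function name here is illustrative):

// Sketch of a CreateInTensor-style helper, as used by the BN grad tests.
lite::Tensor *CreateInput(const std::string &file_name, const std::vector<int> &dim) {
  size_t input_size = 0;
  auto input_data = reinterpret_cast<float *>(
      mindspore::lite::ReadFile(file_name.c_str(), &input_size));
  auto tensor = new lite::Tensor(TypeId::kNumberTypeFloat32, dim);
  tensor->set_data(input_data);
  EXPECT_EQ(input_size, tensor->Size());  // bytes on disk must match shape * sizeof(float)
  return tensor;
}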


+54 -54  mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/convolution_grad_fp32_tests.cc

@@ -85,7 +85,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32FilterGrad) {
auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
std::vector<int> dim_dy({1, 28, 28, 32});
lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
dy_tensor.SetData(dy_data);
dy_tensor.set_data(dy_data);

// runtime part
printf("Calculating runtime cost...\n");
@@ -98,12 +98,12 @@ TEST_F(TestConvolutionGradFp32, ConvFp32FilterGrad) {
auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
std::vector<int> dim_x({1, 28, 28, 3});
lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x);
x_tensor.SetData(input_data);
x_tensor.set_data(input_data);

auto dw_data = new float[output_data_size];
std::vector<int> dim_dw({32, 3, 3, 3});
lite::Tensor dw_tensor(TypeId::kNumberTypeFloat32, dim_dw);
dw_tensor.SetData(dw_data);
dw_tensor.set_data(dw_data);
std::vector<lite::Tensor *> inputs = {&dy_tensor, &x_tensor};
std::vector<lite::Tensor *> outputs = {&dw_tensor};

@@ -141,9 +141,9 @@ TEST_F(TestConvolutionGradFp32, ConvFp32FilterGrad) {
mindspore::kernel::LiteKernel::FreeWorkspace();
delete kernel;
// delete conv_param;
dw_tensor.SetData(nullptr);
x_tensor.SetData(nullptr);
dy_tensor.SetData(nullptr);
dw_tensor.set_data(nullptr);
x_tensor.set_data(nullptr);
dy_tensor.set_data(nullptr);
MS_LOG(INFO) << "TestConvolutionGradFp32 Filter Grad passed";
}

@@ -157,21 +157,21 @@ TEST_F(TestConvolutionGradFp32, ConvFp32InputGrad) {
auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
std::vector<int> dim_dy({1, 28, 28, 32});
lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
dy_tensor.SetData(dy_data);
dy_tensor.set_data(dy_data);

size_t w_size;
std::string w_path = "./test_data/conv/convfp32_w_32_3_3_3.bin";
auto w_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(w_path.c_str(), &w_size));
std::vector<int> dim_dw({32, 3, 3, 3});
lite::Tensor w_tensor(TypeId::kNumberTypeFloat32, dim_dw);
w_tensor.SetData(w_data);
w_tensor.set_data(w_data);

size_t output_data_size =
conv_param->input_batch_ * conv_param->input_h_ * conv_param->input_w_ * conv_param->input_channel_;
auto dx_data = new float[output_data_size];
std::vector<int> dim_dx({1, 28, 28, 3});
lite::Tensor dx_tensor(TypeId::kNumberTypeFloat32, dim_dx);
dx_tensor.SetData(dx_data);
dx_tensor.set_data(dx_data);

std::vector<lite::Tensor *> inputs = {&dy_tensor, &w_tensor};
std::vector<lite::Tensor *> outputs = {&dx_tensor};
@@ -210,9 +210,9 @@ TEST_F(TestConvolutionGradFp32, ConvFp32InputGrad) {
delete[] dx_data;
delete[] w_data;
delete[] dy_data;
w_tensor.SetData(nullptr);
dy_tensor.SetData(nullptr);
dx_tensor.SetData(nullptr);
w_tensor.set_data(nullptr);
dy_tensor.set_data(nullptr);
dx_tensor.set_data(nullptr);
mindspore::kernel::LiteKernel::FreeWorkspace();
delete kernel;
// delete conv_param;
@@ -230,7 +230,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupFilterGrad) {
auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
std::vector<int> dim_dy({1, 28, 28, 18});
lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
dy_tensor.SetData(dy_data);
dy_tensor.set_data(dy_data);

// runtime part
printf("Calculating runtime cost...\n");
@@ -243,12 +243,12 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupFilterGrad) {
auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
std::vector<int> dim_x({1, 28, 28, 3});
lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x);
x_tensor.SetData(input_data);
x_tensor.set_data(input_data);

auto dw_data = new float[output_data_size];
std::vector<int> dim_dw({18, 3, 3, 1});
lite::Tensor dw_tensor(TypeId::kNumberTypeFloat32, dim_dw);
dw_tensor.SetData(dw_data);
dw_tensor.set_data(dw_data);
std::vector<lite::Tensor *> inputs = {&dy_tensor, &x_tensor};
std::vector<lite::Tensor *> outputs = {&dw_tensor};

@@ -282,9 +282,9 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupFilterGrad) {
delete[] input_data;
delete[] dy_data;
delete[] dw_data;
dw_tensor.SetData(nullptr);
x_tensor.SetData(nullptr);
dy_tensor.SetData(nullptr);
dw_tensor.set_data(nullptr);
x_tensor.set_data(nullptr);
dy_tensor.set_data(nullptr);
mindspore::kernel::LiteKernel::FreeWorkspace();
delete kernel;
// delete conv_param;
@@ -301,21 +301,21 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupInputGrad) {
auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
std::vector<int> dim_dy({1, 28, 28, 18});
lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
dy_tensor.SetData(dy_data);
dy_tensor.set_data(dy_data);

size_t w_size;
std::string w_path = "./test_data/conv/convfp32_w_g3_18_3_3_3.bin";
auto w_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(w_path.c_str(), &w_size));
std::vector<int> dim_dw({18, 3, 3, 1});
lite::Tensor w_tensor(TypeId::kNumberTypeFloat32, dim_dw);
w_tensor.SetData(w_data);
w_tensor.set_data(w_data);

size_t output_data_size =
conv_param->input_batch_ * conv_param->input_h_ * conv_param->input_w_ * conv_param->input_channel_;
auto dx_data = new float[output_data_size];
std::vector<int> dim_dx({1, 28, 28, 3});
lite::Tensor dx_tensor(TypeId::kNumberTypeFloat32, dim_dx);
dx_tensor.SetData(dx_data);
dx_tensor.set_data(dx_data);

std::vector<lite::Tensor *> inputs = {&dy_tensor, &w_tensor};
std::vector<lite::Tensor *> outputs = {&dx_tensor};
@@ -353,9 +353,9 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupInputGrad) {
delete[] dx_data;
delete[] w_data;
delete[] dy_data;
dx_tensor.SetData(nullptr);
w_tensor.SetData(nullptr);
dy_tensor.SetData(nullptr);
dx_tensor.set_data(nullptr);
w_tensor.set_data(nullptr);
dy_tensor.set_data(nullptr);

delete kernel;
mindspore::kernel::LiteKernel::FreeWorkspace();
@@ -374,7 +374,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupDilationFilterGrad) {
auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
std::vector<int> dim_dy({1, 26, 26, 18});
lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
dy_tensor.SetData(dy_data);
dy_tensor.set_data(dy_data);

// runtime part
printf("Calculating runtime cost...\n");
@@ -387,12 +387,12 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupDilationFilterGrad) {
auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
std::vector<int> dim_x({1, 28, 28, 3});
lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x);
x_tensor.SetData(input_data);
x_tensor.set_data(input_data);

auto dw_data = new float[output_data_size];
std::vector<int> dim_dw({18, 3, 3, 1});
lite::Tensor dw_tensor(TypeId::kNumberTypeFloat32, dim_dw);
dw_tensor.SetData(dw_data);
dw_tensor.set_data(dw_data);
std::vector<lite::Tensor *> inputs = {&dy_tensor, &x_tensor};
std::vector<lite::Tensor *> outputs = {&dw_tensor};

@@ -426,9 +426,9 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupDilationFilterGrad) {
delete[] input_data;
delete[] dy_data;
delete[] dw_data;
dw_tensor.SetData(nullptr);
dy_tensor.SetData(nullptr);
x_tensor.SetData(nullptr);
dw_tensor.set_data(nullptr);
dy_tensor.set_data(nullptr);
x_tensor.set_data(nullptr);
mindspore::kernel::LiteKernel::FreeWorkspace();
delete kernel;
// delete conv_param;
@@ -445,21 +445,21 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupDilationInputGrad) {
auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
std::vector<int> dim_dy({1, 26, 26, 18});
lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
dy_tensor.SetData(dy_data);
dy_tensor.set_data(dy_data);

size_t w_size;
std::string w_path = "./test_data/conv/convfp32_w_g3_d2_18_3_3_3.bin";
auto w_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(w_path.c_str(), &w_size));
std::vector<int> dim_w({18, 3, 3, 1});
lite::Tensor w_tensor(TypeId::kNumberTypeFloat32, dim_w);
w_tensor.SetData(w_data);
w_tensor.set_data(w_data);

size_t output_data_size =
conv_param->input_batch_ * conv_param->input_h_ * conv_param->input_w_ * conv_param->input_channel_;
auto dx_data = new float[output_data_size];
std::vector<int> dim_dx({1, 28, 28, 3});
lite::Tensor dx_tensor(TypeId::kNumberTypeFloat32, dim_dx);
dx_tensor.SetData(dx_data);
dx_tensor.set_data(dx_data);

std::vector<lite::Tensor *> inputs = {&dy_tensor, &w_tensor};
std::vector<lite::Tensor *> outputs = {&dx_tensor};
@@ -493,9 +493,9 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupDilationInputGrad) {
delete[] dx_data;
delete[] w_data;
delete[] dy_data;
dx_tensor.SetData(nullptr);
dy_tensor.SetData(nullptr);
w_tensor.SetData(nullptr);
dx_tensor.set_data(nullptr);
dy_tensor.set_data(nullptr);
w_tensor.set_data(nullptr);
mindspore::kernel::LiteKernel::FreeWorkspace();
delete kernel;
// delete conv_param;
@@ -512,21 +512,21 @@ TEST_F(TestConvolutionGradFp32, ConvGroupDilation) {
auto x_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(x_path.c_str(), &x_size));
std::vector<int> dim_x({1, 28, 28, 3});
lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x);
x_tensor.SetData(x_data);
x_tensor.set_data(x_data);

size_t w_size;
std::string w_path = "./test_data/conv/convfp32_w_g3_d2_18_3_3_3.bin";
auto w_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(w_path.c_str(), &w_size));
std::vector<int> dim_w({18, 3, 3, 1});
lite::Tensor w_tensor(TypeId::kNumberTypeFloat32, dim_w);
w_tensor.SetData(w_data);
w_tensor.set_data(w_data);

size_t output_data_size =
conv_param->output_batch_ * conv_param->output_h_ * conv_param->output_w_ * conv_param->output_channel_;
auto y_data = new float[output_data_size];
std::vector<int> dim_y({1, 26, 26, 18});
lite::Tensor y_tensor(TypeId::kNumberTypeFloat32, dim_y);
y_tensor.SetData(y_data);
y_tensor.set_data(y_data);

std::vector<lite::Tensor *> inputs = {&x_tensor, &w_tensor};
std::vector<lite::Tensor *> outputs = {&y_tensor};
@@ -569,9 +569,9 @@ TEST_F(TestConvolutionGradFp32, ConvGroupDilation) {
delete[] y_data;
delete[] x_data;
delete[] w_data;
x_tensor.SetData(nullptr);
y_tensor.SetData(nullptr);
w_tensor.SetData(nullptr);
x_tensor.set_data(nullptr);
y_tensor.set_data(nullptr);
w_tensor.set_data(nullptr);
mindspore::kernel::LiteKernel::FreeWorkspace();
delete kernel;

@@ -614,7 +614,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32Dilation2Group2Stride2FilterGrad) {
auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
std::vector<int> dim_dy({2, 15, 15, 12});
lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
dy_tensor.SetData(dy_data);
dy_tensor.set_data(dy_data);

// runtime part
printf("Calculating runtime cost...\n");
@@ -627,12 +627,12 @@ TEST_F(TestConvolutionGradFp32, ConvFp32Dilation2Group2Stride2FilterGrad) {
auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
std::vector<int> dim_x({2, 32, 32, 4});
lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x);
x_tensor.SetData(input_data);
x_tensor.set_data(input_data);

auto dw_data = new float[output_data_size];
std::vector<int> dim_dw({12, 3, 3, 2});
lite::Tensor dw_tensor(TypeId::kNumberTypeFloat32, dim_dw);
dw_tensor.SetData(dw_data);
dw_tensor.set_data(dw_data);
std::vector<lite::Tensor *> inputs = {&dy_tensor, &x_tensor};
std::vector<lite::Tensor *> outputs = {&dw_tensor};

@@ -670,9 +670,9 @@ TEST_F(TestConvolutionGradFp32, ConvFp32Dilation2Group2Stride2FilterGrad) {
delete[] dw_data;
delete kernel;
// delete conv_param;
dw_tensor.SetData(nullptr);
x_tensor.SetData(nullptr);
dy_tensor.SetData(nullptr);
dw_tensor.set_data(nullptr);
x_tensor.set_data(nullptr);
dy_tensor.set_data(nullptr);
mindspore::kernel::LiteKernel::FreeWorkspace();
MS_LOG(INFO) << "TestConvolutionGradFp32 Filter Grad passed";
}
@@ -713,21 +713,21 @@ TEST_F(TestConvolutionGradFp32, ConvGroup2Dilation2Stride2) {
auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
std::vector<int> dim_dy({2, 15, 15, 12});
lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
dy_tensor.SetData(dy_data);
dy_tensor.set_data(dy_data);

size_t w_size;
std::string w_path = "./test_data/conv/convfp32_w_d2_g2_s2_12_2_3_3.bin";
auto w_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(w_path.c_str(), &w_size));
std::vector<int> dim_w({12, 3, 3, 2});
lite::Tensor w_tensor(TypeId::kNumberTypeFloat32, dim_w);
w_tensor.SetData(w_data);
w_tensor.set_data(w_data);

size_t output_data_size =
conv_param->input_batch_ * conv_param->input_h_ * conv_param->input_w_ * conv_param->input_channel_;
auto dx_data = new float[output_data_size];
std::vector<int> dim_dx({2, 32, 32, 4});
lite::Tensor dx_tensor(TypeId::kNumberTypeFloat32, dim_dx);
dx_tensor.SetData(dx_data);
dx_tensor.set_data(dx_data);

std::vector<lite::Tensor *> inputs = {&dy_tensor, &w_tensor};
std::vector<lite::Tensor *> outputs = {&dx_tensor};
@@ -766,9 +766,9 @@ TEST_F(TestConvolutionGradFp32, ConvGroup2Dilation2Stride2) {
delete[] dx_data;
delete[] w_data;
delete[] dy_data;
dx_tensor.SetData(nullptr);
dy_tensor.SetData(nullptr);
w_tensor.SetData(nullptr);
dx_tensor.set_data(nullptr);
dy_tensor.set_data(nullptr);
w_tensor.set_data(nullptr);
delete kernel;
mindspore::kernel::LiteKernel::FreeWorkspace();
MS_LOG(INFO) << "TestConvolutionGradFp32 Filter Grad passed";


+36 -36  mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_fp32_tests.cc

@@ -67,7 +67,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32FilterGrad) {
auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
std::vector<int> dim_dy({2, 63, 63, 9});
lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
dy_tensor.SetData(dy_data);
dy_tensor.set_data(dy_data);

// runtime part
printf("Calculating runtime cost...\n");
@@ -80,12 +80,12 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32FilterGrad) {
auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
std::vector<int> dim_x({2, 32, 32, 3});
lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x);
x_tensor.SetData(input_data);
x_tensor.set_data(input_data);

auto dw_data = new float[output_data_size];
std::vector<int> dim_dw({3, 3, 3, 9});
lite::Tensor dw_tensor(TypeId::kNumberTypeFloat32, dim_dw);
dw_tensor.SetData(dw_data);
dw_tensor.set_data(dw_data);
std::vector<lite::Tensor *> inputs = {&dy_tensor, &x_tensor};
std::vector<lite::Tensor *> outputs = {&dw_tensor};

@@ -123,9 +123,9 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32FilterGrad) {
delete[] dw_data;
delete kernel;
// delete conv_param;
dw_tensor.SetData(nullptr);
x_tensor.SetData(nullptr);
dy_tensor.SetData(nullptr);
dw_tensor.set_data(nullptr);
x_tensor.set_data(nullptr);
dy_tensor.set_data(nullptr);
mindspore::kernel::LiteKernel::FreeWorkspace();
MS_LOG(INFO) << "TestDeConvolutionGradFp32 Filter Grad passed";
}
@@ -166,7 +166,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2FilterGrad) {
auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
std::vector<int> dim_dy({2, 65, 65, 9});
lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
dy_tensor.SetData(dy_data);
dy_tensor.set_data(dy_data);

// runtime part
printf("Calculating runtime cost...\n");
@@ -179,12 +179,12 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2FilterGrad) {
auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
std::vector<int> dim_x({2, 32, 32, 3});
lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x);
x_tensor.SetData(input_data);
x_tensor.set_data(input_data);

auto dw_data = new float[output_data_size];
std::vector<int> dim_dw({9, 3, 3, 3});
lite::Tensor dw_tensor(TypeId::kNumberTypeFloat32, dim_dw);
dw_tensor.SetData(dw_data);
dw_tensor.set_data(dw_data);
std::vector<lite::Tensor *> inputs = {&dy_tensor, &x_tensor};
std::vector<lite::Tensor *> outputs = {&dw_tensor};

@@ -222,9 +222,9 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2FilterGrad) {
delete[] dw_data;
delete kernel;
// delete conv_param;
dw_tensor.SetData(nullptr);
x_tensor.SetData(nullptr);
dy_tensor.SetData(nullptr);
dw_tensor.set_data(nullptr);
x_tensor.set_data(nullptr);
dy_tensor.set_data(nullptr);
mindspore::kernel::LiteKernel::FreeWorkspace();
MS_LOG(INFO) << "TestDeConvolutionGradFp32 Filter Grad passed";
}
@@ -265,7 +265,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group3FilterGrad) {
auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
std::vector<int> dim_dy({2, 65, 65, 9});
lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
dy_tensor.SetData(dy_data);
dy_tensor.set_data(dy_data);

// runtime part
printf("Calculating runtime cost...\n");
@@ -278,12 +278,12 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group3FilterGrad) {
auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
std::vector<int> dim_x({2, 32, 32, 3});
lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x);
x_tensor.SetData(input_data);
x_tensor.set_data(input_data);

auto dw_data = new float[output_data_size];
std::vector<int> dim_dw({3, 3, 3, 3});
lite::Tensor dw_tensor(TypeId::kNumberTypeFloat32, dim_dw);
dw_tensor.SetData(dw_data);
dw_tensor.set_data(dw_data);
std::vector<lite::Tensor *> inputs = {&dy_tensor, &x_tensor};
std::vector<lite::Tensor *> outputs = {&dw_tensor};

@@ -321,9 +321,9 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group3FilterGrad) {
delete[] dw_data;
delete kernel;
// delete conv_param;
dw_tensor.SetData(nullptr);
x_tensor.SetData(nullptr);
dy_tensor.SetData(nullptr);
dw_tensor.set_data(nullptr);
x_tensor.set_data(nullptr);
dy_tensor.set_data(nullptr);
mindspore::kernel::LiteKernel::FreeWorkspace();
MS_LOG(INFO) << "TestDeConvolutionGradFp32 Filter Grad passed";
}
@@ -364,7 +364,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group3Stride1FilterGrad) {
auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
std::vector<int> dim_dy({2, 34, 34, 9});
lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
dy_tensor.SetData(dy_data);
dy_tensor.set_data(dy_data);

// runtime part
printf("Calculating runtime cost...\n");
@@ -377,12 +377,12 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group3Stride1FilterGrad) {
auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
std::vector<int> dim_x({2, 32, 32, 3});
lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x);
x_tensor.SetData(input_data);
x_tensor.set_data(input_data);

auto dw_data = new float[output_data_size];
std::vector<int> dim_dw({3, 3, 3, 3});
lite::Tensor dw_tensor(TypeId::kNumberTypeFloat32, dim_dw);
dw_tensor.SetData(dw_data);
dw_tensor.set_data(dw_data);
std::vector<lite::Tensor *> inputs = {&dy_tensor, &x_tensor};
std::vector<lite::Tensor *> outputs = {&dw_tensor};

@@ -420,9 +420,9 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group3Stride1FilterGrad) {
delete[] dw_data;
delete kernel;
// delete conv_param;
dw_tensor.SetData(nullptr);
x_tensor.SetData(nullptr);
dy_tensor.SetData(nullptr);
dw_tensor.set_data(nullptr);
x_tensor.set_data(nullptr);
dy_tensor.set_data(nullptr);
mindspore::kernel::LiteKernel::FreeWorkspace();
MS_LOG(INFO) << "TestDeConvolutionGradFp32 Filter Grad passed";
}
@@ -463,7 +463,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group2Stride2FilterGrad) {
auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
std::vector<int> dim_dy({2, 65, 65, 12});
lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
dy_tensor.SetData(dy_data);
dy_tensor.set_data(dy_data);

// runtime part
printf("Calculating runtime cost...\n");
@@ -476,12 +476,12 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group2Stride2FilterGrad) {
auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
std::vector<int> dim_x({2, 32, 32, 4});
lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x);
x_tensor.SetData(input_data);
x_tensor.set_data(input_data);

auto dw_data = new float[output_data_size];
std::vector<int> dim_dw({6, 3, 3, 4});
lite::Tensor dw_tensor(TypeId::kNumberTypeFloat32, dim_dw);
dw_tensor.SetData(dw_data);
dw_tensor.set_data(dw_data);
std::vector<lite::Tensor *> inputs = {&dy_tensor, &x_tensor};
std::vector<lite::Tensor *> outputs = {&dw_tensor};

@@ -519,9 +519,9 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group2Stride2FilterGrad) {
delete[] dw_data;
delete kernel;
// delete conv_param;
dw_tensor.SetData(nullptr);
x_tensor.SetData(nullptr);
dy_tensor.SetData(nullptr);
dw_tensor.set_data(nullptr);
x_tensor.set_data(nullptr);
dy_tensor.set_data(nullptr);
mindspore::kernel::LiteKernel::FreeWorkspace();
MS_LOG(INFO) << "TestDeConvolutionGradFp32 Filter Grad passed";
}
@@ -562,7 +562,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group12Stride2FilterGrad) {
auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
std::vector<int> dim_dy({2, 65, 65, 12});
lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
dy_tensor.SetData(dy_data);
dy_tensor.set_data(dy_data);

// runtime part
printf("Calculating runtime cost...\n");
@@ -575,12 +575,12 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group12Stride2FilterGrad) {
auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
std::vector<int> dim_x({2, 32, 32, 12});
lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x);
x_tensor.SetData(input_data);
x_tensor.set_data(input_data);

auto dw_data = new float[output_data_size];
std::vector<int> dim_dw({1, 3, 3, 12});
lite::Tensor dw_tensor(TypeId::kNumberTypeFloat32, dim_dw);
dw_tensor.SetData(dw_data);
dw_tensor.set_data(dw_data);
std::vector<lite::Tensor *> inputs = {&dy_tensor, &x_tensor};
std::vector<lite::Tensor *> outputs = {&dw_tensor};

@@ -618,9 +618,9 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group12Stride2FilterGrad) {
delete[] dw_data;
delete kernel;
// delete conv_param;
dw_tensor.SetData(nullptr);
x_tensor.SetData(nullptr);
dy_tensor.SetData(nullptr);
dw_tensor.set_data(nullptr);
x_tensor.set_data(nullptr);
dy_tensor.set_data(nullptr);
mindspore::kernel::LiteKernel::FreeWorkspace();
MS_LOG(INFO) << "TestDeConvolutionGradFp32 Filter Grad passed";
}


+36 -36  mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/pooling_grad_fp32_tests.cc

@@ -124,20 +124,20 @@ TEST_F(TestPoolingGradFp32, AvgPoolingKernelGradFp32) {
auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
std::vector<int> dim_dy({1, 28, 28, 3});
lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
dy_tensor.SetData(input_data);
dy_tensor.set_data(input_data);

std::string input1_path = "./test_data/pooling/avgpoolgradfp32_1_x_1_28_28_3.bin";
auto input1_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input1_path.c_str(), &input_size));
std::vector<int> dim_x({1, 28, 28, 3});
lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x);
x_tensor.SetData(input1_data);
x_tensor.set_data(input1_data);

std::vector<lite::Tensor *> inputs = {&dy_tensor, &x_tensor};

auto output_data = new float[output_data_size];
std::vector<int> dim_dx({1, 28, 28, 3});
lite::Tensor dx_tensor(TypeId::kNumberTypeFloat32, dim_dx);
dx_tensor.SetData(output_data);
dx_tensor.set_data(output_data);
std::vector<lite::Tensor *> outputs = {&dx_tensor};

lite::InnerContext context;
@@ -162,9 +162,9 @@ TEST_F(TestPoolingGradFp32, AvgPoolingKernelGradFp32) {
delete[] input_data;
delete[] input1_data;
delete[] output_data;
dx_tensor.SetData(nullptr);
x_tensor.SetData(nullptr);
dy_tensor.SetData(nullptr);
dx_tensor.set_data(nullptr);
x_tensor.set_data(nullptr);
dy_tensor.set_data(nullptr);
// delete pooling_param;
delete kernel_obj;
MS_LOG(INFO) << "TestAvgPoolingGradFp32 passed";
@@ -188,13 +188,13 @@ TEST_F(TestPoolingGradFp32, AvgPoolingBatchGradFp32) {
auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
std::vector<int> dim_dy({3, 28, 28, 3});
lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
dy_tensor.SetData(input_data);
dy_tensor.set_data(input_data);

std::string input1_path = "./test_data/pooling/avgpoolgradfp32_1_x_3_28_28_3.bin";
auto input1_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input1_path.c_str(), &input_size));
std::vector<int> dim_x({3, 28, 28, 3});
lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x);
x_tensor.SetData(input1_data);
x_tensor.set_data(input1_data);

std::vector<lite::Tensor *> inputs = {&dy_tensor, &x_tensor};

@@ -226,8 +226,8 @@ TEST_F(TestPoolingGradFp32, AvgPoolingBatchGradFp32) {

delete[] input_data;
delete[] input1_data;
x_tensor.SetData(nullptr);
dy_tensor.SetData(nullptr);
x_tensor.set_data(nullptr);
dy_tensor.set_data(nullptr);
// delete pooling_param;
delete kernel_obj;
MS_LOG(INFO) << "TestAvgPoolingGradBatchFp32 passed";
@@ -253,13 +253,13 @@ TEST_F(TestPoolingGradFp32, AvgPoolGradStride2Fp32) {
mindspore::lite::ReadFile("./test_data/pooling/avgpoolgradfp32_s2_x_3_28_28_3.bin", &input_size));
std::vector<int> dim_x({pool->output_batch_, pool->input_h_, pool->input_w_, pool->input_channel_});
lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x);
x_tensor.SetData(x_data);
x_tensor.set_data(x_data);

auto yt_data = reinterpret_cast<float *>(
mindspore::lite::ReadFile("./test_data/pooling/avgpoolgradfp32_s2_dy_3_28_28_3.bin", &input_size));
std::vector<int> dim_y({pool->output_batch_, pool->output_h_, pool->output_w_, pool->output_channel_});
lite::Tensor yt_tensor(TypeId::kNumberTypeFloat32, dim_y);
yt_tensor.SetData(yt_data);
yt_tensor.set_data(yt_data);
lite::Tensor out_tensor(TypeId::kNumberTypeFloat32, dim_x);
ASSERT_EQ(out_tensor.MallocData(), 0);
float *out_data = static_cast<float *>(out_tensor.MutableData());
@@ -289,8 +289,8 @@ TEST_F(TestPoolingGradFp32, AvgPoolGradStride2Fp32) {
delete[] yt_data;
// delete[] out_data;
// delete conv_param;
x_tensor.SetData(nullptr);
yt_tensor.SetData(nullptr);
x_tensor.set_data(nullptr);
yt_tensor.set_data(nullptr);
delete kernel;
MS_LOG(INFO) << "AvgPoolGradStride2Fp32 Filter Grad passed";
}
@@ -315,13 +315,13 @@ TEST_F(TestPoolingGradFp32, AvgPoolGradStride3Fp32) {
mindspore::lite::ReadFile("./test_data/pooling/avgpoolgradfp32_s3_x_3_28_28_3.bin", &input_size));
std::vector<int> dim_x({pool->output_batch_, pool->input_h_, pool->input_w_, pool->input_channel_});
lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x);
x_tensor.SetData(x_data);
x_tensor.set_data(x_data);

auto yt_data = reinterpret_cast<float *>(
mindspore::lite::ReadFile("./test_data/pooling/avgpoolgradfp32_s3_dy_3_28_28_3.bin", &input_size));
std::vector<int> dim_y({pool->output_batch_, pool->output_h_, pool->output_w_, pool->output_channel_});
lite::Tensor yt_tensor(TypeId::kNumberTypeFloat32, dim_y);
yt_tensor.SetData(yt_data);
yt_tensor.set_data(yt_data);

lite::Tensor out_tensor(TypeId::kNumberTypeFloat32, dim_x);
ASSERT_EQ(out_tensor.MallocData(), 0);
@@ -354,8 +354,8 @@ TEST_F(TestPoolingGradFp32, AvgPoolGradStride3Fp32) {
delete[] yt_data;
// delete[] out_data;
// delete conv_param;
x_tensor.SetData(nullptr);
yt_tensor.SetData(nullptr);
x_tensor.set_data(nullptr);
yt_tensor.set_data(nullptr);
delete kernel;
MS_LOG(INFO) << "AvgPoolGradStride3Fp32 Filter Grad passed";
}
@@ -431,18 +431,18 @@ TEST_F(TestPoolingGradFp32, MaxPoolGradBatchFp32) {
mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_1_x_3_28_28_3.bin", &input_size));
std::vector<int> dim_x({3, 28, 28, 3});
lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x);
x_tensor.SetData(x_data);
x_tensor.set_data(x_data);

auto y_data = reinterpret_cast<float *>(
mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_1_dx_3_28_28_3.bin", &input_size));
std::vector<int> dim_y({3, 28, 28, 3});
lite::Tensor y_tensor(TypeId::kNumberTypeFloat32, dim_y);
y_tensor.SetData(y_data);
y_tensor.set_data(y_data);

auto yt_data = reinterpret_cast<float *>(
mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_1_dy_3_28_28_3.bin", &input_size));
lite::Tensor yt_tensor(TypeId::kNumberTypeFloat32, dim_y);
yt_tensor.SetData(yt_data);
yt_tensor.set_data(yt_data);

lite::Tensor out_tensor(TypeId::kNumberTypeFloat32, dim_x);
ASSERT_EQ(out_tensor.MallocData(), 0);
@@ -476,9 +476,9 @@ TEST_F(TestPoolingGradFp32, MaxPoolGradBatchFp32) {
delete[] yt_data;
// delete[] out_data;
// delete conv_param;
x_tensor.SetData(nullptr);
y_tensor.SetData(nullptr);
yt_tensor.SetData(nullptr);
x_tensor.set_data(nullptr);
y_tensor.set_data(nullptr);
yt_tensor.set_data(nullptr);
delete kernel;
MS_LOG(INFO) << "MaxPoolGradBatchFp32 Filter Grad passed";
}
@@ -504,18 +504,18 @@ TEST_F(TestPoolingGradFp32, MaxPoolGradStride2Fp32) {
mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_s2_x_3_28_28_3.bin", &input_size));
std::vector<int> dim_x({maxpool->output_batch_, maxpool->input_h_, maxpool->input_w_, maxpool->input_channel_});
lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x);
x_tensor.SetData(x_data);
x_tensor.set_data(x_data);

auto y_data = reinterpret_cast<float *>(
mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_s2_dx_3_28_28_3.bin", &input_size));
std::vector<int> dim_y({maxpool->output_batch_, maxpool->output_h_, maxpool->output_w_, maxpool->output_channel_});
lite::Tensor y_tensor(TypeId::kNumberTypeFloat32, dim_y);
-y_tensor.SetData(y_data);
+y_tensor.set_data(y_data);

auto yt_data = reinterpret_cast<float *>(
mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_s2_dy_3_28_28_3.bin", &input_size));
lite::Tensor yt_tensor(TypeId::kNumberTypeFloat32, dim_y);
-yt_tensor.SetData(yt_data);
+yt_tensor.set_data(yt_data);

lite::Tensor out_tensor(TypeId::kNumberTypeFloat32, dim_x);
ASSERT_EQ(out_tensor.MallocData(), 0);
@@ -550,9 +550,9 @@ TEST_F(TestPoolingGradFp32, MaxPoolGradStride2Fp32) {
delete[] yt_data;
// delete[] out_data;
// delete conv_param;
-x_tensor.SetData(nullptr);
-y_tensor.SetData(nullptr);
-yt_tensor.SetData(nullptr);
+x_tensor.set_data(nullptr);
+y_tensor.set_data(nullptr);
+yt_tensor.set_data(nullptr);
delete kernel;
MS_LOG(INFO) << "MaxPoolGradStride2Fp32 Filter Grad passed";
}
@@ -578,18 +578,18 @@ TEST_F(TestPoolingGradFp32, MaxPoolGradStride3Fp32) {
mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_s3_x_3_28_28_3.bin", &input_size));
std::vector<int> dim_x({maxpool->output_batch_, maxpool->input_h_, maxpool->input_w_, maxpool->input_channel_});
lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x);
-x_tensor.SetData(x_data);
+x_tensor.set_data(x_data);

auto y_data = reinterpret_cast<float *>(
mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_s3_dx_3_28_28_3.bin", &input_size));
std::vector<int> dim_y({maxpool->output_batch_, maxpool->output_h_, maxpool->output_w_, maxpool->output_channel_});
lite::Tensor y_tensor(TypeId::kNumberTypeFloat32, dim_y);
-y_tensor.SetData(y_data);
+y_tensor.set_data(y_data);

auto yt_data = reinterpret_cast<float *>(
mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_s3_dy_3_28_28_3.bin", &input_size));
lite::Tensor yt_tensor(TypeId::kNumberTypeFloat32, dim_y);
-yt_tensor.SetData(yt_data);
+yt_tensor.set_data(yt_data);

lite::Tensor out_tensor(TypeId::kNumberTypeFloat32, dim_x);
ASSERT_EQ(out_tensor.MallocData(), 0);
@@ -624,9 +624,9 @@ TEST_F(TestPoolingGradFp32, MaxPoolGradStride3Fp32) {
delete[] yt_data;
// delete[] out_data;
// delete conv_param;
-x_tensor.SetData(nullptr);
-y_tensor.SetData(nullptr);
-yt_tensor.SetData(nullptr);
+x_tensor.set_data(nullptr);
+y_tensor.set_data(nullptr);
+yt_tensor.set_data(nullptr);
delete kernel;
MS_LOG(INFO) << "MaxPoolGradStride3Fp32 Filter Grad passed";
}
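Every hunk in this test file follows the same ownership pattern: the test attaches buffers it owns to stack-allocated lite::Tensor objects through the renamed set_data accessor, then detaches them with set_data(nullptr) before the tensors are destroyed, so the tensor destructor never frees test-owned memory. A minimal sketch of that lifecycle, assuming the constructor and accessor signatures visible in the hunks above:

// Illustrative sketch only -- mirrors the attach/run/detach pattern of these tests.
float *x_data = new float[3 * 28 * 28 * 3];                    // buffer owned by the test
lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, {3, 28, 28, 3});
x_tensor.set_data(x_data);       // tensor borrows the buffer; no copy, no ownership transfer
// ... construct and run the pooling-grad kernel under test ...
x_tensor.set_data(nullptr);      // detach so ~Tensor() does not free x_data
delete[] x_data;                 // the test releases its own allocation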


+8 -8  mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_crossentropy_fp32_tests.cc

@@ -37,7 +37,7 @@ TEST_F(TestSoftmaxCrossEntropyFp32, SoftmaxCrossEntropyFp32) {
auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
std::vector<int> dim_y({6, 4});
lite::Tensor y_tensor(TypeId::kNumberTypeFloat32, dim_y);
-y_tensor.SetData(input_data);
+y_tensor.set_data(input_data);

std::string label_path = "./test_data/operators/sce_fp32_1_l_6.bin";
auto ll_labels = reinterpret_cast<int64_t *>(mindspore::lite::ReadFile(label_path.c_str(), &input_size));
@@ -47,17 +47,17 @@ TEST_F(TestSoftmaxCrossEntropyFp32, SoftmaxCrossEntropyFp32) {

std::vector<int> dim_l({6, 4});
lite::Tensor l_tensor(TypeId::kNumberTypeInt32, dim_l);
-l_tensor.SetData(labels);
+l_tensor.set_data(labels);

std::vector<lite::Tensor *> inputs = {&y_tensor, &l_tensor};

auto loss = new float[1];
std::vector<int> dim_dw({1});
lite::Tensor loss_tensor(TypeId::kNumberTypeFloat32, dim_dw);
-loss_tensor.SetData(loss);
+loss_tensor.set_data(loss);
auto grad = new float[24];
lite::Tensor grad_tensor(TypeId::kNumberTypeFloat32, dim_y);
-grad_tensor.SetData(grad);
+grad_tensor.set_data(grad);
std::vector<lite::Tensor *> outputs = {&loss_tensor, &grad_tensor};

lite::InnerContext context;
@@ -94,10 +94,10 @@ TEST_F(TestSoftmaxCrossEntropyFp32, SoftmaxCrossEntropyFp32) {
delete[] input_data;
delete[] loss;
delete[] grad;
-l_tensor.SetData(nullptr);
-y_tensor.SetData(nullptr);
-loss_tensor.SetData(nullptr);
-grad_tensor.SetData(nullptr);
+l_tensor.set_data(nullptr);
+y_tensor.set_data(nullptr);
+loss_tensor.set_data(nullptr);
+grad_tensor.set_data(nullptr);
mindspore::kernel::LiteKernel::FreeWorkspace();
delete kernel_obj;
MS_LOG(INFO) << "SoftmaxCrossEntropyFp32 passed";


+6 -6  mindspore/lite/test/ut/src/runtime/kernel/arm/int8/add_int8_tests.cc

@@ -35,9 +35,9 @@ TEST_F(TestQuantizedAdd, Add) {
int8_t input_data0[] = {-102, 25, -51, 89, -102, 25, -51, 89, -102, 25}; // -0.8 0.2 -0.4 0.7
int8_t input_data1[] = {38, 51, 64, -102, 38, 51, 64, -102, 38, 51}; // 0.3 0.4 0.5 -0.8
int8_t output_data[10] = {0};
-in_tensor0.SetData(input_data0);
-in_tensor1.SetData(input_data1);
-out_tensor.SetData(output_data);
+in_tensor0.set_data(input_data0);
+in_tensor1.set_data(input_data1);
+out_tensor.set_data(output_data);

const lite::QuantArg quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::QuantArg quant_in1 = {0.00784314f, 0};
@@ -68,8 +68,8 @@ TEST_F(TestQuantizedAdd, Add) {
EXPECT_EQ(output_data[i], expect0[i]);
}

-in_tensor0.SetData(nullptr);
-in_tensor1.SetData(nullptr);
-out_tensor.SetData(nullptr);
+in_tensor0.set_data(nullptr);
+in_tensor1.set_data(nullptr);
+out_tensor.set_data(nullptr);
}
} // namespace mindspore

+64 -64  mindspore/lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc

@@ -47,7 +47,7 @@ TEST_F(TestArithmeticSelfInt8, floor_quant0_thread2) {

TypeId tid_int8 = kNumberTypeInt8;
lite::Tensor *input_tensor1 = new lite::Tensor;
-input_tensor1->SetData(input1.data());
+input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -55,7 +55,7 @@ TEST_F(TestArithmeticSelfInt8, floor_quant0_thread2) {
inputs_tensor[0] = input_tensor1;

lite::Tensor *output0_tensor = new lite::Tensor;
-output0_tensor->SetData(output);
+output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -82,8 +82,8 @@ TEST_F(TestArithmeticSelfInt8, floor_quant0_thread2) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

-input_tensor1->SetData(nullptr);
-output0_tensor->SetData(nullptr);
+input_tensor1->set_data(nullptr);
+output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;
@@ -107,7 +107,7 @@ TEST_F(TestArithmeticSelfInt8, floor_quant1_thread2) {

TypeId tid_int8 = kNumberTypeInt8;
lite::Tensor *input_tensor1 = new lite::Tensor;
-input_tensor1->SetData(input1.data());
+input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -115,7 +115,7 @@ TEST_F(TestArithmeticSelfInt8, floor_quant1_thread2) {
inputs_tensor[0] = input_tensor1;

lite::Tensor *output0_tensor = new lite::Tensor;
-output0_tensor->SetData(output);
+output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -142,8 +142,8 @@ TEST_F(TestArithmeticSelfInt8, floor_quant1_thread2) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

-input_tensor1->SetData(nullptr);
-output0_tensor->SetData(nullptr);
+input_tensor1->set_data(nullptr);
+output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;
@@ -167,7 +167,7 @@ TEST_F(TestArithmeticSelfInt8, round_quant0_thread2) {

TypeId tid_int8 = kNumberTypeInt8;
lite::Tensor *input_tensor1 = new lite::Tensor;
-input_tensor1->SetData(input1.data());
+input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -175,7 +175,7 @@ TEST_F(TestArithmeticSelfInt8, round_quant0_thread2) {
inputs_tensor[0] = input_tensor1;

lite::Tensor *output0_tensor = new lite::Tensor;
-output0_tensor->SetData(output);
+output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -202,8 +202,8 @@ TEST_F(TestArithmeticSelfInt8, round_quant0_thread2) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

-input_tensor1->SetData(nullptr);
-output0_tensor->SetData(nullptr);
+input_tensor1->set_data(nullptr);
+output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;
@@ -227,7 +227,7 @@ TEST_F(TestArithmeticSelfInt8, round_quant1_thread2) {

TypeId tid_int8 = kNumberTypeInt8;
lite::Tensor *input_tensor1 = new lite::Tensor;
-input_tensor1->SetData(input1.data());
+input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -235,7 +235,7 @@ TEST_F(TestArithmeticSelfInt8, round_quant1_thread2) {
inputs_tensor[0] = input_tensor1;

lite::Tensor *output0_tensor = new lite::Tensor;
-output0_tensor->SetData(output);
+output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -262,8 +262,8 @@ TEST_F(TestArithmeticSelfInt8, round_quant1_thread2) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

-input_tensor1->SetData(nullptr);
-output0_tensor->SetData(nullptr);
+input_tensor1->set_data(nullptr);
+output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;
@@ -287,7 +287,7 @@ TEST_F(TestArithmeticSelfInt8, ceil_quant0_thread2) {

TypeId tid_int8 = kNumberTypeInt8;
lite::Tensor *input_tensor1 = new lite::Tensor;
-input_tensor1->SetData(input1.data());
+input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -295,7 +295,7 @@ TEST_F(TestArithmeticSelfInt8, ceil_quant0_thread2) {
inputs_tensor[0] = input_tensor1;

lite::Tensor *output0_tensor = new lite::Tensor;
-output0_tensor->SetData(output);
+output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -322,8 +322,8 @@ TEST_F(TestArithmeticSelfInt8, ceil_quant0_thread2) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

-input_tensor1->SetData(nullptr);
-output0_tensor->SetData(nullptr);
+input_tensor1->set_data(nullptr);
+output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;
@@ -347,7 +347,7 @@ TEST_F(TestArithmeticSelfInt8, ceil_quant1_thread2) {

TypeId tid_int8 = kNumberTypeInt8;
lite::Tensor *input_tensor1 = new lite::Tensor;
-input_tensor1->SetData(input1.data());
+input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -355,7 +355,7 @@ TEST_F(TestArithmeticSelfInt8, ceil_quant1_thread2) {
inputs_tensor[0] = input_tensor1;

lite::Tensor *output0_tensor = new lite::Tensor;
-output0_tensor->SetData(output);
+output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -382,8 +382,8 @@ TEST_F(TestArithmeticSelfInt8, ceil_quant1_thread2) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

-input_tensor1->SetData(nullptr);
-output0_tensor->SetData(nullptr);
+input_tensor1->set_data(nullptr);
+output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;
@@ -407,7 +407,7 @@ TEST_F(TestArithmeticSelfInt8, abs_quant0_thread0) {

TypeId tid_int8 = kNumberTypeInt8;
lite::Tensor *input_tensor1 = new lite::Tensor;
-input_tensor1->SetData(input1.data());
+input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -415,7 +415,7 @@ TEST_F(TestArithmeticSelfInt8, abs_quant0_thread0) {
inputs_tensor[0] = input_tensor1;

lite::Tensor *output0_tensor = new lite::Tensor;
-output0_tensor->SetData(output);
+output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -442,8 +442,8 @@ TEST_F(TestArithmeticSelfInt8, abs_quant0_thread0) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

-input_tensor1->SetData(nullptr);
-output0_tensor->SetData(nullptr);
+input_tensor1->set_data(nullptr);
+output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;
@@ -467,7 +467,7 @@ TEST_F(TestArithmeticSelfInt8, abs_quant1_thread2) {

TypeId tid_int8 = kNumberTypeInt8;
lite::Tensor *input_tensor1 = new lite::Tensor;
-input_tensor1->SetData(input1.data());
+input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -475,7 +475,7 @@ TEST_F(TestArithmeticSelfInt8, abs_quant1_thread2) {
inputs_tensor[0] = input_tensor1;

lite::Tensor *output0_tensor = new lite::Tensor;
-output0_tensor->SetData(output);
+output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -502,8 +502,8 @@ TEST_F(TestArithmeticSelfInt8, abs_quant1_thread2) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

-input_tensor1->SetData(nullptr);
-output0_tensor->SetData(nullptr);
+input_tensor1->set_data(nullptr);
+output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;
@@ -527,7 +527,7 @@ TEST_F(TestArithmeticSelfInt8, sin_quant0_thread2) {

TypeId tid_int8 = kNumberTypeInt8;
lite::Tensor *input_tensor1 = new lite::Tensor;
-input_tensor1->SetData(input1.data());
+input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -535,7 +535,7 @@ TEST_F(TestArithmeticSelfInt8, sin_quant0_thread2) {
inputs_tensor[0] = input_tensor1;

lite::Tensor *output0_tensor = new lite::Tensor;
-output0_tensor->SetData(output);
+output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -562,8 +562,8 @@ TEST_F(TestArithmeticSelfInt8, sin_quant0_thread2) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

-input_tensor1->SetData(nullptr);
-output0_tensor->SetData(nullptr);
+input_tensor1->set_data(nullptr);
+output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;
@@ -587,7 +587,7 @@ TEST_F(TestArithmeticSelfInt8, cos_quant0_thread2) {

TypeId tid_int8 = kNumberTypeInt8;
lite::Tensor *input_tensor1 = new lite::Tensor;
-input_tensor1->SetData(input1.data());
+input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -595,7 +595,7 @@ TEST_F(TestArithmeticSelfInt8, cos_quant0_thread2) {
inputs_tensor[0] = input_tensor1;

lite::Tensor *output0_tensor = new lite::Tensor;
-output0_tensor->SetData(output);
+output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -622,8 +622,8 @@ TEST_F(TestArithmeticSelfInt8, cos_quant0_thread2) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

-input_tensor1->SetData(nullptr);
-output0_tensor->SetData(nullptr);
+input_tensor1->set_data(nullptr);
+output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;
@@ -647,7 +647,7 @@ TEST_F(TestArithmeticSelfInt8, log_quant0_thread2) {

TypeId tid_int8 = kNumberTypeInt8;
lite::Tensor *input_tensor1 = new lite::Tensor;
-input_tensor1->SetData(input1.data());
+input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -655,7 +655,7 @@ TEST_F(TestArithmeticSelfInt8, log_quant0_thread2) {
inputs_tensor[0] = input_tensor1;

lite::Tensor *output0_tensor = new lite::Tensor;
-output0_tensor->SetData(output);
+output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -682,8 +682,8 @@ TEST_F(TestArithmeticSelfInt8, log_quant0_thread2) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

-input_tensor1->SetData(nullptr);
-output0_tensor->SetData(nullptr);
+input_tensor1->set_data(nullptr);
+output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;
@@ -707,7 +707,7 @@ TEST_F(TestArithmeticSelfInt8, sqrt_quant0_thread2) {

TypeId tid_int8 = kNumberTypeInt8;
lite::Tensor *input_tensor1 = new lite::Tensor;
-input_tensor1->SetData(input1.data());
+input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -715,7 +715,7 @@ TEST_F(TestArithmeticSelfInt8, sqrt_quant0_thread2) {
inputs_tensor[0] = input_tensor1;

lite::Tensor *output0_tensor = new lite::Tensor;
-output0_tensor->SetData(output);
+output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -742,8 +742,8 @@ TEST_F(TestArithmeticSelfInt8, sqrt_quant0_thread2) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

-input_tensor1->SetData(nullptr);
-output0_tensor->SetData(nullptr);
+input_tensor1->set_data(nullptr);
+output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;
@@ -767,7 +767,7 @@ TEST_F(TestArithmeticSelfInt8, rsqrt_quant0_thread2) {

TypeId tid_int8 = kNumberTypeInt8;
lite::Tensor *input_tensor1 = new lite::Tensor;
-input_tensor1->SetData(input1.data());
+input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -775,7 +775,7 @@ TEST_F(TestArithmeticSelfInt8, rsqrt_quant0_thread2) {
inputs_tensor[0] = input_tensor1;

lite::Tensor *output0_tensor = new lite::Tensor;
-output0_tensor->SetData(output);
+output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -802,8 +802,8 @@ TEST_F(TestArithmeticSelfInt8, rsqrt_quant0_thread2) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

-input_tensor1->SetData(nullptr);
-output0_tensor->SetData(nullptr);
+input_tensor1->set_data(nullptr);
+output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;
@@ -827,7 +827,7 @@ TEST_F(TestArithmeticSelfInt8, square_quant0_thread2) {

TypeId tid_int8 = kNumberTypeInt8;
lite::Tensor *input_tensor1 = new lite::Tensor;
-input_tensor1->SetData(input1.data());
+input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -835,7 +835,7 @@ TEST_F(TestArithmeticSelfInt8, square_quant0_thread2) {
inputs_tensor[0] = input_tensor1;

lite::Tensor *output0_tensor = new lite::Tensor;
-output0_tensor->SetData(output);
+output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -862,8 +862,8 @@ TEST_F(TestArithmeticSelfInt8, square_quant0_thread2) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

-input_tensor1->SetData(nullptr);
-output0_tensor->SetData(nullptr);
+input_tensor1->set_data(nullptr);
+output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;
@@ -887,7 +887,7 @@ TEST_F(TestArithmeticSelfInt8, square_quant1_thread2) {

TypeId tid_int8 = kNumberTypeInt8;
lite::Tensor *input_tensor1 = new lite::Tensor;
-input_tensor1->SetData(input1.data());
+input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -895,7 +895,7 @@ TEST_F(TestArithmeticSelfInt8, square_quant1_thread2) {
inputs_tensor[0] = input_tensor1;

lite::Tensor *output0_tensor = new lite::Tensor;
-output0_tensor->SetData(output);
+output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -922,8 +922,8 @@ TEST_F(TestArithmeticSelfInt8, square_quant1_thread2) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

-input_tensor1->SetData(nullptr);
-output0_tensor->SetData(nullptr);
+input_tensor1->set_data(nullptr);
+output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;
@@ -947,7 +947,7 @@ TEST_F(TestArithmeticSelfInt8, logical_not_quant0_thread2) {

TypeId tid_int8 = kNumberTypeInt8;
lite::Tensor *input_tensor1 = new lite::Tensor;
-input_tensor1->SetData(input1.data());
+input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -955,7 +955,7 @@ TEST_F(TestArithmeticSelfInt8, logical_not_quant0_thread2) {
inputs_tensor[0] = input_tensor1;

lite::Tensor *output0_tensor = new lite::Tensor;
-output0_tensor->SetData(output);
+output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -982,8 +982,8 @@ TEST_F(TestArithmeticSelfInt8, logical_not_quant0_thread2) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

-input_tensor1->SetData(nullptr);
-output0_tensor->SetData(nullptr);
+input_tensor1->set_data(nullptr);
+output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;


+20 -20  mindspore/lite/test/ut/src/runtime/kernel/arm/int8/batchnorm_int8_test.cc

@@ -73,11 +73,11 @@ TEST_F(TestBatchnormInt8, FusedTest) {
inputs_tensor.push_back(&input2_tensor);
inputs_tensor.push_back(&input3_tensor);
inputs_tensor.push_back(&input4_tensor);
-input0_tensor.SetData(in_data.data());
-input1_tensor.SetData(in_data1.data());
-input2_tensor.SetData(in_data2.data());
-input3_tensor.SetData(in_data3.data());
-input4_tensor.SetData(in_data4.data());
+input0_tensor.set_data(in_data.data());
+input1_tensor.set_data(in_data1.data());
+input2_tensor.set_data(in_data2.data());
+input3_tensor.set_data(in_data3.data());
+input4_tensor.set_data(in_data4.data());
input0_tensor.set_shape(shape);
input1_tensor.set_shape({2});
input2_tensor.set_shape({2});
@@ -94,7 +94,7 @@ TEST_F(TestBatchnormInt8, FusedTest) {
std::vector<int8_t> corr_out = {-22, -28, -20, -26, -17, -24, -28, -42, -30, -44, -33, -46};
lite::Tensor output0_tensor;
outputs_tensor.push_back(&output0_tensor);
-output0_tensor.SetData(output.data());
+output0_tensor.set_data(output.data());
output0_tensor.set_shape(shape);
output0_tensor.AddQuantParam(output_quant_arg);

@@ -118,12 +118,12 @@ TEST_F(TestBatchnormInt8, FusedTest) {
std::cout << std::endl;
CompareOutputData(output.data(), corr_out.data(), output0_tensor.ElementsNum(), 0.001);

-input0_tensor.SetData(nullptr);
-input1_tensor.SetData(nullptr);
-input2_tensor.SetData(nullptr);
-input3_tensor.SetData(nullptr);
-input4_tensor.SetData(nullptr);
-output0_tensor.SetData(nullptr);
+input0_tensor.set_data(nullptr);
+input1_tensor.set_data(nullptr);
+input2_tensor.set_data(nullptr);
+input3_tensor.set_data(nullptr);
+input4_tensor.set_data(nullptr);
+output0_tensor.set_data(nullptr);
MS_LOG(INFO) << "TestBathNormFp32 accuracy passed";
}

@@ -160,9 +160,9 @@ TEST_F(TestBatchnormInt8, BNTest) {
inputs_tensor.push_back(&input0_tensor);
inputs_tensor.push_back(&input1_tensor);
inputs_tensor.push_back(&input2_tensor);
-input0_tensor.SetData(in_data.data());
-input1_tensor.SetData(in_data1.data());
-input2_tensor.SetData(in_data2.data());
+input0_tensor.set_data(in_data.data());
+input1_tensor.set_data(in_data1.data());
+input2_tensor.set_data(in_data2.data());
input0_tensor.set_shape(shape);
input1_tensor.set_shape({2});
input2_tensor.set_shape({2});
@@ -175,7 +175,7 @@ TEST_F(TestBatchnormInt8, BNTest) {

lite::Tensor output0_tensor;
outputs_tensor.push_back(&output0_tensor);
-output0_tensor.SetData(output.data());
+output0_tensor.set_data(output.data());
output0_tensor.set_shape(shape);
output0_tensor.AddQuantParam(output_quant_arg);

@@ -199,10 +199,10 @@ TEST_F(TestBatchnormInt8, BNTest) {
std::cout << std::endl;
CompareOutputData(output.data(), corr_out.data(), output0_tensor.ElementsNum(), 0.001);

-input0_tensor.SetData(nullptr);
-input1_tensor.SetData(nullptr);
-input2_tensor.SetData(nullptr);
-output0_tensor.SetData(nullptr);
+input0_tensor.set_data(nullptr);
+input1_tensor.set_data(nullptr);
+input2_tensor.set_data(nullptr);
+output0_tensor.set_data(nullptr);
MS_LOG(INFO) << "TestBathNormFp32 accuracy passed";
}



+6 -6  mindspore/lite/test/ut/src/runtime/kernel/arm/int8/bias_add_int8_tests.cc

@@ -36,9 +36,9 @@ TEST_F(TestBiasAddInt8, BiasAdd) {
int8_t input_data0[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
int8_t input_data1[] = {1, 1};
int8_t output_data[12] = {0};
-in_tensor0.SetData(input_data0);
-in_tensor1.SetData(input_data1);
-out_tensor.SetData(output_data);
+in_tensor0.set_data(input_data0);
+in_tensor1.set_data(input_data1);
+out_tensor.set_data(output_data);
std::vector<lite::Tensor *> inputs = {&in_tensor0, &in_tensor1};
std::vector<lite::Tensor *> outputs = {&out_tensor};

@@ -70,8 +70,8 @@ TEST_F(TestBiasAddInt8, BiasAdd) {
EXPECT_EQ(output_data[i], expect[i]);
}

-in_tensor0.SetData(nullptr);
-in_tensor1.SetData(nullptr);
-out_tensor.SetData(nullptr);
+in_tensor0.set_data(nullptr);
+in_tensor1.set_data(nullptr);
+out_tensor.set_data(nullptr);
}
} // namespace mindspore

+18 -18  mindspore/lite/test/ut/src/runtime/kernel/arm/int8/concat_int8_tests.cc

@@ -51,13 +51,13 @@ TEST_F(TestConcatInt8, Concat1_axis0) {

lite::Tensor *input_tensor1 = new lite::Tensor;
TypeId tid_int8 = kNumberTypeInt8;
-input_tensor1->SetData(input1.data());
+input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);

lite::Tensor *input_tensor2 = new lite::Tensor;
-input_tensor2->SetData(input2.data());
+input_tensor2->set_data(input2.data());
input_tensor2->set_shape(shape2);
input_tensor2->AddQuantParam(input_quant_arg);
input_tensor2->set_data_type(tid_int8);
@@ -68,7 +68,7 @@ TEST_F(TestConcatInt8, Concat1_axis0) {

std::vector<lite::Tensor *> outputs_tensor(1);
lite::Tensor *output0_tensor = new lite::Tensor;
-output0_tensor->SetData(output);
+output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -93,9 +93,9 @@ TEST_F(TestConcatInt8, Concat1_axis0) {
std::vector<int8_t> except_result = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
PrintData("output data", output, input1.size() + input2.size());
CompareOutputData(output, except_result.data(), input1.size() + input2.size(), 0.000001);
-input_tensor1->SetData(nullptr);
-input_tensor2->SetData(nullptr);
-output0_tensor->SetData(nullptr);
+input_tensor1->set_data(nullptr);
+input_tensor2->set_data(nullptr);
+output0_tensor->set_data(nullptr);
delete input_tensor1;
delete input_tensor2;
delete output0_tensor;
@@ -123,13 +123,13 @@ TEST_F(TestConcatInt8, Concat1_axis1_thread2) {

lite::Tensor *input_tensor1 = new lite::Tensor;
TypeId tid_int8 = kNumberTypeInt8;
-input_tensor1->SetData(input1.data());
+input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);

lite::Tensor *input_tensor2 = new lite::Tensor;
-input_tensor2->SetData(input2.data());
+input_tensor2->set_data(input2.data());
input_tensor2->set_shape(shape2);
input_tensor2->AddQuantParam(input_quant_arg);
input_tensor2->set_data_type(tid_int8);
@@ -140,7 +140,7 @@ TEST_F(TestConcatInt8, Concat1_axis1_thread2) {

std::vector<lite::Tensor *> outputs_tensor(1);
lite::Tensor *output0_tensor = new lite::Tensor;
-output0_tensor->SetData(output);
+output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -166,9 +166,9 @@ TEST_F(TestConcatInt8, Concat1_axis1_thread2) {
PrintData("output data", output, input1.size() + input2.size());
CompareOutputData(output, except_result.data(), input1.size() + input2.size(), 0.000001);

-input_tensor1->SetData(nullptr);
-input_tensor2->SetData(nullptr);
-output0_tensor->SetData(nullptr);
+input_tensor1->set_data(nullptr);
+input_tensor2->set_data(nullptr);
+output0_tensor->set_data(nullptr);
delete input_tensor1;
delete input_tensor2;
delete output0_tensor;
@@ -196,13 +196,13 @@ TEST_F(TestConcatInt8, Concat1_axis1_thread2_quant1) {

lite::Tensor *input_tensor1 = new lite::Tensor;
TypeId tid_int8 = kNumberTypeInt8;
-input_tensor1->SetData(input1.data());
+input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);

lite::Tensor *input_tensor2 = new lite::Tensor;
-input_tensor2->SetData(input2.data());
+input_tensor2->set_data(input2.data());
input_tensor2->set_shape(shape2);
input_tensor2->AddQuantParam(input_quant_arg);
input_tensor2->set_data_type(tid_int8);
@@ -213,7 +213,7 @@ TEST_F(TestConcatInt8, Concat1_axis1_thread2_quant1) {

std::vector<lite::Tensor *> outputs_tensor(1);
lite::Tensor *output0_tensor = new lite::Tensor;
-output0_tensor->SetData(output);
+output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -239,9 +239,9 @@ TEST_F(TestConcatInt8, Concat1_axis1_thread2_quant1) {
PrintData("output data", output, input1.size() + input2.size());
CompareOutputData(output, except_result.data(), input1.size() + input2.size(), 0.000001);

-input_tensor1->SetData(nullptr);
-input_tensor2->SetData(nullptr);
-output0_tensor->SetData(nullptr);
+input_tensor1->set_data(nullptr);
+input_tensor2->set_data(nullptr);
+output0_tensor->set_data(nullptr);
delete input_tensor1;
delete input_tensor2;
delete output0_tensor;


+13 -10  mindspore/lite/test/ut/src/runtime/kernel/arm/int8/conv_1x1_int8_tests.cc

@@ -71,7 +71,7 @@ TEST_F(TestConv1x1Int8, Input1x1PrePack2) {

int Conv1x1Int8TestInit1_perchannel(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
ConvParameter *conv_param, int8_t **correct) {
-Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST);
+Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto in_quant_arg = new mindspore::lite::QuantArg();
in_quant_arg->zeroPoint = -42, in_quant_arg->scale = 0.117647;
in_t->AddQuantParam(*in_quant_arg);
@@ -81,7 +81,8 @@ int Conv1x1Int8TestInit1_perchannel(std::vector<lite::Tensor *> *inputs_, std::v
memcpy(in_t->MutableData(), in, in_t->ElementsNum() * sizeof(int8_t));
inputs_->push_back(in_t);

-Tensor *weight_t = new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST);
+Tensor *weight_t =
+  new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
auto weight_quant_arg1 = new mindspore::lite::QuantArg();
weight_quant_arg1->zeroPoint = 66, weight_quant_arg1->scale = 0.96439215686275;
@@ -96,7 +97,7 @@ int Conv1x1Int8TestInit1_perchannel(std::vector<lite::Tensor *> *inputs_, std::v
memcpy(weight_t->MutableData(), weight, weight_t->ElementsNum() * sizeof(int8_t));
inputs_->push_back(weight_t);

-Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST);
+Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
auto output_quant_arg = new mindspore::lite::QuantArg();
output_quant_arg->zeroPoint = 7, output_quant_arg->scale = 0.294321233;
@@ -139,7 +140,7 @@ TEST_F(TestConv1x1Int8, Conv1x1TestPerChannel) {

int Conv1x1Int8TestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
ConvParameter *conv_param, int8_t **correct) {
-Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST);
+Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto in_quant_arg = new mindspore::lite::QuantArg();
in_quant_arg->zeroPoint = -42, in_quant_arg->scale = 0.117647;
in_t->AddQuantParam(*in_quant_arg);
@@ -151,7 +152,8 @@ int Conv1x1Int8TestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite:
reinterpret_cast<int8_t *>(in_t->MutableData()));
inputs_->push_back(in_t);

-Tensor *weight_t = new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST);
+Tensor *weight_t =
+  new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto weight_quant_arg = new mindspore::lite::QuantArg();
weight_quant_arg->zeroPoint = 66, weight_quant_arg->scale = 0.036439215686275;
weight_t->AddQuantParam(*weight_quant_arg);
@@ -162,7 +164,7 @@ int Conv1x1Int8TestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite:
reinterpret_cast<int8_t *>(weight_t->MutableData()));
inputs_->push_back(weight_t);

-Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST);
+Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
auto output_quant_arg = new mindspore::lite::QuantArg();
output_quant_arg->zeroPoint = 7, output_quant_arg->scale = 0.234321233;
@@ -208,7 +210,7 @@ TEST_F(TestConv1x1Int8, Conv1x1Int8Test1) {
int Conv1x1Int8TestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
ConvParameter *conv_param, int8_t **correct) {
size_t buffer_size;
-Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST);
+Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto in_quant_arg = new mindspore::lite::QuantArg();
in_quant_arg->zeroPoint = -42, in_quant_arg->scale = 0.117647;
in_t->AddQuantParam(*in_quant_arg);
@@ -219,7 +221,8 @@ int Conv1x1Int8TestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite:
inputs_->push_back(in_t);
delete[] input;

-Tensor *weight_t = new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST);
+Tensor *weight_t =
+  new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto weight_quant_arg = new mindspore::lite::QuantArg();
weight_quant_arg->zeroPoint = 66, weight_quant_arg->scale = 0.036439215686275;
weight_t->AddQuantParam(*weight_quant_arg);
@@ -230,7 +233,7 @@ int Conv1x1Int8TestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite:
inputs_->push_back(weight_t);
delete[] weight;

-Tensor *bias_t = new Tensor(kNumberTypeInt32, {4}, schema::Format_NHWC, lite::Tensor::Category::CONST);
+Tensor *bias_t = new Tensor(kNumberTypeInt32, {4}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
std::string bias_path = "./bias";
auto bias = mindspore::lite::ReadFile(bias_path.c_str(), &buffer_size);
@@ -238,7 +241,7 @@ int Conv1x1Int8TestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite:
inputs_->push_back(bias_t);
delete[] bias;

-Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST);
+Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
auto output_quant_arg = new mindspore::lite::QuantArg();
output_quant_arg->zeroPoint = 7, output_quant_arg->scale = 0.234321233;
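Besides the accessor rename, this file picks up the commit's second rename: the tensor category enumerator CONST becomes CONST_TENSOR. A short sketch of the new spelling, assuming the four-argument constructor used in the hunks above:

// Illustrative sketch only -- a constant weight tensor under the renamed category.
auto *weight_t =
  new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();   // unlike the borrowed-buffer tests, this tensor owns its storage
// ... fill weight_t->MutableData() and push weight_t into the kernel's input vector ...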


+ 40
- 40
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/crop_int8_tests.cc View File

@@ -48,7 +48,7 @@ TEST_F(TestCropInt8, crop_1d_axis0_offset0_quant0_thread2) {

lite::Tensor *input_tensor1 = new lite::Tensor;
TypeId tid_int8 = kNumberTypeInt8;
input_tensor1->SetData(input1.data());
input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -58,7 +58,7 @@ TEST_F(TestCropInt8, crop_1d_axis0_offset0_quant0_thread2) {

std::vector<lite::Tensor *> outputs_tensor(1);
lite::Tensor *output0_tensor = new lite::Tensor;
output0_tensor->SetData(output);
output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -87,8 +87,8 @@ TEST_F(TestCropInt8, crop_1d_axis0_offset0_quant0_thread2) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

input_tensor1->SetData(nullptr);
output0_tensor->SetData(nullptr);
input_tensor1->set_data(nullptr);
output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;
@@ -112,7 +112,7 @@ TEST_F(TestCropInt8, crop_2d_axis1_offset0_quant0_thread2) {

lite::Tensor *input_tensor1 = new lite::Tensor;
TypeId tid_int8 = kNumberTypeInt8;
input_tensor1->SetData(input1.data());
input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -122,7 +122,7 @@ TEST_F(TestCropInt8, crop_2d_axis1_offset0_quant0_thread2) {

std::vector<lite::Tensor *> outputs_tensor(1);
lite::Tensor *output0_tensor = new lite::Tensor;
output0_tensor->SetData(output);
output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -151,8 +151,8 @@ TEST_F(TestCropInt8, crop_2d_axis1_offset0_quant0_thread2) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

input_tensor1->SetData(nullptr);
output0_tensor->SetData(nullptr);
input_tensor1->set_data(nullptr);
output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;
@@ -176,7 +176,7 @@ TEST_F(TestCropInt8, crop_3d_axis1_offset0_quant0_thread0) {

lite::Tensor *input_tensor1 = new lite::Tensor;
TypeId tid_int8 = kNumberTypeInt8;
input_tensor1->SetData(input1.data());
input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -186,7 +186,7 @@ TEST_F(TestCropInt8, crop_3d_axis1_offset0_quant0_thread0) {

std::vector<lite::Tensor *> outputs_tensor(1);
lite::Tensor *output0_tensor = new lite::Tensor;
output0_tensor->SetData(output);
output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -215,8 +215,8 @@ TEST_F(TestCropInt8, crop_3d_axis1_offset0_quant0_thread0) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

input_tensor1->SetData(nullptr);
output0_tensor->SetData(nullptr);
input_tensor1->set_data(nullptr);
output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;
@@ -241,7 +241,7 @@ TEST_F(TestCropInt8, crop_3d_axis1_offset0_quant0_thread2) {

lite::Tensor *input_tensor1 = new lite::Tensor;
TypeId tid_int8 = kNumberTypeInt8;
input_tensor1->SetData(input1.data());
input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -251,7 +251,7 @@ TEST_F(TestCropInt8, crop_3d_axis1_offset0_quant0_thread2) {

std::vector<lite::Tensor *> outputs_tensor(1);
lite::Tensor *output0_tensor = new lite::Tensor;
output0_tensor->SetData(output);
output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -280,8 +280,8 @@ TEST_F(TestCropInt8, crop_3d_axis1_offset0_quant0_thread2) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

input_tensor1->SetData(nullptr);
output0_tensor->SetData(nullptr);
input_tensor1->set_data(nullptr);
output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;
@@ -305,7 +305,7 @@ TEST_F(TestCropInt8, crop_4d_axis0_offset0_quant0_thread0) {

lite::Tensor *input_tensor1 = new lite::Tensor;
TypeId tid_int8 = kNumberTypeInt8;
input_tensor1->SetData(input1.data());
input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -315,7 +315,7 @@ TEST_F(TestCropInt8, crop_4d_axis0_offset0_quant0_thread0) {

std::vector<lite::Tensor *> outputs_tensor(1);
lite::Tensor *output0_tensor = new lite::Tensor;
output0_tensor->SetData(output);
output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -344,8 +344,8 @@ TEST_F(TestCropInt8, crop_4d_axis0_offset0_quant0_thread0) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

input_tensor1->SetData(nullptr);
output0_tensor->SetData(nullptr);
input_tensor1->set_data(nullptr);
output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;
@@ -369,7 +369,7 @@ TEST_F(TestCropInt8, crop_4d_axis1_offset0_quant0_thread0) {

lite::Tensor *input_tensor1 = new lite::Tensor;
TypeId tid_int8 = kNumberTypeInt8;
input_tensor1->SetData(input1.data());
input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -379,7 +379,7 @@ TEST_F(TestCropInt8, crop_4d_axis1_offset0_quant0_thread0) {

std::vector<lite::Tensor *> outputs_tensor(1);
lite::Tensor *output0_tensor = new lite::Tensor;
output0_tensor->SetData(output);
output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -408,8 +408,8 @@ TEST_F(TestCropInt8, crop_4d_axis1_offset0_quant0_thread0) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

input_tensor1->SetData(nullptr);
output0_tensor->SetData(nullptr);
input_tensor1->set_data(nullptr);
output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;
@@ -433,7 +433,7 @@ TEST_F(TestCropInt8, crop_4d_axis1_offset1_quant0_thread0) {

lite::Tensor *input_tensor1 = new lite::Tensor;
TypeId tid_int8 = kNumberTypeInt8;
input_tensor1->SetData(input1.data());
input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -443,7 +443,7 @@ TEST_F(TestCropInt8, crop_4d_axis1_offset1_quant0_thread0) {

std::vector<lite::Tensor *> outputs_tensor(1);
lite::Tensor *output0_tensor = new lite::Tensor;
output0_tensor->SetData(output);
output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -475,8 +475,8 @@ TEST_F(TestCropInt8, crop_4d_axis1_offset1_quant0_thread0) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

input_tensor1->SetData(nullptr);
output0_tensor->SetData(nullptr);
input_tensor1->set_data(nullptr);
output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;
@@ -500,7 +500,7 @@ TEST_F(TestCropInt8, crop_4d_axis1_offset1_quant1_thread0) {

lite::Tensor *input_tensor1 = new lite::Tensor;
TypeId tid_int8 = kNumberTypeInt8;
input_tensor1->SetData(input1.data());
input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -510,7 +510,7 @@ TEST_F(TestCropInt8, crop_4d_axis1_offset1_quant1_thread0) {

std::vector<lite::Tensor *> outputs_tensor(1);
lite::Tensor *output0_tensor = new lite::Tensor;
output0_tensor->SetData(output);
output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -542,8 +542,8 @@ TEST_F(TestCropInt8, crop_4d_axis1_offset1_quant1_thread0) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

input_tensor1->SetData(nullptr);
output0_tensor->SetData(nullptr);
input_tensor1->set_data(nullptr);
output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;
@@ -569,7 +569,7 @@ TEST_F(TestCropInt8, crop_4d_axis0_offset0_quant0_thread2) {

lite::Tensor *input_tensor1 = new lite::Tensor;
TypeId tid_int8 = kNumberTypeInt8;
input_tensor1->SetData(input1.data());
input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -579,7 +579,7 @@ TEST_F(TestCropInt8, crop_4d_axis0_offset0_quant0_thread2) {

std::vector<lite::Tensor *> outputs_tensor(1);
lite::Tensor *output0_tensor = new lite::Tensor;
output0_tensor->SetData(output);
output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -608,8 +608,8 @@ TEST_F(TestCropInt8, crop_4d_axis0_offset0_quant0_thread2) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

input_tensor1->SetData(nullptr);
output0_tensor->SetData(nullptr);
input_tensor1->set_data(nullptr);
output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;
@@ -635,7 +635,7 @@ TEST_F(TestCropInt8, crop_4d_axis0_offset0_quant0_thread3) {

lite::Tensor *input_tensor1 = new lite::Tensor;
TypeId tid_int8 = kNumberTypeInt8;
input_tensor1->SetData(input1.data());
input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -645,7 +645,7 @@ TEST_F(TestCropInt8, crop_4d_axis0_offset0_quant0_thread3) {

std::vector<lite::Tensor *> outputs_tensor(1);
lite::Tensor *output0_tensor = new lite::Tensor;
output0_tensor->SetData(output);
output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -674,8 +674,8 @@ TEST_F(TestCropInt8, crop_4d_axis0_offset0_quant0_thread3) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

input_tensor1->SetData(nullptr);
output0_tensor->SetData(nullptr);
input_tensor1->set_data(nullptr);
output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;


+ 3
- 3
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc View File

@@ -310,7 +310,7 @@ int DeConvInt8TestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::
ConvParameter *conv_param, int8_t **correct) {
/* float data from deconv fp32 testcase : DeConvTestInit2 */
/* vq = (vi - zp) * s vi = vq / s + zp */
-Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 4, 2, 3}, Format_NHWC, lite::TensorCategory(NodeType_Parameter));
+Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 4, 2, 3}, Format_NHWC, lite::Tensor::Category::VAR);
in_t->MallocData();
int8_t in[] = {6, 43, 38, 24, -8, 12, 41, -24, -20, 41, -19, -6, -26, -6, 23, -31, 34, 45, 8, 45, -39, -27, -48, 12};
memcpy(in_t->MutableData(), in, sizeof(int8_t) * in_t->ElementsNum());
@@ -319,7 +319,7 @@ int DeConvInt8TestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::
in_t->AddQuantParam(*in_quant_arg);
inputs_->push_back(in_t);

-Tensor *weight_t = new Tensor(kNumberTypeInt8, {3, 3, 3, 2}, Format_NHWC, lite::TensorCategory(NodeType_Parameter));
+Tensor *weight_t = new Tensor(kNumberTypeInt8, {3, 3, 3, 2}, Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
int8_t weight[] = {66, 89, 98, 74, 95, 86, 125, 95, 105, 83, 116, 94, 90, 80, 86, 59, 72, 92,
64, 76, 92, 80, 90, 87, 106, 55, 105, 60, 75, 53, 81, 81, 98, 81, 86, 59,
@@ -330,7 +330,7 @@ int DeConvInt8TestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::
weight_t->AddQuantParam(*w_quant_arg);
inputs_->push_back(weight_t);

-Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 7, 3, 2}, Format_NHWC, lite::TensorCategory(NodeType_Parameter));
+Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 7, 3, 2}, Format_NHWC, lite::Tensor::Category::VAR);
out_t->MallocData();
QuantArg *out_quant_arg = new QuantArg();
out_quant_arg->zeroPoint = 31, out_quant_arg->scale = 0.3439215686275;
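Here the old lite::TensorCategory(NodeType_Parameter) helper gives way to explicit enumerators, chosen by role: the activation tensors in_t and out_t become Category::VAR, while the weight becomes Category::CONST_TENSOR. A two-line sketch of the distinction, matching the constructors in this hunk:

// Illustrative sketch only -- variable activation vs. constant weight after the rename.
Tensor *act_t    = new Tensor(kNumberTypeInt8, {1, 4, 2, 3}, Format_NHWC, lite::Tensor::Category::VAR);
Tensor *weight_t = new Tensor(kNumberTypeInt8, {3, 3, 3, 2}, Format_NHWC, lite::Tensor::Category::CONST_TENSOR);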


+6 -6  mindspore/lite/test/ut/src/runtime/kernel/arm/int8/div_int8_test.cc

@@ -36,9 +36,9 @@ TEST_F(TestDivInt8, DivInt8) {
int8_t input_data0[] = {105, 35, -27, 0, -63, 99, 16, 45, 67, -49};
int8_t input_data1[] = {126, -38, -115, 106, -98, 119, 103, 81, -114, 68};
int8_t output_data[10] = {0};
-in_tensor0.SetData(input_data0);
-in_tensor1.SetData(input_data1);
-out_tensor.SetData(output_data);
+in_tensor0.set_data(input_data0);
+in_tensor1.set_data(input_data1);
+out_tensor.set_data(output_data);

const lite::QuantArg quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::QuantArg quant_in1 = {0.00784314f, 0};
@@ -69,8 +69,8 @@ TEST_F(TestDivInt8, DivInt8) {
EXPECT_EQ(output_data[i], expect0[i]);
}

-in_tensor0.SetData(nullptr);
-in_tensor1.SetData(nullptr);
-out_tensor.SetData(nullptr);
+in_tensor0.set_data(nullptr);
+in_tensor1.set_data(nullptr);
+out_tensor.set_data(nullptr);
}
} // namespace mindspore

+1 -1  mindspore/lite/test/ut/src/runtime/kernel/arm/int8/fullconnection_int8_tests.cc

@@ -42,7 +42,7 @@ extern void QuantProcess(float *input, int len, float min, float max, float *sca
extern lite::Tensor *MakeQuantTensor(int8_t *data, int len, std::vector<int> *shape, float scale, int zp);

lite::Tensor *MakeIntTensor(int *data, int len, std::vector<int> *shape) {
-auto tensor = new lite::Tensor(kNumberTypeInt32, *shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
+auto tensor = new lite::Tensor(kNumberTypeInt32, *shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
tensor->MallocData();
auto tensor_ptr = reinterpret_cast<int *>(tensor->MutableData());
memcpy(tensor_ptr, data, len * sizeof(int));


+6 -6  mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gatherNd_int8_test.cc

@@ -57,8 +57,8 @@ TEST_F(TestGatherNdInt8, GatherNdTest) {
inputs_tensor.push_back(&input0_tensor);
inputs_tensor.push_back(&input1_tensor);

-input0_tensor.SetData(in_data.data());
-input1_tensor.SetData(in_data1.data());
+input0_tensor.set_data(in_data.data());
+input1_tensor.set_data(in_data1.data());

input0_tensor.set_shape(shape);
input1_tensor.set_shape({3, 3});
@@ -71,7 +71,7 @@ TEST_F(TestGatherNdInt8, GatherNdTest) {
std::vector<int8_t> corr_out = {6, 7, 8, 9, 0, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5};
lite::Tensor output0_tensor;
outputs_tensor.push_back(&output0_tensor);
-output0_tensor.SetData(output.data());
+output0_tensor.set_data(output.data());
output0_tensor.set_shape(out_shape);
output0_tensor.AddQuantParam(output_quant_arg);

@@ -94,9 +94,9 @@ TEST_F(TestGatherNdInt8, GatherNdTest) {
std::cout << std::endl;
CompareOutputData(output.data(), corr_out.data(), output0_tensor.ElementsNum(), 0.001);

-input0_tensor.SetData(nullptr);
-input1_tensor.SetData(nullptr);
-output0_tensor.SetData(nullptr);
+input0_tensor.set_data(nullptr);
+input1_tensor.set_data(nullptr);
+output0_tensor.set_data(nullptr);
MS_LOG(INFO) << "TestGatherNd accuracy passed";
}
} // namespace mindspore

+6 -6  mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gather_int8_test.cc

@@ -55,8 +55,8 @@ TEST_F(TestGatherInt8, GatherTest) {
inputs_tensor.push_back(&input0_tensor);
inputs_tensor.push_back(&input1_tensor);

-input0_tensor.SetData(in_data.data());
-input1_tensor.SetData(in_data1.data());
+input0_tensor.set_data(in_data.data());
+input1_tensor.set_data(in_data1.data());

input0_tensor.set_shape(shape);
input1_tensor.set_shape({2});
@@ -69,7 +69,7 @@ TEST_F(TestGatherInt8, GatherTest) {
std::vector<int8_t> corr_out = {-11, -41, -21, -51, -31, -61, 11, 41, 21, 51, 31, 61};
lite::Tensor output0_tensor;
outputs_tensor.push_back(&output0_tensor);
-output0_tensor.SetData(output.data());
+output0_tensor.set_data(output.data());
output0_tensor.set_shape(shape);
output0_tensor.AddQuantParam(output_quant_arg);

@@ -92,9 +92,9 @@ TEST_F(TestGatherInt8, GatherTest) {
std::cout << std::endl;
CompareOutputData(output.data(), corr_out.data(), output0_tensor.ElementsNum(), 0.001);

-input0_tensor.SetData(nullptr);
-input1_tensor.SetData(nullptr);
-output0_tensor.SetData(nullptr);
+input0_tensor.set_data(nullptr);
+input1_tensor.set_data(nullptr);
+output0_tensor.set_data(nullptr);
MS_LOG(INFO) << "TestGather_int8 accuracy passed";
}
} // namespace mindspore

+4 -4  mindspore/lite/test/ut/src/runtime/kernel/arm/int8/hswish_int8_tests.cc

@@ -36,8 +36,8 @@ TEST_F(TestHSwishInt8, HSwish) {

int8_t input_data[] = {-116, -105, -93, -35, 23, 35, 46, 104}; // -3.5f, -3.0f, -2.5f, 0.f, 2.5f, 3.0f, 3.5f, 6.0f
int8_t output_data[8] = {0};
-in_tensor.SetData(input_data);
-out_tensor.SetData(output_data);
+in_tensor.set_data(input_data);
+out_tensor.set_data(output_data);

const lite::QuantArg quant_in = {0.0431373f, -35}; // -4.0 -- 7.0
const lite::QuantArg quant_out = {0.0392157f, -52}; // -3.0 -- 7.0
@@ -69,7 +69,7 @@ TEST_F(TestHSwishInt8, HSwish) {
EXPECT_EQ(output_data[i], expect[i]);
}

-in_tensor.SetData(nullptr);
-out_tensor.SetData(nullptr);
+in_tensor.set_data(nullptr);
+out_tensor.set_data(nullptr);
}
} // namespace mindspore

+1 -1  mindspore/lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc

@@ -47,7 +47,7 @@ void QuantProcess(float *input, int len, float min, float max, float *scale, int
}

lite::Tensor *MakeQuantTensor(int8_t *data, int len, std::vector<int> *shape, float scale, int zp) {
- auto tensor = new lite::Tensor(kNumberTypeInt8, *shape, schema::Format_NHWC, lite::Tensor::Category::CONST);
+ auto tensor = new lite::Tensor(kNumberTypeInt8, *shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
tensor->MallocData();
if (data) {
auto tensor_ptr = reinterpret_cast<int8_t *>(tensor->MutableData());
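
Besides the set_data rename, the commit renames the tensor-category enumerator lite::Tensor::Category::CONST to CONST_TENSOR; only the enumerator changes at call sites. A sketch of the updated helper usage (shape illustrative):

    std::vector<int> shape = {1, 2, 2, 8};
    auto *tensor = new lite::Tensor(kNumberTypeInt8, shape, schema::Format_NHWC,
                                    lite::Tensor::Category::CONST_TENSOR);
    tensor->MallocData();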


+ 30
- 30
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/mul_int8_tests.cc

@@ -51,13 +51,13 @@ TEST_F(TestMulInt8, Mul_quant0) {

lite::Tensor *input_tensor1 = new lite::Tensor;
TypeId tid_int8 = kNumberTypeInt8;
- input_tensor1->SetData(input1.data());
+ input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);

lite::Tensor *input_tensor2 = new lite::Tensor;
- input_tensor2->SetData(input2.data());
+ input_tensor2->set_data(input2.data());
input_tensor2->set_shape(shape2);
input_tensor2->AddQuantParam(input_quant_arg);
input_tensor2->set_data_type(tid_int8);
@@ -68,7 +68,7 @@ TEST_F(TestMulInt8, Mul_quant0) {

std::vector<lite::Tensor *> outputs_tensor(1);
lite::Tensor *output0_tensor = new lite::Tensor;
- output0_tensor->SetData(output);
+ output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -92,9 +92,9 @@ TEST_F(TestMulInt8, Mul_quant0) {
std::vector<int8_t> except_result = {1, 4, 3, 8, 5, 12, 21, 32, 27, 40, 33, 48};
PrintData("output data", output, input1.size());
CompareOutputData(output, except_result.data(), input1.size(), 0.000001);
- input_tensor1->SetData(nullptr);
- input_tensor2->SetData(nullptr);
- output0_tensor->SetData(nullptr);
+ input_tensor1->set_data(nullptr);
+ input_tensor2->set_data(nullptr);
+ output0_tensor->set_data(nullptr);
delete input_tensor1;
delete input_tensor2;
delete output0_tensor;
@@ -122,13 +122,13 @@ TEST_F(TestMulInt8, Mul_quant0_thread0) {

lite::Tensor *input_tensor1 = new lite::Tensor;
TypeId tid_int8 = kNumberTypeInt8;
- input_tensor1->SetData(input1.data());
+ input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);

lite::Tensor *input_tensor2 = new lite::Tensor;
- input_tensor2->SetData(input2.data());
+ input_tensor2->set_data(input2.data());
input_tensor2->set_shape(shape2);
input_tensor2->AddQuantParam(input_quant_arg);
input_tensor2->set_data_type(tid_int8);
@@ -139,7 +139,7 @@ TEST_F(TestMulInt8, Mul_quant0_thread0) {

std::vector<lite::Tensor *> outputs_tensor(1);
lite::Tensor *output0_tensor = new lite::Tensor;
- output0_tensor->SetData(output);
+ output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -163,9 +163,9 @@ TEST_F(TestMulInt8, Mul_quant0_thread0) {
std::vector<int8_t> except_result = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18};
PrintData("output data", output, input1.size());
CompareOutputData(output, except_result.data(), input1.size(), 0.000001);
- input_tensor1->SetData(nullptr);
- input_tensor2->SetData(nullptr);
- output0_tensor->SetData(nullptr);
+ input_tensor1->set_data(nullptr);
+ input_tensor2->set_data(nullptr);
+ output0_tensor->set_data(nullptr);
delete input_tensor1;
delete input_tensor2;
delete output0_tensor;
@@ -193,13 +193,13 @@ TEST_F(TestMulInt8, Mul_quant1) {

lite::Tensor *input_tensor1 = new lite::Tensor;
TypeId tid_int8 = kNumberTypeInt8;
- input_tensor1->SetData(input1.data());
+ input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);

lite::Tensor *input_tensor2 = new lite::Tensor;
- input_tensor2->SetData(input2.data());
+ input_tensor2->set_data(input2.data());
input_tensor2->set_shape(shape2);
input_tensor2->AddQuantParam(input_quant_arg);
input_tensor2->set_data_type(tid_int8);
@@ -210,7 +210,7 @@ TEST_F(TestMulInt8, Mul_quant1) {

std::vector<lite::Tensor *> outputs_tensor(1);
lite::Tensor *output0_tensor = new lite::Tensor;
- output0_tensor->SetData(output);
+ output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -234,9 +234,9 @@ TEST_F(TestMulInt8, Mul_quant1) {
std::vector<int8_t> except_result = {1, 2, 2, 4, 3, 6, 11, 16, 14, 20, 17, 24};
PrintData("output data", output, input1.size());
CompareOutputData(output, except_result.data(), input1.size(), 0.000001);
- input_tensor1->SetData(nullptr);
- input_tensor2->SetData(nullptr);
- output0_tensor->SetData(nullptr);
+ input_tensor1->set_data(nullptr);
+ input_tensor2->set_data(nullptr);
+ output0_tensor->set_data(nullptr);
delete input_tensor1;
delete input_tensor2;
delete output0_tensor;
@@ -264,13 +264,13 @@ TEST_F(TestMulInt8, Mul_quant1_thread1) {

lite::Tensor *input_tensor1 = new lite::Tensor;
TypeId tid_int8 = kNumberTypeInt8;
- input_tensor1->SetData(input1.data());
+ input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);

lite::Tensor *input_tensor2 = new lite::Tensor;
- input_tensor2->SetData(input2.data());
+ input_tensor2->set_data(input2.data());
input_tensor2->set_shape(shape2);
input_tensor2->AddQuantParam(input_quant_arg);
input_tensor2->set_data_type(tid_int8);
@@ -281,7 +281,7 @@ TEST_F(TestMulInt8, Mul_quant1_thread1) {

std::vector<lite::Tensor *> outputs_tensor(1);
lite::Tensor *output0_tensor = new lite::Tensor;
- output0_tensor->SetData(output);
+ output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -305,9 +305,9 @@ TEST_F(TestMulInt8, Mul_quant1_thread1) {
std::vector<int8_t> except_result = {1, 2, 2, 4, 3, 6, 11, 16, 14, 20, 17, 24};
PrintData("output data", output, input1.size());
CompareOutputData(output, except_result.data(), input1.size(), 0.000001);
- input_tensor1->SetData(nullptr);
- input_tensor2->SetData(nullptr);
- output0_tensor->SetData(nullptr);
+ input_tensor1->set_data(nullptr);
+ input_tensor2->set_data(nullptr);
+ output0_tensor->set_data(nullptr);
delete input_tensor1;
delete input_tensor2;
delete output0_tensor;
@@ -335,13 +335,13 @@ TEST_F(TestMulInt8, test) {

lite::Tensor *input_tensor1 = new lite::Tensor;
TypeId tid_int8 = kNumberTypeInt8;
- input_tensor1->SetData(input1.data());
+ input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);

lite::Tensor *input_tensor2 = new lite::Tensor;
- input_tensor2->SetData(input2.data());
+ input_tensor2->set_data(input2.data());
input_tensor2->set_shape(shape2);
input_tensor2->AddQuantParam(input_quant_arg);
input_tensor2->set_data_type(tid_int8);
@@ -352,7 +352,7 @@ TEST_F(TestMulInt8, test) {

std::vector<lite::Tensor *> outputs_tensor(1);
lite::Tensor *output0_tensor = new lite::Tensor;
- output0_tensor->SetData(output);
+ output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -376,9 +376,9 @@ TEST_F(TestMulInt8, test) {
std::vector<int8_t> except_result = {1, 4, 9, 16, 25, 36, 7, 16, 27, 40, 55, 72};
PrintData("output data", output, input1.size());
CompareOutputData(output, except_result.data(), input1.size(), 0.000001);
- input_tensor1->SetData(nullptr);
- input_tensor2->SetData(nullptr);
- output0_tensor->SetData(nullptr);
+ input_tensor1->set_data(nullptr);
+ input_tensor2->set_data(nullptr);
+ output0_tensor->set_data(nullptr);
delete input_tensor1;
delete input_tensor2;
delete output0_tensor;
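
The heap-allocated tensors in these tests follow the same convention as the stack ones: detach with set_data(nullptr) before delete, so the destructor does not attempt to free a buffer the test still owns. Sketched under the renamed API (names illustrative):

    auto *out = new lite::Tensor;
    out->set_data(output_buffer);  // output_buffer is owned by the test
    // ... run the kernel and compare against the expected values ...
    out->set_data(nullptr);  // detach first
    delete out;              // safe: the tensor no longer points at the buffer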


+ 6
- 8
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/pad_int8_tests.cc

@@ -34,7 +34,7 @@ class TestPadInt8 : public mindspore::CommonTest {

int PadInt8TestInit1(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outputs_, PadParameter *pad_param,
int8_t **correct) {
- Tensor *in_t = new Tensor(kNumberTypeInt8, {3}, schema::Format_NHWC, lite::TensorCategory(NodeType_Parameter));
+ Tensor *in_t = new Tensor(kNumberTypeInt8, {3}, schema::Format_NHWC, lite::Tensor::CONST_TENSOR);
in_t->MallocData();
int8_t in[] = {1, 1, 1};
memcpy(in_t->MutableData(), in, sizeof(int8_t) * in_t->ElementsNum());
@@ -43,7 +43,7 @@ int PadInt8TestInit1(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outp
in_t->AddQuantParam(*in_quant_arg);
inputs_->push_back(in_t);

- Tensor *out_t = new Tensor(kNumberTypeInt8, {7}, schema::Format_NHWC, lite::TensorCategory(NodeType_Parameter));
+ Tensor *out_t = new Tensor(kNumberTypeInt8, {7}, schema::Format_NHWC, lite::Tensor::CONST_TENSOR);
out_t->MallocData();
QuantArg *out_quant_arg = new QuantArg();
out_quant_arg->zeroPoint = 10, out_quant_arg->scale = 0.31228156;
@@ -85,7 +85,7 @@ TEST_F(TestPadInt8, PadInt8Test1) {

int PadInt8TestInit2(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outputs_, PadParameter *pad_param,
int8_t **correct) {
- Tensor *in_t = new Tensor(kNumberTypeInt8, {6, 2}, schema::Format_NHWC, lite::TensorCategory(NodeType_Parameter));
+ Tensor *in_t = new Tensor(kNumberTypeInt8, {6, 2}, schema::Format_NHWC, lite::Tensor::VAR);
in_t->MallocData();
int8_t in[] = {18, 71, 99, -6, 5, -119, 86, 13, 15, -85, -41, -77};
memcpy(in_t->MutableData(), in, sizeof(int8_t) * in_t->ElementsNum());
@@ -94,7 +94,7 @@ int PadInt8TestInit2(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outp
in_t->AddQuantParam(*in_quant_arg);
inputs_->push_back(in_t);

- Tensor *out_t = new Tensor(kNumberTypeInt8, {10, 5}, schema::Format_NHWC, lite::TensorCategory(NodeType_Parameter));
+ Tensor *out_t = new Tensor(kNumberTypeInt8, {10, 5}, schema::Format_NHWC, lite::Tensor::VAR);
out_t->MallocData();
QuantArg *out_quant_arg = new QuantArg();
out_quant_arg->zeroPoint = 10, out_quant_arg->scale = 0.31228156;
@@ -138,8 +138,7 @@ TEST_F(TestPadInt8, PadInt8Test2) {

int PadInt8TestInit4(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outputs_, PadParameter *pad_param,
int8_t **correct) {
- Tensor *in_t =
-   new Tensor(kNumberTypeInt8, {2, 3, 2, 1}, schema::Format_NHWC, lite::TensorCategory(NodeType_Parameter));
+ Tensor *in_t = new Tensor(kNumberTypeInt8, {2, 3, 2, 1}, schema::Format_NHWC, lite::Tensor::VAR);
in_t->MallocData();
int8_t in[] = {73, 24, 7, -31, -109, -2, 69, -64, 51, -45, 38, 53};
memcpy(in_t->MutableData(), in, sizeof(int8_t) * in_t->ElementsNum());
@@ -148,8 +147,7 @@ int PadInt8TestInit4(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outp
in_t->AddQuantParam(*in_quant_arg);
inputs_->push_back(in_t);

- Tensor *out_t =
-   new Tensor(kNumberTypeInt8, {6, 6, 4, 3}, schema::Format_NHWC, lite::TensorCategory(NodeType_Parameter));
+ Tensor *out_t = new Tensor(kNumberTypeInt8, {6, 6, 4, 3}, schema::Format_NHWC, lite::Tensor::VAR);
out_t->MallocData();
QuantArg *out_quant_arg = new QuantArg();
out_quant_arg->zeroPoint = 10, out_quant_arg->scale = 0.31228156;
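
Here the category is no longer derived from a schema node type via lite::TensorCategory(NodeType_Parameter); the tests state it directly, CONST_TENSOR for the pre-filled tensors of PadInt8TestInit1 and VAR elsewhere. A before/after sketch of the construction:

    // before: category inferred from a schema node type
    Tensor *old_t = new Tensor(kNumberTypeInt8, {3}, schema::Format_NHWC,
                               lite::TensorCategory(NodeType_Parameter));
    // after: category stated explicitly
    Tensor *new_t = new Tensor(kNumberTypeInt8, {3}, schema::Format_NHWC, lite::Tensor::CONST_TENSOR);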


+ 9
- 9
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/power_int8_tests.cc

@@ -52,7 +52,7 @@ TEST_F(TestPowerInt8, PowerInt8) {
lite::Tensor input0_tensor;
TypeId tid_int8 = kNumberTypeInt8;
inputs_tensor.push_back(&input0_tensor);
- input0_tensor.SetData(input.data());
+ input0_tensor.set_data(input.data());
input0_tensor.set_shape(in_shape);
input0_tensor.AddQuantParam(input_quant_arg);
input0_tensor.set_data_type(tid_int8);
@@ -62,7 +62,7 @@ TEST_F(TestPowerInt8, PowerInt8) {

lite::Tensor output0_tensor;
outputs_tensor.push_back(&output0_tensor);
- output0_tensor.SetData(output.data());
+ output0_tensor.set_data(output.data());
output0_tensor.AddQuantParam(output_quant_arg);
output0_tensor.set_data_type(tid_int8);

@@ -81,8 +81,8 @@ TEST_F(TestPowerInt8, PowerInt8) {
std::vector<int8_t> except_result = {-112, -65, 15, 127};
CompareOutputData(output.data(), except_result.data(), input.size(), 0.000001);

- input0_tensor.SetData(nullptr);
- output0_tensor.SetData(nullptr);
+ input0_tensor.set_data(nullptr);
+ output0_tensor.set_data(nullptr);
}

TEST_F(TestPowerInt8, normal) {
@@ -116,12 +116,12 @@ TEST_F(TestPowerInt8, normal) {
TypeId tid_int8 = kNumberTypeInt8;
inputs_tensor.push_back(&input0_tensor);
inputs_tensor.push_back(&input1_tensor);
- input0_tensor.SetData(input.data());
+ input0_tensor.set_data(input.data());
input0_tensor.set_shape(in_shape);
input0_tensor.AddQuantParam(input_quant_arg);
input0_tensor.set_data_type(tid_int8);

- input1_tensor.SetData(input1.data());
+ input1_tensor.set_data(input1.data());
input1_tensor.set_shape(in_shape1);
input1_tensor.AddQuantParam(exp_quant_arg);
input1_tensor.set_data_type(tid_int8);
@@ -131,7 +131,7 @@ TEST_F(TestPowerInt8, normal) {

lite::Tensor output0_tensor;
outputs_tensor.push_back(&output0_tensor);
- output0_tensor.SetData(output.data());
+ output0_tensor.set_data(output.data());
output0_tensor.AddQuantParam(output_quant_arg);
output0_tensor.set_data_type(tid_int8);

@@ -150,7 +150,7 @@ TEST_F(TestPowerInt8, normal) {
std::vector<int8_t> except_result = {-99, 95, 124, -14};
CompareOutputData(output.data(), except_result.data(), input.size(), 0.000001);

- input0_tensor.SetData(nullptr);
- output0_tensor.SetData(nullptr);
+ input0_tensor.set_data(nullptr);
+ output0_tensor.set_data(nullptr);
}
} // namespace mindspore

+ 4
- 4
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/prelu_int8_tests.cc

@@ -48,7 +48,7 @@ TEST_F(TestPreluInt8, prelu_1) {

lite::Tensor *input_tensor1 = new lite::Tensor;
TypeId tid_int8 = kNumberTypeInt8;
- input_tensor1->SetData(input1.data());
+ input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -58,7 +58,7 @@ TEST_F(TestPreluInt8, prelu_1) {

std::vector<lite::Tensor *> outputs_tensor(1);
lite::Tensor *output0_tensor = new lite::Tensor;
- output0_tensor->SetData(output);
+ output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -87,8 +87,8 @@ TEST_F(TestPreluInt8, prelu_1) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

- input_tensor1->SetData(nullptr);
- output0_tensor->SetData(nullptr);
+ input_tensor1->set_data(nullptr);
+ output0_tensor->set_data(nullptr);
delete input_tensor1;
delete ctx;
}


+ 4
- 4
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc

@@ -40,7 +40,7 @@ TEST_F(QuantDTypeCastTestFp32, QuantDTypeCastTest1) {
std::vector<int8_t> input = {10, 14, 29, 33, 52, 99, 19, 43, 90, 52, 19, 24, 57, 127, 76, 123};
std::vector<int> in_shape = {1, 4, 4, 1};
lite::Tensor input_tensor;
- input_tensor.SetData(input.data());
+ input_tensor.set_data(input.data());
input_tensor.set_shape(in_shape);
input_tensor.set_data_type(kNumberTypeInt8);
input_tensor.SetFormat(schema::Format_NHWC);
@@ -55,7 +55,7 @@ TEST_F(QuantDTypeCastTestFp32, QuantDTypeCastTest1) {
std::vector<float> output(16);
std::vector<int> out_shape = {1, 4, 4, 1};
lite::Tensor output_tensor;
- output_tensor.SetData(output.data());
+ output_tensor.set_data(output.data());
output_tensor.set_shape(out_shape);
output_tensor.set_data_type(kNumberTypeFloat32);
// output_tensor.SetFormat(schema::Format_NHWC);
@@ -89,7 +89,7 @@ TEST_F(QuantDTypeCastTestFp32, QuantDTypeCastTest2) {
std::vector<float> input = {1, 2, 5, 6, 10, -20, 3, 8, 18, 10, 3, 4, 11, 16, 15, 25};
std::vector<int> in_shape = {1, 4, 4, 1};
lite::Tensor input_tensor;
- input_tensor.SetData(input.data());
+ input_tensor.set_data(input.data());
input_tensor.set_shape(in_shape);
// input_tensor.SetFormat(schema::Format_NHWC);
input_tensor.set_data_type(kNumberTypeFloat32);
@@ -102,7 +102,7 @@ TEST_F(QuantDTypeCastTestFp32, QuantDTypeCastTest2) {
std::vector<int8_t> output(16);
std::vector<int> out_shape = {1, 4, 4, 1};
lite::Tensor output_tensor;
- output_tensor.SetData(output.data());
+ output_tensor.set_data(output.data());
output_tensor.set_shape(out_shape);
output_tensor.SetFormat(schema::Format_NHWC);
output_tensor.set_data_type(kNumberTypeInt8);


+ 4
- 4
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reduce_int8_tests.cc

@@ -57,20 +57,20 @@ class TestReduceInt8 : public mindspore::CommonTest {
};

void TestReduceInt8::TearDown() {
- in_tensor_.SetData(nullptr);
- out_tensor_.SetData(nullptr);
+ in_tensor_.set_data(nullptr);
+ out_tensor_.set_data(nullptr);
}

void TestReduceInt8::Prepare(const std::vector<int> &in_shape, const std::vector<int> &out_shape, int8_t *input_data,
int8_t *output_data, ReduceMode mode, const int *axes, const int num_axes) {
in_tensor_.set_data_type(kNumberTypeInt8);
in_tensor_.set_shape(in_shape);
- in_tensor_.SetData(input_data);
+ in_tensor_.set_data(input_data);
in_tensor_.AddQuantParam(quant_in_);

out_tensor_.set_data_type(kNumberTypeInt8);
out_tensor_.set_shape(out_shape);
- out_tensor_.SetData(output_data);
+ out_tensor_.set_data(output_data);
out_tensor_.AddQuantParam(quant_out_);

param_.mode_ = static_cast<int>(mode);
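
Fixture-style tests such as this one centralize the detach in TearDown instead of repeating it at the end of every test body. A minimal sketch of that fixture shape, assuming the gtest-based CommonTest used throughout these files:

    class TestReduceInt8Sketch : public mindspore::CommonTest {
     protected:
      void TearDown() override {
        in_tensor_.set_data(nullptr);   // the individual test cases own the buffers
        out_tensor_.set_data(nullptr);
      }
      lite::Tensor in_tensor_;
      lite::Tensor out_tensor_;
    };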


+ 8
- 8
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/relux_int8_tests.cc

@@ -34,8 +34,8 @@ TEST_F(TestReluXInt8, Relu) {

int8_t input_data[] = {-102, 25, -51, 89}; // -0.8 0.2 -0.4 0.7
int8_t output_data[4] = {0};
- in_tensor.SetData(input_data);
- out_tensor.SetData(output_data);
+ in_tensor.set_data(input_data);
+ out_tensor.set_data(output_data);

const lite::QuantArg quant_in = {0.00784314f, 0}; // -1.0--1.0 ->
const lite::QuantArg quant_out = {0.00784314f, 0};
@@ -67,8 +67,8 @@ TEST_F(TestReluXInt8, Relu) {
EXPECT_EQ(output_data[i], expect0[i]);
}

- in_tensor.SetData(nullptr);
- out_tensor.SetData(nullptr);
+ in_tensor.set_data(nullptr);
+ out_tensor.set_data(nullptr);
}

TEST_F(TestReluXInt8, Relu6) {
@@ -78,8 +78,8 @@ TEST_F(TestReluXInt8, Relu6) {
// -2.5f, -1.5f, 1.25f, 3.0f, 4.5f, 6.0f, 6.5f, 9.0f
int8_t input_data[] = {-118, -98, -44, -10, 19, 49, 59, 108};
int8_t output_data[8] = {0};
- in_tensor.SetData(input_data);
- out_tensor.SetData(output_data);
+ in_tensor.set_data(input_data);
+ out_tensor.set_data(output_data);

const lite::QuantArg quant_in = {0.0509804f, -69}; // -3.0 -- 10.0
const lite::QuantArg quant_out = {0.0392157f, -128}; // 0.0 -- 10.0
@@ -112,7 +112,7 @@ TEST_F(TestReluXInt8, Relu6) {
EXPECT_EQ(output_data[i], expect[i]);
}

- in_tensor.SetData(nullptr);
- out_tensor.SetData(nullptr);
+ in_tensor.set_data(nullptr);
+ out_tensor.set_data(nullptr);
}
} // namespace mindspore

+ 8
- 8
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reshape_int8_tests.cc

@@ -47,7 +47,7 @@ TEST_F(TestReshapeInt8, reshape_quant0) {

lite::Tensor *input_tensor1 = new lite::Tensor;
TypeId tid_int8 = kNumberTypeInt8;
- input_tensor1->SetData(input1.data());
+ input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -57,7 +57,7 @@ TEST_F(TestReshapeInt8, reshape_quant0) {

std::vector<lite::Tensor *> outputs_tensor(1);
lite::Tensor *output0_tensor = new lite::Tensor;
- output0_tensor->SetData(output);
+ output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -83,8 +83,8 @@ TEST_F(TestReshapeInt8, reshape_quant0) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), input1.size(), 0.000001);

- input_tensor1->SetData(nullptr);
- output0_tensor->SetData(nullptr);
+ input_tensor1->set_data(nullptr);
+ output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;
@@ -107,7 +107,7 @@ TEST_F(TestReshapeInt8, reshape_quant1_thread2) {

lite::Tensor *input_tensor1 = new lite::Tensor;
TypeId tid_int8 = kNumberTypeInt8;
- input_tensor1->SetData(input1.data());
+ input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -117,7 +117,7 @@ TEST_F(TestReshapeInt8, reshape_quant1_thread2) {

std::vector<lite::Tensor *> outputs_tensor(1);
lite::Tensor *output0_tensor = new lite::Tensor;
- output0_tensor->SetData(output);
+ output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -143,8 +143,8 @@ TEST_F(TestReshapeInt8, reshape_quant1_thread2) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), input1.size(), 0.000001);

- input_tensor1->SetData(nullptr);
- output0_tensor->SetData(nullptr);
+ input_tensor1->set_data(nullptr);
+ output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;


+ 4
- 4
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_bilinear_int8_tests.cc

@@ -47,8 +47,8 @@ class TestResizeBilinearInt8 : public mindspore::CommonTest {
};

void TestResizeBilinearInt8::TearDown() {
- in_tensor.SetData(nullptr);
- out_tensor.SetData(nullptr);
+ in_tensor.set_data(nullptr);
+ out_tensor.set_data(nullptr);
}

void TestResizeBilinearInt8::Prepare(const std::vector<int> &in_shape, const std::vector<int> &out_shape,
@@ -57,12 +57,12 @@ void TestResizeBilinearInt8::Prepare(const std::vector<int> &in_shape, const std
const int thread_num) {
in_tensor.set_data_type(kNumberTypeInt8);
in_tensor.set_shape(in_shape);
- in_tensor.SetData(input_data);
+ in_tensor.set_data(input_data);
in_tensor.AddQuantParam(quant_in);

out_tensor.set_data_type(kNumberTypeInt8);
out_tensor.set_shape(out_shape);
- out_tensor.SetData(output_data);
+ out_tensor.set_data(output_data);
out_tensor.AddQuantParam(quant_out);

inputs.push_back(&in_tensor);


+ 4
- 4
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_nearest_neighbor_int8_tests.cc

@@ -52,12 +52,12 @@ void TestResizeNearestNeighborInt8::Prepare(const std::vector<int> &in_shape, co
const QuantArg quant_out, const bool align_corners, const int thread_num) {
in_tensor.set_data_type(kNumberTypeInt8);
in_tensor.set_shape(in_shape);
- in_tensor.SetData(input_data);
+ in_tensor.set_data(input_data);
in_tensor.AddQuantParam(quant_in);

out_tensor.set_data_type(kNumberTypeInt8);
out_tensor.set_shape(out_shape);
- out_tensor.SetData(output_data);
+ out_tensor.set_data(output_data);
out_tensor.AddQuantParam(quant_out);

inputs.push_back(&in_tensor);
@@ -76,8 +76,8 @@ void TestResizeNearestNeighborInt8::Prepare(const std::vector<int> &in_shape, co
}

void TestResizeNearestNeighborInt8::TearDown() {
- in_tensor.SetData(nullptr);
- out_tensor.SetData(nullptr);
+ in_tensor.set_data(nullptr);
+ out_tensor.set_data(nullptr);
}

// 2*2*1 -> 4*4*1


+ 8
- 8
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/scale_int8.cc

@@ -55,10 +55,10 @@ class TestScaleInt8 : public mindspore::CommonTest {
};

void TestScaleInt8::TearDown() {
- in_tensor_.SetData(nullptr);
- scale_tensor_.SetData(nullptr);
- bias_tensor_.SetData(nullptr);
- out_tensor_.SetData(nullptr);
+ in_tensor_.set_data(nullptr);
+ scale_tensor_.set_data(nullptr);
+ bias_tensor_.set_data(nullptr);
+ out_tensor_.set_data(nullptr);
}

void TestScaleInt8::Prepare(const std::vector<int> &in_shape, int8_t *input_data, const std::vector<int> &scale_shape,
@@ -66,11 +66,11 @@ void TestScaleInt8::Prepare(const std::vector<int> &in_shape, int8_t *input_data
const std::vector<int> &out_shape, int8_t *output_data, int axis, bool has_bias) {
in_tensor_.set_data_type(kNumberTypeInt8);
in_tensor_.set_shape(in_shape);
- in_tensor_.SetData(input_data);
+ in_tensor_.set_data(input_data);
in_tensor_.AddQuantParam(quant_in_);
scale_tensor_.set_data_type(kNumberTypeInt8);
scale_tensor_.set_shape(scale_shape);
- scale_tensor_.SetData(scale_data);
+ scale_tensor_.set_data(scale_data);
scale_tensor_.AddQuantParam(quant_scale_);

inputs.clear();
@@ -79,14 +79,14 @@ void TestScaleInt8::Prepare(const std::vector<int> &in_shape, int8_t *input_data
if (has_bias) {
bias_tensor_.set_data_type(kNumberTypeInt8);
bias_tensor_.set_shape(bias_shape);
- bias_tensor_.SetData(bias_data);
+ bias_tensor_.set_data(bias_data);
bias_tensor_.AddQuantParam(quant_bias_);
inputs.emplace_back(&bias_tensor_);
}

out_tensor_.set_data_type(kNumberTypeInt8);
out_tensor_.set_shape(out_shape);
- out_tensor_.SetData(output_data);
+ out_tensor_.set_data(output_data);
out_tensor_.AddQuantParam(quant_out_);

param_.axis_ = axis;


+ 4
- 4
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sigmoid_int8_tests.cc

@@ -33,8 +33,8 @@ TEST_F(TestSigmoidInt8, Sigmoid) {

int8_t input_data[] = {0, 0, 0, 0, 1, 1, 1, 1}; // -3.5f, -3.0f, -2.5f, 0.f, 2.5f, 3.0f, 3.5f, 6.0f
int8_t output_data[8] = {0};
- in_tensor.SetData(input_data);
- out_tensor.SetData(output_data);
+ in_tensor.set_data(input_data);
+ out_tensor.set_data(output_data);

const lite::QuantArg quant_in = {1.0, 0}; // -4.0 -- 7.0
const lite::QuantArg quant_out = {1.0, 0}; // -3.0 -- 7.0
@@ -66,7 +66,7 @@ TEST_F(TestSigmoidInt8, Sigmoid) {
EXPECT_EQ(output_data[i], expect[i]);
}

- in_tensor.SetData(nullptr);
- out_tensor.SetData(nullptr);
+ in_tensor.set_data(nullptr);
+ out_tensor.set_data(nullptr);
}
} // namespace mindspore

+ 4
- 4
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/slice_int8_tests.cc

@@ -34,8 +34,8 @@ TEST_F(TestSliceInt8, SliceInt8) {

int8_t input_data[] = {105, 35, -27, 0, -63, 99, 16, 45, 67, -49, -115, 106, -98, 119, 103, 81, -114, 68};
int8_t output_data[12];
- in_tensor.SetData(input_data);
- out_tensor.SetData(output_data);
+ in_tensor.set_data(input_data);
+ out_tensor.set_data(output_data);

const lite::QuantArg quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::QuantArg quant_out = {0.00784314f, 0};
@@ -71,7 +71,7 @@ TEST_F(TestSliceInt8, SliceInt8) {
EXPECT_EQ(output_data[i], expect0[i]);
}

- in_tensor.SetData(nullptr);
- out_tensor.SetData(nullptr);
+ in_tensor.set_data(nullptr);
+ out_tensor.set_data(nullptr);
}
} // namespace mindspore

+ 4
- 4
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/softmax_int8_tests.cc

@@ -56,7 +56,7 @@ TEST_F(TestSoftmaxInt8, SoftmaxInt8) {
lite::Tensor input0_tensor;
TypeId tid_int8 = kNumberTypeInt8;
inputs_tensor.push_back(&input0_tensor);
- input0_tensor.SetData(input.data());
+ input0_tensor.set_data(input.data());
input0_tensor.set_shape(in_shape);
input0_tensor.AddQuantParam(input_quant_arg);
input0_tensor.set_data_type(tid_int8);
@@ -66,7 +66,7 @@ TEST_F(TestSoftmaxInt8, SoftmaxInt8) {

lite::Tensor output0_tensor;
outputs_tensor.push_back(&output0_tensor);
- output0_tensor.SetData(output.data());
+ output0_tensor.set_data(output.data());
output0_tensor.AddQuantParam(output_quant_arg);
output0_tensor.set_data_type(tid_int8);

@@ -86,8 +86,8 @@ TEST_F(TestSoftmaxInt8, SoftmaxInt8) {
-127, -127, -127, -127, -59, -59, -61, -59, 58, 58, 59, 58};
CompareOutputData(output.data(), except_result.data(), input.size(), 0.000001);

- input0_tensor.SetData(nullptr);
- output0_tensor.SetData(nullptr);
+ input0_tensor.set_data(nullptr);
+ output0_tensor.set_data(nullptr);
}

} // namespace mindspore

+ 4
- 4
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/space_to_batch_int8_tests.cc

@@ -29,8 +29,8 @@ TEST_F(SpaceToBatchTestInt8, test1) {
lite::Tensor out_tensor(kNumberTypeInt8, {4, 2, 2, 1});
int8_t input_data[] = {1, 2, 3, 4};
int8_t output_data[16] = {0};
- in_tensor.SetData(input_data);
- out_tensor.SetData(output_data);
+ in_tensor.set_data(input_data);
+ out_tensor.set_data(output_data);
std::vector<lite::Tensor *> inputs = {&in_tensor};
std::vector<lite::Tensor *> outputs = {&out_tensor};

@@ -52,7 +52,7 @@ TEST_F(SpaceToBatchTestInt8, test1) {
for (int i = 0; i < 8; ++i) {
EXPECT_EQ(output_data[i], expect[i]);
}
- in_tensor.SetData(nullptr);
- out_tensor.SetData(nullptr);
+ in_tensor.set_data(nullptr);
+ out_tensor.set_data(nullptr);
}
} // namespace mindspore

+ 22
- 22
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/split_int8_tests.cc

@@ -52,7 +52,7 @@ TEST_F(TestSplitInt8, Split_quant0_thread2) {

TypeId tid_int8 = kNumberTypeInt8;
lite::Tensor *input_tensor1 = new lite::Tensor;
- input_tensor1->SetData(input1.data());
+ input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -60,12 +60,12 @@ TEST_F(TestSplitInt8, Split_quant0_thread2) {
inputs_tensor[0] = input_tensor1;

lite::Tensor *output1_tensor = new lite::Tensor;
- output1_tensor->SetData(output1);
+ output1_tensor->set_data(output1);
output1_tensor->set_shape(output1_shape);
output1_tensor->AddQuantParam(output_quant_arg);
output1_tensor->set_data_type(tid_int8);
lite::Tensor *output2_tensor = new lite::Tensor;
- output2_tensor->SetData(output2);
+ output2_tensor->set_data(output2);
output2_tensor->set_shape(output2_shape);
output2_tensor->AddQuantParam(output_quant_arg);
output2_tensor->set_data_type(tid_int8);
@@ -103,9 +103,9 @@ TEST_F(TestSplitInt8, Split_quant0_thread2) {
CompareOutputData(output1, except_result1.data(), output1_size, 0.000001);
CompareOutputData(output2, except_result2.data(), output2_size, 0.000001);

- input_tensor1->SetData(nullptr);
- output1_tensor->SetData(nullptr);
- output2_tensor->SetData(nullptr);
+ input_tensor1->set_data(nullptr);
+ output1_tensor->set_data(nullptr);
+ output2_tensor->set_data(nullptr);
delete input_tensor1;
delete output1_tensor;
delete output2_tensor;
@@ -137,7 +137,7 @@ TEST_F(TestSplitInt8, Split_quant0_thread2_num) {

TypeId tid_int8 = kNumberTypeInt8;
lite::Tensor *input_tensor1 = new lite::Tensor;
- input_tensor1->SetData(input1.data());
+ input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -145,17 +145,17 @@ TEST_F(TestSplitInt8, Split_quant0_thread2_num) {
inputs_tensor[0] = input_tensor1;

lite::Tensor *output1_tensor = new lite::Tensor;
- output1_tensor->SetData(output1);
+ output1_tensor->set_data(output1);
output1_tensor->set_shape(output1_shape);
output1_tensor->AddQuantParam(output_quant_arg);
output1_tensor->set_data_type(tid_int8);
lite::Tensor *output2_tensor = new lite::Tensor;
- output2_tensor->SetData(output2);
+ output2_tensor->set_data(output2);
output2_tensor->set_shape(output2_shape);
output2_tensor->AddQuantParam(output_quant_arg);
output2_tensor->set_data_type(tid_int8);
lite::Tensor *output3_tensor = new lite::Tensor;
- output3_tensor->SetData(output3);
+ output3_tensor->set_data(output3);
output3_tensor->set_shape(output3_shape);
output3_tensor->AddQuantParam(output_quant_arg);
output3_tensor->set_data_type(tid_int8);
@@ -198,10 +198,10 @@ TEST_F(TestSplitInt8, Split_quant0_thread2_num) {
CompareOutputData(output2, except_result2.data(), output2_size, 0.000001);
CompareOutputData(output3, except_result3.data(), output3_size, 0.000001);

- input_tensor1->SetData(nullptr);
- output1_tensor->SetData(nullptr);
- output2_tensor->SetData(nullptr);
- output3_tensor->SetData(nullptr);
+ input_tensor1->set_data(nullptr);
+ output1_tensor->set_data(nullptr);
+ output2_tensor->set_data(nullptr);
+ output3_tensor->set_data(nullptr);
delete input_tensor1;
delete output1_tensor;
delete output2_tensor;
@@ -234,7 +234,7 @@ TEST_F(TestSplitInt8, Split_quant1_thread2_num) {

TypeId tid_int8 = kNumberTypeInt8;
lite::Tensor *input_tensor1 = new lite::Tensor;
- input_tensor1->SetData(input1.data());
+ input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -242,17 +242,17 @@ TEST_F(TestSplitInt8, Split_quant1_thread2_num) {
inputs_tensor[0] = input_tensor1;

lite::Tensor *output1_tensor = new lite::Tensor;
- output1_tensor->SetData(output1);
+ output1_tensor->set_data(output1);
output1_tensor->set_shape(output1_shape);
output1_tensor->AddQuantParam(output_quant_arg);
output1_tensor->set_data_type(tid_int8);
lite::Tensor *output2_tensor = new lite::Tensor;
- output2_tensor->SetData(output2);
+ output2_tensor->set_data(output2);
output2_tensor->set_shape(output2_shape);
output2_tensor->AddQuantParam(output_quant_arg);
output2_tensor->set_data_type(tid_int8);
lite::Tensor *output3_tensor = new lite::Tensor;
- output3_tensor->SetData(output3);
+ output3_tensor->set_data(output3);
output3_tensor->set_shape(output3_shape);
output3_tensor->AddQuantParam(output_quant_arg);
output3_tensor->set_data_type(tid_int8);
@@ -295,10 +295,10 @@ TEST_F(TestSplitInt8, Split_quant1_thread2_num) {
CompareOutputData(output2, except_result2.data(), output2_size, 0.000001);
CompareOutputData(output3, except_result3.data(), output3_size, 0.000001);

- input_tensor1->SetData(nullptr);
- output1_tensor->SetData(nullptr);
- output2_tensor->SetData(nullptr);
- output3_tensor->SetData(nullptr);
+ input_tensor1->set_data(nullptr);
+ output1_tensor->set_data(nullptr);
+ output2_tensor->set_data(nullptr);
+ output3_tensor->set_data(nullptr);
delete input_tensor1;
delete output1_tensor;
delete output2_tensor;


+ 4
- 4
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/squeeze_int8_tests.cc

@@ -48,7 +48,7 @@ TEST_F(TestSqueezeInt8, Squeeze_1d_axis0_offset0_quant0_thread2) {

lite::Tensor *input_tensor1 = new lite::Tensor;
TypeId tid_int8 = kNumberTypeInt8;
- input_tensor1->SetData(input1.data());
+ input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -58,7 +58,7 @@ TEST_F(TestSqueezeInt8, Squeeze_1d_axis0_offset0_quant0_thread2) {

std::vector<lite::Tensor *> outputs_tensor(1);
lite::Tensor *output0_tensor = new lite::Tensor;
- output0_tensor->SetData(output);
+ output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -87,8 +87,8 @@ TEST_F(TestSqueezeInt8, Squeeze_1d_axis0_offset0_quant0_thread2) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

- input_tensor1->SetData(nullptr);
- output0_tensor->SetData(nullptr);
+ input_tensor1->set_data(nullptr);
+ output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;


+ 6
- 6
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sub_int_tests.cc

@@ -36,9 +36,9 @@ TEST_F(TestSubInt8, SubInt8) {
int8_t input_data0[] = {105, 35, -27, 0, -63, 99, 16, 122, 67, -49};
int8_t input_data1[] = {24, -38, -115, 106, -98};
int8_t output_data[10] = {0};
- in_tensor0.SetData(input_data0);
- in_tensor1.SetData(input_data1);
- out_tensor.SetData(output_data);
+ in_tensor0.set_data(input_data0);
+ in_tensor1.set_data(input_data1);
+ out_tensor.set_data(output_data);

const lite::QuantArg quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::QuantArg quant_in1 = {0.00784314f, 0};
@@ -69,8 +69,8 @@ TEST_F(TestSubInt8, SubInt8) {
EXPECT_EQ(output_data[i], expect0[i]);
}

- in_tensor0.SetData(nullptr);
- in_tensor1.SetData(nullptr);
- out_tensor.SetData(nullptr);
+ in_tensor0.set_data(nullptr);
+ in_tensor1.set_data(nullptr);
+ out_tensor.set_data(nullptr);
}
} // namespace mindspore

+ 6
- 6
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/topk_int8_tests.cc

@@ -34,9 +34,9 @@ TEST_F(TestTopKInt8, TopK) {
int8_t input_data[] = {1, 2, 3, 6, 5, 4, 9, 8, 7, 10, 12, 11};
int8_t output_data0[8] = {0};
int32_t output_data1[8] = {0};
- in_tensor.SetData(input_data);
- out_tensor0.SetData(output_data0);
- out_tensor1.SetData(output_data1);
+ in_tensor.set_data(input_data);
+ out_tensor0.set_data(output_data0);
+ out_tensor1.set_data(output_data1);
std::vector<lite::Tensor *> inputs = {&in_tensor};
std::vector<lite::Tensor *> outputs = {&out_tensor0, &out_tensor1};

@@ -59,8 +59,8 @@ TEST_F(TestTopKInt8, TopK) {
EXPECT_EQ(output_data1[i], expect1[i]);
}

- in_tensor.SetData(nullptr);
- out_tensor0.SetData(nullptr);
- out_tensor1.SetData(nullptr);
+ in_tensor.set_data(nullptr);
+ out_tensor0.set_data(nullptr);
+ out_tensor1.set_data(nullptr);
}
} // namespace mindspore

+ 4
- 4
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/unsqueeze_int8_tests.cc

@@ -47,7 +47,7 @@ TEST_F(TestUnsqueezeInt8, Unsqueeze_1) {

lite::Tensor *input_tensor1 = new lite::Tensor;
TypeId tid_int8 = kNumberTypeInt8;
- input_tensor1->SetData(input1.data());
+ input_tensor1->set_data(input1.data());
input_tensor1->set_shape(shape1);
input_tensor1->AddQuantParam(input_quant_arg);
input_tensor1->set_data_type(tid_int8);
@@ -57,7 +57,7 @@ TEST_F(TestUnsqueezeInt8, Unsqueeze_1) {

std::vector<lite::Tensor *> outputs_tensor(1);
lite::Tensor *output0_tensor = new lite::Tensor;
- output0_tensor->SetData(output);
+ output0_tensor->set_data(output);
output0_tensor->set_shape(output_shape);
output0_tensor->AddQuantParam(output_quant_arg);
output0_tensor->set_data_type(tid_int8);
@@ -86,8 +86,8 @@ TEST_F(TestUnsqueezeInt8, Unsqueeze_1) {
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
CompareOutputData(output, except_result.data(), output_size, 0.000001);

- input_tensor1->SetData(nullptr);
- output0_tensor->SetData(nullptr);
+ input_tensor1->set_data(nullptr);
+ output0_tensor->set_data(nullptr);
delete input_tensor1;
delete output0_tensor;
delete ctx;


+ 2
- 2
mindspore/lite/test/ut/src/runtime/kernel/arm/string/normalize.cc

@@ -80,8 +80,8 @@ TEST_F(TestNormalize, TestSentence) {
printf("\n");
}

- input_tensor_.SetData(nullptr);
- output_tensor_.SetData(nullptr);
+ input_tensor_.set_data(nullptr);
+ output_tensor_.set_data(nullptr);
}

} // namespace mindspore

+ 7
- 7
mindspore/lite/test/ut/src/runtime/kernel/opencl/activation_tests.cc

@@ -92,7 +92,7 @@ TEST_F(TestActivationOpenCL, ReluFp_dim4) {
std::vector<int> input_shape = {1, 9};
schema::Format format = schema::Format_NC;
schema::Format op_format = schema::Format_NC4;
- auto tensor_type = lite::TensorCategory(schema::NodeType_ValueNode);
+ auto tensor_type = lite::Tensor::CONST_TENSOR;
auto *input_tensor = new (std::nothrow) lite::Tensor(data_type, input_shape, format, tensor_type);
if (input_tensor == nullptr) {
MS_LOG(ERROR) << "new input tensor error!";
@@ -199,7 +199,7 @@ TEST_F(TestActivationOpenCL, Relu6Fp_dim4) {
std::vector<int> input_shape = {1, 9};
schema::Format format = schema::Format_NC;
schema::Format op_format = schema::Format_NC4;
- auto tensor_type = lite::TensorCategory(schema::NodeType_ValueNode);
+ auto tensor_type = lite::Tensor::CONST_TENSOR;
auto *input_tensor = new (std::nothrow) lite::Tensor(data_type, input_shape, format, tensor_type);
if (input_tensor == nullptr) {
MS_LOG(ERROR) << "new input tensor error!";
@@ -309,7 +309,7 @@ TEST_F(TestActivationOpenCL, SigmoidFp_dim4) {
std::vector<int> input_shape = {1, 9};
schema::Format format = schema::Format_NC;
schema::Format op_format = schema::Format_NC4;
- auto tensor_type = lite::TensorCategory(schema::NodeType_ValueNode);
+ auto tensor_type = lite::Tensor::CONST_TENSOR;
auto *input_tensor = new (std::nothrow) lite::Tensor(data_type, input_shape, format, tensor_type);
if (input_tensor == nullptr) {
MS_LOG(ERROR) << "new input tensor error!";
@@ -417,7 +417,7 @@ TEST_F(TestActivationOpenCL, LeakyReluFp_dim4) {

MS_LOG(INFO) << "Init tensors.";
std::vector<int> input_shape = {1, 9}; // need modify
- auto tensor_type = lite::TensorCategory(schema::NodeType_ValueNode);
+ auto tensor_type = lite::Tensor::CONST_TENSOR;
schema::Format format = schema::Format_NC; // need modify
schema::Format op_format = schema::Format_NHWC4; // need modify
auto *input_tensor = new (std::nothrow) lite::Tensor(data_type, input_shape, format, tensor_type);
@@ -528,7 +528,7 @@ TEST_F(TestActivationOpenCLTanh, TanhFp_dim4) {
std::vector<int> input_shape = {1, 2, 3, 9};
schema::Format format = schema::Format_NHWC;
schema::Format op_format = schema::Format_NC4HW4;
- auto tensor_type = lite::TensorCategory(schema::NodeType_ValueNode);
+ auto tensor_type = lite::Tensor::CONST_TENSOR;
auto *input_tensor = new (std::nothrow) lite::Tensor(data_type, input_shape, format, tensor_type);
if (input_tensor == nullptr) {
MS_LOG(ERROR) << "new input tensor error!";
@@ -618,9 +618,9 @@ TEST_F(TestActivationOpenCLTanh, TanhFp_dim4) {
printf_tensor<float>("Tanh:FP32--output data---", outputs[0]);
CompareRes<float>(output_tensor, out_file);
}
- input_tensor->SetData(nullptr);
+ input_tensor->set_data(nullptr);
delete input_tensor;
- output_tensor->SetData(nullptr);
+ output_tensor->set_data(nullptr);
delete output_tensor;
delete sub_graph;
}
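
The OpenCL tests drop the schema-based category helper as well: lite::TensorCategory(schema::NodeType_ValueNode) becomes the explicit enumerator lite::Tensor::CONST_TENSOR. In context (data_type, input_shape and format as defined earlier in each test):

    auto tensor_type = lite::Tensor::CONST_TENSOR;
    auto *input_tensor = new (std::nothrow) lite::Tensor(data_type, input_shape, format, tensor_type);
    if (input_tensor == nullptr) {
      MS_LOG(ERROR) << "new input tensor error!";
      return;
    }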


Some files were not shown because too many files changed in this diff
