
!14647 add benchmark calibration for micro

From: @yangjie159
Reviewed-by: @hangangqiang
Signed-off-by:
pull/14647/MERGE
mindspore-ci-bot · 4 years ago · commit a4c308a38b
7 changed files with 305 additions and 8 deletions
  1. mindspore/lite/micro/cmake/file_list.cmake (+1, -0)
  2. mindspore/lite/micro/coder/generator/component/const_blocks/benchmark.cc (+25, -7)
  3. mindspore/lite/micro/coder/generator/component/const_blocks/calib_output.cc (+246, -0)
  4. mindspore/lite/micro/coder/generator/component/const_blocks/calib_output.h (+27, -0)
  5. mindspore/lite/micro/coder/generator/component/const_blocks/cmake_lists.cc (+1, -0)
  6. mindspore/lite/micro/coder/generator/component/const_blocks/mstring.cc (+1, -0)
  7. mindspore/lite/micro/coder/generator/generator.cc (+4, -1)

+1 -0   mindspore/lite/micro/cmake/file_list.cmake

@@ -32,6 +32,7 @@ set(CODER_GENERATOR_SRC
${MICRO_DIR}/coder/generator/component/const_blocks/model.cc
${MICRO_DIR}/coder/generator/component/const_blocks/license.cc
${MICRO_DIR}/coder/generator/component/const_blocks/load_input.cc
+ ${MICRO_DIR}/coder/generator/component/const_blocks/calib_output.cc
${MICRO_DIR}/coder/generator/component/const_blocks/thread_pool.cc
${MICRO_DIR}/coder/generator/component/const_blocks/benchmark.cc
)


+25 -7   mindspore/lite/micro/coder/generator/component/const_blocks/benchmark.cc

@@ -44,6 +44,7 @@ const char *benchmark_source = R"RAW(
#include "include/errorcode.h"

#include "load_input.h"
#include "calib_output.h"

using namespace mindspore;

@@ -54,8 +55,9 @@ void usage() {
"args[1]: inputs binary file\n"
"args[2]: model weight binary file\n"
"args[3]: loop count for performance test\n"
"args[4]: runtime thread num\n"
"args[5]: runtime thread bind mode\n\n");
"args[4]: calibration file\n"
"args[5]: runtime thread num\n"
"args[6]: runtime thread bind mode\n\n");
}

uint64_t GetTimeUs() {
@@ -131,15 +133,15 @@ int main(int argc, const char **argv) {
}

lite::Context *context = nullptr;
- if (argc >= 6) {
+ if (argc >= 7) {
// config benchmark context
context = new (std::nothrow) lite::Context();
if (context == nullptr) {
return lite::RET_ERROR;
}
- context->thread_num_ = atoi(argv[4]);
+ context->thread_num_ = atoi(argv[5]);
context->device_list_.resize(1);
- context->device_list_[0] = {lite::DT_CPU, {{false, static_cast<lite::CpuBindMode>(atoi(argv[5]))}}};
+ context->device_list_[0] = {lite::DT_CPU, {{false, static_cast<lite::CpuBindMode>(atoi(argv[6]))}}};
printf("context: ThreadNum: %d, BindMode: %d\n", context->thread_num_,
context->device_list_[0].device_info_.cpu_device_info_.cpu_bind_mode_);
}
@@ -187,12 +189,29 @@ int main(int argc, const char **argv) {
return lite::RET_ERROR;
}

- Vector<String> outputs_name = session->GetOutputTensorNames();
printf("\noutputs: \n");
+ Vector<String> outputs_name = session->GetOutputTensorNames();
+ Vector<tensor::MSTensor *> outputs;
for (const auto &name : outputs_name) {
auto output = session->GetOutputByTensorName(name);
+ outputs.push_back(output);
TensorToString(output);
}
+ if (argc >= 5) {
+ lite::Calibrator *calibrator = new (std::nothrow) lite::Calibrator();
+ if (calibrator == nullptr) {
+ return lite::RET_NULL_PTR;
+ }
+ ret = calibrator->ReadCalibData(argv[4]);
+ if (ret != lite::RET_OK) {
+ return lite::RET_ERROR;
+ }
+ ret = calibrator->CompareOutputs(outputs);
+ if (ret != lite::RET_OK) {
+ return lite::RET_ERROR;
+ }
+ delete calibrator;
+ }
printf("========run success=======\n");
delete session;
session = nullptr;
@@ -207,5 +226,4 @@ int main(int argc, const char **argv) {
return lite::RET_OK;
}
)RAW";

} // namespace mindspore::lite::micro
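
The calibration path added above reduces to a small read-then-compare sequence. The helper below is only an illustrative sketch of that flow, not part of the generated sources; it assumes the generated calib_output.h plus the Vector/MSTensor types and "using namespace mindspore;" that benchmark.cc already relies on:

// Illustrative sketch of the calibration step benchmark.cc performs when a
// calibration file is passed as args[4]; RunCalibration is hypothetical.
int RunCalibration(const char *calib_path, const Vector<tensor::MSTensor *> &outputs) {
  lite::Calibrator *calibrator = new (std::nothrow) lite::Calibrator();
  if (calibrator == nullptr) {
    return lite::RET_NULL_PTR;
  }
  int ret = calibrator->ReadCalibData(calib_path);  // parse expected outputs from the text file
  if (ret == lite::RET_OK) {
    ret = calibrator->CompareOutputs(outputs);      // accumulate |output - calib| and check the tolerance
  }
  delete calibrator;  // released on every path in this sketch
  return ret;
}

Note that the committed code deletes the calibrator only on the success path; the early returns after a failed read or compare leave it allocated, which is benign for a short-lived benchmark process.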

+246 -0   mindspore/lite/micro/coder/generator/component/const_blocks/calib_output.cc

@@ -0,0 +1,246 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "coder/generator/component/const_blocks/calib_output.h"

namespace mindspore::lite::micro {

const char *calib_header = R"RAW(
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_MICRO_CALIB_OUTPUT_H_
#define MINDSPORE_LITE_MICRO_CALIB_OUTPUT_H_

#include "include/lite_utils.h"
#include "include/ms_tensor.h"
#include "include/errorcode.h"

namespace mindspore {
namespace lite {

class CalibTensor {
public:
CalibTensor(String name, size_t elements_num) : tensor_name_(name), elements_num_(elements_num) {}
~CalibTensor() {
free(data_);
data_ = nullptr;
}
String tensor_name() const { return tensor_name_; }
int ElementsNum() const { return elements_num_; }
float *MutableData() {
if (data_ == nullptr) {
if (elements_num_ == 0 || elements_num_ > INT16_MAX) {
return nullptr;
}
data_ = static_cast<float *>(malloc(elements_num_ * sizeof(float)));
}
return data_;
}

private:
String tensor_name_;
int elements_num_{0};
float *data_{nullptr};
};

class Calibrator {
public:
Calibrator() = default;
~Calibrator() {
for (auto &calib : calib_outputs_) {
delete calib;
calib = nullptr;
}
calib_outputs_.clear();
}
int ReadCalibData(const char *calib_data_path);
int CompareOutputs(const Vector<tensor::MSTensor *> &outputs) const;

private:
Vector<CalibTensor *> calib_outputs_;
};

} // namespace lite
} // namespace mindspore

#endif // MINDSPORE_LITE_MICRO_CALIB_OUTPUT_H_
)RAW";

const char *calib_source = R"RAW(
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "calib_output.h"
#include <fstream>
#include <sstream>
#include <iostream>
#include <stdio.h>
#include <cmath>

namespace mindspore {
namespace lite {
constexpr float kToleranceVal = 0.0001;

#define MS_ERROR_IF_NULL(ptr) \
do { \
if ((ptr) == nullptr) { \
return mindspore::lite::RET_ERROR; \
} \
} while (0)

int Calibrator::ReadCalibData(const char *calib_data_path) {
std::ifstream in_file(calib_data_path);
if (!in_file.good()) {
printf("file is not exist, %s\n", calib_data_path);
return RET_ERROR;
}
if (!in_file.is_open()) {
printf("open file failed, %s\n", calib_data_path);
in_file.close();
return RET_ERROR;
}
while (!in_file.eof()) {
std::string line;
getline(in_file, line);
if (line.empty()) {
continue;
}
std::stringstream name_line(line);
std::string tensor_name;
size_t dim = 0;
name_line >> tensor_name >> dim;
size_t elements = 1;
for (size_t i = 0; i < dim; i++) {
size_t tmp_dim;
name_line >> tmp_dim;
elements *= tmp_dim;
}
getline(in_file, line);
std::stringstream data_line(line);
String name(tensor_name.c_str());
CalibTensor *output = new (std::nothrow) CalibTensor(name, elements);
MS_ERROR_IF_NULL(output);
float *data = output->MutableData();
MS_ERROR_IF_NULL(data);
for (size_t i = 0; i < elements; i++) {
data_line >> data[i];
}
calib_outputs_.push_back(output);
}
in_file.close();
return RET_OK;
}

template <typename T>
float CompareData(const T *output, const float *calib, size_t elements_num) {
float error = 0.;
if (output == nullptr || calib == nullptr) {
printf("output or calib is nullptr\n");
return error;
}
for (size_t i = 0; i < elements_num; ++i) {
if (std::isnan(output[i]) || std::isinf(output[i]) || std::isnan(calib[i]) || std::isinf(calib[i])) {
printf("error, output data is nan or inf\n");
return error;
}
error += std::abs(output[i] - calib[i]);
}
return error;
}

int Calibrator::CompareOutputs(const Vector<tensor::MSTensor *> &outputs) const {
if (outputs.size() != calib_outputs_.size()) {
printf("error, outputs and calibs size is mismatch\n");
return RET_ERROR;
}
float total_error = 0;
size_t outputs_num = outputs.size();
for (size_t i = 0; i < outputs_num; ++i) {
tensor::MSTensor *output = outputs[i];
MS_ERROR_IF_NULL(output);
CalibTensor *calib = calib_outputs_[i];
MS_ERROR_IF_NULL(calib);
if (output->tensor_name() != calib->tensor_name()) {
printf("error, output tensor name is not equal to calib\n");
return RET_ERROR;
}
if (output->ElementsNum() != calib->ElementsNum()) {
printf("error, output elements num is not equal to calib\n");
return RET_ERROR;
}
switch (output->data_type()) {
case TypeId::kNumberTypeFloat:
case TypeId::kNumberTypeFloat32: {
total_error += CompareData(static_cast<float *>(output->data()), calib->MutableData(), output->ElementsNum());
break;
}
case TypeId::kNumberTypeInt8: {
total_error += CompareData(static_cast<int8_t *>(output->data()), calib->MutableData(), output->ElementsNum());
break;
}
case TypeId::kNumberTypeUInt8: {
total_error += CompareData(static_cast<uint8_t *>(output->data()), calib->MutableData(), output->ElementsNum());
break;
}
case TypeId::kNumberTypeUInt:
case TypeId::kNumberTypeUInt32: {
total_error += CompareData(static_cast<int32_t *>(output->data()), calib->MutableData(), output->ElementsNum());
break;
}
default: {
printf("unsupported tensor data type\n");
}
}
}
if (total_error > kToleranceVal) {
printf("compare outputs failed, total error: %f\n", total_error);
return RET_ERROR;
}
printf("compare outputs success, total error: %f\n", total_error);
return RET_OK;
}
} // namespace lite
} // namespace mindspore
)RAW";

} // namespace mindspore::lite::micro
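
ReadCalibData above implies a plain text layout: for each output tensor, one line with the tensor name, the dimension count, and each dimension, followed by one line of whitespace-separated values. The writer below is a minimal sketch of that layout for documentation only; WriteCalibTensor is a hypothetical helper and not part of this commit:

#include <fstream>
#include <string>
#include <vector>

// Hypothetical helper: emits one tensor in the format ReadCalibData parses:
//   <tensor_name> <dim_count> <dim_0> ... <dim_{n-1}>
//   <value_0> <value_1> ... (dim_0 * ... * dim_{n-1} values)
void WriteCalibTensor(std::ofstream &out, const std::string &name,
                      const std::vector<size_t> &dims, const std::vector<float> &values) {
  out << name << " " << dims.size();
  for (size_t d : dims) {
    out << " " << d;
  }
  out << "\n";
  for (size_t i = 0; i < values.size(); ++i) {
    out << values[i] << (i + 1 == values.size() ? '\n' : ' ');
  }
}

A file written this way would then be passed to the generated benchmark as args[4].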

+27 -0   mindspore/lite/micro/coder/generator/component/const_blocks/calib_output.h

@@ -0,0 +1,27 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_MICRO_GENERATOR_CONST_BLOCK_CALIB_OUTPUT_H_
#define MINDSPORE_LITE_MICRO_GENERATOR_CONST_BLOCK_CALIB_OUTPUT_H_

namespace mindspore::lite::micro {

extern const char *calib_header;
extern const char *calib_source;

} // namespace mindspore::lite::micro

#endif // MINDSPORE_LITE_MICRO_GENERATOR_CONST_BLOCK_CALIB_OUTPUT_H_

+1 -0   mindspore/lite/micro/coder/generator/component/const_blocks/cmake_lists.cc

@@ -72,6 +72,7 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR})
include_directories(${HEADER_PATH})
set(SRC_FILES
benchmark/benchmark.cc
+ benchmark/calib_output.cc
benchmark/load_input.c
)
add_executable(benchmark ${SRC_FILES})


+1 -0   mindspore/lite/micro/coder/generator/component/const_blocks/mstring.cc

@@ -307,6 +307,7 @@ String operator+(const char *lhs, const String &rhs) {
return str;
}

+ bool operator!=(const String &lhs, const String &rhs) { return lhs.compare(rhs) != 0; }
bool operator==(const String &lhs, const String &rhs) { return lhs.compare(rhs) == 0; }
bool operator==(const String &lhs, const char *rhs) { return lhs.compare(rhs) == 0; }
bool operator==(const char *lhs, const String &rhs) { return rhs.compare(lhs) == 0; }


+4 -1   mindspore/lite/micro/coder/generator/generator.cc

@@ -24,6 +24,7 @@
#include "coder/generator/component/const_blocks/cmake_lists.h"
#include "coder/generator/component/const_blocks/debug_utils.h"
#include "coder/generator/component/const_blocks/load_input.h"
#include "coder/generator/component/const_blocks/calib_output.h"
#include "coder/generator/component/const_blocks/msession.h"
#include "coder/generator/component/const_blocks/mtensor.h"
#include "coder/generator/component/const_blocks/mstring.h"
@@ -85,9 +86,11 @@ int Generator::CodeSourceCMakeFile() {

int Generator::CodeStaticContent() {
std::vector<std::pair<std::string, std::string>> const_blocks = {
{config_->code_path() + "/" + "CMakeLists.txt", bench_cmake_lists_txt},
{net_main_file_path_ + "calib_output.h", calib_header},
{net_main_file_path_ + "calib_output.cc", calib_source},
{net_main_file_path_ + "load_input.h", load_input_h},
{net_main_file_path_ + "load_input.c", load_input_c},
{config_->code_path() + "/" + "CMakeLists.txt", bench_cmake_lists_txt},
{net_main_file_path_ + "benchmark.cc", benchmark_source},
{net_src_file_path_ + "CMakeLists.txt", src_cmake_lists_txt},
{net_src_file_path_ + "session.h", session_header},

