Browse Source

update micro

tags/v1.2.0-rc1
zhujingxuan 5 years ago
parent
commit
53b68c2444
44 changed files with 1478 additions and 351 deletions
  1. +37
    -23
      mindspore/lite/micro/coder/coder.cc
  2. +4
    -5
      mindspore/lite/micro/coder/config.h
  3. +4
    -7
      mindspore/lite/micro/coder/context.cc
  4. +1
    -1
      mindspore/lite/micro/coder/generator/component/cmake_component.cc
  5. +31
    -24
      mindspore/lite/micro/coder/generator/component/common_component.cc
  6. +8
    -11
      mindspore/lite/micro/coder/generator/component/common_component.h
  7. +1
    -1
      mindspore/lite/micro/coder/generator/component/const_blocks/benchmark.cc
  8. +24
    -37
      mindspore/lite/micro/coder/generator/component/const_blocks/cmake_lists.cc
  9. +6
    -6
      mindspore/lite/micro/coder/generator/component/const_blocks/msession.cc
  10. +9
    -6
      mindspore/lite/micro/coder/generator/component/parallel_component.cc
  11. +3
    -3
      mindspore/lite/micro/coder/generator/component/parallel_component.h
  12. +28
    -26
      mindspore/lite/micro/coder/generator/component/train_component.cc
  13. +4
    -5
      mindspore/lite/micro/coder/generator/component/train_component.h
  14. +4
    -4
      mindspore/lite/micro/coder/generator/component/weight_component.cc
  15. +2
    -2
      mindspore/lite/micro/coder/generator/component/weight_component.h
  16. +8
    -27
      mindspore/lite/micro/coder/generator/generator.cc
  17. +0
    -1
      mindspore/lite/micro/coder/generator/generator.h
  18. +11
    -11
      mindspore/lite/micro/coder/generator/inference/inference_generator.cc
  19. +12
    -12
      mindspore/lite/micro/coder/generator/train/train_generator.cc
  20. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.cc
  21. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.h
  22. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/int8/matmul_base_int8_coder.cc
  23. +18
    -18
      mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.cc
  24. +2
    -6
      mindspore/lite/micro/coder/operator_library/CMakeLists.txt
  25. +8
    -9
      mindspore/lite/micro/coder/utils/dir_utils.cc
  26. +1
    -1
      mindspore/lite/micro/coder/utils/dir_utils.h
  27. +57
    -0
      mindspore/lite/micro/example/mnist/CMakeLists.txt
  28. +147
    -0
      mindspore/lite/micro/example/mnist/benchmark/benchmark.cc
  29. +95
    -0
      mindspore/lite/micro/example/mnist/benchmark/load_input.c
  30. +36
    -0
      mindspore/lite/micro/example/mnist/benchmark/load_input.h
  31. +26
    -98
      mindspore/lite/micro/example/mnist/mnist.sh
  32. BIN
      mindspore/lite/micro/example/mnist/mnist_input.bin
  33. +83
    -0
      mindspore/lite/micro/example/mnist/src/CMakeLists.txt
  34. BIN
      mindspore/lite/micro/example/mnist/src/net.bin
  35. +170
    -0
      mindspore/lite/micro/example/mnist/src/net.c
  36. +25
    -0
      mindspore/lite/micro/example/mnist/src/net.cmake
  37. +56
    -0
      mindspore/lite/micro/example/mnist/src/net.h
  38. +157
    -0
      mindspore/lite/micro/example/mnist/src/session.cc
  39. +78
    -0
      mindspore/lite/micro/example/mnist/src/session.h
  40. +93
    -0
      mindspore/lite/micro/example/mnist/src/tensor.cc
  41. +71
    -0
      mindspore/lite/micro/example/mnist/src/tensor.h
  42. +102
    -0
      mindspore/lite/micro/example/mnist/src/weight.c
  43. +46
    -0
      mindspore/lite/micro/example/mnist/src/weight.h
  44. +7
    -4
      scripts/check_clang_format.sh

+ 37
- 23
mindspore/lite/micro/coder/coder.cc View File

@@ -37,7 +37,6 @@ class CoderFlags : public virtual FlagParser {
CoderFlags() {
AddFlag(&CoderFlags::model_path_, "modelPath", "Input model path", "");
AddFlag(&CoderFlags::code_path_, "codePath", "Input code path", ".");
AddFlag(&CoderFlags::code_module_name_, "moduleName", "Input code module name", "");
AddFlag(&CoderFlags::target_, "target", "generated code target, x86| ARM32M| ARM32A| ARM64", "x86");
AddFlag(&CoderFlags::code_mode_, "codeMode", "generated code mode, Inference | Train", "Inference");
AddFlag(&CoderFlags::support_parallel_, "supportParallel", "whether support parallel launch, true | false", false);
@@ -48,7 +47,6 @@ class CoderFlags : public virtual FlagParser {

std::string model_path_;
bool support_parallel_{false};
std::string code_module_name_;
std::string code_path_;
std::string code_mode_;
bool debug_mode_{false};
@@ -84,6 +82,27 @@ int Coder::Run(const std::string &model_path) {
return status;
}

int Configurator::ParseProjDir(std::string model_path) {
// split model_path to get model file name
proj_dir_ = model_path;
size_t found = proj_dir_.find_last_of("/\\");
if (found != std::string::npos) {
proj_dir_ = proj_dir_.substr(found + 1);
}
found = proj_dir_.find(".ms");
if (found != std::string::npos) {
proj_dir_ = proj_dir_.substr(0, found);
} else {
MS_LOG(ERROR) << "model file's name must be end with \".ms\".";
return RET_ERROR;
}
if (proj_dir_.size() == 0) {
proj_dir_ = "net";
MS_LOG(WARNING) << "parse model's name failed, use \"net\" instead.";
}
return RET_OK;
}

int Coder::Init(const CoderFlags &flags) const {
static const std::map<std::string, Target> kTargetMap = {
{"x86", kX86}, {"ARM32M", kARM32M}, {"ARM32A", kARM32A}, {"ARM64", kARM64}, {"All", kAllTargets}};
@@ -91,6 +110,17 @@ int Coder::Init(const CoderFlags &flags) const {
Configurator *config = Configurator::GetInstance();

std::vector<std::function<bool()>> parsers;
parsers.emplace_back([&flags, config]() -> bool {
if (!FileExists(flags.model_path_)) {
MS_LOG(ERROR) << "model_path \"" << flags.model_path_ << "\" is not valid";
return false;
}
if (config->ParseProjDir(flags.model_path_) != RET_OK) {
return false;
}
return true;
});

parsers.emplace_back([&flags, config]() -> bool {
auto target_item = kTargetMap.find(flags.target_);
MS_CHECK_TRUE_RET_BOOL(target_item != kTargetMap.end(), "unsupported target: " + flags.target_);
@@ -119,20 +149,6 @@ int Coder::Init(const CoderFlags &flags) const {
return true;
});

parsers.emplace_back([&flags, config]() -> bool {
if (!FileExists(flags.model_path_)) {
MS_LOG(ERROR) << "model_path \"" << flags.model_path_ << "\" is not valid";
return false;
}
if (flags.code_module_name_.empty() || isdigit(flags.code_module_name_.at(0))) {
MS_LOG(ERROR) << "code_gen code module name " << flags.code_module_name_
<< " not valid: it must be given and the first char could not be number";
return false;
}
config->set_module_name(flags.code_module_name_);
return true;
});

parsers.emplace_back([&flags, config]() -> bool {
const std::string slash = std::string(kSlash);
if (!flags.code_path_.empty() && !DirExists(flags.code_path_)) {
@@ -141,18 +157,18 @@ int Coder::Init(const CoderFlags &flags) const {
}
config->set_code_path(flags.code_path_);
if (flags.code_path_.empty()) {
std::string path = ".." + slash + config->module_name();
std::string path = ".." + slash + config->proj_dir();
config->set_code_path(path);
} else {
if (flags.code_path_.substr(flags.code_path_.size() - 1, 1) != slash) {
std::string path = flags.code_path_ + slash + config->module_name();
std::string path = flags.code_path_ + slash + config->proj_dir();
config->set_code_path(path);
} else {
std::string path = flags.code_path_ + config->module_name();
std::string path = flags.code_path_ + config->proj_dir();
config->set_code_path(path);
}
}
return InitProjDirs(flags.code_path_, config->module_name()) != RET_ERROR;
return InitProjDirs(flags.code_path_, config->proj_dir()) != RET_ERROR;
});

if (!std::all_of(parsers.begin(), parsers.end(), [](auto &parser) -> bool { return parser(); })) {
@@ -162,17 +178,15 @@ int Coder::Init(const CoderFlags &flags) const {
}
return RET_ERROR;
}
config->set_module_name(kModelName);

auto print_parameter = [](auto name, auto value) {
MS_LOG(INFO) << std::setw(20) << std::left << name << "= " << value;
};

print_parameter("modelPath", flags.model_path_);
print_parameter("projectName", config->proj_dir());
print_parameter("target", config->target());
print_parameter("codePath", config->code_path());
print_parameter("codeMode", config->code_mode());
print_parameter("codeModuleName", config->module_name());
print_parameter("debugMode", config->debug_mode());

return RET_OK;


+ 4
- 5
mindspore/lite/micro/coder/config.h View File

@@ -30,9 +30,6 @@ class Configurator {
return &configurator;
}

void set_module_name(const std::string &module_name) { module_name_ = module_name; }
std::string module_name() const { return module_name_; }

void set_code_path(const std::string &code_path) { code_path_ = code_path; }
std::string code_path() const { return code_path_; }

@@ -48,16 +45,18 @@ class Configurator {
void set_support_parallel(bool parallel) { support_parallel_ = parallel; }
bool support_parallel() const { return support_parallel_; }

int ParseProjDir(std::string model_path);
std::string proj_dir() const { return proj_dir_; }

private:
Configurator() = default;
~Configurator() = default;

std::string module_name_;
std::string code_path_;
Target target_{kTargetUnknown};
CodeMode code_mode_{Code_Unknown};
bool support_parallel_{false};
bool debug_mode_{false};
std::string proj_dir_;
};
} // namespace mindspore::lite::micro



+ 4
- 7
mindspore/lite/micro/coder/context.cc View File

@@ -15,17 +15,14 @@
*/

#include "coder/context.h"
#include "coder/config.h"
#include "coder/allocator/allocator.h"

namespace mindspore::lite::micro {
CoderContext::CoderContext() {
Configurator *config = Configurator::GetInstance();
std::string module_name = config->module_name();
this->input_name_ = module_name + "_I";
this->output_name_ = module_name + "_O";
this->buffer_name_ = module_name + "_B";
this->weight_name_ = module_name + "_W";
this->input_name_ = "g_Input";
this->output_name_ = "g_Output";
this->buffer_name_ = "g_Buffer";
this->weight_name_ = "g_Weight";
}

void CoderContext::AppendCode(const std::string &codeBlock) { this->code_blocks_.emplace_back(codeBlock); }


+ 1
- 1
mindspore/lite/micro/coder/generator/component/cmake_component.cc View File

@@ -31,7 +31,7 @@ void CodeCMakeNetLibrary(std::ofstream &ofs, const std::unique_ptr<CoderContext>
for (const std::string &c_file : ctx->c_files()) {
ofs << " " << c_file << ".o\n";
}
ofs << " net_weight.c.o\n"
ofs << " weight.c.o\n"
<< " net.c.o\n"
<< " session.cc.o\n"
<< " tensor.cc.o\n";


+ 31
- 24
mindspore/lite/micro/coder/generator/component/common_component.cc View File

@@ -73,29 +73,30 @@ void CodeCopyOutputsImplement(std::ofstream &ofs, const std::unique_ptr<CoderCon
MS_CHECK_PTR_IF_NULL(output);
ofs << " memcpy(outputs[" << i << "], " << tensor_map[output] << ", " << output->Size() << ");\n";
}
ofs << " outputs[0] = net_B;\n"
" return RET_OK;\n"
ofs << " return RET_OK;\n"
"}\n\n";
}

void CodeInputState(std::ofstream &ofs, const std::string &module_name) {
void CodeInputState(std::ofstream &ofs) {
ofs << "/**\n"
<< " * set input tensors\n"
<< " * @param inputs, the input data ptr's array of the model, the tensors' count of input may be greater than "
"one.\n"
<< " * @param num, the input data's number of the model.\n"
<< " **/\n"
<< "int " << module_name << "_SetInputs(const void **inputs, int num);\n\n";
<< "int "
<< "SetInputs(const void **inputs, int num);\n\n";
}

void CodeInputImplement(std::ofstream &ofs, const std::string &module_name, const std::unique_ptr<CoderContext> &ctx) {
void CodeInputImplement(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx) {
// input tensors
std::vector<Tensor *> inputs = ctx->graph_inputs();
for (size_t i = 0; i < inputs.size(); ++i) {
ofs << "static const unsigned char *" << ctx->input_name() + std::to_string(i) << " = 0;\n";
}
size_t size = inputs.size();
ofs << "int " << module_name << "_SetInputs(const void **inputs, int num) {\n"
ofs << "int "
<< "SetInputs(const void **inputs, int num) {\n"
<< " if (inputs == NULL) {\n"
" return RET_ERROR;\n"
" }\n"
@@ -108,15 +109,15 @@ void CodeInputImplement(std::ofstream &ofs, const std::string &module_name, cons
ofs << " return RET_OK;\n}\n";
}

void CodeGraphQuantArgsState(std::ofstream &ofs, const std::string &module_name) {
void CodeGraphQuantArgsState(std::ofstream &ofs) {
ofs << "/**\n"
<< " * get input and output QuantArgs of the model \n"
<< " **/\n"
<< "GraphQuantArgs " << module_name << "_GetInOutQuantArgs();\n\n";
<< "GraphQuantArgs "
<< "GetInOutQuantArgs();\n\n";
}

void CodeGraphQuantArgsImplement(std::ofstream &ofs, const std::string &module_name,
const std::unique_ptr<CoderContext> &ctx) {
void CodeGraphQuantArgsImplement(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx) {
std::vector<Tensor *> graph_inputs = ctx->graph_inputs();
Tensor *in_tensor = graph_inputs.at(kInputIndex);
MS_CHECK_PTR_IF_NULL(in_tensor);
@@ -129,36 +130,41 @@ void CodeGraphQuantArgsImplement(std::ofstream &ofs, const std::string &module_n
MS_LOG(ERROR) << "code model quant args failed";
return;
}
ofs << "GraphQuantArgs " << module_name << "_GetInOutQuantArgs() {\n"
ofs << "GraphQuantArgs "
<< "GetInOutQuantArgs() {\n"
<< "\t\tGraphQuantArgs quan_args = { " << in_quant_args.at(0).scale << ", " << out_quant_args.at(0).scale << ", "
<< in_quant_args.at(0).zeroPoint << ", " << out_quant_args.at(0).zeroPoint << "};\n"
<< "\t\treturn quan_args;\n"
<< "}\n";
}

void CodeManageResourceState(std::ofstream &ofs, const std::string &module_name) {
void CodeManageResourceState(std::ofstream &ofs) {
ofs << "/**\n"
<< " * get the memory space size of the inference.\n"
<< " **/\n"
<< "int " << module_name << "_GetBufferSize();\n";
<< "int "
<< "GetBufferSize();\n";

ofs << "/**\n"
<< " * set the memory space for the inference\n"
<< " **/\n"
<< "int " << module_name << "_SetBuffer(void *buffer);\n\n";
<< "int "
<< "SetBuffer(void *buffer);\n\n";

ofs << "/**\n"
<< " * free the memory of packed weights, and set the membuf buffer and input address to NULL\n"
<< " **/\n"
<< "void " << module_name << "_FreeResource();\n";
<< "void "
<< "FreeResource();\n";
}

void CodeInitResourceImplement(std::ofstream &ofs, const std::string &module_name,
const std::unique_ptr<CoderContext> &ctx) {
ofs << "int " << module_name << "_GetBufferSize() {\n"
void CodeInitResourceImplement(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx) {
ofs << "int "
<< "GetBufferSize() {\n"
<< " return " << ctx->total_buffer_size() << ";\n"
<< "}\n";
ofs << "int " << module_name << "_SetBuffer( void *buffer) {\n";
ofs << "int "
<< "SetBuffer( void *buffer) {\n";
ofs << " if (buffer == NULL) {\n"
" return RET_ERROR;\n"
" }\n";
@@ -167,9 +173,9 @@ void CodeInitResourceImplement(std::ofstream &ofs, const std::string &module_nam
"}\n";
}

void CodeFreeResourceImplement(std::ofstream &ofs, const std::string &module_name,
const std::unique_ptr<CoderContext> &ctx) {
ofs << "void " << module_name << "_FreeResource() {\n";
void CodeFreeResourceImplement(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx) {
ofs << "void "
<< "FreeResource() {\n";
ofs << " " << ctx->buffer_name() << "= NULL;\n";
std::vector<Tensor *> inputs = ctx->graph_inputs();
size_t size = inputs.size();
@@ -194,11 +200,12 @@ void CodeFreeResourceImplement(std::ofstream &ofs, const std::string &module_nam
ofs << "}\n";
}

void CodeInferenceState(std::ofstream &ofs, const std::string &module_name) {
void CodeInferenceState(std::ofstream &ofs) {
ofs << "/**\n"
<< " * net inference function\n"
<< " **/\n"
<< "void " << module_name << "_Inference();\n\n";
<< "void "
<< "Inference();\n\n";
}

} // namespace mindspore::lite::micro

+ 8
- 11
mindspore/lite/micro/coder/generator/component/common_component.h View File

@@ -31,21 +31,18 @@ void CodeSessionCompileGraph(std::ofstream &ofs, const std::unique_ptr<CoderCont
void CodeCopyOutputsState(std::ofstream &ofs);
void CodeCopyOutputsImplement(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx);

void CodeInputState(std::ofstream &ofs, const std::string &module_name);
void CodeInputImplement(std::ofstream &ofs, const std::string &module_name, const std::unique_ptr<CoderContext> &ctx);
void CodeInputState(std::ofstream &ofs);
void CodeInputImplement(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx);

void CodeGraphQuantArgsState(std::ofstream &ofs, const std::string &module_name);
void CodeGraphQuantArgsImplement(std::ofstream &ofs, const std::string &module_name,
const std::unique_ptr<CoderContext> &ctx);
void CodeGraphQuantArgsState(std::ofstream &ofs);
void CodeGraphQuantArgsImplement(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx);

void CodeManageResourceState(std::ofstream &ofs, const std::string &module_name);
void CodeInitResourceImplement(std::ofstream &ofs, const std::string &module_name,
const std::unique_ptr<CoderContext> &ctx);
void CodeManageResourceState(std::ofstream &ofs);
void CodeInitResourceImplement(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx);

void CodeFreeResourceImplement(std::ofstream &ofs, const std::string &module_name,
const std::unique_ptr<CoderContext> &ctx);
void CodeFreeResourceImplement(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx);

void CodeInferenceState(std::ofstream &ofs, const std::string &module_name);
void CodeInferenceState(std::ofstream &ofs);
} // namespace mindspore::lite::micro

#endif // MINDSPORE_LITE_MICRO_CODER_GENERATOR_COMMON_COMPONENT_H_

+ 1
- 1
mindspore/lite/micro/coder/generator/component/const_blocks/benchmark.cc View File

@@ -118,7 +118,7 @@ int main(int argc, const char **argv) {

const char *model_buffer = nullptr;
int model_size = 0;
// read .net file by ReadBinaryFile;
// read .bin file by ReadBinaryFile;
if (argc >= 3) {
model_buffer = static_cast<const char *>(ReadInputData(argv[2], &model_size));
}


+ 24
- 37
mindspore/lite/micro/coder/generator/component/const_blocks/cmake_lists.cc View File

@@ -19,32 +19,17 @@
namespace mindspore::lite::micro {

const char *bench_cmake_lists_txt = R"RAW(

cmake_minimum_required(VERSION 3.14)
project(benchmark)

if(NOT DEFINED MODEL_LIB)
message(FATAL_ERROR "MODEL_LIB not set")
endif()

if(NOT DEFINED HEADER_PATH)
message(FATAL_ERROR "HEADER_PATH not set")
if(NOT DEFINED PKG_PATH)
message(FATAL_ERROR "PKG_PATH not set")
endif()

get_filename_component(MODEL_LIB ${MODEL_LIB} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR})
get_filename_component(HEADER_PATH ${HEADER_PATH} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR})

function(parse_lib_info lib_full_path lib_name lib_path)
string(FIND "${lib_full_path}" "/" POS REVERSE)
math(EXPR POS "${POS} + 1")
string(SUBSTRING ${lib_full_path} 0 ${POS} path)
set(${lib_path} ${path} PARENT_SCOPE)
string(SUBSTRING ${lib_full_path} "${POS}" "-1" name)
set(${lib_name} ${name} PARENT_SCOPE)
endfunction(parse_lib_info)
get_filename_component(PKG_PATH ${PKG_PATH} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR})

parse_lib_info(${MODEL_LIB} MODEL_LIB_NAME MODEL_LIB_PATH)

message("project name: ${MODEL_LIB_NAME}")
set(HEADER_PATH ${PKG_PATH}/inference)

option(MICRO_BUILD_ARM64 "build android arm64" OFF)
option(MICRO_BUILD_ARM32A "build android arm32" OFF)
@@ -73,37 +58,39 @@ if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default")
else()
set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \
set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \
set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}")
endif()
link_directories(${MODEL_LIB_PATH})
include(benchmark.cmake)

add_subdirectory(src)
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../src/)
include_directories(${HEADER_PATH})
set(SRC_FILES
benchmark/benchmark.cc
benchmark/load_input.c
)
add_executable(benchmark ${SRC_FILES})
target_link_libraries(benchmark ${MODEL_LIB_NAME} -lm -pthread)
target_link_libraries(benchmark net -lm -pthread)

)RAW";

const char *src_cmake_lists_txt = R"RAW(

cmake_minimum_required(VERSION 3.14)
project(net)

if(NOT DEFINED OP_LIB)
message(FATAL_ERROR "OP_LIB not set")
if(NOT DEFINED PKG_PATH)
message(FATAL_ERROR "PKG_PATH not set")
endif()

if(NOT DEFINED OP_HEADER_PATH)
message(FATAL_ERROR "OP_HEADER_PATH not set")
endif()

if(NOT DEFINED HEADER_PATH)
message(FATAL_ERROR "HEADER_PATH not set")
endif()
get_filename_component(PKG_PATH ${PKG_PATH} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR})

get_filename_component(OP_LIB ${OP_LIB} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR})
get_filename_component(OP_HEADER_PATH ${OP_HEADER_PATH} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR})
get_filename_component(HEADER_PATH ${HEADER_PATH} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR})
set(OP_LIB ${PKG_PATH}/tools/codegen/operator_library/lib/libops.a)
set(OP_HEADER_PATH ${PKG_PATH}/tools/codegen/operator_library/include)
set(HEADER_PATH ${PKG_PATH}/inference)

message("operator lib path: ${OP_LIB}")
message("operator header path: ${OP_HEADER_PATH}")


+ 6
- 6
mindspore/lite/micro/coder/generator/component/const_blocks/msession.cc View File

@@ -104,9 +104,9 @@ int LiteSession::RunGraph(const KernelCallBack &before, const KernelCallBack &af
for (size_t i = 0; i < inputs_.size(); ++i) {
inputs_data[i] = inputs_[i]->MutableData();
}
net_SetInputs(inputs_data, inputs_.size());
SetInputs(inputs_data, inputs_.size());

net_Inference();
Inference();

void *outputs_data[outputs_.size()];
for (size_t i = 0; i < outputs_.size(); ++i) {
@@ -118,7 +118,7 @@ int LiteSession::RunGraph(const KernelCallBack &before, const KernelCallBack &af
}

LiteSession::~LiteSession() {
net_FreeResource();
FreeResource();
if (runtime_buffer_ != nullptr) {
free(runtime_buffer_);
runtime_buffer_ = nullptr;
@@ -141,12 +141,12 @@ LiteSession::~LiteSession() {
}

int LiteSession::InitRuntimeBuffer() {
int buffer_size = net_GetBufferSize();
int buffer_size = GetBufferSize();
runtime_buffer_ = malloc(buffer_size);
if (runtime_buffer_ == nullptr) {
return RET_ERROR;
}
int ret = net_SetBuffer(runtime_buffer_);
int ret = SetBuffer(runtime_buffer_);
if (ret != RET_OK) {
return RET_ERROR;
}
@@ -215,7 +215,7 @@ session::LiteSession *session::LiteSession::CreateSession(const char *net_buf, s
if (ret != lite::RET_OK) {
return nullptr;
}
net_Init(const_cast<char *>(net_buf), size);
Init(const_cast<char *>(net_buf), size);
return session;
}
} // namespace mindspore


+ 9
- 6
mindspore/lite/micro/coder/generator/component/parallel_component.cc View File

@@ -19,7 +19,7 @@

namespace mindspore::lite::micro {

void CodeCreateThreadPool(std::ofstream &ofs, const std::string &module_name) {
void CodeCreateThreadPool(std::ofstream &ofs) {
ofs << " int thread_num = 4;\n"
" BindMode bind_mode = NO_BIND_MODE;\n"
" if (argc >= 6) {\n"
@@ -31,7 +31,8 @@ void CodeCreateThreadPool(std::ofstream &ofs, const std::string &module_name) {
" MICRO_ERROR(\"create thread pool failed\");\n"
" return RET_ERROR;\n"
" }\n"
<< " ret = " << module_name << "_SetThreadPool(thread_pool);\n"
<< " ret = "
<< "SetThreadPool(thread_pool);\n"
<< " if (ret != RET_OK) {\n"
" MICRO_ERROR(\"set global thread pool failed\");\n"
" return RET_ERROR;\n"
@@ -41,16 +42,18 @@ void CodeCreateThreadPool(std::ofstream &ofs, const std::string &module_name) {

void CodeDestroyThreadPool(std::ofstream &ofs) { ofs << " DestroyThreadPool(thread_pool);\n"; }

void CodeSetGlobalThreadPoolState(std::ofstream &ofs, const std::string &module_name) {
void CodeSetGlobalThreadPoolState(std::ofstream &ofs) {
ofs << "/*\n"
" * set global thread pool, which is created by user\n"
" */\n"
<< "int " << module_name << "_SetThreadPool(struct ThreadPool *thread_pool);\n\n";
<< "int "
<< "SetThreadPool(struct ThreadPool *thread_pool);\n\n";
}

void CodeSetGlobalThreadPoolImplement(std::ofstream &ofs, const std::string &module_name) {
void CodeSetGlobalThreadPoolImplement(std::ofstream &ofs) {
ofs << "struct ThreadPool *g_thread_pool = NULL;\n"
<< "int " << module_name << "_SetThreadPool(struct ThreadPool *thread_pool) {\n"
<< "int "
<< "SetThreadPool(struct ThreadPool *thread_pool) {\n"
<< " if (thread_pool == NULL) {\n"
" return RET_ERROR;\n"
" }\n"


+ 3
- 3
mindspore/lite/micro/coder/generator/component/parallel_component.h View File

@@ -22,13 +22,13 @@

namespace mindspore::lite::micro {

void CodeCreateThreadPool(std::ofstream &ofs, const std::string &module_name);
void CodeCreateThreadPool(std::ofstream &ofs);

void CodeDestroyThreadPool(std::ofstream &ofs);

void CodeSetGlobalThreadPoolState(std::ofstream &ofs, const std::string &module_name);
void CodeSetGlobalThreadPoolState(std::ofstream &ofs);

void CodeSetGlobalThreadPoolImplement(std::ofstream &ofs, const std::string &module_name);
void CodeSetGlobalThreadPoolImplement(std::ofstream &ofs);

} // namespace mindspore::lite::micro



+ 28
- 26
mindspore/lite/micro/coder/generator/component/train_component.cc View File

@@ -39,24 +39,23 @@ void CodeTrainParams(std::ofstream &ofs) {
"};\n\n";
}

void CodeFeaturesState(std::ofstream &ofs, const std::string &module_name) {
void CodeFeaturesState(std::ofstream &ofs) {
ofs << "/**\n"
" *\n"
" * @param size, return the number of features\n"
" * @return, the address of features\n"
" */\n"
<< "FeatureParam *" << module_name << "_GetFeatures(int *size);\n\n";
<< "FeatureParam *GetFeatures(int *size);\n\n";
ofs << "/**\n"
" *\n"
" * @param features, the address of features\n"
" * @param size, the number of features\n"
" * @return, status\n"
" */\n"
<< "int " << module_name << "_UpdateFeatures(FeatureParam *features, int size);\n\n";
<< "int UpdateFeatures(FeatureParam *features, int size);\n\n";
}

void CodeFeaturesImplement(std::ofstream &ofs, const std::string &module_name,
const std::unique_ptr<CoderContext> &ctx) {
void CodeFeaturesImplement(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx) {
size_t features_num = 0;
ofs << "static FeatureParam feature_params[] = {\n";
for (const auto &item : ctx->saved_weights()) {
@@ -72,12 +71,13 @@ void CodeFeaturesImplement(std::ofstream &ofs, const std::string &module_name,
}
ofs << "};\n";

ofs << "FeatureParam *" << module_name << "_GetFeatures(int *size) {\n"
ofs << "FeatureParam *GetFeatures(int *size) {\n"
<< " *size = " << features_num << ";\n"
<< " return feature_params;\n"
"}\n\n";

ofs << "int " << module_name << "_UpdateFeatures(FeatureParam *features, int size) {\n"
ofs << "int "
<< "UpdateFeatures(FeatureParam *features, int size) {\n"
<< " for (int i = 0; i < size; ++i) {\n"
" FeatureParam *src = features + i;\n"
" FeatureParam dst;\n"
@@ -106,22 +106,22 @@ void CodeFeaturesImplement(std::ofstream &ofs, const std::string &module_name,
"}\n\n";
}

void CodeTrainState(std::ofstream &ofs, const std::string &module_name) {
ofs << "/**\n"
" * Train Function\n"
" * @param epoch, the train epoch\n"
" * @param iterations, which is equal to batch_num, the number of iterations of each epoch\n"
" * @param use_train_param, default parameters already exists, such as the momentum, user can update these\n"
" * parameters to improve the accuracy\n"
" * @param parameter, the TrainParameter contains epsilon/beta1/beta2\n"
" * @return status\n"
" */\n"
<< "int " << module_name
<< "_Train(const int epoch, const int iterations, bool use_train_param, const struct TrainParameter *parameter, "
"const struct EarlyStop *early_stop);\n\n";
void CodeTrainState(std::ofstream &ofs) {
ofs
<< "/**\n"
" * Train Function\n"
" * @param epoch, the train epoch\n"
" * @param iterations, which is equal to batch_num, the number of iterations of each epoch\n"
" * @param use_train_param, default parameters already exists, such as the momentum, user can update these\n"
" * parameters to improve the accuracy\n"
" * @param parameter, the TrainParameter contains epsilon/beta1/beta2\n"
" * @return status\n"
" */\n"
<< "int Train(const int epoch, const int iterations, bool use_train_param, const struct TrainParameter *parameter, "
"const struct EarlyStop *early_stop);\n\n";
}

void CodeTrainImplement(std::ofstream &ofs, const std::string &module_name, const std::unique_ptr<CoderContext> &ctx) {
void CodeTrainImplement(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx) {
std::vector<Tensor *> inputs = ctx->graph_inputs();
size_t inputs_num = inputs.size();
auto inputs_tostring = [&]() {
@@ -151,8 +151,7 @@ void CodeTrainImplement(std::ofstream &ofs, const std::string &module_name, cons
}
return result;
};
ofs << "int " << module_name
<< "_Train(const int epoch, const int iterations, bool use_train_param, const struct TrainParameter "
ofs << "int Train(const int epoch, const int iterations, bool use_train_param, const struct TrainParameter "
"*parameter, const struct EarlyStop *early_stop) {\n"
" if (iterations <= 0 || epoch <= 0) {\n"
" MICRO_ERROR(\"error iterations or epoch!, epoch:%d, iterations:%d\", epoch, iterations);\n"
@@ -169,9 +168,12 @@ void CodeTrainImplement(std::ofstream &ofs, const std::string &module_name, cons
<< " float loss = 0;\n"
<< " for (int j = 0; j < iterations; ++j) {\n"
<< " " << offset_inputs() << "\n"
<< " " << module_name << "_SetInputs(input_ptr, " << inputs_num << ");\n"
<< " " << module_name << "_Inference();\n"
<< " loss = " << module_name << "_ComputeLossAndGradient();\n"
<< " "
<< "_SetInputs(input_ptr, " << inputs_num << ");\n"
<< " "
<< "_Inference();\n"
<< " loss = "
<< "ComputeLossAndGradient();\n"
<< " }\n"
" }\n"
" return RET_OK;\n"


+ 4
- 5
mindspore/lite/micro/coder/generator/component/train_component.h View File

@@ -28,12 +28,11 @@
namespace mindspore::lite::micro {
void CodeTrainParams(std::ofstream &ofs);

void CodeFeaturesState(std::ofstream &ofs, const std::string &module_name);
void CodeFeaturesImplement(std::ofstream &ofs, const std::string &module_name,
const std::unique_ptr<CoderContext> &ctx);
void CodeFeaturesState(std::ofstream &ofs);
void CodeFeaturesImplement(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx);

void CodeTrainState(std::ofstream &ofs, const std::string &module_name);
void CodeTrainImplement(std::ofstream &ofs, const std::string &module_name, const std::unique_ptr<CoderContext> &ctx);
void CodeTrainState(std::ofstream &ofs);
void CodeTrainImplement(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx);
} // namespace mindspore::lite::micro

#endif // MINDSPORE_LITE_MICRO_CODER_GENERATOR_TRAIN_COMPONENT_H_

+ 4
- 4
mindspore/lite/micro/coder/generator/component/weight_component.cc View File

@@ -87,16 +87,16 @@ void CodeModelParamsForNet(std::ofstream &hofs, std::ofstream &cofs, const std::
cofs << "\n";
}

void CodeInitWeightState(std::ofstream &ofs, const std::string &module_name) {
void CodeInitWeightState(std::ofstream &ofs) {
ofs << "/**\n"
<< " * @param weight_buffer, the address of the weight binary file\n"
<< " * @param weight_size, the size of the model file in bytes\n"
<< " **/\n"
<< "int " << module_name << "_Init(void *weight_buffer, int weight_size);\n\n";
<< "int Init(void *weight_buffer, int weight_size);\n\n";
}

void CodeWeightInitFunc(std::ofstream &ofs, const std::string &module_name, const std::unique_ptr<CoderContext> &ctx) {
ofs << "int " << module_name << "_Init(void *weight_buffer, int weight_size) {\n"
void CodeWeightInitFunc(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx) {
ofs << "int Init(void *weight_buffer, int weight_size) {\n"
<< " if (weight_buffer == NULL) {\n"
<< " return RET_ERROR;\n"
<< " }\n";


+ 2
- 2
mindspore/lite/micro/coder/generator/component/weight_component.h View File

@@ -35,8 +35,8 @@ void CodeModelParamsData(std::ofstream &ofs, const std::map<std::string, Tensor
void SaveDataToNet(const std::map<std::string, Tensor *> &saved_weights, const std::string &net_file);
void CodeModelParamsForNet(std::ofstream &hofs, std::ofstream &cofs, const std::unique_ptr<CoderContext> &ctx);

void CodeInitWeightState(std::ofstream &ofs, const std::string &module_name);
void CodeWeightInitFunc(std::ofstream &ofs, const std::string &module_name, const std::unique_ptr<CoderContext> &ctx);
void CodeInitWeightState(std::ofstream &ofs);
void CodeWeightInitFunc(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx);

} // namespace mindspore::lite::micro



+ 8
- 27
mindspore/lite/micro/coder/generator/generator.cc View File

@@ -46,10 +46,9 @@ int WriteContentToFile(const std::string &file, const std::string &content) {
Generator::Generator(std::unique_ptr<CoderContext> ctx) {
ctx_ = std::move(ctx);
this->config_ = Configurator::GetInstance();
std::string module_name = config_->module_name();
this->net_inc_hfile_ = module_name + ".h";
this->net_src_cfile_ = module_name + ".c";
this->net_weight_hfile_ = module_name + "_weight.h";
this->net_inc_hfile_ = "net.h";
this->net_src_cfile_ = "net.c";
this->net_weight_hfile_ = "weight.h";
this->net_src_file_path_ = config_->code_path() + kSourcePath;
this->net_main_file_path_ = config_->code_path() + kBenchmarkPath;
origin_umask_ = umask(user_umask_);
@@ -60,7 +59,7 @@ Generator::~Generator() { (void)umask(origin_umask_); }

void Generator::CodeNetRunFunc(std::ofstream &ofs) {
// generate net inference code
ofs << "void " << config_->module_name() << "_Inference() {\n";
ofs << "void Inference() {\n";
if (config_->support_parallel()) {
ofs << " const int g_thread_num = GetCurrentThreadNum(g_thread_pool);\n";
} else {
@@ -72,23 +71,6 @@ void Generator::CodeNetRunFunc(std::ofstream &ofs) {
ofs << "}\n";
}

int Generator::CodeBenchmarkCMakeFile() {
std::string net_main_cmake_file_path = net_main_file_path_;
std::string test_cmake_file = net_main_cmake_file_path + "benchmark.cmake";
std::ofstream ofs(test_cmake_file);
MS_CHECK_TRUE(!ofs.bad(), "filed to open file");
MS_LOG(INFO) << "write " << test_cmake_file;
ofs << "include_directories(${CMAKE_CURRENT_SOURCE_DIR})\n";
ofs << "include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../src/)\n";
ofs << "include_directories(${HEADER_PATH})\n";
ofs << "set(SRC_FILES\n";
ofs << "\t\t" << kBenchmarkFile << "\n";
ofs << "\t\tload_input.c\n";
ofs << ")\n";
ofs.close();
return RET_OK;
}

int Generator::CodeSourceCMakeFile() {
std::string src_cmake_file = net_src_file_path_ + cmake_file_name_;
std::ofstream ofs(src_cmake_file);
@@ -103,7 +85,7 @@ int Generator::CodeStaticContent() {
std::vector<std::pair<std::string, std::string>> const_blocks = {
{net_main_file_path_ + "load_input.h", load_input_h},
{net_main_file_path_ + "load_input.c", load_input_c},
{net_main_file_path_ + "CMakeLists.txt", bench_cmake_lists_txt},
{config_->code_path() + "/" + "CMakeLists.txt", bench_cmake_lists_txt},
{net_main_file_path_ + "benchmark.cc", benchmark_source},
{net_src_file_path_ + "CMakeLists.txt", src_cmake_lists_txt},
{net_src_file_path_ + "session.h", session_header},
@@ -143,7 +125,7 @@ int Generator::CodeWeightFile() {
CodeWeightFileHeader(hofs, ctx_);

// weight source file
std::string cfile = net_src_file_path_ + config_->module_name() + "_weight.c";
std::string cfile = net_src_file_path_ + "weight.c";
std::ofstream cofs(cfile);
MS_CHECK_TRUE(!cofs.bad(), "filed to open file");
MS_LOG(INFO) << "write " << cfile;
@@ -152,10 +134,10 @@ int Generator::CodeWeightFile() {
cofs << "unsigned char * " << ctx_->buffer_name() << " = 0 ; \n";

if (config_->target() != kARM32M) {
std::string net_file = net_src_file_path_ + config_->module_name() + ".net";
std::string net_file = net_src_file_path_ + "net.bin";
SaveDataToNet(ctx_->saved_weights(), net_file);
CodeModelParamsForNet(hofs, cofs, ctx_);
CodeWeightInitFunc(cofs, config_->module_name(), ctx_);
CodeWeightInitFunc(cofs, ctx_);
} else {
CodeModelParamsState(hofs, ctx_->saved_weights());
CodeModelParamsData(cofs, ctx_->saved_weights());
@@ -170,7 +152,6 @@ int Generator::GenerateCode() {
MS_CHECK_RET_CODE(CodeNetCFile(), "code net c file failed.");
MS_CHECK_RET_CODE(CodeWeightFile(), "code weight file failed.");
MS_CHECK_RET_CODE(CodeSourceCMakeFile(), "code net cmake file failed.");
MS_CHECK_RET_CODE(CodeBenchmarkCMakeFile(), "code benchmark cmake file failed.");
MS_CHECK_RET_CODE(CodeStaticContent(), "code static content failed.");
MS_CHECK_RET_CODE(CodeSessionImplement(), "code session file failed.");
return RET_OK;


+ 0
- 1
mindspore/lite/micro/coder/generator/generator.h View File

@@ -61,7 +61,6 @@ class Generator {
std::string net_main_file_path_;

private:
int CodeBenchmarkCMakeFile();
int CodeSourceCMakeFile();
int CodeStaticContent();
int CodeSessionImplement();


+ 11
- 11
mindspore/lite/micro/coder/generator/inference/inference_generator.cc View File

@@ -35,19 +35,19 @@ int InferenceGenerator::CodeNetHFile() {
ofs << "#include \"thread_pool.h\"\n";
}
ofs << kExternCpp;
CodeInputState(ofs, config_->module_name());
CodeInputState(ofs);
CodeCopyOutputsState(ofs);
if (is_get_quant_args_) {
CodeGraphQuantArgsState(ofs, config_->module_name());
CodeGraphQuantArgsState(ofs);
}
if (config_->support_parallel()) {
CodeSetGlobalThreadPoolState(ofs, config_->module_name());
CodeSetGlobalThreadPoolState(ofs);
}
if (config_->target() != kARM32M) {
CodeInitWeightState(ofs, config_->module_name());
CodeInitWeightState(ofs);
}
CodeManageResourceState(ofs, config_->module_name());
CodeInferenceState(ofs, config_->module_name());
CodeManageResourceState(ofs);
CodeInferenceState(ofs);
ofs << kEndExternCpp;
return RET_OK;
}
@@ -64,14 +64,14 @@ int InferenceGenerator::CodeNetCFile() {
ofs << "#include \"" << kDebugUtils << "\"\n";
}
if (config_->support_parallel()) {
CodeSetGlobalThreadPoolImplement(ofs, config_->module_name());
CodeSetGlobalThreadPoolImplement(ofs);
}
CodeInputImplement(ofs, config_->module_name(), ctx_);
CodeInputImplement(ofs, ctx_);
CodeCopyOutputsImplement(ofs, ctx_);
CodeInitResourceImplement(ofs, config_->module_name(), ctx_);
CodeFreeResourceImplement(ofs, config_->module_name(), ctx_);
CodeInitResourceImplement(ofs, ctx_);
CodeFreeResourceImplement(ofs, ctx_);
if (is_get_quant_args_) {
CodeGraphQuantArgsImplement(ofs, config_->module_name(), ctx_);
CodeGraphQuantArgsImplement(ofs, ctx_);
}
CodeNetRunFunc(ofs);
ofs.close();


+ 12
- 12
mindspore/lite/micro/coder/generator/train/train_generator.cc View File

@@ -24,7 +24,7 @@

namespace mindspore::lite::micro {
void TrainGenerator::CodeGradientFunc(std::ofstream &ofs) const {
ofs << "float " << config_->module_name() << "_ComputeLossAndGradient() {\n";
ofs << "float ComputeLossAndGradient() {\n";
ofs << " float loss = 0;\n";
for (const auto &block : ctx_->train_blocks()) {
ofs << "\t{\n" << block << "\t}\n";
@@ -44,14 +44,14 @@ int TrainGenerator::CodeNetHFile() {
}
ofs << "#include \"microtensor.h\"\n\n";
CodeTrainParams(ofs);
CodeInputState(ofs, config_->module_name());
CodeInputState(ofs);
if (config_->target() != kARM32M) {
CodeInitWeightState(ofs, config_->module_name());
CodeInitWeightState(ofs);
}
CodeManageResourceState(ofs, config_->module_name());
CodeInferenceState(ofs, config_->module_name());
CodeFeaturesState(ofs, config_->module_name());
CodeTrainState(ofs, config_->module_name());
CodeManageResourceState(ofs);
CodeInferenceState(ofs);
CodeFeaturesState(ofs);
CodeTrainState(ofs);
return RET_OK;
}

@@ -60,13 +60,13 @@ int TrainGenerator::CodeNetCFile() {
std::ofstream ofs(net_impl_file);
MS_CHECK_TRUE(!ofs.bad(), "filed to open file");
MS_LOG(INFO) << "write " << net_impl_file;
CodeInputImplement(ofs, config_->module_name(), ctx_);
CodeInitResourceImplement(ofs, config_->module_name(), ctx_);
CodeFreeResourceImplement(ofs, config_->module_name(), ctx_);
CodeFeaturesImplement(ofs, config_->module_name(), ctx_);
CodeInputImplement(ofs, ctx_);
CodeInitResourceImplement(ofs, ctx_);
CodeFreeResourceImplement(ofs, ctx_);
CodeFeaturesImplement(ofs, ctx_);
CodeNetRunFunc(ofs);
CodeGradientFunc(ofs);
CodeTrainImplement(ofs, config_->module_name(), ctx_);
CodeTrainImplement(ofs, ctx_);
ofs.close();
return RET_OK;
}


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.cc View File

@@ -102,7 +102,7 @@ int Conv2D3x3Int8Coder::InitTmpBuffer(CoderContext *const context) {

/*=============================tmp_out_============================*/
tmp_out_size_ = oc4 * C4NUM * output_batch * output_w * output_h * sizeof(uint8_t);
tmp_out_ = static_cast<uint8_t *>(allocator_->Malloc(kNumberTypeUInt8, tmp_out_size_, kWorkspace));
tmp_out_ = static_cast<int8_t *>(allocator_->Malloc(kNumberTypeInt8, tmp_out_size_, kWorkspace));

/*=============================input_data_============================*/
c8_input_size_ = in_batch * input_h * input_w * ic8 * C8NUM * sizeof(int16_t);


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.h View File

@@ -51,7 +51,7 @@ class Conv2D3x3Int8Coder final : public Conv2DBaseCoder {
int16_t *block_unit_buffer_{nullptr};
int16_t *tile_buffer_{nullptr};
int32_t *tmp_dst_buffer_{nullptr};
uint8_t *tmp_out_{nullptr};
int8_t *tmp_out_{nullptr};
int16_t *c8_input_{nullptr};

size_t tile_buffer_size_{0};


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/int8/matmul_base_int8_coder.cc View File

@@ -184,7 +184,7 @@ int MatMulBaseInt8Coder::DoCode(CoderContext *const context) {
init_code.CodeFunction("memset", weight_bias_sums_, 0, weight_bias_sums_size_);
init_code.CodeMallocExpression(pack_b_ptr_, b_pack_ptr_size_);
init_code.CodeFunction("memset", pack_b_ptr_, 0, b_pack_ptr_size_);
init_code.CodeArray("init_filter_zp", quant_.filter_zp_, weight_quant_num_);
init_code.CodeArray("init_filter_zp", quant_.filter_zp_, weight_quant_num_, false);
init_code.CodeFunction("InitInt8MatrixB", filter_tensor_, weight_bias_sums_, pack_b_ptr_, param_->batch,
param_->deep_, param_->col_, param_->col_align_, param_->deep_16_, quant_.input_.zp_,
"init_filter_zp", bias_ptr_, param_->b_transpose_, filter_per_channel_);


+ 18
- 18
mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.cc View File

@@ -45,20 +45,20 @@ void NNaclInt8Serializer::CodeStruct(const std::string &name, const ConvParamete

std::string conv_quant_arg = name + "_conv_quant_arg";

CodeBaseStruct("ConvQuantArg", conv_quant_arg, quant_arg.round_mode_, quant_arg.quant_multiplier_mode_, quant_arg_in,
quant_arg_w, quant_arg_out, real_multiplier, left_shift, right_shift, quant_multiplier, out_act_min,
out_act_max, quant_arg.input_arg_num_, quant_arg.filter_arg_num_, quant_arg.output_arg_num_,
quant_arg.per_channel_);
CodeBaseStruct<false>("ConvQuantArg", conv_quant_arg, quant_arg.round_mode_, quant_arg.quant_multiplier_mode_,
quant_arg_in, quant_arg_w, quant_arg_out, real_multiplier, left_shift, right_shift,
quant_multiplier, out_act_min, out_act_max, quant_arg.input_arg_num_, quant_arg.filter_arg_num_,
quant_arg.output_arg_num_, quant_arg.per_channel_);
code << "int thread_num = MSMIN(" << gThreadNum << ", " << conv_parameter.output_h_ << ");\n";
CodeBaseStruct("ConvParameter", name, conv_parameter.op_parameter_, conv_quant_arg, conv_parameter.kernel_h_,
conv_parameter.kernel_w_, conv_parameter.stride_h_, conv_parameter.stride_w_,
conv_parameter.dilation_h_, conv_parameter.dilation_w_, conv_parameter.pad_u_, conv_parameter.pad_d_,
conv_parameter.pad_l_, conv_parameter.pad_r_, conv_parameter.group_, conv_parameter.tile_num_,
conv_parameter.input_batch_, conv_parameter.input_h_, conv_parameter.input_w_,
conv_parameter.input_channel_, conv_parameter.output_batch_, conv_parameter.output_h_,
conv_parameter.output_w_, conv_parameter.output_channel_, "thread_num", conv_parameter.input_unit_,
conv_parameter.output_unit_, conv_parameter.pad_mode_, conv_parameter.act_type_,
conv_parameter.channel_multiplie_, conv_parameter.output_padding_w_, conv_parameter.output_padding_h_);
CodeBaseStruct<false>(
"ConvParameter", name, conv_parameter.op_parameter_, conv_quant_arg, conv_parameter.kernel_h_,
conv_parameter.kernel_w_, conv_parameter.stride_h_, conv_parameter.stride_w_, conv_parameter.dilation_h_,
conv_parameter.dilation_w_, conv_parameter.pad_u_, conv_parameter.pad_d_, conv_parameter.pad_l_,
conv_parameter.pad_r_, conv_parameter.group_, conv_parameter.tile_num_, conv_parameter.input_batch_,
conv_parameter.input_h_, conv_parameter.input_w_, conv_parameter.input_channel_, conv_parameter.output_batch_,
conv_parameter.output_h_, conv_parameter.output_w_, conv_parameter.output_channel_, "thread_num",
conv_parameter.input_unit_, conv_parameter.output_unit_, conv_parameter.pad_mode_, conv_parameter.act_type_,
conv_parameter.channel_multiplie_, conv_parameter.output_padding_w_, conv_parameter.output_padding_h_);
}

void NNaclInt8Serializer::CodeStruct(const std::string &name, const MatMulParameter &matmul_parameter) {
@@ -201,11 +201,11 @@ void NNaclInt8Serializer::CodeStruct(const std::string &name, const ReshapeQuant

void NNaclInt8Serializer::CodeStruct(const std::string &name, const MatmulQuantParameter &matmul_quant_arg,
int weight_quant_num) {
CodeArray("filter_scale", matmul_quant_arg.filter_scale_, weight_quant_num);
CodeArray("filter_zp", matmul_quant_arg.filter_zp_, weight_quant_num);
CodeArray("left_shift", matmul_quant_arg.left_shift_, weight_quant_num);
CodeArray("right_shift", matmul_quant_arg.right_shift_, weight_quant_num);
CodeArray("multiplier", matmul_quant_arg.quant_multiplier_, weight_quant_num);
CodeArray("filter_scale", matmul_quant_arg.filter_scale_, weight_quant_num, false);
CodeArray("filter_zp", matmul_quant_arg.filter_zp_, weight_quant_num, false);
CodeArray("left_shift", matmul_quant_arg.left_shift_, weight_quant_num, false);
CodeArray("right_shift", matmul_quant_arg.right_shift_, weight_quant_num, false);
CodeArray("multiplier", matmul_quant_arg.quant_multiplier_, weight_quant_num, false);
CodeBaseStruct("MatmulQuantParameter", name, matmul_quant_arg.input_, matmul_quant_arg.weight_,
matmul_quant_arg.output_, matmul_quant_arg.out_act_min_, matmul_quant_arg.out_act_max_, "filter_scale",
"filter_zp", "left_shift", "right_shift", "multiplier");


+ 2
- 6
mindspore/lite/micro/coder/operator_library/CMakeLists.txt View File

@@ -53,12 +53,8 @@ include(${MICRO_CMAKE_PATH}/package_wrapper.cmake)

list(APPEND OP_FILES ${NNACL_OPS} ${WRAPPER_SRC} ${RUNTIME_SRC})

if(PLATFORM_ARM64)
set(LIB_PATH "${OPERATOR_LIBRARY_PATH}/lib/arm64")
elseif(PLATFORM_ARM32)
set(LIB_PATH "${OPERATOR_LIBRARY_PATH}/lib/arm32a")
else()
set(LIB_PATH "${OPERATOR_LIBRARY_PATH}/lib/x86")
set(LIB_PATH "${OPERATOR_LIBRARY_PATH}/lib")
if(NOT PLATFORM_ARM64 AND NOT PLATFORM_ARM32)
list(APPEND OP_FILES ${CMSIS_OPS})
endif()



+ 8
- 9
mindspore/lite/micro/coder/utils/dir_utils.cc View File

@@ -32,7 +32,7 @@ constexpr _mode_t kMicroDirMode = 0777;
constexpr __mode_t kMicroDirMode = 0777;
#endif

static std::array<std::string, 3> kWorkDirs = {"src", "benchmark"};
static std::array<std::string, 2> kWorkDirs = {"src", "benchmark"};

bool DirExists(const std::string &dir_path) {
struct stat file_info;
@@ -76,18 +76,18 @@ static int MkMicroDir(const std::string &currentDir) {
return RET_OK;
}

int InitProjDirs(const std::string &pro_root_dir, const std::string &module_name) {
int InitProjDirs(const std::string &project_root_dir, const std::string &proj_name) {
#if defined(_WIN32) || defined(_WIN64)
std::ofstream pro_file;
std::string read_me_file = pro_root_dir + "\\readMe.txt";
std::string read_me_file = project_root_dir + "\\readMe.txt";
pro_file.open(read_me_file.c_str());
pro_file << "This is a directory for generating coding files. Do not edit !!!\n";
#else
std::ifstream pro_file;
pro_file.open(pro_root_dir.c_str());
pro_file.open(project_root_dir.c_str());
#endif
if (!pro_file.is_open()) {
MS_LOG(ERROR) << pro_root_dir << ": model's root dir not exists or have no access to open, please check it!!!";
MS_LOG(ERROR) << project_root_dir << ": model's root dir not exists or have no access to open, please check it!!!";
pro_file.close();
return RET_ERROR;
}
@@ -95,11 +95,10 @@ int InitProjDirs(const std::string &pro_root_dir, const std::string &module_name
// 1. coderDir 2.WorkRootDir 3. WorkChildDir
std::string current_dir;
std::string slashCh = std::string(kSlash);
if (pro_root_dir.back() == slashCh.back()) {
current_dir = pro_root_dir + module_name;
} else {
current_dir = pro_root_dir + slashCh + module_name;
if (project_root_dir.back() != slashCh.back()) {
current_dir = project_root_dir + slashCh;
}
current_dir += proj_name;
std::string work_dir = current_dir;
STATUS ret = MkMicroDir(current_dir);
if (ret == RET_ERROR) {


+ 1
- 1
mindspore/lite/micro/coder/utils/dir_utils.h View File

@@ -24,7 +24,7 @@ static const char kSlash[] = "\\";
static const char kSlash[] = "/";
#endif

int InitProjDirs(const std::string &project_root_dir, const std::string &module_name);
int InitProjDirs(const std::string &project_root_dir, const std::string &proj_name);

bool DirExists(const std::string &dir_path);



+ 57
- 0
mindspore/lite/micro/example/mnist/CMakeLists.txt View File

@@ -0,0 +1,57 @@


# Top-level build script for the mnist example benchmark executable.
# Expects -DPKG_PATH=<mindspore-lite package dir>; links against the
# generated "net" static library built by the src/ subdirectory.
cmake_minimum_required(VERSION 3.14)
project(benchmark)

# PKG_PATH must point at an unpacked mindspore-lite inference package.
if(NOT DEFINED PKG_PATH)
message(FATAL_ERROR "PKG_PATH not set")
endif()

# Normalize a possibly-relative PKG_PATH against the build directory.
get_filename_component(PKG_PATH ${PKG_PATH} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR})

set(HEADER_PATH ${PKG_PATH}/inference)

# Cross-compile switches for Android targets.
option(MICRO_BUILD_ARM64 "build android arm64" OFF)
option(MICRO_BUILD_ARM32A "build android arm32" OFF)

if(MICRO_BUILD_ARM64 OR MICRO_BUILD_ARM32A)
add_compile_definitions(ENABLE_NEON)
add_compile_definitions(ENABLE_ARM)
endif()

if(MICRO_BUILD_ARM64)
add_compile_definitions(ENABLE_ARM64)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8.2-a+dotprod")
endif()

if(MICRO_BUILD_ARM32A)
add_compile_definitions(ENABLE_ARM32)
add_definitions(-mfloat-abi=softfp -mfpu=neon)
endif()

# Debug builds keep symbols and default visibility; release builds get
# hardening flags (-D_FORTIFY_SOURCE=2, stack protector) and -O2.
set(CMAKE_C_FLAGS "${CMAKE_ENABLE_C99} ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
message(STATUS "build benchmark with debug info")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDebug -g")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDebug -g")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default")
else()
set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}")
endif()

# Build the generated model library first, then the benchmark driver.
add_subdirectory(src)
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../src/)
include_directories(${HEADER_PATH})
set(SRC_FILES
benchmark/benchmark.cc
benchmark/load_input.c
)
add_executable(benchmark ${SRC_FILES})
target_link_libraries(benchmark net -lm -pthread)


+ 147
- 0
mindspore/lite/micro/example/mnist/benchmark/benchmark.cc View File

@@ -0,0 +1,147 @@


/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <iostream>
#include <string>
#include <cstring>

#include "include/lite_session.h"
#include "include/ms_tensor.h"
#include "include/errorcode.h"

#include "load_input.h"

using namespace mindspore;

// Print the command-line help text for the benchmark binary to stdout.
void usage() {
  static const char *const kUsageText =
    "-- mindspore benchmark params usage:\n"
    "args[0]: executable file\n"
    "args[1]: inputs binary file\n"
    "args[2]: model weight binary file\n"
    "args[3]: loop count for performance test\n"
    "args[4]: runtime thread num\n"
    "args[5]: runtime thread bind mode\n\n";
  fputs(kUsageText, stdout);
}

// Print up to the first 10 elements of `data`, reinterpreted as T,
// comma-separated on a single line; a null pointer prints nothing.
template <typename T>
void PrintData(void *data, size_t data_number) {
  if (data == nullptr) {
    return;
  }
  const T *values = static_cast<T *>(data);
  const size_t print_count = (data_number < 10) ? data_number : 10;
  for (size_t idx = 0; idx < print_count; ++idx) {
    std::cout << std::to_string(values[idx]) << ", ";
  }
  std::cout << std::endl;
}

/**
 * Dump a tensor's metadata (name, data type, byte size, shape) and the
 * leading elements of its data to stdout, dispatching on data_type().
 *
 * @param tensor tensor to print; null is ignored.
 */
void TensorToString(tensor::MSTensor *tensor) {
  if (tensor == nullptr) {
    return;
  }
  // NOTE: removed leftover debug code (`uint8_t i = 0; std::cout << "uint8: " << i`)
  // which streamed a raw NUL character to stdout: operator<< on uint8_t
  // prints it as a char, not a number.
  std::cout << "Name: " << tensor->tensor_name();
  std::cout << ", DataType: " << tensor->data_type();
  std::cout << ", Size: " << tensor->Size();
  std::cout << ", Shape:";
  for (auto &dim : tensor->shape()) {
    std::cout << " " << dim;
  }
  std::cout << ", Data:" << std::endl;
  switch (tensor->data_type()) {
    case kNumberTypeFloat32: {
      PrintData<float>(tensor->MutableData(), tensor->ElementsNum());
    } break;
    case kNumberTypeFloat16: {
      // no host float16 type available; prints the raw 16-bit payloads
      PrintData<int16_t>(tensor->MutableData(), tensor->ElementsNum());
    } break;
    case kNumberTypeInt32: {
      PrintData<int32_t>(tensor->MutableData(), tensor->ElementsNum());
    } break;
    case kNumberTypeInt16: {
      PrintData<int16_t>(tensor->MutableData(), tensor->ElementsNum());
    } break;
    case kNumberTypeInt8: {
      PrintData<int8_t>(tensor->MutableData(), tensor->ElementsNum());
    } break;
    case kNumberTypeUInt8: {
      PrintData<uint8_t>(tensor->MutableData(), tensor->ElementsNum());
    } break;
    default:
      std::cout << "Unsupported data type to print" << std::endl;
      break;
  }
}

/**
 * Benchmark entry point.
 *
 * argv[1]: comma-separated list of input binary files
 * argv[2]: (optional) model weight binary file
 *
 * @return lite::RET_OK on success, lite::RET_ERROR on any failure.
 */
int main(int argc, const char **argv) {
  if (argc < 2) {
    std::cout << "input command is invalid\n" << std::endl;
    usage();
    return lite::RET_ERROR;
  }
  std::cout << "start run benchmark" << std::endl;

  const char *model_buffer = nullptr;
  int model_size = 0;
  // read .net file by ReadBinaryFile;
  if (argc >= 3) {
    model_buffer = static_cast<const char *>(ReadInputData(argv[2], &model_size));
  }
  // NOTE(review): model_buffer is intentionally kept alive for the whole run;
  // whether CreateSession copies the buffer is not visible here — confirm
  // ownership before freeing it earlier.
  session::LiteSession *session = mindspore::session::LiteSession::CreateSession(model_buffer, model_size, nullptr);
  if (session == nullptr) {
    std::cerr << "create lite session failed" << std::endl;
    return lite::RET_ERROR;
  }

  // set model inputs tensor data
  std::vector<tensor::MSTensor *> inputs = session->GetInputs();
  size_t inputs_num = inputs.size();
  void *inputs_binbuf[inputs_num];
  int inputs_size[inputs_num];
  for (size_t i = 0; i < inputs_num; ++i) {
    inputs_binbuf[i] = nullptr;  // so cleanup can free() unconditionally
    inputs_size[i] = inputs[i]->Size();
  }
  // releases everything owned by this function; the original leaked the
  // session and any read input buffers on the early-return error paths
  auto release = [&]() {
    for (size_t i = 0; i < inputs_num; ++i) {
      free(inputs_binbuf[i]);
    }
    delete session;
  };

  int ret = ReadInputsFile(const_cast<char *>(argv[1]), inputs_binbuf, inputs_size, static_cast<int>(inputs_num));
  if (ret != lite::RET_OK) {
    release();
    return lite::RET_ERROR;
  }
  for (size_t i = 0; i < inputs_num; ++i) {
    void *input_data = inputs[i]->MutableData();
    memcpy(input_data, inputs_binbuf[i], inputs_size[i]);
  }

  ret = session->RunGraph();
  if (ret != lite::RET_OK) {
    release();
    return lite::RET_ERROR;
  }

  auto outputs = session->GetOutputs();
  std::cout << "output size: " << outputs.size() << std::endl;
  for (const auto &item : outputs) {
    auto output = item.second;
    TensorToString(output);
  }

  std::cout << "run benchmark success" << std::endl;
  release();
  return lite::RET_OK;
}


+ 95
- 0
mindspore/lite/micro/example/mnist/benchmark/load_input.c View File

@@ -0,0 +1,95 @@

/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "load_input.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

/**
 * Read the whole content of a binary file (path must contain ".bin" or
 * ".net") into a freshly malloc'ed buffer.
 *
 * @param real_input_path path of the file to read; NULL returns NULL
 * @param size            out-param, receives the file size in bytes
 * @return malloc'ed buffer with the file content (caller must free()),
 *         or NULL on any failure
 */
void *ReadInputData(const char *real_input_path, int *size) {
  if (real_input_path == NULL || size == NULL) {
    return NULL;
  }
  if (!strstr(real_input_path, ".bin") && !strstr(real_input_path, ".net")) {
    printf("input data file should be .bin , .net");
    return NULL;
  }
  /* "rb", not "rb+": the file is only read, so opening must not
   * require write permission. */
  FILE *file = fopen(real_input_path, "rb");
  if (!file) {
    printf("Can't find %s\n", real_input_path);
    return NULL;
  }
  fseek(file, 0, SEEK_END);
  *size = (int)ftell(file);
  fseek(file, 0, SEEK_SET);
  unsigned char *buf = (unsigned char *)malloc((size_t)(*size));
  if (buf == NULL) {
    /* the original dereferenced the malloc result without checking it */
    printf("malloc %d bytes failed\n", (*size));
    fclose(file);
    return NULL;
  }
  (void)memset(buf, 0, (size_t)(*size));
  int read_size = (int)(fread(buf, 1, (size_t)(*size), file));
  if (read_size != (*size)) {
    printf("read file failed, total file size: %d, read_size: %d\n", (*size), read_size);
    fclose(file);
    free(buf);
    return NULL;
  }
  fclose(file);
  return (void *)buf;
}

/**
 * Write each byte of output_data to the text file final_name as a
 * decimal number followed by a tab ("%d\t"). Overwrites the file.
 *
 * @param final_name  path of the output file
 * @param output_data bytes to dump; NULL writes nothing
 * @param out_size    number of bytes in output_data
 */
void SaveOutputData(char *final_name, unsigned char *output_data, unsigned int out_size) {
  if (final_name == NULL || output_data == NULL) {
    return;
  }
  FILE *output_file = fopen(final_name, "w");
  if (output_file == NULL) {
    printf("fopen output file: %s failed\n", final_name);
    return;
  }
  /* print directly from output_data: the original first copied the whole
   * buffer into a variable-length array on the stack, which risks a stack
   * overflow for large outputs and served no purpose */
  for (unsigned int i = 0; i < out_size; ++i) {
    fprintf(output_file, "%d\t", output_data[i]);
  }
  fclose(output_file);
}

/**
 * Split a comma-separated list of input file paths and load each file
 * into buffers[i] via ReadInputData. Each loaded buffer is malloc'ed
 * and owned by the caller.
 *
 * @param path        comma-separated path list; modified in place by strtok_r
 * @param buffers     out array of inputs_num buffer pointers
 * @param inputs_size expected byte size of each input
 * @param inputs_num  number of inputs expected
 * @return 0 on success, -1 on count mismatch, size mismatch or read failure
 */
int ReadInputsFile(char *path, void **buffers, const int *inputs_size, int inputs_num) {
  char *inputs_path[inputs_num];
  char *delim = ",";
  char *token;
  int i = 0;
  while ((token = strtok_r(path, delim, &path))) {
    if (i >= inputs_num) {
      printf("inputs num is error, need: %d\n", inputs_num);
      return -1;
    }
    inputs_path[i] = token;
    printf("input %d: %s\n", i, inputs_path[i]);
    i++;
  }
  /* the original went on to read uninitialized inputs_path entries when
   * fewer than inputs_num paths were supplied */
  if (i != inputs_num) {
    printf("inputs num is error, need: %d, given: %d\n", inputs_num, i);
    return -1;
  }

  for (i = 0; i < inputs_num; ++i) {
    int size = 0;
    buffers[i] = ReadInputData(inputs_path[i], &size);
    if (size != inputs_size[i] || buffers[i] == NULL) {
      printf("size mismatch, %s, input: %d, needed: %d\n", inputs_path[i], size, inputs_size[i]);
      return -1;
    }
  }
  return 0;
}


+ 36
- 0
mindspore/lite/micro/example/mnist/benchmark/load_input.h View File

@@ -0,0 +1,36 @@

/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* Helpers for the generated-code benchmark: load model inputs from
 * binary files and dump outputs to text files. */

#ifndef MICRO_EXAMPLE_LOAD_INPUT_LOAD_INPUT_H_
#define MICRO_EXAMPLE_LOAD_INPUT_LOAD_INPUT_H_

#ifdef __cplusplus
extern "C" {
#endif

/* Read the whole file at real_input_path (".bin"/".net") into a
 * malloc'ed buffer; *size receives the byte count. Caller frees.
 * Returns NULL on failure. */
void *ReadInputData(const char *real_input_path, int *size);

/* Write out_size bytes of output_data to final_name as tab-separated
 * decimal numbers. */
void SaveOutputData(char *final_name, unsigned char *output_data, unsigned int out_size);

/* Split the comma-separated list in path and load each file into
 * buffers[i]; each expected size is taken from inputs_size.
 * Returns 0 on success, -1 on failure. */
int ReadInputsFile(char *path, void **buffers, const int *inputs_size, int inputs_num);

#ifdef __cplusplus
}
#endif

#endif  // MICRO_EXAMPLE_LOAD_INPUT_LOAD_INPUT_H_


+ 26
- 98
mindspore/lite/micro/example/mnist/mnist.sh View File

@@ -15,111 +15,39 @@
# ============================================================================
set -e

CURRENT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
MINDSPORE_ROOT_DIR=${${CURRENT_DIR}%%/mindspore/lite/micro/example/mnist}
BASEPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
MINDSPORE_ROOT_DIR=${${BASEPATH}%%/mindspore/lite/micro/example/mnist}

OUTPUT_DIR=${1:-${MINDSPORE_ROOT_DIR}/output}
THREAD_NUM=${2:-32}
MODULE_NAME=mnist
OUTPUT_IR=Reshape-64.ir
CALIB_OUT=${CURRENT_DIR}/Reshape-64.out
echo "current dir is: ${BASEPATH}"

echo "current dir is: ${CURRENT_DIR}"
echo "packed output dir is :${OUTPUT_DIR}"
VERSION_HEADER=${MINDSPORE_ROOT_DIR}/mindspore/lite/include/version.h
INPUT_BIN=${BASEPATH}/mnist_input.bin

if [ ! -d "${OUTPUT_DIR}" ]; then
echo "folder ${OUTPUT_DIR} does not exist"
return 1
fi

# rm if already exist
WORKSPACE=${CURRENT_DIR}/build
rm -rf ${WORKSPACE}
mkdir ${WORKSPACE} || exit 1
PROJECT_DIR=${WORKSPACE}/${MODULE_NAME}

compare_output() {
local OUTPUT_FILE=$1
local CALIB_FILE=$2
if [[ ! -f "${OUTPUT_FILE}" || ! -f "${CALIB_FILE}" ]]; then
echo "file ${OUTPUT_FILE}, ${CALIB_FILE} does not exist, pwd $(pwd)"
exit 1
fi
lines=$(cat ${CALIB_FILE} | wc -l)
for ((i = 1; i <= $lines; i++)); do
line1=$(awk 'NR=="'${i}'"{print $0}' ${CALIB_FILE})
line2=$(awk 'NR=="'${i}'"{print $0}' ${OUTPUT_FILE})
if [[ "${line1}" != "${line2}" ]]; then
echo -e "file ${OUTPUT_FILE}, ${CALIB_FILE}, compare failed! line: ${i}"
exit 1
fi
done
echo -e "compare success, ${OUTPUT_FILE}, ${CALIB_FILE}"
get_version() {
VERSION_MAJOR=$(grep "const int ms_version_major =" ${VERSION_HEADER} | tr -dc "[0-9]")
VERSION_MINOR=$(grep "const int ms_version_minor =" ${VERSION_HEADER} | tr -dc "[0-9]")
VERSION_REVISION=$(grep "const int ms_version_revision =" ${VERSION_HEADER} | tr -dc "[0-9]")
VERSION_STR=${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_REVISION}
}
get_version
MINDSPORE_FILE_NAME="mindspore-lite-${VERSION_STR}-inference-linux-x64"
MINDSPORE_FILE="${MINDSPORE_FILE_NAME}.tar.gz"
MINDSPORE_LITE_DOWNLOAD_URL="https://ms-release.obs.cn-north-4.myhuaweicloud.com/${VERSION_STR}/MindSpore/lite/release/linux/${MINDSPORE_FILE}"

# cp oplib and codegen
cp ${OUTPUT_DIR}/mindspore-lite-*-codegen-linux-x64.tar.gz ${WORKSPACE}/ || exit 1
cd ${WORKSPACE} || exit 1
tar -zxf mindspore-lite-*-codegen-linux-x64.tar.gz || exit 1
cd mindspore-lite-*-codegen-linux-x64 || exit 1
mv operator_library/ ${WORKSPACE}/ || exit 1
mv codegen ${WORKSPACE}/ || exit 1
cd -
rm -r mindspore-lite-*-codegen-linux-x64 || exit 1
rm mindspore-lite-*-codegen-linux-x64.tar.gz || exit 1

# convert model
cp ${OUTPUT_DIR}/mindspore-lite-*-converter-linux-x64.tar.gz ${WORKSPACE}/ || exit 1
cd ${WORKSPACE} || exit 1
tar -zxf mindspore-lite-*-converter-linux-x64.tar.gz || exit 1
rm mindspore-lite-*-converter-linux-x64.tar.gz || exit 1
cd mindspore-lite-*-converter-linux-x64 || exit 1
export LD_LIBRARY_PATH=./lib/:./third_party/protobuf/lib:./third_party/flatbuffers/lib:./third_party/glog/lib
converter/converter_lite --fmk=TFLITE \
--modelFile=${CURRENT_DIR}/mnist.tflite \
--outputFile=${WORKSPACE}/mnist
cd -
rm -rf mindspore-lite-*-converter-linux-x64 || exit 1
mkdir -p build

# generate code
${WORKSPACE}/codegen --modelPath=${WORKSPACE}/mnist.ms \
--moduleName=${MODULE_NAME} \
--isWeightFile=true \
--debugMode=true
rm codegen

if [ ! -d "${PROJECT_DIR}" ]; then
echo "folder ${PROJECT_DIR} does not exist"
return 1
if [ ! -e ${BASEPATH}/build/${MINDSPORE_FILE} ]; then
wget -c -O ${BASEPATH}/build/${MINDSPORE_FILE} --no-check-certificate ${MINDSPORE_LITE_DOWNLOAD_URL}
fi
cd ${PROJECT_DIR} || exit 1

# 1. build static lib.a
echo -e "building static library"
mkdir -p src/build && cd src/build || exit 1
OP_HEADER_PATH=${WORKSPACE}/operator_library/include
OP_LIB=${WORKSPACE}/operator_library/lib/x86/libops.a
echo "Head Path: ${OP_HEADER_PATH}"
echo "Lib Path: ${OP_LIB}"
cmake -DCMAKE_BUILD_TYPE=Debug \
-DOP_LIB=${OP_LIB} \
-DOP_HEADER_PATH=${OP_HEADER_PATH} ..
make -j${THREAD_NUM}

# 2. build benchmark
cd ${PROJECT_DIR}/benchmark && mkdir -p build && cd build || exit 1
cmake -DMODEL_LIB="${PROJECT_DIR}/src/build/libnet.a" ..
make -j${THREAD_NUM}
tar xzvf ${BASEPATH}/build/${MINDSPORE_FILE} -C ${BASEPATH}/build/ || exit 1
rm ${BASEPATH}/build/${MINDSPORE_FILE} || exit 1
PKG_PATH=${BASEPATH}/build/${MINDSPORE_FILE_NAME}
# build benchmark
mkdir -p ${BASEPATH}/build/benchmark && cd ${BASEPATH}/build/benchmark || exit 1
cmake -DPKG_PATH=${PKG_PATH} ${BASEPATH}
make

echo "net file: ${PROJECT_DIR}/src/${MODULE_NAME}.net"
echo "net file: ${BASEPATH}/src/mnist.bin"
# 3. run benchmark
./benchmark ${CURRENT_DIR}/input_1_224_224_3_uint8.bin ${PROJECT_DIR}/src/${MODULE_NAME}.net
compare_output ${OUTPUT_IR} ${CALIB_OUT}

RET=$?
if [[ "${RET}" -eq 0 ]]; then
echo -e "run benchmark success: ${MODULE_NAME}"
else
echo -e "run benchmark failed: ${MODULE_NAME}"
exit 1
fi
./benchmark ${INPUT_BIN} ${BASEPATH}/src/net.bin

BIN
mindspore/lite/micro/example/mnist/mnist_input.bin View File


+ 83
- 0
mindspore/lite/micro/example/mnist/src/CMakeLists.txt View File

@@ -0,0 +1,83 @@


cmake_minimum_required(VERSION 3.14)
project(net)

# PKG_PATH must point at an extracted mindspore-lite package; it supplies the
# prebuilt operator library and the inference headers used below.
if(NOT DEFINED PKG_PATH)
message(FATAL_ERROR "PKG_PATH not set")
endif()

# Normalize a possibly-relative PKG_PATH against the build directory.
get_filename_component(PKG_PATH ${PKG_PATH} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR})

set(OP_LIB ${PKG_PATH}/tools/codegen/operator_library/lib/libops.a)
set(OP_HEADER_PATH ${PKG_PATH}/tools/codegen/operator_library/include)
set(HEADER_PATH ${PKG_PATH}/inference)

message("operator lib path: ${OP_LIB}")
message("operator header path: ${OP_HEADER_PATH}")

include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include)
include_directories(${OP_HEADER_PATH})
include_directories(${HEADER_PATH})

# net.cmake defines OP_SRC (operator objects to repack) and the `net` target.
include(net.cmake)

option(MICRO_BUILD_ARM64 "build android arm64" OFF)
option(MICRO_BUILD_ARM32A "build android arm32" OFF)

if(MICRO_BUILD_ARM64 OR MICRO_BUILD_ARM32A)
add_compile_definitions(ENABLE_NEON)
add_compile_definitions(ENABLE_ARM)
endif()

if(MICRO_BUILD_ARM64)
add_compile_definitions(ENABLE_ARM64)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8.2-a+dotprod")
endif()

if(MICRO_BUILD_ARM32A)
add_compile_definitions(ENABLE_ARM32)
add_definitions(-mfloat-abi=softfp -mfpu=neon)
endif()

set(CMAKE_C_FLAGS "${CMAKE_ENABLE_C99} ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
# Debug builds keep symbols and default visibility for easier debugging;
# release builds enable hardening flags and -O3.
if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDebug -g")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDebug -g")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default")
else()
set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}")
endif()

# Merge the objects of the prebuilt operator library (OP_LIB) into the freshly
# built libnet.a, producing one self-contained static library.  Runs as a
# POST_BUILD step of the `net` target; ${library_name} and ${OP_SRC} are
# expanded when the function is invoked (after they are set below).
function(create_library)
add_custom_command(TARGET net
POST_BUILD
COMMAND rm -rf tmp
COMMAND mkdir tmp
COMMAND cd tmp && ar -x ${OP_LIB}
COMMAND echo "raw static library ${library_name} size:"
COMMAND ls -lh ${library_name}
COMMAND mv ${library_name} ./tmp && cd tmp && ar -x ${library_name}
COMMENT "unzip raw static library ${library_name}"
)
# Pull only the operator objects this model actually needs out of tmp/.
foreach(object_file ${OP_SRC})
add_custom_command(TARGET net POST_BUILD COMMAND mv ./tmp/${object_file} .)
endforeach()
# Repack everything into ${library_name} and clean up the scratch objects.
add_custom_command(TARGET net
POST_BUILD
COMMAND ar cr ${library_name} *.o
COMMAND ranlib ${library_name}
COMMAND echo "new static library ${library_name} size:"
COMMAND ls -lh ${library_name}
COMMAND rm -rf tmp && rm -rf *.o
COMMENT "generate specified static library ${library_name}"
)
endfunction(create_library)
string(CONCAT library_name "lib" net ".a")
create_library()


BIN
mindspore/lite/micro/example/mnist/src/net.bin View File


+ 170
- 0
mindspore/lite/micro/example/mnist/src/net.c View File

@@ -0,0 +1,170 @@

/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/


#include "weight.h"
#include "net.h"

static const unsigned char *g_Input0 = 0;
/**
 * Bind the model's input buffers.
 * @param inputs array of input data pointers supplied by the caller.
 * @param num number of entries in inputs; this model expects exactly 1.
 * @return RET_OK on success, RET_ERROR on invalid arguments.
 */
int SetInputs(const void **inputs, int num) {
  if (inputs == NULL || num != 1) {
    return RET_ERROR;
  }
  g_Input0 = inputs[0];
  return RET_OK;
}
/**
 * Copy the model's single output into the caller-provided buffer.
 * @param outputs array of output destination pointers; must hold exactly 1 entry.
 * @param num number of entries in outputs.
 * @return RET_OK on success, RET_ERROR on invalid arguments.
 */
int CopyOutputsData(void **outputs, int num) {
  if (outputs == NULL || num != 1) {
    return RET_ERROR;
  }
  /* The Softmax output (10 floats = 40 bytes) lives at offset 56 of the runtime buffer. */
  memcpy(outputs[0], g_Buffer + 56, 40);
  return RET_OK;
}

/* Size in bytes of the scratch buffer that must be passed to SetBuffer()
 * before calling Inference(); computed by the code generator. */
int GetBufferSize() { return 40032; }
/**
 * Install the caller-owned scratch buffer used by Inference().
 * @param buffer memory block of at least GetBufferSize() bytes; not copied.
 * @return RET_OK on success, RET_ERROR if buffer is NULL.
 */
int SetBuffer(void *buffer) {
  if (buffer != NULL) {
    g_Buffer = buffer;
    return RET_OK;
  }
  return RET_ERROR;
}
/**
 * Release the heap-allocated packed weights and detach the runtime buffers.
 *
 * Fix: the original copied the weight pointers into a local array and nulled
 * the array slots after free(), which left the global g_Weight14..g_Weight19
 * pointers dangling; a second call would have double-freed them.  Free and
 * null the globals themselves instead (free(NULL) is a no-op, so the function
 * is now idempotent).
 */
void FreeResource() {
  g_Buffer = NULL;
  g_Input0 = NULL;
  free(g_Weight14);
  g_Weight14 = NULL;
  free(g_Weight15);
  g_Weight15 = NULL;
  free(g_Weight16);
  g_Weight16 = NULL;
  free(g_Weight17);
  g_Weight17 = NULL;
  free(g_Weight18);
  g_Weight18 = NULL;
  free(g_Weight19);
  g_Weight19 = NULL;
}
/* Run the generated MNIST int8 network: quantize input -> conv3x3 -> maxpool
 * -> conv3x3 -> maxpool -> reshape -> fc(20) -> fc(10) -> dequantize -> softmax.
 * Reads the input bound by SetInputs() and writes the result into the scratch
 * buffer installed by SetBuffer() (CopyOutputsData reads it from offset 56).
 * All offsets and quantization constants are emitted by the code generator. */
void Inference() {
const int g_thread_num = 1;
/* op 0: quantize the fp32 input image (784 values) to int8 */
{
DoQuantizeFp32ToInt8((float *)(g_Input0), (int8_t *)(g_Buffer+0), 0.007874015718698501587, 0, 784, false);
}
/* op 1: 3x3 int8 convolution, 1x28x28x1 -> 1x26x26x12, with ReLU (act min 0) */
{
memset((int16_t *)(g_Buffer+10928), 0, 2048);
memset((int16_t *)(g_Buffer+12976), 0, 256);
memset((int *)(g_Buffer+13232), 0, 6144);
memset((int8_t *)(g_Buffer+19376), 0, 8112);
memset((int16_t *)(g_Buffer+27488), 0, 12544);
static QuantArg conv_param__quant_arg_in[1] = {{0.007874015718698501587, 0}};
static QuantArg conv_param__quant_arg_w[12] = {{0.003238174133002758026, -6}, {0.003890725085511803627, -8}, {0.003394871251657605171, -7}, {0.001685356837697327137, -127}, {0.004322394262999296188, 1}, {0.002274985425174236298, -56}, {0.003617759561166167259, 17}, {0.004447745624929666519, 23}, {0.004683905746787786484, 26}, {0.004021023400127887726, 24}, {0.005650237202644348145, 11}, {0.001966834301128983498, -84}};
static QuantArg conv_param__quant_arg_out[1] = {{0.01778890006244182587, 0}};
static double conv_param__real_multiplier[12] = {0.001433333970799530351, 0.001722176774828924938, 0.00150269379968211614, 0.0007460003866156953226, 0.001913249346122961134, 0.001006991503636309139, 0.001601352314486244018, 0.001968734305210294733, 0.002073267527210802957, 0.00177985160945266568, 0.002501001060249878095, 0.0008705926067589928779};
static int conv_param__left_shift[12] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
static int conv_param__right_shift[12] = {-9, -9, -9, -10, -9, -9, -9, -8, -8, -9, -8, -10};
static int conv_param__quant_multiplier[12] = {1575967367, 1893553389, 1652229306, 1640472199, 2103639903, 1107198867, 1760705490, 1082323130, 1139790877, 1956967540, 1374939873, 1914453388};
static int conv_param__out_act_min[1] = {0};
static int conv_param__out_act_max[1] = {127};
ConvQuantArg conv_param__conv_quant_arg = {(RoundingMode)(1), 2, conv_param__quant_arg_in, conv_param__quant_arg_w, conv_param__quant_arg_out, conv_param__real_multiplier, conv_param__left_shift, conv_param__right_shift, conv_param__quant_multiplier, conv_param__out_act_min, conv_param__out_act_max, 1, 12, 1, 2};
int thread_num = MSMIN(g_thread_num, 26);
ConvParameter conv_param_ = {{ "", 35, g_thread_num}, conv_param__conv_quant_arg, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 28, 28, 1, 1, 26, 26, 12, thread_num, 0, 0, (PadMode)(2), (ActType)(1), 0, 0, 0};
PackInputToC8Int8((int8_t *)(g_Buffer+0), (int16_t *)(g_Buffer+27488), &conv_param_);
Conv3x3Int8((int16_t *)(g_Buffer+27488), g_Weight10, g_Weight11, (int8_t *)(g_Buffer+784), (int16_t *)(g_Buffer+10928), (int16_t *)(g_Buffer+12976), (int *)(g_Buffer+13232), (int8_t *)(g_Buffer+19376), 0, &conv_param_);
PackNC4HW4ToNHWCInt8((int8_t *)(g_Buffer+19376), (int8_t *)(g_Buffer+784), 1, 676, 12);
}
/* op 2: 2x2 stride-2 int8 max pooling, 26x26x12 -> 13x13x12 */
{
static QuantArg pooling_parameter_quant_in = {0.01778890006244182587, 0};
static QuantArg pooling_parameter_quant_out = {0.01778890006244182587, 0};
static QuantArg *pooling_parameter_quant[2] = { &pooling_parameter_quant_in, &pooling_parameter_quant_out};
const PoolingParameter pooling_parameter = {{ "", 92, g_thread_num}, (PoolMode)(1), (RoundMode)(2), (PadMode)(2), (ActType)(0), 0, false, 2, 2, 2, 2, 26, 26, 1, 12, 13, 13, 1, 12, 0, 0, 0, 0, 0, pooling_parameter_quant, false};
MaxPoolingInt8((int8_t *)(g_Buffer+784), (int8_t *)(g_Buffer+8896), (PoolingParameter *)&pooling_parameter, 0);
}
/* op 3: second 3x3 int8 convolution, 13x13x12 -> 11x11x12, with ReLU */
{
memset((int16_t *)(g_Buffer+10928), 0, 4096);
memset((int16_t *)(g_Buffer+15024), 0, 256);
memset((int *)(g_Buffer+15280), 0, 6144);
memset((int8_t *)(g_Buffer+21424), 0, 1452);
memset((int16_t *)(g_Buffer+22876), 0, 5408);
static QuantArg conv_param__quant_arg_in[1] = {{0.01778890006244182587, 0}};
static QuantArg conv_param__quant_arg_w[12] = {{0.005374609492719173431, 33}, {0.005837683100253343582, 22}, {0.004709810949862003326, -15}, {0.003726204857230186462, 27}, {0.00318551529198884964, -8}, {0.003453079145401716232, 50}, {0.004045850131660699844, -9}, {0.003903790842741727829, 30}, {0.004003710579127073288, -10}, {0.00560879148542881012, 27}, {0.005486610345542430878, -23}, {0.003554018214344978333, 4}};
static QuantArg conv_param__quant_arg_out[1] = {{0.07183934003114700317, 0}};
static double conv_param__real_multiplier[12] = {0.001330863973520378732, 0.001445530533608141606, 0.001166246148374064893, 0.0009226850783705293785, 0.0007887991893445710223, 0.0008550534992628172192, 0.001001835847923064193, 0.0009666590447744700769, 0.0009914011740411567478, 0.001388852288199173826, 0.00135859773990280961, 0.0008800481219728497088};
static int conv_param__left_shift[12] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
static int conv_param__right_shift[12] = {-9, -9, -9, -10, -10, -10, -9, -10, -9, -9, -9, -10};
static int conv_param__quant_multiplier[12] = {1463300414, 1589377630, 1282301201, 2029005945, 1734587761, 1880282530, 1101530164, 2125705720, 1090057119, 1527059240, 1493794012, 1935246286};
static int conv_param__out_act_min[1] = {0};
static int conv_param__out_act_max[1] = {127};
ConvQuantArg conv_param__conv_quant_arg = {(RoundingMode)(1), 2, conv_param__quant_arg_in, conv_param__quant_arg_w, conv_param__quant_arg_out, conv_param__real_multiplier, conv_param__left_shift, conv_param__right_shift, conv_param__quant_multiplier, conv_param__out_act_min, conv_param__out_act_max, 1, 12, 1, 2};
int thread_num = MSMIN(g_thread_num, 11);
ConvParameter conv_param_ = {{ "", 35, g_thread_num}, conv_param__conv_quant_arg, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 13, 13, 12, 1, 11, 11, 12, thread_num, 0, 0, (PadMode)(2), (ActType)(1), 0, 0, 0};
PackInputToC8Int8((int8_t *)(g_Buffer+8896), (int16_t *)(g_Buffer+22876), &conv_param_);
Conv3x3Int8((int16_t *)(g_Buffer+22876), g_Weight12, g_Weight13, (int8_t *)(g_Buffer+0), (int16_t *)(g_Buffer+10928), (int16_t *)(g_Buffer+15024), (int *)(g_Buffer+15280), (int8_t *)(g_Buffer+21424), 0, &conv_param_);
PackNC4HW4ToNHWCInt8((int8_t *)(g_Buffer+21424), (int8_t *)(g_Buffer+0), 1, 121, 12);
}
/* op 4: 2x2 stride-2 int8 max pooling, 11x11x12 -> 5x5x12 */
{
static QuantArg pooling_parameter_quant_in = {0.07136065512895584106, 0};
static QuantArg pooling_parameter_quant_out = {0.07136065512895584106, 0};
static QuantArg *pooling_parameter_quant[2] = { &pooling_parameter_quant_in, &pooling_parameter_quant_out};
const PoolingParameter pooling_parameter = {{ "", 92, g_thread_num}, (PoolMode)(1), (RoundMode)(2), (PadMode)(2), (ActType)(0), 0, false, 2, 2, 2, 2, 11, 11, 1, 12, 5, 5, 1, 12, 0, 0, 0, 0, 0, pooling_parameter_quant, false};
MaxPoolingInt8((int8_t *)(g_Buffer+0), (int8_t *)(g_Buffer+1456), (PoolingParameter *)&pooling_parameter, 0);
}
/* op 5: flatten 5x5x12 -> 300 (quant params unchanged) */
{
const ReshapeQuantArg reshape_quant_arg = {{0.07136065512895584106, 0}, {0.07136065512895584106, 0}, -128, 127};
Int8Reshape((int8_t *)(g_Buffer+1456), (int8_t *)(g_Buffer+0), 300, reshape_quant_arg);
}
/* op 6: fully-connected int8 matmul, 300 -> 20 (per-channel quantization) */
{
int32_t tmp_weight_zp = 1;
RowMajor2Row16x4MajorInt8((int8_t *)(g_Buffer+0)+0, (int8_t *)(g_Buffer+10928), 1, 300);
CalcInputSums((int8_t *)(g_Buffer+0)+0, 1, 300, tmp_weight_zp, (int *)(g_Buffer+12144), RowMajor);
static float filter_scale[20] = {0.003479549195617437363, 0.004490676335990428925, 0.004529818892478942871, 0.002983231563121080399, 0.003455155529081821442, 0.003223794745281338692, 0.003272445406764745712, 0.003801185870543122292, 0.003679843153804540634, 0.003040234791114926338, 0.003704284550622105598, 0.003355232765898108482, 0.002904496388509869576, 0.003024494973942637444, 0.002794801956042647362, 0.004355110693722963333, 0.003499472280964255333, 0.004184196703135967255, 0.003057289868593215942, 0.003264668164774775505};
static int filter_zp[20] = {1, 12, 3, 2, -10, -5, -11, 5, 12, 22, 16, 1, -5, 15, 13, 5, -10, -5, -6, 0};
static int left_shift[20] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
static int right_shift[20] = {-10, -9, -9, -10, -10, -10, -10, -9, -9, -10, -9, -10, -10, -10, -10, -9, -10, -9, -10, -10};
static int multiplier[20] = {2108215049, 1360422072, 1372280070, 1807502393, 2093435146, 1953256619, 1982733521, 1151545365, 1114785262, 1842040025, 1122189669, 2032893316, 1759797843, 1832503464, 1693335354, 1319353429, 2120286176, 1267576078, 1852373503, 1978021333};
const MatmulQuantParameter matmul_quant_parameter = {{0.07136065512895584106, 0}, {0, 0}, {0.258998185396194458, 0}, -128, 127, filter_scale, filter_zp, left_shift, right_shift, multiplier};
int32_t *cur_left = matmul_quant_parameter.left_shift_ + 0;
int32_t *cur_right = matmul_quant_parameter.right_shift_ + 0;
int32_t *cur_mul = matmul_quant_parameter.quant_multiplier_ + 0;
int32_t *cur_zp = matmul_quant_parameter.filter_zp_ + 0;
MatmulInt8Opt((int8_t *)(g_Buffer+10928), g_Weight15+0 + 0, (int8_t *)(g_Buffer+304)+0+0, 1, 20, 304, (int *)(g_Buffer+12144), g_Weight16+0, -128, 127, 0, cur_mul, cur_left, cur_right, 20, true, cur_zp);
}
/* op 7: fully-connected int8 matmul, 20 -> 10 (per-channel quantization) */
{
int32_t tmp_weight_zp = 1;
RowMajor2Row16x4MajorInt8((int8_t *)(g_Buffer+304)+0, (int8_t *)(g_Buffer+10928), 1, 20);
CalcInputSums((int8_t *)(g_Buffer+304)+0, 1, 20, tmp_weight_zp, (int *)(g_Buffer+11056), RowMajor);
static float filter_scale[10] = {0.004678330849856138229, 0.005127115640789270401, 0.00471437256783246994, 0.004531511571258306503, 0.005476122256368398666, 0.004348111804574728012, 0.004803542047739028931, 0.006081215571612119675, 0.004532597027719020844, 0.004762654658406972885};
static int filter_zp[10] = {7, -2, 9, 2, -6, 21, 16, 10, -19, 8};
static int left_shift[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
static int right_shift[10] = {-8, -8, -8, -8, -8, -8, -8, -8, -8, -8};
static int multiplier[10] = {1242805482, 1362025788, 1252380041, 1203802750, 1454739904, 1155082292, 1276068015, 1615483838, 1204091115, 1265206260};
const MatmulQuantParameter matmul_quant_parameter = {{0.258998185396194458, 0}, {0, 0}, {0.5359870791435241699, 0}, -128, 127, filter_scale, filter_zp, left_shift, right_shift, multiplier};
int32_t *cur_left = matmul_quant_parameter.left_shift_ + 0;
int32_t *cur_right = matmul_quant_parameter.right_shift_ + 0;
int32_t *cur_mul = matmul_quant_parameter.quant_multiplier_ + 0;
int32_t *cur_zp = matmul_quant_parameter.filter_zp_ + 0;
MatmulInt8Opt((int8_t *)(g_Buffer+10928), g_Weight18+0 + 0, (int8_t *)(g_Buffer+0)+0+0, 1, 10, 32, (int *)(g_Buffer+11056), g_Weight19+0, -128, 127, 0, cur_mul, cur_left, cur_right, 10, true, cur_zp);
}
/* op 8: dequantize the 10 logits back to fp32 */
{
DoDequantizeInt8ToFp32((int8_t *)(g_Buffer+0), (float *)(g_Buffer+16), 0.5359870791435241699, 0, 10);
}
/* op 9: softmax over the 10 classes; result at offset 56 (read by CopyOutputsData) */
{
const SoftmaxParameter softmax_parameter = {{ "", 138, g_thread_num}, 1, {1, 10}, 10, 2};
memset((float *)(g_Buffer+10928), 0, 4);
Softmax((float *)(g_Buffer+16), (float *)(g_Buffer+56), (float *)(g_Buffer+10928), &softmax_parameter);
}
}

+ 25
- 0
mindspore/lite/micro/example/mnist/src/net.cmake View File

@@ -0,0 +1,25 @@
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include/)
# Object files this model needs from the prebuilt operator library; consumed
# by create_library() in CMakeLists.txt when repacking libnet.a.
# NOTE(review): weight.c.o/net.c.o/session.cc.o/tensor.cc.o are built from
# NET_SRC below, not extracted from libops.a — presumably listed so the
# repack step keeps them; confirm against create_library().
set(OP_SRC
common_func.c.o
common_func_int8.c.o
conv3x3_int8.c.o
conv_int8.c.o
exp_fp32.c.o
fixed_point.c.o
matmul_int8.c.o
matmul_int8_wrapper.c.o
pack_int8.c.o
pooling_int8.c.o
quant_dtype_cast_int8.c.o
reshape_int8.c.o
softmax_fp32.c.o
weight.c.o
net.c.o
session.cc.o
tensor.cc.o
)
# Compile all generated sources in this directory into the static net library.
file(GLOB NET_SRC
${CMAKE_CURRENT_SOURCE_DIR}/*.cc
${CMAKE_CURRENT_SOURCE_DIR}/*.c
)
add_library(net STATIC ${NET_SRC})

+ 56
- 0
mindspore/lite/micro/example/mnist/src/net.h View File

@@ -0,0 +1,56 @@

/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_MICRO_EXAMPLE_MNIST_SRC_NET_H_
#define MINDSPORE_LITE_MICRO_EXAMPLE_MNIST_SRC_NET_H_

#ifdef __cplusplus
extern "C" {
#endif
/**
 * set input tensors
 * @param inputs, the input data ptr's array of the model, the tensors' count of input may be greater than one.
 * @param num, the input data's number of the model.
 **/
int SetInputs(const void **inputs, int num);

/**
 * copy the model outputs into the caller-provided buffers
 * @param outputs, array of destination pointers, one per model output.
 * @param num, the number of entries in outputs.
 **/
int CopyOutputsData(void **outputs, int num);

/**
 * @param weight_buffer, the address of the weight binary file
 * @param weight_size, the size of the model file in bytes
 **/
int Init(void *weight_buffer, int weight_size);

/**
 * get the memory space size of the inference.
 **/
int GetBufferSize();
/**
 * set the memory space for the inference
 **/
int SetBuffer(void *buffer);

/**
 * free the memory of packed weights, and set the membuf buffer and input address to NULL
 **/
void FreeResource();
/**
 * net inference function
 **/
void Inference();

#ifdef __cplusplus
}
#endif

#endif  // MINDSPORE_LITE_MICRO_EXAMPLE_MNIST_SRC_NET_H_

+ 157
- 0
mindspore/lite/micro/example/mnist/src/session.cc View File

@@ -0,0 +1,157 @@

/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "session.h"

#include <vector>

#include "net.h"

namespace mindspore {
namespace lite {
// Build the fixed tensor graph generated for this model: one fp32 input
// (1x28x28x1) and one fp32 output (Softmax, 1x10).  The `model` argument is
// unused — the graph structure is baked in at code-generation time.
int LiteSession::CompileGraph(lite::Model *model) {
inputs_.resize(1);
inputs_[0] = new (std::nothrow) MTensor("graph_input-0", kNumberTypeFloat32, {1, 28, 28, 1, });
MS_ERROR_IF_NULL(inputs_[0]);
outputs_.resize(1);
outputs_[0] = new (std::nothrow) MTensor("Softmax-7", kNumberTypeFloat32, {1, 10, });
MS_ERROR_IF_NULL(outputs_[0]);
// Outputs are owned via output_tensor_map_; the destructor deletes them there.
for (const auto &output: outputs_) {
output_tensor_map_[output->tensor_name()] = output;
}
return RET_OK;
}


int LiteSession::RunGraph(const KernelCallBack &before, const KernelCallBack &after) {
const void *inputs_data[inputs_.size()];
for (size_t i = 0; i < inputs_.size(); ++i) {
inputs_data[i] = inputs_[i]->MutableData();
}
SetInputs(inputs_data, inputs_.size());

Inference();

void *outputs_data[outputs_.size()];
for (size_t i = 0; i < outputs_.size(); ++i) {
outputs_data[i] = outputs_[i]->MutableData();
}
CopyOutputsData(outputs_data, outputs_.size());

return RET_OK;
}

// Tear down in dependency order: release the generated net's weights first,
// then the scratch buffer, then the tensor objects.  Output tensors are
// deleted through output_tensor_map_ (which owns every entry of outputs_),
// so outputs_ itself is intentionally not iterated.
LiteSession::~LiteSession() {
FreeResource();
if (runtime_buffer_ != nullptr) {
free(runtime_buffer_);
runtime_buffer_ = nullptr;
}
for (auto &input : inputs_) {
if (input == nullptr) {
continue;
}
delete input;
input = nullptr;
}
for (auto &item : output_tensor_map_) {
auto output = item.second;
if (output == nullptr) {
continue;
}
delete output;
output = nullptr;
}
}

// Allocate the scratch buffer required by the generated net and hand it to
// SetBuffer().  Must succeed before RunGraph() may be called.
// Fixes: reject a non-positive reported size, and release the freshly
// allocated buffer if SetBuffer() refuses it (the original kept it allocated
// while reporting failure).
int LiteSession::InitRuntimeBuffer() {
  int buffer_size = GetBufferSize();
  if (buffer_size <= 0) {
    return RET_ERROR;
  }
  runtime_buffer_ = malloc(buffer_size);
  if (runtime_buffer_ == nullptr) {
    return RET_ERROR;
  }
  if (SetBuffer(runtime_buffer_) != 0) {
    free(runtime_buffer_);
    runtime_buffer_ = nullptr;
    return RET_ERROR;
  }
  return RET_OK;
}

std::vector<tensor::MSTensor *> LiteSession::GetInputs() const {
std::vector<tensor::MSTensor *> inputs;
inputs.insert(inputs.begin(), inputs_.begin(), inputs_.end());
return inputs;
}

// Look up the outputs registered under a node name; an unknown name yields
// an empty vector.
std::vector<tensor::MSTensor *> LiteSession::GetOutputsByNodeName(const std::string &node_name) const {
  auto found = output_node_map_.find(node_name);
  if (found != output_node_map_.end()) {
    return found->second;
  }
  return {};
}

// Return a copy of the tensor-name -> output-tensor map built by CompileGraph().
std::unordered_map<std::string, mindspore::tensor::MSTensor *> LiteSession::GetOutputs() const {
return output_tensor_map_;
}

// Collect the names of every output tensor registered per node.
// NOTE(review): output_node_map_ is never populated by this generated
// session, so this currently returns an empty list — same as the original.
std::vector<std::string> LiteSession::GetOutputTensorNames() const {
  std::vector<std::string> names;
  for (const auto &node : output_node_map_) {
    for (const auto *tensor : node.second) {
      names.emplace_back(tensor->tensor_name());
    }
  }
  return names;
}

// Find an output tensor by its name; returns nullptr when absent.
mindspore::tensor::MSTensor *LiteSession::GetOutputByTensorName(const std::string &tensor_name) const {
  auto found = output_tensor_map_.find(tensor_name);
  return (found == output_tensor_map_.end()) ? nullptr : found->second;
}

// Resizing is not supported by generated micro code: shapes are fixed at
// code-generation time.  Intentionally a no-op that reports success.
int LiteSession::Resize(const std::vector<tensor::MSTensor *> &inputs, const std::vector<std::vector<int>> &dims) {
return RET_OK;
}

} // namespace lite

// Factory: create a session and allocate its runtime buffer.
// Fix: the original ignored the result of InitRuntimeBuffer(), returning a
// session whose RunGraph() would fault if the allocation failed; now the
// session is destroyed and nullptr returned on failure.
session::LiteSession *session::LiteSession::CreateSession(const lite::Context *context) {
  auto *session = new (std::nothrow) lite::LiteSession();
  if (session == nullptr) {
    return nullptr;
  }
  if (session->InitRuntimeBuffer() != lite::RET_OK) {
    delete session;
    return nullptr;
  }
  return session;
}

// Factory: create a session, compile the fixed graph, and load the packed
// weights from net_buf.
// Fixes: the original leaked the session when CompileGraph() failed and
// ignored the return value of Init(); both now clean up and return nullptr.
session::LiteSession *session::LiteSession::CreateSession(const char *net_buf, size_t size,
                                                          const lite::Context *context) {
  session::LiteSession *session = CreateSession(context);
  if (session == nullptr) {
    return nullptr;
  }
  if (session->CompileGraph(nullptr) != lite::RET_OK) {
    delete session;
    return nullptr;
  }
  if (Init(const_cast<char *>(net_buf), static_cast<int>(size)) != 0) {
    delete session;
    return nullptr;
  }
  return session;
}
} // namespace mindspore


+ 78
- 0
mindspore/lite/micro/example/mnist/src/session.h View File

@@ -0,0 +1,78 @@

/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_SESSION_H_
#define MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_SESSION_H_

#include "include/errorcode.h"
#include "include/lite_session.h"

#include "tensor.h"

namespace mindspore {
namespace lite {

// Early-return helper: bail out of the enclosing function with RET_ERROR
// when a pointer expression evaluates to nullptr.
#define MS_ERROR_IF_NULL(ptr)            \
  do {                                   \
    if ((ptr) == nullptr) {              \
      return mindspore::lite::RET_ERROR; \
    }                                    \
  } while (0)

// Generated micro session: wraps the codegen'd net.c entry points behind the
// public mindspore session::LiteSession interface.  Graph shape is fixed at
// code-generation time, so CompileGraph ignores its model argument and
// Resize is a no-op.
// Fix: runtime_buffer_ was left uninitialized; if InitRuntimeBuffer() was
// never called (or failed before assigning), the destructor would free an
// indeterminate pointer.  It is now default-initialized to nullptr.
class LiteSession : public session::LiteSession {
 public:
  LiteSession() = default;

  ~LiteSession() override;

  // Thread binding is not supported by generated code.
  void BindThread(bool if_bind) override {}

  // Builds the fixed input/output tensors; `model` is unused.
  int CompileGraph(lite::Model *model) override;

  std::vector<tensor::MSTensor *> GetInputs() const override;

  mindspore::tensor::MSTensor *GetInputsByTensorName(const std::string &tensor_name) const override { return nullptr; }

  int RunGraph(const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr) override;

  std::vector<tensor::MSTensor *> GetOutputsByNodeName(const std::string &node_name) const override;

  std::unordered_map<std::string, mindspore::tensor::MSTensor *> GetOutputs() const override;

  std::vector<std::string> GetOutputTensorNames() const override;

  mindspore::tensor::MSTensor *GetOutputByTensorName(const std::string &tensor_name) const override;

  // Shapes are fixed at code-generation time; always succeeds without effect.
  int Resize(const std::vector<tensor::MSTensor *> &inputs, const std::vector<std::vector<int>> &dims) override;

  // Allocates the scratch buffer required by the generated net; must be
  // called (and succeed) before RunGraph().
  int InitRuntimeBuffer();

 private:
  int SetInputsData(const std::vector<MTensor *> &inputs) const;
  std::vector<MTensor *> inputs_;
  // Owned via output_tensor_map_; see the destructor.
  std::vector<MTensor *> outputs_;
  std::unordered_map<std::string, mindspore::tensor::MSTensor *> output_tensor_map_;
  std::unordered_map<std::string, std::vector<mindspore::tensor::MSTensor *>> output_node_map_;

  // Scratch memory handed to the generated net via SetBuffer(); freed in the
  // destructor.
  void *runtime_buffer_ = nullptr;
};

} // namespace lite
} // namespace mindspore

#endif // MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_SESSION_H_


+ 93
- 0
mindspore/lite/micro/example/mnist/src/tensor.cc View File

@@ -0,0 +1,93 @@


/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "tensor.h"

namespace mindspore {
namespace lite {
// Byte width of one element of the given type; 0 for unsized/object types.
size_t DataTypeSize(const TypeId type) {
  if (type == kNumberTypeFloat64) {
    return sizeof(double);
  }
  if (type == kNumberTypeFloat || type == kNumberTypeFloat32) {
    return sizeof(float);
  }
  if (type == kNumberTypeInt8) {
    return sizeof(int8_t);
  }
  if (type == kNumberTypeUInt8) {
    return sizeof(uint8_t);
  }
  // Float16 is stored in a 16-bit slot, same as Int16.
  if (type == kNumberTypeFloat16 || type == kNumberTypeInt16) {
    return sizeof(int16_t);
  }
  if (type == kNumberTypeInt32) {
    return sizeof(int32_t);
  }
  if (type == kNumberTypeInt64) {
    return sizeof(int64_t);
  }
  if (type == kNumberTypeUInt16) {
    return sizeof(uint16_t);
  }
  if (type == kNumberTypeUInt32) {
    return sizeof(uint32_t);
  }
  if (type == kNumberTypeUInt64) {
    return sizeof(uint64_t);
  }
  if (type == kNumberTypeBool) {
    return sizeof(bool);
  }
  if (type == kObjectTypeString) {
    return sizeof(char);
  }
  // kObjectTypeTensorType and any unknown type have no fixed element size.
  return 0;
}

// Release the lazily allocated data buffer; free(nullptr) is a no-op.
MTensor::~MTensor() {
  free(data_);
  data_ = nullptr;
}

// Size of one dimension of the shape, or -1 when index is out of range.
int MTensor::DimensionSize(const size_t index) const {
  if (index >= shape_.size()) {
    return -1;
  }
  return shape_[index];
}

// Total element count: the product of all dimensions (1 for an empty shape).
int MTensor::ElementsNum() const {
  int total = 1;
  for (size_t k = 0; k < shape_.size(); ++k) {
    total *= shape_[k];
  }
  return total;
}

// Total byte size: element width times element count.
size_t MTensor::Size() const { return DataTypeSize(data_type_) * static_cast<size_t>(ElementsNum()); }

// Lazily allocate the backing buffer on first access and return it.
// NOTE(review): the malloc result is not checked here — callers receive
// nullptr on allocation failure; confirm callers handle that.
void *MTensor::MutableData() {
if (data_ == nullptr) {
data_ = malloc(this->Size());
}
return data_;
}
} // namespace lite
} // namespace mindspore


+ 71
- 0
mindspore/lite/micro/example/mnist/src/tensor.h View File

@@ -0,0 +1,71 @@


/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_
#define MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_

#include "include/ms_tensor.h"
#include <utility>
#include <vector>

namespace mindspore {
namespace lite {
// Per-tensor quantization parameters.
// NOTE(review): scale, zeroPoint, inited, bitNum, roundType, multiplier and
// dstDtype have no default initializers — aggregate-initialize fully or
// confirm every producer sets them.
struct QuantArg {
double scale;            // quantization scale factor
int32_t zeroPoint;       // quantized value corresponding to real 0
float var_corr{1};       // variance correction factor
float mean_corr{0};      // mean correction factor
bool inited;             // whether this arg has been populated
std::vector<float> clusters{};  // cluster centers for k-means quantization, if any
int bitNum;              // quantization bit width
int roundType;           // rounding mode identifier
int multiplier;          // fixed-point multiplier
int dstDtype;            // destination data type id
};

// Minimal MSTensor implementation for generated micro code: owns a
// malloc'd data buffer that is allocated lazily by MutableData() and freed
// in the destructor.
// NOTE(review): the default constructor leaves data_type_ uninitialized —
// confirm all call sites use the three-argument constructor.
class MTensor : public mindspore::tensor::MSTensor {
public:
MTensor() = default;
MTensor(std::string name, enum TypeId type, std::vector<int32_t> shape)
: tensor_name_(std::move(name)), data_type_(type), shape_(std::move(shape)) {}
~MTensor() override;

TypeId data_type() const override { return data_type_; }
std::vector<int> shape() const override { return shape_; }
int DimensionSize(size_t index) const override;
int ElementsNum() const override;
size_t Size() const override;
void *MutableData() override;
std::string tensor_name() const override { return tensor_name_; }
void set_tensor_name(const std::string name) override { tensor_name_ = name; }
// NOTE(review): overwrites data_ without freeing a buffer previously
// allocated by MutableData() — potential leak; confirm the intended
// ownership convention before calling.
void set_data(void *data) override { data_ = data; }

private:
std::string tensor_name_;
TypeId data_type_;
std::vector<int> shape_;
void *data_ = nullptr;
std::vector<QuantArg> quant_params_;  // currently unused by generated code
};

} // namespace lite
} // namespace mindspore

#endif // MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_



+ 102
- 0
mindspore/lite/micro/example/mnist/src/weight.c View File

@@ -0,0 +1,102 @@

/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "weight.h"

/* Runtime scratch buffer; installed by SetBuffer() in net.c, owned by the caller. */
unsigned char * g_Buffer = 0 ;
/* Statically allocated packed conv weights/biases, filled by Init() from the weight file. */
int16_t g_Weight10[1536];
int32_t g_Weight11[12];
int16_t g_Weight12[3072];
int32_t g_Weight13[12];
/* Heap-allocated packed matmul weights, bias sums and zero-points;
 * allocated in Init(), released by FreeResource() in net.c. */
int32_t *g_Weight14 = NULL;
int8_t *g_Weight15 = NULL;
int32_t *g_Weight16 = NULL;
int32_t *g_Weight17 = NULL;
int8_t *g_Weight18 = NULL;
int32_t *g_Weight19 = NULL;

/* Load and pack the model weights from the flat weight file.
 * @param weight_buffer raw contents of the .bin weight file.
 * @param weight_size   size of weight_buffer in bytes; copies are bounds-checked
 *                      against it before each memcpy.
 * @return RET_OK on success, RET_ERROR on NULL input, short buffer, or OOM.
 * NOTE(review): on an allocation failure partway through, earlier mallocs are
 * not released here — callers are expected to invoke FreeResource(); confirm.
 */
int Init(void *weight_buffer, int weight_size) {
if (weight_buffer == NULL) {
return RET_ERROR;
}

struct ModelParameter {
void *addr;
size_t size;
size_t offset;
};
/* Raw matmul weights/biases at generator-fixed offsets inside the file.
 * NOTE(review): arithmetic on void* is a GCC extension (treated as char*). */
int8_t *g_Weight6 = (weight_buffer + 9312);
int32_t *g_Weight7 = (weight_buffer + 15312);
int8_t *g_Weight8 = (weight_buffer + 15392);
int32_t *g_Weight9 = (weight_buffer + 15592);

/* Conv weights/biases copied verbatim into the static arrays. */
struct ModelParameter model_params[] = {
{g_Weight10, 3072, 0},
{g_Weight11, 48, 3072},
{g_Weight12, 6144, 3120},
{g_Weight13, 48, 9264},
};

for(int i = 0; i < 4; ++i) {
if (model_params[i].offset + model_params[i].size > weight_size) {
return RET_ERROR;
}
memcpy(model_params[i].addr, (weight_buffer + model_params[i].offset), model_params[i].size);
}
/* Pack the 300x20 fully-connected layer: bias sums, zero-points and B matrix. */
{
g_Weight14 = malloc(80);
if (g_Weight14 == NULL) {
return RET_ERROR;
}
memset(g_Weight14, 0, 80);
memcpy(g_Weight14, g_Weight7, 80);
g_Weight16 = malloc(80);
if (g_Weight16 == NULL) {
return RET_ERROR;
}
memset(g_Weight16, 0, 80);
g_Weight15 = malloc(6080);
if (g_Weight15 == NULL) {
return RET_ERROR;
}
memset(g_Weight15, 0, 6080);
static int init_filter_zp[20] = {1, 12, 3, 2, -10, -5, -11, 5, 12, 22, 16, 1, -5, 15, 13, 5, -10, -5, -6, 0};
InitInt8MatrixB(g_Weight6, g_Weight16, g_Weight15, 1, 300, 20, 20, 304, 0, init_filter_zp, g_Weight14, true, true);
}
/* Pack the 20x10 fully-connected layer the same way. */
{
g_Weight17 = malloc(48);
if (g_Weight17 == NULL) {
return RET_ERROR;
}
memset(g_Weight17, 0, 48);
memcpy(g_Weight17, g_Weight9, 48);
g_Weight19 = malloc(48);
if (g_Weight19 == NULL) {
return RET_ERROR;
}
memset(g_Weight19, 0, 48);
g_Weight18 = malloc(384);
if (g_Weight18 == NULL) {
return RET_ERROR;
}
memset(g_Weight18, 0, 384);
static int init_filter_zp[10] = {7, -2, 9, 2, -6, 21, 16, 10, -19, 8};
InitInt8MatrixB(g_Weight8, g_Weight19, g_Weight18, 1, 20, 10, 12, 32, 0, init_filter_zp, g_Weight17, true, true);
}
return RET_OK;
}


+ 46
- 0
mindspore/lite/micro/example/mnist/src/weight.h View File

@@ -0,0 +1,46 @@

/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "nnacl/common_func.h"
#include "nnacl/errorcode.h"
#include "nnacl/fp32/softmax_fp32.h"
#include "nnacl/int8/common_func_int8.h"
#include "nnacl/int8/conv3x3_int8.h"
#include "nnacl/int8/conv_int8.h"
#include "nnacl/int8/matmul_int8.h"
#include "nnacl/int8/pooling_int8.h"
#include "nnacl/int8/quant_dtype_cast_int8.h"
#include "nnacl/int8/reshape_int8.h"
#include "wrapper/int8/matmul_int8_wrapper.h"
#include <stdlib.h>
#include <string.h>
extern unsigned char *g_Buffer;
enum STATUS {
RET_OK = 0,
RET_ERROR = 1,
};

extern int16_t g_Weight10[];
extern int32_t g_Weight11[];
extern int16_t g_Weight12[];
extern int32_t g_Weight13[];
extern int32_t *g_Weight14;
extern int8_t *g_Weight15;
extern int32_t *g_Weight16;
extern int32_t *g_Weight17;
extern int8_t *g_Weight18;
extern int32_t *g_Weight19;

+ 7
- 4
scripts/check_clang_format.sh View File

@@ -33,7 +33,7 @@ echo "SCRIPTS_PATH=$SCRIPTS_PATH"
# print usage message
function usage()
{
echo "Check whether the specified source files were well formated"
echo "Check whether the specified source files were well formatted"
echo "Usage:"
echo "bash $0 [-a] [-c] [-l] [-h]"
echo "e.g. $0 -a"
@@ -97,8 +97,11 @@ fi
CHECK_RESULT_FILE=__code_format_check_result__
echo "0" > "$CHECK_RESULT_FILE"

# check format of files modified in the lastest commit
# check format of files modified in the latest commit
while read line; do
if [ ! -e ${line} ]; then
continue
fi
BASE_NAME=$(basename "${line}")
TEMP_FILE="__TEMP__${BASE_NAME}"
cp "${line}" "${TEMP_FILE}"
@@ -107,7 +110,7 @@ while read line; do
ret=$?
rm "${TEMP_FILE}"
if [[ "${ret}" -ne 0 ]]; then
echo "File ${line} is not formated, please format it."
echo "File ${line} is not formatted, please format it."
echo "1" > "${CHECK_RESULT_FILE}"
break
fi
@@ -118,6 +121,6 @@ rm "${CHECK_RESULT_FILE}"
rm "${CHECK_LIST_FILE}"
cd "${CURRENT_PATH}" || exit 1
if [[ "X${result}" == "X0" ]]; then
echo "Check PASS: specified files are well formated!"
echo "Check PASS: specified files are well formatted!"
fi
exit "${result}"

Loading…
Cancel
Save