From 26e20de83277ec629841a511d95ca15e40d746a1 Mon Sep 17 00:00:00 2001 From: z00512249 Date: Thu, 25 Mar 2021 10:14:47 +0800 Subject: [PATCH] up_date mnist_x86 example codes --- .../micro/example/mnist_x86/CMakeLists.txt | 12 +- .../example/mnist_x86/benchmark/benchmark.cc | 91 ++++-- .../lite/micro/example/mnist_x86/mnist.sh | 2 +- .../example/mnist_x86/src/CMakeLists.txt | 9 +- .../lite/micro/example/mnist_x86/src/model.h | 65 ++++ .../lite/micro/example/mnist_x86/src/net.c | 57 ++-- .../micro/example/mnist_x86/src/net.cmake | 1 + .../micro/example/mnist_x86/src/session.cc | 95 +++--- .../micro/example/mnist_x86/src/session.h | 37 ++- .../micro/example/mnist_x86/src/string.cc | 306 ++++++++++++++++++ .../micro/example/mnist_x86/src/tensor.cc | 10 - .../lite/micro/example/mnist_x86/src/tensor.h | 25 +- .../lite/micro/example/mnist_x86/src/weight.c | 8 +- .../lite/micro/example/mnist_x86/src/weight.h | 1 + 14 files changed, 561 insertions(+), 158 deletions(-) create mode 100644 mindspore/lite/micro/example/mnist_x86/src/model.h create mode 100644 mindspore/lite/micro/example/mnist_x86/src/string.cc diff --git a/mindspore/lite/micro/example/mnist_x86/CMakeLists.txt b/mindspore/lite/micro/example/mnist_x86/CMakeLists.txt index f6356a13db..fb0470ed29 100644 --- a/mindspore/lite/micro/example/mnist_x86/CMakeLists.txt +++ b/mindspore/lite/micro/example/mnist_x86/CMakeLists.txt @@ -1,5 +1,4 @@ - cmake_minimum_required(VERSION 3.14) project(benchmark) @@ -14,6 +13,8 @@ set(HEADER_PATH ${PKG_PATH}/inference) option(MICRO_BUILD_ARM64 "build android arm64" OFF) option(MICRO_BUILD_ARM32A "build android arm32" OFF) +add_compile_definitions(NOT_USE_STL) + if(MICRO_BUILD_ARM64 OR MICRO_BUILD_ARM32A) add_compile_definitions(ENABLE_NEON) add_compile_definitions(ENABLE_ARM) @@ -38,15 +39,17 @@ if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default") else() - set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \ + message(STATUS "build benchmark release version") + set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \ -Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}") - set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \ + set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \ -Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}") + string(REPLACE "-g" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") + string(REPLACE "-g" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") endif() add_subdirectory(src) include_directories(${CMAKE_CURRENT_SOURCE_DIR}) -include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../src/) include_directories(${HEADER_PATH}) set(SRC_FILES benchmark/benchmark.cc @@ -54,4 +57,3 @@ set(SRC_FILES ) add_executable(benchmark ${SRC_FILES}) target_link_libraries(benchmark net -lm -pthread) - diff --git a/mindspore/lite/micro/example/mnist_x86/benchmark/benchmark.cc b/mindspore/lite/micro/example/mnist_x86/benchmark/benchmark.cc index 6abbcd8313..7c9cb3f6ec 100644 --- a/mindspore/lite/micro/example/mnist_x86/benchmark/benchmark.cc +++ b/mindspore/lite/micro/example/mnist_x86/benchmark/benchmark.cc @@ -1,5 +1,4 @@ - /** * Copyright 2021 Huawei Technologies Co., Ltd * @@ -39,6 +38,17 @@ void usage() { "args[5]: runtime thread 
bind mode\n\n");
 }
 
+uint64_t GetTimeUs() {
+  const int USEC = 1000000;
+  const int MSEC = 1000;
+  struct timespec ts = {0, 0};
+  if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
+    return 0;
+  }
+  uint64_t retval = (uint64_t)((ts.tv_sec * USEC) + (ts.tv_nsec / MSEC));
+  return retval;
+}
+
 template <typename T>
 void PrintData(void *data, size_t data_number) {
   if (data == nullptr) {
@@ -46,23 +56,20 @@ void PrintData(void *data, size_t data_number) {
   }
   auto casted_data = static_cast<T *>(data);
   for (size_t i = 0; i < 10 && i < data_number; i++) {
-    std::cout << std::to_string(casted_data[i]) << ", ";
+    printf("%s, ", std::to_string(casted_data[i]).c_str());
   }
-  std::cout << std::endl;
+  printf("\n");
 }
 
 void TensorToString(tensor::MSTensor *tensor) {
-  uint8_t i = 0;
-  std::cout << "uint8: " << i << std::endl;
-
-  std::cout << "Name: " << tensor->tensor_name();
-  std::cout << ", DataType: " << tensor->data_type();
-  std::cout << ", Size: " << tensor->Size();
-  std::cout << ", Shape:";
+  printf("name: %s, ", tensor->tensor_name().c_str());
+  printf("DataType: %d, ", tensor->data_type());
+  printf("Elements: %d, ", tensor->ElementsNum());
+  printf("Shape: [");
   for (auto &dim : tensor->shape()) {
-    std::cout << " " << dim;
+    printf("%d ", dim);
   }
-  std::cout << ", Data:" << std::endl;
+  printf("], Data: \n");
   switch (tensor->data_type()) {
     case kNumberTypeFloat32: {
       PrintData<float>(tensor->MutableData(), tensor->ElementsNum());
@@ -90,26 +97,42 @@ void TensorToString(tensor::MSTensor *tensor) {
 
 int main(int argc, const char **argv) {
   if (argc < 2) {
-    std::cout << "input command is invalid\n" << std::endl;
+    printf("input command is invalid\n");
     usage();
     return lite::RET_ERROR;
   }
-  std::cout << "start run benchmark" << std::endl;
+  printf("=======run benchmark======\n");
   const char *model_buffer = nullptr;
   int model_size = 0;
-  // read .net file by ReadBinaryFile;
+  // read .bin file by ReadBinaryFile;
   if (argc >= 3) {
     model_buffer = static_cast<const char *>(ReadInputData(argv[2], &model_size));
   }
-  session::LiteSession *session = mindspore::session::LiteSession::CreateSession(model_buffer, model_size, nullptr);
+
+  lite::Context *context = nullptr;
+  if (argc >= 5) {
+    // config benchmark context
+    context = new (std::nothrow) lite::Context();
+    if (context == nullptr) {
+      return lite::RET_ERROR;
+    }
+    context->thread_num_ = atoi(argv[4]);
+    context->device_list_.resize(1);
+    context->device_list_[0] = {lite::DT_CPU, {{false, static_cast<lite::CpuBindMode>(atoi(argv[5]))}}};
+    printf("context: ThreadNum: %d, BindMode: %d\n", context->thread_num_,
+           context->device_list_[0].device_info_.cpu_device_info_.cpu_bind_mode_);
+  }
+
+  session::LiteSession *session = mindspore::session::LiteSession::CreateSession(model_buffer, model_size, context);
   if (session == nullptr) {
-    std::cerr << "create lite session failed" << std::endl;
+    printf("create lite session failed\n");
     return lite::RET_ERROR;
   }
+  delete[] model_buffer;
 
   // set model inputs tensor data
-  std::vector<tensor::MSTensor *> inputs = session->GetInputs();
+  Vector<tensor::MSTensor *> inputs = session->GetInputs();
   size_t inputs_num = inputs.size();
   void *inputs_binbuf[inputs_num];
   int inputs_size[inputs_num];
@@ -125,23 +148,41 @@ int main(int argc, const char **argv) {
     memcpy(input_data, inputs_binbuf[i], inputs_size[i]);
   }
 
+  if (argc >= 4) {
+    int loop_count = atoi(argv[3]);
+    printf("\nloop count: %d\n", loop_count);
+    uint64_t start_time = GetTimeUs();
+    for (int i = 0; i < loop_count; ++i) {
+      ret = session->RunGraph();
+      if (ret != lite::RET_OK) {
+        return lite::RET_ERROR;
+      }
+    }
+    uint64_t end_time = GetTimeUs();
+    float
total_time = (float)(end_time - start_time) / 1000.0f; + printf("total time: %.5fms, per time: %.5fms\n", total_time, total_time / loop_count); + } ret = session->RunGraph(); if (ret != lite::RET_OK) { return lite::RET_ERROR; } - auto outputs = session->GetOutputs(); - std::cout << "output size: " << outputs.size() << std::endl; - for (const auto &item : outputs) { - auto output = item.second; + Vector outputs_name = session->GetOutputTensorNames(); + printf("\noutputs: \n"); + for (const auto &name : outputs_name) { + auto output = session->GetOutputByTensorName(name); TensorToString(output); } - - std::cout << "run benchmark success" << std::endl; + printf("========run success=======\n"); delete session; + session = nullptr; + if (context != nullptr) { + delete context; + context = nullptr; + } for (size_t i = 0; i < inputs_num; ++i) { free(inputs_binbuf[i]); + inputs_binbuf[i] = nullptr; } return lite::RET_OK; } - diff --git a/mindspore/lite/micro/example/mnist_x86/mnist.sh b/mindspore/lite/micro/example/mnist_x86/mnist.sh index 2c2960a99c..33cb6b9091 100644 --- a/mindspore/lite/micro/example/mnist_x86/mnist.sh +++ b/mindspore/lite/micro/example/mnist_x86/mnist.sh @@ -27,7 +27,7 @@ do done BASEPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" -MINDSPORE_ROOT_DIR=${${BASEPATH}%%/mindspore/lite/micro/example/mnist_x86} +MINDSPORE_ROOT_DIR=${BASEPATH%%/mindspore/lite/micro/example/mnist_x86} echo "current dir is: ${BASEPATH}" diff --git a/mindspore/lite/micro/example/mnist_x86/src/CMakeLists.txt b/mindspore/lite/micro/example/mnist_x86/src/CMakeLists.txt index 30cab63b3f..cae94664a6 100644 --- a/mindspore/lite/micro/example/mnist_x86/src/CMakeLists.txt +++ b/mindspore/lite/micro/example/mnist_x86/src/CMakeLists.txt @@ -1,5 +1,4 @@ - cmake_minimum_required(VERSION 3.14) project(net) @@ -16,7 +15,8 @@ set(HEADER_PATH ${PKG_PATH}/inference) message("operator lib path: ${OP_LIB}") message("operator header path: ${OP_HEADER_PATH}") -include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include) +add_compile_definitions(NOT_USE_STL) + include_directories(${OP_HEADER_PATH}) include_directories(${HEADER_PATH}) @@ -43,15 +43,19 @@ endif() set(CMAKE_C_FLAGS "${CMAKE_ENABLE_C99} ${CMAKE_C_FLAGS}") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17") if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug") + message(STATUS "build net library with debug info") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDebug -g") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDebug -g") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default") else() + message(STATUS "build net library release version") set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \ -Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}") set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \ -Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}") + string(REPLACE "-g" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") + string(REPLACE "-g" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") endif() function(create_library) @@ -80,4 +84,3 @@ function(create_library) endfunction(create_library) string(CONCAT library_name "lib" net ".a") create_library() - diff --git a/mindspore/lite/micro/example/mnist_x86/src/model.h b/mindspore/lite/micro/example/mnist_x86/src/model.h new file mode 100644 index 0000000000..838cbbbc90 --- /dev/null +++ 
b/mindspore/lite/micro/example/mnist_x86/src/model.h @@ -0,0 +1,65 @@ + +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_LIBRARY_SOURCE_MODEL_H_ +#define MINDSPORE_LITE_LIBRARY_SOURCE_MODEL_H_ + +#include "include/model.h" +#include "session.h" +#include +#include + +namespace mindspore::lite { +class MModel : public Model { + public: + void Free() override { + if (this->buf != nullptr) { + free(this->buf); + this->buf = nullptr; + this->buf_size_ = 0; + } + } + + void Destroy() override { Free(); } + + ~MModel() override { Destroy(); } + + void set_buf_size(size_t size) { buf_size_ = size; } + size_t buf_size() const { return buf_size_; } + + private: + size_t buf_size_{0}; +}; + +Model *Model::Import(const char *model_buf, size_t size) { + MS_NULLPTR_IF_NULL(model_buf); + MModel *model = new (std::nothrow) MModel(); + MS_NULLPTR_IF_NULL(model); + if (size == 0) { + delete model; + return nullptr; + } + model->buf = reinterpret_cast(malloc(size)); + if (model->buf == nullptr) { + delete model; + return nullptr; + } + memcpy(model->buf, model_buf, size); + model->set_buf_size(size); + return model; +} +} // namespace mindspore::lite +#endif // MINDSPORE_LITE_LIBRARY_SOURCE_MODEL_H_ diff --git a/mindspore/lite/micro/example/mnist_x86/src/net.c b/mindspore/lite/micro/example/mnist_x86/src/net.c index ae15c50fa4..b128efafff 100644 --- a/mindspore/lite/micro/example/mnist_x86/src/net.c +++ b/mindspore/lite/micro/example/mnist_x86/src/net.c @@ -61,7 +61,6 @@ void FreeResource() { } } void Inference() { - const int g_thread_num = 1; { DoQuantizeFp32ToInt8((float *)(g_Input0), (int8_t *)(g_Buffer+0), 0.007874015718698501587, 0, 784, false); } @@ -71,15 +70,15 @@ memset((int16_t *)(g_Buffer+12976), 0, 256); memset((int *)(g_Buffer+13232), 0, 6144); memset((int8_t *)(g_Buffer+19376), 0, 8112); memset((int16_t *)(g_Buffer+27488), 0, 12544); -static QuantArg conv_param__quant_arg_in[1] = {{0.007874015718698501587, 0}}; -static QuantArg conv_param__quant_arg_w[12] = {{0.003238174133002758026, -6}, {0.003890725085511803627, -8}, {0.003394871251657605171, -7}, {0.001685356837697327137, -127}, {0.004322394262999296188, 1}, {0.002274985425174236298, -56}, {0.003617759561166167259, 17}, {0.004447745624929666519, 23}, {0.004683905746787786484, 26}, {0.004021023400127887726, 24}, {0.005650237202644348145, 11}, {0.001966834301128983498, -84}}; -static QuantArg conv_param__quant_arg_out[1] = {{0.01778890006244182587, 0}}; -static double conv_param__real_multiplier[12] = {0.001433333970799530351, 0.001722176774828924938, 0.00150269379968211614, 0.0007460003866156953226, 0.001913249346122961134, 0.001006991503636309139, 0.001601352314486244018, 0.001968734305210294733, 0.002073267527210802957, 0.00177985160945266568, 0.002501001060249878095, 0.0008705926067589928779}; -static int conv_param__left_shift[12] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; -static int conv_param__right_shift[12] = {-9, -9, -9, -10, -9, -9, -9, -8, -8, 
-9, -8, -10}; -static int conv_param__quant_multiplier[12] = {1575967367, 1893553389, 1652229306, 1640472199, 2103639903, 1107198867, 1760705490, 1082323130, 1139790877, 1956967540, 1374939873, 1914453388}; -static int conv_param__out_act_min[1] = {0}; -static int conv_param__out_act_max[1] = {127}; +QuantArg conv_param__quant_arg_in[1] = {{0.007874015718698501587, 0}}; +QuantArg conv_param__quant_arg_w[12] = {{0.003238174133002758026, -6}, {0.003890725085511803627, -8}, {0.003394871251657605171, -7}, {0.001685356837697327137, -127}, {0.004322394262999296188, 1}, {0.002274985425174236298, -56}, {0.003617759561166167259, 17}, {0.004447745624929666519, 23}, {0.004683905746787786484, 26}, {0.004021023400127887726, 24}, {0.005650237202644348145, 11}, {0.001966834301128983498, -84}}; +QuantArg conv_param__quant_arg_out[1] = {{0.01778890006244182587, 0}}; +double conv_param__real_multiplier[12] = {0.001433333970799530351, 0.001722176774828924938, 0.00150269379968211614, 0.0007460003866156953226, 0.001913249346122961134, 0.001006991503636309139, 0.001601352314486244018, 0.001968734305210294733, 0.002073267527210802957, 0.00177985160945266568, 0.002501001060249878095, 0.0008705926067589928779}; +int conv_param__left_shift[12] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +int conv_param__right_shift[12] = {-9, -9, -9, -10, -9, -9, -9, -8, -8, -9, -8, -10}; +int conv_param__quant_multiplier[12] = {1575967367, 1893553389, 1652229306, 1640472199, 2103639903, 1107198867, 1760705490, 1082323130, 1139790877, 1956967540, 1374939873, 1914453388}; +int conv_param__out_act_min[1] = {0}; +int conv_param__out_act_max[1] = {127}; ConvQuantArg conv_param__conv_quant_arg = {(RoundingMode)(1), 2, conv_param__quant_arg_in, conv_param__quant_arg_w, conv_param__quant_arg_out, conv_param__real_multiplier, conv_param__left_shift, conv_param__right_shift, conv_param__quant_multiplier, conv_param__out_act_min, conv_param__out_act_max, 1, 12, 1, 2}; int thread_num = MSMIN(g_thread_num, 26); ConvParameter conv_param_ = {{ "", 35, g_thread_num}, conv_param__conv_quant_arg, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 28, 28, 1, 1, 26, 26, 12, thread_num, 0, 0, (PadMode)(2), (ActType)(1), 0, 0, 0}; @@ -100,15 +99,15 @@ memset((int16_t *)(g_Buffer+15024), 0, 256); memset((int *)(g_Buffer+15280), 0, 6144); memset((int8_t *)(g_Buffer+21424), 0, 1452); memset((int16_t *)(g_Buffer+22876), 0, 5408); -static QuantArg conv_param__quant_arg_in[1] = {{0.01778890006244182587, 0}}; -static QuantArg conv_param__quant_arg_w[12] = {{0.005374609492719173431, 33}, {0.005837683100253343582, 22}, {0.004709810949862003326, -15}, {0.003726204857230186462, 27}, {0.00318551529198884964, -8}, {0.003453079145401716232, 50}, {0.004045850131660699844, -9}, {0.003903790842741727829, 30}, {0.004003710579127073288, -10}, {0.00560879148542881012, 27}, {0.005486610345542430878, -23}, {0.003554018214344978333, 4}}; -static QuantArg conv_param__quant_arg_out[1] = {{0.07183934003114700317, 0}}; -static double conv_param__real_multiplier[12] = {0.001330863973520378732, 0.001445530533608141606, 0.001166246148374064893, 0.0009226850783705293785, 0.0007887991893445710223, 0.0008550534992628172192, 0.001001835847923064193, 0.0009666590447744700769, 0.0009914011740411567478, 0.001388852288199173826, 0.00135859773990280961, 0.0008800481219728497088}; -static int conv_param__left_shift[12] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; -static int conv_param__right_shift[12] = {-9, -9, -9, -10, -10, -10, -9, -10, -9, -9, -9, -10}; -static int conv_param__quant_multiplier[12] = 
{1463300414, 1589377630, 1282301201, 2029005945, 1734587761, 1880282530, 1101530164, 2125705720, 1090057119, 1527059240, 1493794012, 1935246286}; -static int conv_param__out_act_min[1] = {0}; -static int conv_param__out_act_max[1] = {127}; +QuantArg conv_param__quant_arg_in[1] = {{0.01778890006244182587, 0}}; +QuantArg conv_param__quant_arg_w[12] = {{0.005374609492719173431, 33}, {0.005837683100253343582, 22}, {0.004709810949862003326, -15}, {0.003726204857230186462, 27}, {0.00318551529198884964, -8}, {0.003453079145401716232, 50}, {0.004045850131660699844, -9}, {0.003903790842741727829, 30}, {0.004003710579127073288, -10}, {0.00560879148542881012, 27}, {0.005486610345542430878, -23}, {0.003554018214344978333, 4}}; +QuantArg conv_param__quant_arg_out[1] = {{0.07183934003114700317, 0}}; +double conv_param__real_multiplier[12] = {0.001330863973520378732, 0.001445530533608141606, 0.001166246148374064893, 0.0009226850783705293785, 0.0007887991893445710223, 0.0008550534992628172192, 0.001001835847923064193, 0.0009666590447744700769, 0.0009914011740411567478, 0.001388852288199173826, 0.00135859773990280961, 0.0008800481219728497088}; +int conv_param__left_shift[12] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +int conv_param__right_shift[12] = {-9, -9, -9, -10, -10, -10, -9, -10, -9, -9, -9, -10}; +int conv_param__quant_multiplier[12] = {1463300414, 1589377630, 1282301201, 2029005945, 1734587761, 1880282530, 1101530164, 2125705720, 1090057119, 1527059240, 1493794012, 1935246286}; +int conv_param__out_act_min[1] = {0}; +int conv_param__out_act_max[1] = {127}; ConvQuantArg conv_param__conv_quant_arg = {(RoundingMode)(1), 2, conv_param__quant_arg_in, conv_param__quant_arg_w, conv_param__quant_arg_out, conv_param__real_multiplier, conv_param__left_shift, conv_param__right_shift, conv_param__quant_multiplier, conv_param__out_act_min, conv_param__out_act_max, 1, 12, 1, 2}; int thread_num = MSMIN(g_thread_num, 11); ConvParameter conv_param_ = {{ "", 35, g_thread_num}, conv_param__conv_quant_arg, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 13, 13, 12, 1, 11, 11, 12, thread_num, 0, 0, (PadMode)(2), (ActType)(1), 0, 0, 0}; @@ -131,11 +130,11 @@ Int8Reshape((int8_t *)(g_Buffer+1456), (int8_t *)(g_Buffer+0), 300, reshape_quan int32_t tmp_weight_zp = 1; RowMajor2Row16x4MajorInt8((int8_t *)(g_Buffer+0)+0, (int8_t *)(g_Buffer+10928), 1, 300); CalcInputSums((int8_t *)(g_Buffer+0)+0, 1, 300, tmp_weight_zp, (int *)(g_Buffer+12144), RowMajor); -static float filter_scale[20] = {0.003479549195617437363, 0.004490676335990428925, 0.004529818892478942871, 0.002983231563121080399, 0.003455155529081821442, 0.003223794745281338692, 0.003272445406764745712, 0.003801185870543122292, 0.003679843153804540634, 0.003040234791114926338, 0.003704284550622105598, 0.003355232765898108482, 0.002904496388509869576, 0.003024494973942637444, 0.002794801956042647362, 0.004355110693722963333, 0.003499472280964255333, 0.004184196703135967255, 0.003057289868593215942, 0.003264668164774775505}; -static int filter_zp[20] = {1, 12, 3, 2, -10, -5, -11, 5, 12, 22, 16, 1, -5, 15, 13, 5, -10, -5, -6, 0}; -static int left_shift[20] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; -static int right_shift[20] = {-10, -9, -9, -10, -10, -10, -10, -9, -9, -10, -9, -10, -10, -10, -10, -9, -10, -9, -10, -10}; -static int multiplier[20] = {2108215049, 1360422072, 1372280070, 1807502393, 2093435146, 1953256619, 1982733521, 1151545365, 1114785262, 1842040025, 1122189669, 2032893316, 1759797843, 1832503464, 1693335354, 1319353429, 2120286176, 
1267576078, 1852373503, 1978021333}; +float filter_scale[20] = {0.003479549195617437363, 0.004490676335990428925, 0.004529818892478942871, 0.002983231563121080399, 0.003455155529081821442, 0.003223794745281338692, 0.003272445406764745712, 0.003801185870543122292, 0.003679843153804540634, 0.003040234791114926338, 0.003704284550622105598, 0.003355232765898108482, 0.002904496388509869576, 0.003024494973942637444, 0.002794801956042647362, 0.004355110693722963333, 0.003499472280964255333, 0.004184196703135967255, 0.003057289868593215942, 0.003264668164774775505}; +int filter_zp[20] = {1, 12, 3, 2, -10, -5, -11, 5, 12, 22, 16, 1, -5, 15, 13, 5, -10, -5, -6, 0}; +int left_shift[20] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +int right_shift[20] = {-10, -9, -9, -10, -10, -10, -10, -9, -9, -10, -9, -10, -10, -10, -10, -9, -10, -9, -10, -10}; +int multiplier[20] = {2108215049, 1360422072, 1372280070, 1807502393, 2093435146, 1953256619, 1982733521, 1151545365, 1114785262, 1842040025, 1122189669, 2032893316, 1759797843, 1832503464, 1693335354, 1319353429, 2120286176, 1267576078, 1852373503, 1978021333}; const MatmulQuantParameter matmul_quant_parameter = {{0.07136065512895584106, 0}, {0, 0}, {0.258998185396194458, 0}, -128, 127, filter_scale, filter_zp, left_shift, right_shift, multiplier}; int32_t *cur_left = matmul_quant_parameter.left_shift_ + 0; int32_t *cur_right = matmul_quant_parameter.right_shift_ + 0; @@ -147,11 +146,11 @@ MatmulInt8Opt((int8_t *)(g_Buffer+10928), g_Weight15+0 + 0, (int8_t *)(g_Buffer+ int32_t tmp_weight_zp = 1; RowMajor2Row16x4MajorInt8((int8_t *)(g_Buffer+304)+0, (int8_t *)(g_Buffer+10928), 1, 20); CalcInputSums((int8_t *)(g_Buffer+304)+0, 1, 20, tmp_weight_zp, (int *)(g_Buffer+11056), RowMajor); -static float filter_scale[10] = {0.004678330849856138229, 0.005127115640789270401, 0.00471437256783246994, 0.004531511571258306503, 0.005476122256368398666, 0.004348111804574728012, 0.004803542047739028931, 0.006081215571612119675, 0.004532597027719020844, 0.004762654658406972885}; -static int filter_zp[10] = {7, -2, 9, 2, -6, 21, 16, 10, -19, 8}; -static int left_shift[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; -static int right_shift[10] = {-8, -8, -8, -8, -8, -8, -8, -8, -8, -8}; -static int multiplier[10] = {1242805482, 1362025788, 1252380041, 1203802750, 1454739904, 1155082292, 1276068015, 1615483838, 1204091115, 1265206260}; +float filter_scale[10] = {0.004678330849856138229, 0.005127115640789270401, 0.00471437256783246994, 0.004531511571258306503, 0.005476122256368398666, 0.004348111804574728012, 0.004803542047739028931, 0.006081215571612119675, 0.004532597027719020844, 0.004762654658406972885}; +int filter_zp[10] = {7, -2, 9, 2, -6, 21, 16, 10, -19, 8}; +int left_shift[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +int right_shift[10] = {-8, -8, -8, -8, -8, -8, -8, -8, -8, -8}; +int multiplier[10] = {1242805482, 1362025788, 1252380041, 1203802750, 1454739904, 1155082292, 1276068015, 1615483838, 1204091115, 1265206260}; const MatmulQuantParameter matmul_quant_parameter = {{0.258998185396194458, 0}, {0, 0}, {0.5359870791435241699, 0}, -128, 127, filter_scale, filter_zp, left_shift, right_shift, multiplier}; int32_t *cur_left = matmul_quant_parameter.left_shift_ + 0; int32_t *cur_right = matmul_quant_parameter.right_shift_ + 0; diff --git a/mindspore/lite/micro/example/mnist_x86/src/net.cmake b/mindspore/lite/micro/example/mnist_x86/src/net.cmake index dd8a2ea236..ca68e45c83 100644 --- a/mindspore/lite/micro/example/mnist_x86/src/net.cmake +++ 
b/mindspore/lite/micro/example/mnist_x86/src/net.cmake
@@ -17,6 +17,7 @@ set(OP_SRC
     net.c.o
     session.cc.o
     tensor.cc.o
+    string.cc.o
 )
 file(GLOB NET_SRC
     ${CMAKE_CURRENT_SOURCE_DIR}/*.cc
diff --git a/mindspore/lite/micro/example/mnist_x86/src/session.cc b/mindspore/lite/micro/example/mnist_x86/src/session.cc
index 6ecb582244..3bb3b7f03a 100644
--- a/mindspore/lite/micro/example/mnist_x86/src/session.cc
+++ b/mindspore/lite/micro/example/mnist_x86/src/session.cc
@@ -16,21 +16,31 @@
  */
 
 #include "session.h"
+#include "model.h"
 #include "net.h"
+#include
 
 namespace mindspore {
 namespace lite {
 int LiteSession::CompileGraph(lite::Model *model) {
   inputs_.resize(1);
-  inputs_[0] = new (std::nothrow) MTensor("graph_input-0", kNumberTypeFloat32, {1, 28, 28, 1, });
+  Vector<int> in_shape_0;
+  in_shape_0.resize(4);
+  in_shape_0[0] = 1;
+  in_shape_0[1] = 28;
+  in_shape_0[2] = 28;
+  in_shape_0[3] = 1;
+  inputs_[0] = new (std::nothrow) MTensor(String("graph_input-0"), kNumberTypeFloat32, in_shape_0);
   MS_ERROR_IF_NULL(inputs_[0]);
   outputs_.resize(1);
-  outputs_[0] = new (std::nothrow) MTensor("Softmax-7", kNumberTypeFloat32, {1, 10, });
+  Vector<int> out_shape_0;
+  out_shape_0.resize(2);
+  out_shape_0[0] = 1;
+  out_shape_0[1] = 10;
+  outputs_[0] = new (std::nothrow) MTensor(String("Softmax-7"), kNumberTypeFloat32, out_shape_0);
   MS_ERROR_IF_NULL(outputs_[0]);
-  for (const auto &output: outputs_) {
-    output_tensor_map_[output->tensor_name()] = output;
-  }
-  return RET_OK;
+  int ret = Init(model->buf, dynamic_cast<MModel *>(model)->buf_size());
+  return ret;
 }
@@ -65,8 +75,7 @@ LiteSession::~LiteSession() {
     delete input;
     input = nullptr;
   }
-  for (auto &item : output_tensor_map_) {
-    auto output = item.second;
+  for (auto &output : outputs_) {
     if (output == nullptr) {
       continue;
     }
@@ -88,69 +97,53 @@ int LiteSession::InitRuntimeBuffer() {
   return RET_OK;
 }
 
-std::vector<tensor::MSTensor *> LiteSession::GetInputs() const {
-  std::vector<tensor::MSTensor *> inputs;
-  inputs.insert(inputs.begin(), inputs_.begin(), inputs_.end());
-  return inputs;
-}
-
-std::vector<tensor::MSTensor *> LiteSession::GetOutputsByNodeName(const std::string &node_name) const {
-  auto iter = output_node_map_.find(node_name);
-  if (iter == output_node_map_.end()) {
-    std::vector<tensor::MSTensor *> empty;
-    return empty;
+Vector<tensor::MSTensor *> LiteSession::GetInputs() const {
+  Vector<tensor::MSTensor *> inputs;
+  for (const auto &input : inputs_) {
+    inputs.push_back(input);
   }
-  return iter->second;
+  return inputs;
 }
 
-std::unordered_map<std::string, mindspore::tensor::MSTensor *> LiteSession::GetOutputs() const {
-  return output_tensor_map_;
+Vector<tensor::MSTensor *> LiteSession::GetOutputsByNodeName(const String &node_name) const {
+  Vector<tensor::MSTensor *> outputs;
+  return outputs;
 }
 
-std::vector<std::string> LiteSession::GetOutputTensorNames() const {
-  std::vector<std::string> output_names;
-  for (const auto &item : output_node_map_) {
-    for (const auto &output : item.second) {
-      output_names.emplace_back(output->tensor_name());
-    }
+Vector<String> LiteSession::GetOutputTensorNames() const {
+  Vector<String> output_names;
+  for (const auto &output : outputs_) {
+    output_names.push_back(output->tensor_name());
   }
   return output_names;
 }
 
-mindspore::tensor::MSTensor *LiteSession::GetOutputByTensorName(const std::string &tensor_name) const {
-  auto item = output_tensor_map_.find(tensor_name);
-  if (item == output_tensor_map_.end()) {
-    return nullptr;
+mindspore::tensor::MSTensor *LiteSession::GetOutputByTensorName(const String &tensor_name) const {
+  for (const auto &output : outputs_) {
+    if (output->tensor_name() == tensor_name) {
+      return output;
+    }
   }
-  return item->second;
-}
-
-int LiteSession::Resize(const std::vector<tensor::MSTensor *> &inputs, const std::vector<std::vector<int>> &dims) {
-  return RET_OK;
+  return
nullptr;
 }
 }  // namespace lite
-
 session::LiteSession *session::LiteSession::CreateSession(const lite::Context *context) {
   auto *session = new (std::nothrow) lite::LiteSession();
-  if (session == nullptr) {
-    return nullptr;
-  }
-  session->InitRuntimeBuffer();
+  MS_NULLPTR_IF_NULL(session);
+  int ret = session->InitRuntimeBuffer();
+  MS_NULLPTR_IF_ERROR(ret);
   return session;
 }
 
-session::LiteSession *session::LiteSession::CreateSession(const char *net_buf, size_t size,
+session::LiteSession *session::LiteSession::CreateSession(const char *model_buf, size_t size,
                                                            const lite::Context *context) {
   session::LiteSession *session = CreateSession(context);
-  if (session == nullptr) {
-    return nullptr;
-  }
-  int ret = session->CompileGraph(nullptr);
-  if (ret != lite::RET_OK) {
-    return nullptr;
-  }
-  Init(const_cast<char *>(net_buf), size);
+  MS_NULLPTR_IF_NULL(session);
+  lite::Model *model = lite::Model::Import(model_buf, size);
+  int ret = session->CompileGraph(model);
+  MS_NULLPTR_IF_ERROR(ret);
+  delete model;
   return session;
 }
 }  // namespace mindspore
diff --git a/mindspore/lite/micro/example/mnist_x86/src/session.h b/mindspore/lite/micro/example/mnist_x86/src/session.h
index 161f594a17..6a4e628140 100644
--- a/mindspore/lite/micro/example/mnist_x86/src/session.h
+++ b/mindspore/lite/micro/example/mnist_x86/src/session.h
@@ -33,6 +33,20 @@ namespace lite {
     }                     \
   } while (0)
 
+#define MS_NULLPTR_IF_NULL(ptr) \
+  do {                          \
+    if ((ptr) == nullptr) {     \
+      return nullptr;           \
+    }                           \
+  } while (0)
+
+#define MS_NULLPTR_IF_ERROR(ptr) \
+  do {                           \
+    if ((ptr) != mindspore::lite::RET_OK) { \
+      return nullptr;            \
+    }                            \
+  } while (0)
+
 class LiteSession : public session::LiteSession {
  public:
   LiteSession() = default;
@@ -43,31 +57,25 @@ class LiteSession : public session::LiteSession {
 
   int CompileGraph(lite::Model *model) override;
 
-  std::vector<tensor::MSTensor *> GetInputs() const override;
+  Vector<tensor::MSTensor *> GetInputs() const override;
 
-  mindspore::tensor::MSTensor *GetInputsByTensorName(const std::string &tensor_name) const override { return nullptr; }
+  mindspore::tensor::MSTensor *GetInputsByTensorName(const String &tensor_name) const override { return nullptr; }
 
   int RunGraph(const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr) override;
 
-  std::vector<tensor::MSTensor *> GetOutputsByNodeName(const std::string &node_name) const override;
+  Vector<tensor::MSTensor *> GetOutputsByNodeName(const String &node_name) const override;
 
-  std::unordered_map<std::string, mindspore::tensor::MSTensor *> GetOutputs() const override;
+  Vector<String> GetOutputTensorNames() const override;
 
-  std::vector<std::string> GetOutputTensorNames() const override;
+  mindspore::tensor::MSTensor *GetOutputByTensorName(const String &tensor_name) const override;
 
-  mindspore::tensor::MSTensor *GetOutputByTensorName(const std::string &tensor_name) const override;
-
-  int Resize(const std::vector<tensor::MSTensor *> &inputs, const std::vector<std::vector<int>> &dims) override;
+  int Resize(const Vector<tensor::MSTensor *> &inputs, const Vector<Vector<int>> &dims) override { return RET_ERROR; }
 
   int InitRuntimeBuffer();
 
  private:
-  int SetInputsData(const std::vector &inputs) const;
-  std::vector<MTensor *> inputs_;
-  std::vector<MTensor *> outputs_;
-  std::unordered_map<std::string, mindspore::tensor::MSTensor *> output_tensor_map_;
-  std::unordered_map<std::string, std::vector<mindspore::tensor::MSTensor *>> output_node_map_;
-
+  Vector<MTensor *> inputs_;
+  Vector<MTensor *> outputs_;
   void *runtime_buffer_;
 };
 
@@ -75,4 +83,3 @@ class LiteSession : public session::LiteSession {
 }  // namespace mindspore
 
 #endif  // MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_SESSION_H_
-
diff --git a/mindspore/lite/micro/example/mnist_x86/src/string.cc b/mindspore/lite/micro/example/mnist_x86/src/string.cc
new file mode 100644
index 0000000000..c0b0359d23
--- /dev/null
+++ 
b/mindspore/lite/micro/example/mnist_x86/src/string.cc @@ -0,0 +1,306 @@ + + +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifdef NOT_USE_STL +#include +#include +#include +#include +#include +#include "include/lite_utils.h" + +namespace mindspore { +String::String() { + buffer_ = reinterpret_cast(malloc(sizeof(char) * 1)); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + buffer_[0] = '\0'; + size_ = 0; +} + +String::String(size_t count, char ch) { + buffer_ = reinterpret_cast(malloc(sizeof(char) * (count + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memset(buffer_, ch, count); + buffer_[count] = '\0'; + size_ = count; +} +String::String(const char *s, size_t count) { + if (s == nullptr) { + buffer_ = reinterpret_cast(malloc(sizeof(char) * 1)); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + buffer_[0] = '\0'; + size_ = 0; + return; + } + size_t size_s = strlen(s); + if (size_s <= count) { + size_ = size_s; + } else { + size_ = count; + } + buffer_ = reinterpret_cast(malloc(sizeof(char) * (size_ + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + strncpy(buffer_, s, size_); + buffer_[size_] = '\0'; +} + +String::String(const char *s) { + if (s == nullptr) { + buffer_ = reinterpret_cast(malloc(sizeof(char) * 1)); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + buffer_[0] = '\0'; + size_ = 0; + return; + } + size_ = strlen(s); + buffer_ = reinterpret_cast(malloc(sizeof(char) * (size_ + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memcpy(buffer_, s, size_ + 1); +} + +String::String(const String &other) { + buffer_ = reinterpret_cast(malloc(sizeof(char) * (other.size_ + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + size_ = other.size_; + memcpy(buffer_, other.buffer_, size_ + 1); +} + +String::String(const String &other, size_t pos, size_t count) { + if (pos >= other.size_) { + buffer_ = reinterpret_cast(malloc(sizeof(char) * 1)); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + buffer_[0] = '\0'; + size_ = 0; + } else { + if (count == npos) { + count = other.size_ - pos; + } + if (pos + count > other.size_) { + size_ = other.size_ - pos; + } else { + size_ = count; + } + buffer_ = reinterpret_cast(malloc(sizeof(char) * (size_ + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + strncpy(buffer_, other.buffer_ + pos, size_); + buffer_[size_] = '\0'; + } +} + +String::~String() { free(buffer_); } + +String &String::operator=(const String &str) { + if (this == &str) { + return *this; + } + free(buffer_); + buffer_ = reinterpret_cast(malloc(sizeof(char) * (str.size_ + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + size_ = str.size_; + memcpy(buffer_, str.buffer_, size_ + 1); + return *this; +} + +String &String::operator=(const char 
*str) { + free(buffer_); + if (str == nullptr) { + buffer_ = reinterpret_cast(malloc(sizeof(char) * 1)); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + buffer_[0] = '\0'; + size_ = 0; + return *this; + } + size_t size_s = strlen(str); + buffer_ = reinterpret_cast(malloc(sizeof(char) * (size_s + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + size_ = size_s; + memcpy(buffer_, str, size_ + 1); + return *this; +} + +char &String::at(size_t pos) { + if (pos >= size_) { + MS_C_EXCEPTION("pos out of range"); + } + return buffer_[pos]; +} +const char &String::at(size_t pos) const { + if (pos >= size_) { + MS_C_EXCEPTION("pos out of range"); + } + return buffer_[pos]; +} +char &String::operator[](size_t pos) { + if (pos >= size_) { + MS_C_EXCEPTION("pos out of range"); + } + return this->at(pos); +} +const char &String::operator[](size_t pos) const { + if (pos >= size_) { + MS_C_EXCEPTION("pos out of range"); + } + return this->at(pos); +} +char *String::data() noexcept { return buffer_; }; +const char *String::data() const noexcept { return buffer_; } +const char *String::c_str() const noexcept { return buffer_; } + +// capacity +bool String::empty() const noexcept { return size_ == 0; } +size_t String::size() const noexcept { return size_; } +size_t String::length() const noexcept { return size_; } + +// operations +void String::clear() noexcept { + free(buffer_); + buffer_ = reinterpret_cast(malloc(sizeof(char) * 1)); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + buffer_[0] = '\0'; + size_ = 0; +} + +String &String::operator+(const String &str) { + (*this) += str; + return *this; +} + +String &String::operator+=(const String &str) { + size_t new_size = size_ + str.size_; + char *tmp = reinterpret_cast(malloc(sizeof(char) * (new_size + 1))); + if (tmp == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memcpy(tmp, this->buffer_, size_ + 1); + strncat(tmp, str.buffer_, str.size_); + tmp[new_size] = '\0'; + free(buffer_); + buffer_ = tmp; + size_ = new_size; + return *this; +} + +String &String::operator+=(const char *str) { + if (str == nullptr) { + return *this; + } + size_t str_size = strlen(str); + size_t new_size = size_ + str_size; + char *tmp = reinterpret_cast(malloc(sizeof(char) * (new_size + 1))); + if (tmp == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memcpy(tmp, this->buffer_, size_ + 1); + strncat(tmp, str, str_size); + tmp[new_size] = '\0'; + free(buffer_); + buffer_ = tmp; + size_ = new_size; + return *this; +} + +String &String::operator+=(const char ch) { + char *tmp = reinterpret_cast(malloc(sizeof(char) * (size_ + 2))); + if (tmp == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memcpy(tmp, this->buffer_, size_ + 1); + tmp[size_] = ch; + tmp[size_ + 1] = '\0'; + free(buffer_); + buffer_ = tmp; + size_ += 1; + return *this; +} + +String &String::append(size_t count, const char ch) { + (*this) += ch; + return *this; +} +String &String::append(const String &str) { + (*this) += str; + return *this; +} +String &String::append(const char *str) { + if (str == nullptr) { + return *this; + } + (*this) += str; + return *this; +} + +int String::compare(const String &str) const { return strcmp(buffer_, str.buffer_); } +int String::compare(const char *str) const { return strcmp(buffer_, str); } + +String String::substr(size_t pos, size_t count) const { return String(*this, pos, count); } + +String operator+(const String &lhs, const char *rhs) { + String str = lhs; + str += 
rhs;
+  return str;
+}
+
+String operator+(const char *lhs, const String &rhs) {
+  String str = rhs;
+  str += lhs;
+  return str;
+}
+
+bool operator==(const String &lhs, const String &rhs) { return lhs.compare(rhs) == 0; }
+bool operator==(const String &lhs, const char *rhs) { return lhs.compare(rhs) == 0; }
+bool operator==(const char *lhs, const String &rhs) { return rhs.compare(lhs) == 0; }
+
+String to_String(int32_t value) {
+  char tmp[sizeof(int32_t) * 4];
+  snprintf(tmp, sizeof(int32_t) * 4, "%d", value);
+  return String(tmp, strlen(tmp));
+}
+
+String to_String(float value) {
+  char tmp[FLT_MAX_10_EXP + 20];
+  snprintf(tmp, FLT_MAX_10_EXP + 20, "%f", value);
+  return String(tmp, strlen(tmp));
+}
+}  // namespace mindspore
+#endif  // NOT_USE_STL
diff --git a/mindspore/lite/micro/example/mnist_x86/src/tensor.cc b/mindspore/lite/micro/example/mnist_x86/src/tensor.cc
index debe6edf94..6daa17242a 100644
--- a/mindspore/lite/micro/example/mnist_x86/src/tensor.cc
+++ b/mindspore/lite/micro/example/mnist_x86/src/tensor.cc
@@ -1,5 +1,4 @@
-
 /**
  * Copyright 2021 Huawei Technologies Co., Ltd
  *
@@ -61,14 +60,6 @@ MTensor::~MTensor() {
   }
 }
 
-int MTensor::DimensionSize(const size_t index) const {
-  int dim_size = -1;
-  if (index < shape_.size()) {
-    dim_size = shape_[index];
-  }
-  return dim_size;
-}
-
 int MTensor::ElementsNum() const {
   int elements = 1;
   for (int i : shape_) {
@@ -90,4 +81,3 @@ void *MTensor::MutableData() {
   }
 }  // namespace lite
 }  // namespace mindspore
-
diff --git a/mindspore/lite/micro/example/mnist_x86/src/tensor.h b/mindspore/lite/micro/example/mnist_x86/src/tensor.h
index 58f0f8ecf4..6c49a322f2 100644
--- a/mindspore/lite/micro/example/mnist_x86/src/tensor.h
+++ b/mindspore/lite/micro/example/mnist_x86/src/tensor.h
@@ -1,5 +1,4 @@
-
 /**
  * Copyright 2021 Huawei Technologies Co., Ltd
  *
@@ -20,8 +19,6 @@
 #define MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_
 
 #include "include/ms_tensor.h"
-#include
-#include
 
 namespace mindspore {
 namespace lite {
@@ -31,7 +28,7 @@ struct QuantArg {
   float var_corr{1};
   float mean_corr{0};
   bool inited;
-  std::vector<float> clusters{};
+  Vector<float> clusters{};
   int bitNum;
   int roundType;
   int multiplier;
@@ -41,31 +38,29 @@
 class MTensor : public mindspore::tensor::MSTensor {
  public:
   MTensor() = default;
-  MTensor(std::string name, enum TypeId type, std::vector<int> shape)
-      : tensor_name_(std::move(name)), data_type_(type), shape_(std::move(shape)) {}
+  MTensor(String name, TypeId type, Vector<int> shape) : tensor_name_(name), data_type_(type), shape_(shape) {}
   ~MTensor() override;
 
   TypeId data_type() const override { return data_type_; }
-  std::vector<int> shape() const override { return shape_; }
-  int DimensionSize(size_t index) const override;
+  Vector<int> shape() const override { return shape_; }
+  void set_shape(const Vector<int> &shape) override { shape_ = shape; }
   int ElementsNum() const override;
   size_t Size() const override;
+  String tensor_name() const override { return tensor_name_; }
+  void set_tensor_name(const String &name) override { tensor_name_ = name; }
   void *MutableData() override;
-  std::string tensor_name() const override { return tensor_name_; }
-  void set_tensor_name(const std::string name) override { tensor_name_ = name; }
+  void *data() override { return data_; }
   void set_data(void *data) override { data_ = data; }
 
  private:
-  std::string tensor_name_;
+  String tensor_name_;
   TypeId data_type_;
-  std::vector<int> shape_;
+  Vector<int> shape_;
   void *data_ = nullptr;
-  std::vector<QuantArg> quant_params_;
+  Vector<QuantArg> quant_params_;
 };
 }  // namespace lite
 }  // namespace
mindspore #endif // MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_ - - diff --git a/mindspore/lite/micro/example/mnist_x86/src/weight.c b/mindspore/lite/micro/example/mnist_x86/src/weight.c index ca72d9b718..8d97badd6c 100644 --- a/mindspore/lite/micro/example/mnist_x86/src/weight.c +++ b/mindspore/lite/micro/example/mnist_x86/src/weight.c @@ -17,7 +17,8 @@ #include "weight.h" -unsigned char * g_Buffer = 0 ; +int g_thread_num = 1; +unsigned char * g_Buffer = 0; int16_t g_Weight10[1536]; int32_t g_Weight11[12]; int16_t g_Weight12[3072]; @@ -33,7 +34,6 @@ int Init(void *weight_buffer, int weight_size) { if (weight_buffer == NULL) { return RET_ERROR; } - struct ModelParameter { void *addr; size_t size; @@ -74,7 +74,7 @@ if (g_Weight15 == NULL) { return RET_ERROR; } memset(g_Weight15, 0, 6080); -static int init_filter_zp[20] = {1, 12, 3, 2, -10, -5, -11, 5, 12, 22, 16, 1, -5, 15, 13, 5, -10, -5, -6, 0}; +int init_filter_zp[20] = {1, 12, 3, 2, -10, -5, -11, 5, 12, 22, 16, 1, -5, 15, 13, 5, -10, -5, -6, 0}; InitInt8MatrixB(g_Weight6, g_Weight16, g_Weight15, 1, 300, 20, 20, 304, 0, init_filter_zp, g_Weight14, true, true); } { @@ -94,7 +94,7 @@ if (g_Weight18 == NULL) { return RET_ERROR; } memset(g_Weight18, 0, 384); -static int init_filter_zp[10] = {7, -2, 9, 2, -6, 21, 16, 10, -19, 8}; +int init_filter_zp[10] = {7, -2, 9, 2, -6, 21, 16, 10, -19, 8}; InitInt8MatrixB(g_Weight8, g_Weight19, g_Weight18, 1, 20, 10, 12, 32, 0, init_filter_zp, g_Weight17, true, true); } return RET_OK; diff --git a/mindspore/lite/micro/example/mnist_x86/src/weight.h b/mindspore/lite/micro/example/mnist_x86/src/weight.h index 887e4dacae..7be657370a 100644 --- a/mindspore/lite/micro/example/mnist_x86/src/weight.h +++ b/mindspore/lite/micro/example/mnist_x86/src/weight.h @@ -34,6 +34,7 @@ enum STATUS { RET_ERROR = 1, }; +extern int g_thread_num; extern int16_t g_Weight10[]; extern int32_t g_Weight11[]; extern int16_t g_Weight12[];
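Note on running the updated benchmark (a sketch, not part of the patch): assuming the benchmark binary, an input data file, and the exported model weight file produced by mnist.sh (the file names below are placeholders, not taken from this patch), the new argument handling in benchmark.cc would be exercised with an invocation like:

    ./benchmark mnist_input.bin net.bin 100 4 0

Here argv[1] is the input data file, argv[2] the model weight file, argv[3]=100 the loop count timed around RunGraph(), argv[4]=4 the thread number written into lite::Context::thread_num_, and argv[5]=0 the CPU bind mode.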