From: @zoloft  Reviewed-by:  Signed-off-by:  tags/v1.2.0-rc1
| @@ -203,6 +203,9 @@ if(ENABLE_CONVERTER) | |||
| include(${TOP_DIR}/cmake/external_libs/eigen.cmake) | |||
| include(${TOP_DIR}/cmake/external_libs/protobuf.cmake) | |||
| add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/tools/converter) | |||
| if(NOT WIN32) | |||
| add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/micro) | |||
| endif() | |||
| endif() | |||
| if(PLATFORM_ARM32 OR PLATFORM_ARM64) | |||
| @@ -0,0 +1,31 @@ | |||
| cmake_minimum_required(VERSION 3.14) | |||
| project(micro) | |||
| string(REPLACE "/mindspore/lite/micro" "" MINDSPORE_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}) | |||
| include_directories(${CMAKE_BINARY_DIR}) | |||
| if(NOT ENABLE_CONVERTER) | |||
| set(CMAKE_CXX_STANDARD 17) | |||
| include(${MINDSPORE_ROOT_DIR}/cmake/utils.cmake) | |||
| include(${MINDSPORE_ROOT_DIR}/cmake/dependency_utils.cmake) | |||
| include(${MINDSPORE_ROOT_DIR}/cmake/dependency_securec.cmake) | |||
| include(${MINDSPORE_ROOT_DIR}/cmake/external_libs/flatbuffers.cmake) | |||
| set(FBS_FILES | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/../schema/model.fbs | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/../schema/ops.fbs | |||
| ) | |||
| ms_build_flatbuffers_lite(FBS_FILES | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/../schema/ | |||
| fbs_src | |||
| ${CMAKE_BINARY_DIR}/schema | |||
| "" | |||
| ) | |||
| ms_build_flatbuffers_lite(FBS_FILES | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/../schema/ | |||
| fbs_inner_src | |||
| ${CMAKE_BINARY_DIR}/schema/inner | |||
| "inner" | |||
| ) | |||
| endif() | |||
| add_subdirectory(coder) | |||
| @@ -0,0 +1,106 @@ | |||
| if(NOT ENABLE_CONVERTER) | |||
| if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug") | |||
| MESSAGE(" ******Micro Debug********") | |||
| set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDebug -g") | |||
| set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDebug -g") | |||
| set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default") | |||
| set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default") | |||
| else() | |||
| MESSAGE(" ******Micro Release********") | |||
| set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -w -O2 -Werror \ | |||
| -fstack-protector-all -Wno-attributes -Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}") | |||
| set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -w -O2 -Werror \ | |||
| -fstack-protector-all -Wno-attributes -Wno-deprecated-declarations \ | |||
| -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}") | |||
| endif() | |||
| if(ENABLE_ASAN) | |||
| set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address -fsanitize-recover=address -lasan") | |||
| set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=address -fsanitize-recover=address -lasan") | |||
| endif() | |||
| endif() | |||
| set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--gc-sections ") | |||
| set(LITE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../..) | |||
| set(3RD_DIR ${LITE_DIR}/../../third_party) | |||
| set(NNACL_DIR ${LITE_DIR}/nnacl) | |||
| # third-party include directories | |||
| include_directories(${3RD_DIR}) | |||
| include_directories(${3RD_DIR}/flatbuffers/include) | |||
| include_directories(${3RD_DIR}/securec/include/) | |||
| # mindspore include directories | |||
| include_directories(${CMAKE_CURRENT_SOURCE_DIR}) | |||
| include_directories(${MINDSPORE_ROOT_DIR}) | |||
| include_directories(${LITE_DIR}) | |||
| include_directories(${MINDSPORE_ROOT_DIR}/mindspore/core/) | |||
| include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../runtime) | |||
| # coder include directories | |||
| include_directories(${CMAKE_CURRENT_SOURCE_DIR}/) | |||
| include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../) | |||
| include_directories(${CMAKE_CURRENT_SOURCE_DIR}/utils) | |||
| include_directories(${MINDSPORE_ROOT_DIR}/mindspore/lite/src/runtime/kernel/arm) | |||
| set(MS_SRC | |||
| ${MINDSPORE_ROOT_DIR}/mindspore/lite/src/common/log_adapter.cc | |||
| ) | |||
| file(GLOB_RECURSE PRIMITIVE_OP_SRC RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} | |||
| ${LITE_DIR}/src/ops/*.cc | |||
| ) | |||
| set(LITE_COMMON_SRC | |||
| ${PRIMITIVE_OP_SRC} | |||
| ${LITE_DIR}/tools/common/flag_parser.cc | |||
| ${LITE_DIR}/src/common/file_utils.cc | |||
| ${LITE_DIR}/src/common/graph_util.cc | |||
| ${LITE_DIR}/src/common/string_util.cc | |||
| ${LITE_DIR}/src/runtime/allocator.cc | |||
| ${LITE_DIR}/src/lite_model.cc | |||
| ${LITE_DIR}/src/tensorlist.cc | |||
| ${LITE_DIR}/src/tensor.cc | |||
| ) | |||
| file(GLOB_RECURSE MICRO_ALLOCATOR RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} | |||
| allocator/*.cc | |||
| ) | |||
| file(GLOB_RECURSE MICRO_GENERATOR RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} | |||
| generator/*.cc | |||
| ) | |||
| file(GLOB_RECURSE MICRO_UTILS RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} | |||
| utils/*.cc | |||
| ) | |||
| set(MICRO_CODER_SRC | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/coder_context.cc | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/session_coder.cc | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/coder.cc | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/debug.cc | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/coder_graph.cc | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/opcoders/op_coder.cc | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/opcoders/op_coder_register.cc | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/opcoders/op_coder_builder.cc | |||
| ${CMAKE_CURRENT_SOURCE_DIR}/opcoders/file_collector.cc | |||
| ) | |||
| list(APPEND MICRO_CODER_SRC | |||
| ${MICRO_ALLOCATOR} | |||
| ${MICRO_GENERATOR} | |||
| ) | |||
| add_executable(codegen main.cc | |||
| ${MS_SRC} | |||
| ${MICRO_UTILS} | |||
| ${LITE_COMMON_SRC} | |||
| ${MICRO_CODER_SRC} | |||
| ${MICRO_RUNTIME_SRC}) | |||
| add_dependencies(codegen fbs_src) | |||
| add_dependencies(codegen fbs_inner_src) | |||
| target_link_libraries(codegen ${SECUREC_LIBRARY} ${CMAKE_DL_LIBS}) | |||
| @@ -0,0 +1,144 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "coder/allocator/allocator.h" | |||
| #include <string> | |||
| #include <map> | |||
| #include <algorithm> | |||
| #include "coder/allocator/memory_manager.h" | |||
| #include "coder/opcoders/op_coder.h" | |||
| #include "coder/coder_config.h" | |||
| namespace mindspore::lite::micro { | |||
| const std::map<std::type_index, std::pair<TypeId, size_t>> types_map = { | |||
| {std::type_index(typeid(float *)), {kNumberTypeFloat32, sizeof(float)}}, | |||
| {std::type_index(typeid(int *)), {kNumberTypeInt32, sizeof(int)}}, | |||
| {std::type_index(typeid(int32_t *)), {kNumberTypeInt32, sizeof(int32_t)}}, | |||
| {std::type_index(typeid(int16_t *)), {kNumberTypeInt16, sizeof(int16_t)}}, | |||
| {std::type_index(typeid(int8_t *)), {kNumberTypeInt8, sizeof(int8_t)}}, | |||
| }; | |||
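| // Illustrative sketch (assumed module prefix, not part of the patch): with net_weight_addr_ | |||
| // recorded as "net_W", successive MallocWeightTensor calls below hand out the symbols | |||
| // net_W0, net_W1, net_W2, ..., which the generated C source uses as variable names for packed weights. | |||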
| void *MemoryAllocator::MallocWeightTensor(TypeId type_id, size_t size, MallocType type) { | |||
| // typeid(type_id) always yields typeid(TypeId), so types_map.find() on it can never match; | |||
| // look the entry up by its TypeId value instead. | |||
| auto item = std::find_if(types_map.begin(), types_map.end(), | |||
| [type_id](const auto &a) { return a.second.first == type_id; }); | |||
| MS_CHECK_TRUE_RET_NULL(item != types_map.end(), "unsupported type index"); | |||
| TypeId typei = item->second.first; | |||
| size_t type_size = item->second.second; | |||
| std::vector<int> shape = {1, static_cast<int>(size / type_size)}; | |||
| auto cate = type == kOfflinePackWeight ? Tensor::Category::CONST_TENSOR : Tensor::Category::VAR; | |||
| Tensor *weight = new (std::nothrow) lite::Tensor(typei, shape, schema::Format_NHWC, cate); | |||
| MS_CHECK_PTR_RET_NULL(weight); | |||
| std::string runtime_addr = net_weight_addr_ + std::to_string(weight_index_++); | |||
| malloc_weights_addr_.insert(std::make_pair(weight, runtime_addr)); | |||
| if (type == kOfflinePackWeight) { | |||
| saved_weights_addr_.insert(std::make_pair(runtime_addr, weight)); | |||
| } | |||
| MS_CHECK_RET_CODE_RET_NULL(weight->MallocData(), "weight malloc data failed!"); | |||
| return weight->data_c(); | |||
| } | |||
| void MemoryAllocator::RecordRuntimeAddrs(const std::string &net_input_addr, const std::string &net_buffer_addr, | |||
| const std::string &net_weight_addr) { | |||
| net_input_addr_ = net_input_addr; | |||
| net_buffer_addr_ = net_buffer_addr; | |||
| net_weight_addr_ = net_weight_addr; | |||
| } | |||
| void MemoryAllocator::Free() { | |||
| for (auto iter = malloc_weights_addr_.begin(); iter != malloc_weights_addr_.end();) { | |||
| Tensor *tensor = iter->first; | |||
| if (origin_weights_addr_.find(tensor) == origin_weights_addr_.end()) { | |||
| delete tensor; | |||
| malloc_weights_addr_.erase(iter++); | |||
| } else { | |||
| iter++; | |||
| } | |||
| } | |||
| for (auto &item : allocated_) { | |||
| free(item); | |||
| item = nullptr; | |||
| } | |||
| malloc_weights_addr_.clear(); | |||
| allocated_.clear(); | |||
| } | |||
| std::map<Tensor *, std::string> MemoryAllocator::tensors_map() const { | |||
| std::map<Tensor *, std::string> res; | |||
| res.insert(tensors_addr_.begin(), tensors_addr_.end()); | |||
| res.insert(malloc_weights_addr_.begin(), malloc_weights_addr_.end()); | |||
| return res; | |||
| } | |||
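| // Illustrative sketch: workspaces live past the tensor arena. Assuming net_buffer_addr_ == "net_B" | |||
| // and tensors_size_ == 256, two AssignWorkspaces(addr, 64) calls within one op record the strings | |||
| // "net_B+256" and "net_B+320"; offset_ resets per op (via enable_is_next), and workspace_size_ | |||
| // tracks the largest per-op total so that all ops can share one region. | |||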
| void MemoryAllocator::AssignWorkspaces(void *addr, size_t size) { | |||
| if (is_next_) { | |||
| is_next_ = false; | |||
| offset_ = 0; | |||
| } | |||
| workspaces_addr_.insert(std::make_pair(addr, net_buffer_addr_ + "+" + std::to_string(tensors_size_ + offset_))); | |||
| offset_ += size; | |||
| if (workspace_size_ < offset_) { | |||
| workspace_size_ = offset_; | |||
| } | |||
| } | |||
| void MemoryAllocator::RecordTensorsAddr(const std::map<Tensor *, size_t> &offsets) { | |||
| for (auto &item : offsets) { | |||
| auto tensor = item.first; | |||
| auto offset = item.second; | |||
| tensors_addr_.insert(std::make_pair(tensor, net_buffer_addr_ + "+" + std::to_string(offset))); | |||
| } | |||
| } | |||
| void MemoryAllocator::AssignGraphInputs(const std::vector<Tensor *> &inputs) { | |||
| size_t num = inputs.size(); | |||
| for (size_t i = 0; i < num; ++i) { | |||
| inputs_addr_.insert(std::make_pair(inputs.at(i), net_input_addr_ + std::to_string(i))); | |||
| } | |||
| } | |||
| void MemoryAllocator::RecordOriginWeightsAddr(const std::vector<std::unique_ptr<OperatorCoder>> &nodes) { | |||
| for (const auto &node : nodes) { | |||
| std::vector<Tensor *> inputs = node->input_tensors(); | |||
| for (const auto &tensor : inputs) { | |||
| if (tensor->category() == Tensor::Category::CONST_TENSOR) { | |||
| std::string runtime_addr = net_weight_addr_ + std::to_string(weight_index_); | |||
| origin_weights_addr_.insert(std::make_pair(tensor, runtime_addr)); | |||
| weight_index_++; | |||
| } | |||
| } | |||
| } | |||
| } | |||
| int MemoryAllocator::AssignTensors(const std::vector<std::unique_ptr<OperatorCoder>> &nodes) { | |||
| // intended to support multiple memory-assignment algorithms in the future | |||
| auto manager = std::make_unique<MemoryManager>(); | |||
| int ret = manager->AssignMemory(nodes); | |||
| if (ret != RET_OK) { | |||
| MS_LOG(ERROR) << "assign memory failed"; | |||
| return RET_ERROR; | |||
| } | |||
| std::map<Tensor *, size_t> offsets = manager->variables_offset(); | |||
| RecordTensorsAddr(offsets); | |||
| tensors_size_ = manager->GetAllocatedSize(); | |||
| return RET_OK; | |||
| } | |||
| int MemoryAllocator::Assign(const std::vector<Tensor *> &inputs, | |||
| const std::vector<std::unique_ptr<OperatorCoder>> &nodes) { | |||
| AssignGraphInputs(inputs); | |||
| RecordOriginWeightsAddr(nodes); | |||
| return AssignTensors(nodes); | |||
| } | |||
| } // namespace mindspore::lite::micro | |||
| @@ -0,0 +1,182 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_LITE_MICRO_CODER_MEMORY_ALLOCATOR_H_ | |||
| #define MINDSPORE_LITE_MICRO_CODER_MEMORY_ALLOCATOR_H_ | |||
| #include <map> | |||
| #include <vector> | |||
| #include <memory> | |||
| #include <utility> | |||
| #include <string> | |||
| #include <algorithm> | |||
| #include <climits> | |||
| #include "coder/allocator/memory_manager.h" | |||
| #include "coder/log.h" | |||
| #include "coder/utils/print_utils.h" | |||
| #include "src/tensor.h" | |||
| #include "src/common/log_adapter.h" | |||
| namespace mindspore::lite::micro { | |||
| /* | |||
| * kOfflinePackWeight: pack the weight tensor data when the op coder runs Prepare | |||
| * kOnlinePackWeight: pack the weight tensor data in the WeightInit function, before inference | |||
| */ | |||
| enum MallocType { kOfflinePackWeight = 0, kOnlinePackWeight = 1, kWorkspace = 2 }; | |||
| inline std::string wrap(const std::string &a) { return "(" + a + ")"; } | |||
| /* | |||
| * When calling Malloc(size, kOnlinePackWeight), the size is not actually used: any value | |||
| * that is a multiple of sizeof(T) would do. Since a size must still be passed, we define | |||
| * kOnlineSize to avoid a magic number. | |||
| */ | |||
| const int kOnlineSize = 4; | |||
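| // A minimal usage sketch (assumed caller, not part of the patch): | |||
| //   auto *allocator = MemoryAllocator::GetInstance(); | |||
| //   void *packed = allocator->Malloc(kNumberTypeFloat32, kOnlineSize, kOnlinePackWeight); | |||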
| class OperatorCoder; | |||
| class MemoryAllocator { | |||
| public: | |||
| static MemoryAllocator *GetInstance() { | |||
| static MemoryAllocator allocator; | |||
| return &allocator; | |||
| } | |||
| MemoryAllocator(const MemoryAllocator &) = delete; | |||
| MemoryAllocator &operator=(const MemoryAllocator &) = delete; | |||
| /* | |||
| * record the model's runtime address prefixes (input, buffer and weight symbols) | |||
| */ | |||
| void RecordRuntimeAddrs(const std::string &net_input_addr, const std::string &net_buffer_addr, | |||
| const std::string &net_weight_addr); | |||
| /* | |||
| * assign memory addresses for the model's inputs, original weights and all tensors | |||
| */ | |||
| int Assign(const std::vector<Tensor *> &inputs, const std::vector<std::unique_ptr<OperatorCoder>> &nodes); | |||
| // the allocator owns the buffers malloc'ed by op coders and frees them before the session coder is destroyed | |||
| void Free(); | |||
| /* | |||
| * malloc a new weight, bias or workspace buffer at Prepare time, | |||
| * distinguished by the MallocType | |||
| */ | |||
| void *Malloc(TypeId type_id, size_t size, MallocType type) { | |||
| if (type != kWorkspace) { | |||
| return MallocWeightTensor(type_id, size, type); | |||
| } | |||
| if (size == 0 || size >= UINT_MAX) { | |||
| return nullptr; | |||
| } | |||
| void *buffer = malloc(size); | |||
| if (buffer == nullptr) { | |||
| MS_LOG(ERROR) << "malloc memory failed"; | |||
| return nullptr; | |||
| } | |||
| AssignWorkspaces(buffer, size); | |||
| allocated_.push_back(buffer); | |||
| return buffer; | |||
| } | |||
| /* | |||
| * get the actual runtime address string, prefixed with its type, | |||
| * for tensors and workspaces | |||
| */ | |||
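| // Example (hedged, with an assumed buffer prefix): for a float32 tensor placed at arena offset 64 | |||
| // with net_buffer_addr_ == "net_B", GetRuntimeAddr(tensor) returns roughly "(float *)(net_B+64)", | |||
| // ready to paste into the generated C source. | |||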
| template <typename T> | |||
| std::string GetRuntimeAddr(T t, bool is_const = false) { | |||
| if (!t) { | |||
| return "NULL"; | |||
| } | |||
| std::string type_info = is_const ? "const " : ""; | |||
| std::string type_name; | |||
| if (std::type_index(typeid(T)) == std::type_index(typeid(Tensor *))) { | |||
| type_name = GetTensorDataType(reinterpret_cast<Tensor *>(t)->data_type()) + " *"; | |||
| } else { | |||
| type_name = GetVariableTypeName<T>(); | |||
| } | |||
| type_info = wrap(type_info + type_name); | |||
| void *variable = reinterpret_cast<void *>(t); | |||
| auto item = inputs_addr_.find(variable); | |||
| if (item != inputs_addr_.end()) { | |||
| return type_info + item->second; | |||
| } | |||
| item = workspaces_addr_.find(variable); | |||
| if (item != workspaces_addr_.end()) { | |||
| return type_info + wrap(item->second); | |||
| } | |||
| auto iter = std::find_if( | |||
| tensors_addr_.begin(), tensors_addr_.end(), | |||
| [&variable](const std::pair<Tensor *, std::string> &a) { return variable == reinterpret_cast<void *>(a.first); }); | |||
| if (iter != tensors_addr_.end()) { | |||
| return type_info + wrap(iter->second); | |||
| } | |||
| // find variable in weights map | |||
| iter = | |||
| std::find_if(malloc_weights_addr_.begin(), malloc_weights_addr_.end(), | |||
| [&variable](const std::pair<Tensor *, std::string> &a) { return variable == (a.first)->data_c(); }); | |||
| if (iter != malloc_weights_addr_.end()) { | |||
| return iter->second; | |||
| } | |||
| // origin weight | |||
| iter = std::find_if(origin_weights_addr_.begin(), origin_weights_addr_.end(), | |||
| [&variable](const std::pair<Tensor *, std::string> &a) { return variable == a.first; }); | |||
| if (iter != origin_weights_addr_.end()) { | |||
| saved_weights_addr_.insert(std::make_pair(iter->second, reinterpret_cast<Tensor *>(variable))); | |||
| return iter->second; | |||
| } | |||
| MS_LOG(ERROR) << "uninitialized memory"; | |||
| return ""; | |||
| } | |||
| std::map<Tensor *, std::string> tensors_map() const; | |||
| /** | |||
| * @return the weight tensors, keyed by their runtime address, that need to be written to the weight file | |||
| */ | |||
| std::map<std::string, Tensor *> saved_weights() const { return saved_weights_addr_; } | |||
| size_t total_buffer_size() const { return tensors_size_ + workspace_size_; } | |||
| void enable_is_next() { is_next_ = true; } | |||
| private: | |||
| void *MallocWeightTensor(TypeId type_id, size_t size, MallocType type); | |||
| int AssignTensors(const std::vector<std::unique_ptr<OperatorCoder>> &nodes); | |||
| void AssignGraphInputs(const std::vector<Tensor *> &inputs); | |||
| void AssignWorkspaces(void *addr, size_t size); | |||
| void RecordOriginWeightsAddr(const std::vector<std::unique_ptr<OperatorCoder>> &nodes); | |||
| void RecordTensorsAddr(const std::map<Tensor *, size_t> &offsets); | |||
| private: | |||
| MemoryAllocator() = default; | |||
| ~MemoryAllocator() = default; | |||
| std::map<void *, std::string> workspaces_addr_; | |||
| size_t workspace_size_{0}; | |||
| size_t tensors_size_{0}; | |||
| size_t weight_index_{0}; | |||
| bool is_next_{false}; | |||
| size_t offset_{0}; | |||
| std::vector<void *> allocated_; | |||
| std::map<std::string, Tensor *> saved_weights_addr_; | |||
| std::map<Tensor *, std::string> origin_weights_addr_; | |||
| std::map<Tensor *, std::string> malloc_weights_addr_; | |||
| std::map<Tensor *, std::string> tensors_addr_; | |||
| std::map<void *, std::string> inputs_addr_; | |||
| std::string net_input_addr_; | |||
| std::string net_buffer_addr_; | |||
| std::string net_weight_addr_; | |||
| }; | |||
| } // namespace mindspore::lite::micro | |||
| #endif // MINDSPORE_LITE_MICRO_CODER_MEMORY_ALLOCATOR_H_ | |||
| @@ -0,0 +1,167 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "coder/allocator/memory_manager.h" | |||
| #include <vector> | |||
| #include "coder/opcoders/op_coder.h" | |||
| namespace mindspore::lite::micro { | |||
| static constexpr size_t kDefaultMemAlignSize = 8; | |||
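| // round a size up to the next multiple of kDefaultMemAlignSize, | |||
| // e.g. AlignMemorySize(13) == 16 and AlignMemorySize(16) == 16 | |||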
| static size_t AlignMemorySize(size_t size) { | |||
| return ((size + kDefaultMemAlignSize - 1) / kDefaultMemAlignSize) * kDefaultMemAlignSize; | |||
| } | |||
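| // Greedy single-pass assignment over nodes in execution order: outputs grab a new or reusable | |||
| // buffer (AssignOutputs), the current membuf list is snapshotted (StoreMembufListInfo), and inputs | |||
| // whose reference count reaches zero release their buffers for later nodes (ReleaseInputs). | |||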
| int MemoryManager::AssignMemory(const std::vector<std::unique_ptr<OperatorCoder>> &nodes) { | |||
| for (const auto &node : nodes) { | |||
| AssignOutputs(node); | |||
| StoreMembufListInfo(node); | |||
| ReleaseInputs(node); | |||
| } | |||
| return RET_OK; | |||
| } | |||
| void MemoryManager::StoreMembufListInfo(const std::unique_ptr<OperatorCoder> &node) { | |||
| std::vector<MembufPtr> temp; | |||
| for (const auto &membuf : membuf_list_) { | |||
| auto buf = std::make_shared<Membuf>(membuf->key_, membuf->status_, membuf->size_, membuf->offset_); | |||
| temp.emplace_back(buf); | |||
| } | |||
| auto info = std::make_pair(node->node_index(), temp); | |||
| all_membuf_list_info_.emplace_back(info); | |||
| } | |||
| size_t MemoryManager::GetAllocatedSize() const { | |||
| if (membuf_list_.empty()) { | |||
| return 0; | |||
| } | |||
| return membuf_list_.back()->offset_ + membuf_list_.back()->size_; | |||
| } | |||
| void MemoryManager::AssignOutputs(const std::unique_ptr<OperatorCoder> &node) { | |||
| for (const auto &output : node->output_tensors()) { | |||
| if (output == nullptr) { | |||
| MS_LOG(ERROR) << "output tensor is nullptr"; | |||
| return; | |||
| } | |||
| size_t size = AlignMemorySize(output->Size()); | |||
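| // size_map is ordered by buffer size, so begin() below picks the smallest free buffer that still | |||
| // fits: a best-fit reuse policy. | |||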
| std::map<size_t, size_t> size_map = GetReusableMembufMap(size); | |||
| if (size_map.empty()) { | |||
| AssignNewMembuf(output, size); | |||
| } else { | |||
| size_t membuf_index = size_map.begin()->second; | |||
| ReuseExistedMembuf(membuf_index, output, size); | |||
| } | |||
| } | |||
| } | |||
| void MemoryManager::ReleaseInputs(const std::unique_ptr<OperatorCoder> &node) { | |||
| // release node input and workspace | |||
| for (const auto &input : node->input_tensors()) { | |||
| if (input == nullptr) { | |||
| MS_LOG(ERROR) << "input tensor is nullptr"; | |||
| return; | |||
| } | |||
| if (input->category() != Tensor::VAR && input->data_c() != nullptr) { | |||
| continue; | |||
| } | |||
| input->DecRefCount(); | |||
| if (input->ref_count() > 0) { | |||
| continue; | |||
| } | |||
| auto item = std::find_if(membuf_list_.begin(), membuf_list_.end(), | |||
| [input](const MembufPtr &membuf) { return membuf->key_ == input; }); | |||
| if (item == membuf_list_.end()) { | |||
| continue; | |||
| } | |||
| auto membuf = *item; | |||
| membuf->status_ = kUnused; | |||
| } | |||
| MergeMembuf(); | |||
| } | |||
| void MemoryManager::AssignNewMembuf(Tensor *key, size_t size) { | |||
| MS_LOG(DEBUG) << "assign new membuf: " << size; | |||
| size_t offset = GetAllocatedSize(); | |||
| auto membuf = std::make_shared<Membuf>(key, kReused, size, offset); | |||
| membuf_list_.push_back(membuf); | |||
| variables_offset_.insert(std::make_pair(key, offset)); | |||
| } | |||
| void MemoryManager::ReuseExistedMembuf(size_t index, Tensor *key, size_t size) { | |||
| MembufPtr membuf = membuf_list_[index]; | |||
| UpdateMembufInfo(membuf, key); | |||
| if (membuf->size_ > size) { | |||
| SplitMembuf(index, size); | |||
| } | |||
| } | |||
| void MemoryManager::UpdateMembufInfo(const MembufPtr &membuf, Tensor *key) { | |||
| membuf->status_ = kReused; | |||
| membuf->key_ = key; | |||
| variables_offset_.insert(std::make_pair(key, membuf->offset_)); | |||
| } | |||
| void MemoryManager::SplitMembuf(size_t index, size_t size) { | |||
| if (index >= membuf_list_.size()) { | |||
| MS_LOG(ERROR) << "Index out of vector range."; | |||
| } | |||
| auto membuf = membuf_list_[index]; | |||
| size_t bias = membuf->size_ - size; | |||
| if (bias < 32) { | |||
| return;  // the remaining space is too small to be worth splitting off | |||
| } | |||
| membuf->size_ = size; | |||
| auto new_membuf = std::make_shared<Membuf>(kUnused, bias, membuf->offset_ + membuf->size_); | |||
| (void)membuf_list_.insert(membuf_list_.begin() + index + 1, new_membuf); | |||
| } | |||
| void MemoryManager::MergeMembuf() { | |||
| if (membuf_list_.empty()) { | |||
| return; | |||
| } | |||
| std::vector<MembufPtr> temp; | |||
| bool is_continue = false; | |||
| for (const auto &membuf : membuf_list_) { | |||
| if (membuf->status_ == kReused) { | |||
| temp.emplace_back(membuf); | |||
| is_continue = false; | |||
| } else { | |||
| if (!is_continue) { | |||
| temp.emplace_back(membuf); | |||
| is_continue = true; | |||
| } else { | |||
| auto back = temp.back(); | |||
| back->size_ += membuf->size_; | |||
| } | |||
| } | |||
| } | |||
| membuf_list_ = temp; | |||
| } | |||
| std::map<size_t, size_t> MemoryManager::GetReusableMembufMap(size_t size) { | |||
| std::map<size_t, size_t> size_map; | |||
| for (size_t i = 0; i < membuf_list_.size(); ++i) { | |||
| auto membuf = membuf_list_[i]; | |||
| auto index = i; | |||
| if (membuf->status_ == kUnused && membuf->size_ >= size) { | |||
| (void)size_map.insert(std::make_pair(membuf->size_, index)); | |||
| } | |||
| } | |||
| return size_map; | |||
| } | |||
| } // namespace mindspore::lite::micro | |||
| @@ -0,0 +1,74 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_LITE_MICRO_CODER_MEMORY_MANAGER_H_ | |||
| #define MINDSPORE_LITE_MICRO_CODER_MEMORY_MANAGER_H_ | |||
| #include <map> | |||
| #include <vector> | |||
| #include <memory> | |||
| #include <utility> | |||
| #include "src/tensor.h" | |||
| namespace mindspore::lite::micro { | |||
| class OperatorCoder; | |||
| enum Status { kUnused = 0, kReused = 1 }; | |||
| class Membuf { | |||
| public: | |||
| Membuf(Status status, size_t size, size_t offset) : status_(status), size_(size), offset_(offset) {} | |||
| Membuf(Tensor *key, Status status, size_t size, size_t offset) | |||
| : key_(key), status_(status), size_(size), offset_(offset) {} | |||
| ~Membuf() = default; | |||
| Tensor *key_ = nullptr; | |||
| Status status_; | |||
| size_t size_; | |||
| size_t offset_; | |||
| }; | |||
| using MembufPtr = std::shared_ptr<Membuf>; | |||
| class MemoryManager { | |||
| public: | |||
| MemoryManager() = default; | |||
| ~MemoryManager() = default; | |||
| int AssignMemory(const std::vector<std::unique_ptr<OperatorCoder>> &nodes); | |||
| size_t GetAllocatedSize() const; | |||
| std::map<Tensor *, size_t> variables_offset() { return variables_offset_; } | |||
| private: | |||
| void AssignOutputs(const std::unique_ptr<OperatorCoder> &node); | |||
| void ReleaseInputs(const std::unique_ptr<OperatorCoder> &node); | |||
| void SplitMembuf(size_t index, size_t size); | |||
| void MergeMembuf(); | |||
| void UpdateMembufInfo(const MembufPtr &membuf, Tensor *key); | |||
| void AssignNewMembuf(Tensor *key, size_t size); | |||
| void ReuseExistedMembuf(size_t index, Tensor *key, size_t size); | |||
| std::map<size_t, size_t> GetReusableMembufMap(size_t size); | |||
| void StoreMembufListInfo(const std::unique_ptr<OperatorCoder> &node); | |||
| private: | |||
| std::vector<MembufPtr> membuf_list_; | |||
| std::vector<std::pair<size_t, std::vector<MembufPtr>>> all_membuf_list_info_; | |||
| std::map<Tensor *, size_t> variables_offset_; | |||
| }; | |||
| } // namespace mindspore::lite::micro | |||
| #endif // MINDSPORE_LITE_MICRO_CODER_MEMORY_MANAGER_H_ | |||
| @@ -0,0 +1,207 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "coder/coder.h" | |||
| #include <getopt.h> | |||
| #include <iomanip> | |||
| #include <string> | |||
| #include <vector> | |||
| #include <map> | |||
| #include "schema/inner/model_generated.h" | |||
| #include "tools/common/flag_parser.h" | |||
| #include "coder/session_coder.h" | |||
| #include "coder/coder_context.h" | |||
| #include "utils/dir_utils.h" | |||
| #include "securec/include/securec.h" | |||
| #include "src/common/file_utils.h" | |||
| #include "src/common/utils.h" | |||
| #include "coder/coder_config.h" | |||
| namespace mindspore::lite::micro { | |||
| class CoderFlags : public virtual FlagParser { | |||
| public: | |||
| CoderFlags() { | |||
| AddFlag(&CoderFlags::is_weight_file_, "isWeightFile", "whether to generate the weight (.net) file, true | false", false); | |||
| AddFlag(&CoderFlags::model_path_, "modelPath", "Input model path", ""); | |||
| AddFlag(&CoderFlags::code_path_, "codePath", "Input code path", "."); | |||
| AddFlag(&CoderFlags::code_module_name_, "moduleName", "Input code module name", ""); | |||
| AddFlag(&CoderFlags::target_, "target", "generated code target, x86 | ARM32M | ARM32A | ARM64", "x86"); | |||
| AddFlag(&CoderFlags::code_mode_, "codeMode", "generated code mode, Normal | Android", "Normal"); | |||
| AddFlag(&CoderFlags::debug_mode_, "debugMode", "dump per-layer time cost and tensor data, true | false", false); | |||
| } | |||
| ~CoderFlags() override = default; | |||
| public: | |||
| std::string model_path_; | |||
| bool is_weight_file_{false}; | |||
| std::string code_module_name_; | |||
| std::string code_path_; | |||
| std::string code_mode_; | |||
| bool debug_mode_{false}; | |||
| std::string target_; | |||
| }; | |||
| int Coder::Run(const std::string &model_path) { | |||
| session_ = CreateCoderSession(); | |||
| if (session_ == nullptr) { | |||
| MS_LOG(ERROR) << "new session failed while running"; | |||
| return RET_ERROR; | |||
| } | |||
| STATUS status = session_->Init(model_path); | |||
| if (status != RET_OK) { | |||
| MS_LOG(ERROR) << "Init session failed."; | |||
| return RET_ERROR; | |||
| } | |||
| status = session_->Build(); | |||
| if (status != RET_OK) { | |||
| MS_LOG(ERROR) << "Set Input resize shapes error"; | |||
| return status; | |||
| } | |||
| status = session_->Run(); | |||
| if (status != RET_OK) { | |||
| MS_LOG(ERROR) << "Generate Code Files error. " << status; | |||
| return status; | |||
| } | |||
| status = session_->GenerateCode(); | |||
| if (status != RET_OK) { | |||
| MS_LOG(ERROR) << "Generate Code Files error " << status; | |||
| } | |||
| return status; | |||
| } | |||
| int Coder::Init(const CoderFlags &flags) const { | |||
| static const std::map<std::string, Target> kTargetMap = { | |||
| {"x86", kX86}, {"ARM32M", kARM32M}, {"ARM32A", kARM32A}, {"ARM64", kARM64}, {"All", kAllTargets}}; | |||
| static const std::map<std::string, CodeMode> kCodeModeMap = {{"Normal", Code_Normal}, {"Android", Code_Android}}; | |||
| Configurator *config = Configurator::GetInstance(); | |||
| std::vector<std::function<bool()>> parsers; | |||
| parsers.emplace_back([flags, config]() -> bool { | |||
| config->set_is_weight_file(flags.is_weight_file_); | |||
| return true; | |||
| }); | |||
| parsers.emplace_back([&flags, config]() -> bool { | |||
| auto target_item = kTargetMap.find(flags.target_); | |||
| MS_CHECK_TRUE_RET_BOOL(target_item != kTargetMap.end(), "unsupported target: " + flags.target_); | |||
| config->set_target(target_item->second); | |||
| return true; | |||
| }); | |||
| parsers.emplace_back([&flags, config]() -> bool { | |||
| auto code_item = kCodeModeMap.find(flags.code_mode_); | |||
| MS_CHECK_TRUE_RET_BOOL(code_item != kCodeModeMap.end(), "unsupported code mode: " + flags.code_mode_); | |||
| config->set_code_mode(code_item->second); | |||
| return true; | |||
| }); | |||
| parsers.emplace_back([&flags, config]() -> bool { | |||
| config->set_debug_mode(flags.debug_mode_); | |||
| return true; | |||
| }); | |||
| parsers.emplace_back([&flags, config]() -> bool { | |||
| if (!FileExists(flags.model_path_)) { | |||
| MS_LOG(ERROR) << "code_gen model_path " << flags.model_path_ << " is not valid"; | |||
| return false; | |||
| } | |||
| if (flags.code_module_name_.empty() || isdigit(flags.code_module_name_.at(0))) { | |||
| MS_LOG(ERROR) << "code_gen code module name " << flags.code_module_name_ | |||
| << " is not valid: it must be provided and must not start with a digit"; | |||
| return false; | |||
| } | |||
| config->set_module_name(flags.code_module_name_); | |||
| return true; | |||
| }); | |||
| parsers.emplace_back([&flags, config]() -> bool { | |||
| const std::string slash = std::string(kSlash); | |||
| if (!flags.code_path_.empty() && !DirExists(flags.code_path_)) { | |||
| MS_LOG(ERROR) << "code_gen code path " << flags.code_path_ << " is not valid"; | |||
| return false; | |||
| } | |||
| config->set_code_path(flags.code_path_); | |||
| if (flags.code_path_.empty()) { | |||
| std::string path = ".." + slash + config->module_name(); | |||
| config->set_code_path(path); | |||
| } else { | |||
| if (flags.code_path_.substr(flags.code_path_.size() - 1, 1) != slash) { | |||
| std::string path = flags.code_path_ + slash + config->module_name(); | |||
| config->set_code_path(path); | |||
| } else { | |||
| std::string path = flags.code_path_ + config->module_name(); | |||
| config->set_code_path(path); | |||
| } | |||
| } | |||
| return InitProjDirs(flags.code_path_, config->module_name()) != RET_ERROR; | |||
| }); | |||
| if (!std::all_of(parsers.begin(), parsers.end(), [](auto &parser) -> bool { return parser(); })) { | |||
| // the help flag is handled by RunCoder before Init; a failed parser is always an error here | |||
| std::cerr << flags.Usage() << std::endl; | |||
| return RET_ERROR; | |||
| } | |||
| auto print_parameter = [](auto name, auto value) { | |||
| MS_LOG(INFO) << std::setw(20) << std::left << name << "= " << value; | |||
| }; | |||
| print_parameter("modelPath", flags.model_path_); | |||
| print_parameter("target", config->target()); | |||
| print_parameter("codePath", config->code_path()); | |||
| print_parameter("codeMode", config->code_mode()); | |||
| print_parameter("codeModuleName", config->module_name()); | |||
| print_parameter("isWeightFile", config->is_weight_file()); | |||
| print_parameter("debugMode", config->debug_mode()); | |||
| return RET_OK; | |||
| } | |||
| int RunCoder(int argc, const char **argv) { | |||
| CoderFlags flags; | |||
| Option<std::string> err = flags.ParseFlags(argc, argv, false, false); | |||
| if (err.IsSome()) { | |||
| std::cerr << err.Get() << std::endl; | |||
| std::cerr << flags.Usage() << std::endl; | |||
| return RET_ERROR; | |||
| } | |||
| if (flags.help) { | |||
| std::cerr << flags.Usage() << std::endl; | |||
| return RET_OK; | |||
| } | |||
| Coder code_gen; | |||
| STATUS status = code_gen.Init(flags); | |||
| if (status != RET_OK) { | |||
| MS_LOG(ERROR) << "Coder init Error : " << status; | |||
| return status; | |||
| } | |||
| status = code_gen.Run(flags.model_path_); | |||
| if (status != RET_OK) { | |||
| MS_LOG(ERROR) << "Run Coder Error : " << status; | |||
| return status; | |||
| } | |||
| MS_LOG(INFO) << "end of Coder"; | |||
| return RET_OK; | |||
| } | |||
| } // namespace mindspore::lite::micro | |||
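| // A hypothetical invocation of the resulting tool (flag names from CoderFlags above; the | |||
| // --flag=value syntax and the paths are assumptions for illustration): | |||
| //   ./codegen --modelPath=./mnist.ms --moduleName=net --target=x86 --codeMode=Normal | |||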
| @@ -0,0 +1,44 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MICRO_CODER_CODER_H_ | |||
| #define MICRO_CODER_CODER_H_ | |||
| #include <string> | |||
| #include <memory> | |||
| #include "coder/session_coder.h" | |||
| namespace mindspore::lite::micro { | |||
| class CoderFlags; | |||
| class Coder final { | |||
| public: | |||
| Coder() = default; | |||
| ~Coder() = default; | |||
| int Init(const CoderFlags &flags) const; | |||
| int Run(const std::string &model_path); | |||
| private: | |||
| std::shared_ptr<CoderSession> session_{nullptr}; | |||
| }; | |||
| int RunCoder(int argc, const char **argv); | |||
| } // namespace mindspore::lite::micro | |||
| #endif // MICRO_CODER_CODER_H_ | |||
| @@ -0,0 +1,65 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MICRO_CODER_CONFIG_H | |||
| #define MICRO_CODER_CONFIG_H | |||
| #include <string> | |||
| namespace mindspore::lite::micro { | |||
| enum Target { kX86 = 0, kARM32M = 1, kARM32A = 2, kARM64 = 3, kAllTargets = 4, kTargetUnknown = 99 }; | |||
| enum CodeMode { Code_Normal = 0, Code_Android = 1, Code_Unknown = 99 }; | |||
| class Configurator { | |||
| public: | |||
| static Configurator *GetInstance() { | |||
| static Configurator configurator; | |||
| return &configurator; | |||
| } | |||
| void set_module_name(const std::string &module_name) { module_name_ = module_name; } | |||
| std::string module_name() const { return module_name_; } | |||
| void set_code_path(const std::string &code_path) { code_path_ = code_path; } | |||
| std::string code_path() const { return code_path_; } | |||
| void set_target(Target target) { target_ = target; } | |||
| Target target() const { return target_; } | |||
| void set_code_mode(CodeMode code_mode) { code_mode_ = code_mode; } | |||
| CodeMode code_mode() const { return code_mode_; } | |||
| void set_debug_mode(bool debug) { debug_mode_ = debug; } | |||
| bool debug_mode() const { return debug_mode_; } | |||
| void set_is_weight_file(bool flag) { is_weight_file_ = flag; } | |||
| bool is_weight_file() const { return is_weight_file_; } | |||
| private: | |||
| Configurator() = default; | |||
| ~Configurator() = default; | |||
| bool is_weight_file_{false}; | |||
| std::string module_name_; | |||
| std::string code_path_; | |||
| Target target_{kTargetUnknown}; | |||
| CodeMode code_mode_{Code_Unknown}; | |||
| bool debug_mode_{false}; | |||
| }; | |||
| } // namespace mindspore::lite::micro | |||
| #endif // MICRO_CODER_CONFIG_H | |||
| @@ -0,0 +1,35 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "micro/coder/coder_context.h" | |||
| #include "micro/coder/coder_config.h" | |||
| #include "micro/coder/allocator/allocator.h" | |||
| namespace mindspore::lite::micro { | |||
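| // Illustrative naming (assuming moduleName "net"): the constructor below yields net_I (inputs), | |||
| // net_O (outputs), net_B (workspace buffer) and net_W (weights); the allocator appends indices, | |||
| // e.g. net_W0, net_W1, when handing out individual addresses. | |||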
| CoderContext::CoderContext() { | |||
| Configurator *config = Configurator::GetInstance(); | |||
| std::string module_name = config->module_name(); | |||
| this->input_name_ = module_name + "_I"; | |||
| this->output_name_ = module_name + "_O"; | |||
| this->buffer_name_ = module_name + "_B"; | |||
| this->weight_name_ = module_name + "_W"; | |||
| } | |||
| void CoderContext::AppendCode(const std::string &codeBlock) { this->code_blocks_.emplace_back(codeBlock); } | |||
| void CoderContext::AppendInitCode(const std::string &codeBlock) { this->initialContent_.push_back(codeBlock); } | |||
| } // namespace mindspore::lite::micro | |||
| @@ -0,0 +1,109 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MICRO_CODER_CODER_CONTEXT_H_ | |||
| #define MICRO_CODER_CODER_CONTEXT_H_ | |||
| #include <map> | |||
| #include <memory> | |||
| #include <set> | |||
| #include <stack> | |||
| #include <string> | |||
| #include <utility> | |||
| #include <vector> | |||
| #include "src/tensor.h" | |||
| #include "coder/utils/coder_utils.h" | |||
| #include "coder/utils/print_utils.h" | |||
| namespace mindspore::lite::micro { | |||
| class CoderContext { | |||
| public: | |||
| CoderContext(); | |||
| ~CoderContext() = default; | |||
| std::vector<std::string> init_contents() const { return initialContent_; } | |||
| void set_code_blocks(const std::vector<std::string> &code_block) { code_blocks_ = code_block; } | |||
| std::vector<std::string> code_blocks() { return code_blocks_; } | |||
| void set_tensor_map(const std::map<Tensor *, std::string> &tensor_map) { | |||
| tensors_map_.insert(tensor_map.begin(), tensor_map.end()); | |||
| } | |||
| std::map<Tensor *, std::string> tensors_map() const { return tensors_map_; } | |||
| void set_saved_weights(const std::map<std::string, Tensor *> &saved_weights) { saved_weights_ = saved_weights; } | |||
| std::map<std::string, Tensor *> saved_weights() const { return saved_weights_; } | |||
| void set_total_buffer_size(size_t size) { total_buffer_size_ = size; } | |||
| size_t total_buffer_size() const { return total_buffer_size_; } | |||
| void set_graph_inputs(const std::vector<Tensor *> &graph_inputs) { graph_inputs_ = graph_inputs; } | |||
| void set_graph_outputs(const std::vector<Tensor *> &graph_outputs) { graph_outputs_ = graph_outputs; } | |||
| std::vector<Tensor *> graph_inputs() const { return graph_inputs_; } | |||
| std::vector<Tensor *> graph_outputs() const { return graph_outputs_; } | |||
| std::string input_name() { return input_name_; } | |||
| std::string output_name() { return output_name_; } | |||
| std::string buffer_name() { return buffer_name_; } | |||
| std::string weight_name() { return weight_name_; } | |||
| void AppendCode(const std::string &codeBlock); | |||
| void AppendInitCode(const std::string &codeBlock); | |||
| std::set<std::string> c_files() const { return c_files_; } | |||
| void set_c_files(const std::set<std::string> files) { c_files_.insert(files.begin(), files.end()); } | |||
| std::set<std::string> h_files() const { return h_files_; } | |||
| void set_h_files(const std::set<std::string> files) { h_files_.insert(files.begin(), files.end()); } | |||
| std::set<std::string> asm_files() const { return asm_files_; } | |||
| void set_asm_files(const std::set<std::string> files) { asm_files_.insert(files.begin(), files.end()); } | |||
| private: | |||
| std::vector<Tensor *> graph_inputs_; | |||
| std::vector<Tensor *> graph_outputs_; | |||
| // primitive const tensors parsed from the model, not yet packed. | |||
| std::map<std::string, Tensor *> saved_weights_; | |||
| // all tensors, including those parsed from the model and the packed tensors. | |||
| std::map<Tensor *, std::string> tensors_map_; | |||
| // total buffer size: tensor arena plus workspace. | |||
| size_t total_buffer_size_{0}; | |||
| // model's input tensor data's address. | |||
| std::string input_name_; | |||
| // model's output tensor's address | |||
| std::string output_name_; | |||
| // the address of the workspace, used for inference or training. | |||
| std::string buffer_name_; | |||
| // model's weight tensors' address. | |||
| std::string weight_name_; | |||
| // code blocks for the tensors that will be packed at runtime (the generated init code) | |||
| std::vector<std::string> initialContent_; | |||
| // list of operator C source files that the generated net.c depends on; they will be added to CMakeLists.txt | |||
| std::set<std::string> c_files_; | |||
| // when codegen generates code for ARM64 or ARM32, we provide optimized arithmetic kernels written in | |||
| // assembly instructions; asm_files stores the assembly file names | |||
| std::set<std::string> asm_files_; | |||
| // operator header files | |||
| std::set<std::string> h_files_; | |||
| // net.c's content, including the inference implementation | |||
| std::vector<std::string> code_blocks_; | |||
| }; | |||
| } // namespace mindspore::lite::micro | |||
| #endif // MICRO_CODER_CODER_CONTEXT_H_ | |||
| @@ -0,0 +1,91 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "micro/coder/coder_graph.h" | |||
| #include <queue> | |||
| #include <deque> | |||
| #include <string> | |||
| #include <memory> | |||
| #include <algorithm> | |||
| #include <set> | |||
| #include "schema/inner/model_generated.h" | |||
| #include "src/ops/primitive_c.h" | |||
| namespace mindspore::lite::micro { | |||
| CoderGraph::~CoderGraph() { | |||
| if (model_ != nullptr) { | |||
| model_->Free(); | |||
| delete model_; | |||
| model_ = nullptr; | |||
| } | |||
| for (auto &tensor : all_tensors_) { | |||
| delete tensor; | |||
| } | |||
| } | |||
| std::vector<lite::Tensor *> CoderGraph::input_tensors() const { return input_tensors_; } | |||
| std::vector<lite::Tensor *> CoderGraph::output_tensors() const { return output_tensors_; } | |||
| void CoderGraph::InitInputs() { | |||
| for (const auto &pair : inputs_map_) { | |||
| std::vector<Tensor *> tensors = pair.second; | |||
| input_tensors_.insert(input_tensors_.end(), tensors.begin(), tensors.end()); | |||
| } | |||
| // remove duplicate tensors | |||
| std::set<lite::Tensor *> unique; | |||
| unique.insert(input_tensors_.begin(), input_tensors_.end()); | |||
| input_tensors_.clear(); | |||
| input_tensors_.insert(input_tensors_.end(), unique.begin(), unique.end()); | |||
| } | |||
| void CoderGraph::InitOutputs() { | |||
| std::transform(output_indices_.begin(), output_indices_.end(), std::back_inserter(output_tensors_), | |||
| [&](uint32_t a) { return this->all_tensors_.at(a); }); | |||
| } | |||
| void CoderGraph::SetAllTensors(const std::vector<Tensor *> &all_tensors) { | |||
| all_tensors_.insert(all_tensors_.end(), all_tensors.begin(), all_tensors.end()); | |||
| } | |||
| void CoderGraph::SetInputIndices(const std::vector<uint32_t> &input_indices) { | |||
| input_indices_.insert(input_indices_.end(), input_indices.begin(), input_indices.end()); | |||
| } | |||
| void CoderGraph::SetOutputIndices(const std::vector<uint32_t> &output_indices) { | |||
| output_indices_.insert(output_indices_.end(), output_indices.begin(), output_indices.end()); | |||
| } | |||
| void CoderGraph::AddInputMap(const std::string &node_id, Tensor *input_tensor) { | |||
| if (!input_tensor) { | |||
| MS_LOG(ERROR) << "input tensor is nullptr, can not added to coder_graph"; | |||
| return; | |||
| } | |||
| this->inputs_map_[node_id].emplace_back(input_tensor); | |||
| } | |||
| void CoderGraph::AddOutputMap(const std::string &node_id, Tensor *output_tensor) { | |||
| if (!output_tensor) { | |||
| MS_LOG(ERROR) << "output tensor is nullptr, can not added to coder_graph"; | |||
| return; | |||
| } | |||
| this->outputs_map_[node_id].emplace_back(output_tensor); | |||
| } | |||
| std::vector<lite::Tensor *> CoderGraph::all_tensors() const { return this->all_tensors_; } | |||
| const std::map<std::string, std::vector<lite::Tensor *>> &CoderGraph::GetOutputsMap() const { return outputs_map_; } | |||
| std::vector<uint32_t> CoderGraph::input_indices() const { return this->input_indices_; } | |||
| std::vector<uint32_t> CoderGraph::output_indices() const { return this->output_indices_; } | |||
| } // namespace mindspore::lite::micro | |||
| @@ -0,0 +1,83 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MICRO_CODER_GRAPH_NODE_H_ | |||
| #define MICRO_CODER_GRAPH_NODE_H_ | |||
| #include <map> | |||
| #include <memory> | |||
| #include <unordered_map> | |||
| #include <vector> | |||
| #include <string> | |||
| #include "coder/coder_config.h" | |||
| #include "include/context.h" | |||
| #include "include/model.h" | |||
| #include "schema/inner/model_generated.h" | |||
| #include "src/common/graph_util.h" | |||
| #include "src/tensor.h" | |||
| namespace mindspore::lite::micro { | |||
| class CoderGraph { | |||
| public: | |||
| explicit CoderGraph(Model *model) : model_(model) {} | |||
| ~CoderGraph(); | |||
| void SetAllTensors(const std::vector<Tensor *> &all_tensors); | |||
| void InitInputs(); | |||
| void InitOutputs(); | |||
| void SetInputIndices(const std::vector<uint32_t> &input_indices); | |||
| void SetOutputIndices(const std::vector<uint32_t> &output_indices); | |||
| void AddInputMap(const std::string &node_id, Tensor *input_tensor); | |||
| void AddOutputMap(const std::string &node_id, Tensor *output_tensor); | |||
| std::vector<uint32_t> input_indices() const; | |||
| std::vector<uint32_t> output_indices() const; | |||
| std::vector<Tensor *> input_tensors() const; | |||
| std::vector<Tensor *> output_tensors() const; | |||
| std::vector<Tensor *> all_tensors() const; | |||
| const std::map<NODE_ID, std::vector<Tensor *>> &GetOutputsMap() const; | |||
| const Model *model() const { return this->model_; } | |||
| private: | |||
| // graph inputs, weights and biases are value nodes; | |||
| // the others are parameter nodes | |||
| std::vector<Tensor *> all_tensors_; | |||
| std::vector<Tensor *> input_tensors_; | |||
| std::vector<Tensor *> output_tensors_; | |||
| std::vector<uint32_t> input_indices_; | |||
| std::vector<uint32_t> output_indices_; | |||
| std::map<std::string, std::vector<Tensor *>> inputs_map_; | |||
| std::map<std::string, std::vector<Tensor *>> outputs_map_; | |||
| Model *model_{nullptr}; | |||
| }; | |||
| } // namespace mindspore::lite::micro | |||
| #endif // MICRO_CODER_GRAPH_NODE_H_ | |||
| @@ -0,0 +1,95 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "micro/coder/debug.h" | |||
| #include <memory> | |||
| #include <map> | |||
| #include <vector> | |||
| #include <utility> | |||
| #include "include/errorcode.h" | |||
| #include "micro/coder/utils/print_utils.h" | |||
| #include "micro/coder/coder_context.h" | |||
| namespace mindspore::lite::micro { | |||
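| // Reconstructed sketch of the C fragment DumpTensorData below emits per tensor (the shape and the | |||
| // address "net_B+64" are example values, not taken from the patch): | |||
| //   { | |||
| //     MicroTensor tensor; | |||
| //     int dim[] = {1, 28, 28, 3, }; | |||
| //     tensor.dim = dim; | |||
| //     tensor.ndim = 4; | |||
| //     tensor.type = <MicroTensor data type>; | |||
| //     tensor.format = 0; | |||
| //     tensor.data = (void *)(net_B+64); | |||
| //     fprintf(output_file, "input Tensor:net_B+64\n"); | |||
| //     PrintTensor(&tensor, output_file, 1); | |||
| //   } | |||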
| void MicroDebug::DumpTensorData(Tensor *tensor, const std::string &tensor_addr, std::string *code_block_str, | |||
| bool is_input) { | |||
| *code_block_str += "\t\t\t{\n\t\t\t\tMicroTensor tensor;\n"; | |||
| std::string format_str = "\t\t\t\ttensor.format = " + std::to_string(tensor->format()) + ";\n"; | |||
| std::string type_str = "\t\t\t\ttensor.type = " + GetMicroTensorDataType(tensor->data_type()) + ";\n"; | |||
| std::string ndim_str = "\t\t\t\ttensor.ndim = " + std::to_string(static_cast<int>(tensor->shape().size())) + ";\n"; | |||
| *code_block_str += "\t\t\t\tint dim[] = {"; | |||
| for (size_t i = 0; i < tensor->shape().size(); ++i) { | |||
| *code_block_str += std::to_string(tensor->shape().at(i)) + ", "; | |||
| } | |||
| *code_block_str += "};\n"; | |||
| *code_block_str += "\t\t\t\ttensor.dim = dim;\n"; | |||
| std::string data_str = "\t\t\t\ttensor.data = (void *)(" + tensor_addr + ");\n"; | |||
| std::string in_or_out = (is_input ? "input" : "output"); | |||
| std::string fprint_str = "\t\t\t\tfprintf(output_file, \"" + in_or_out + " Tensor:" + tensor_addr + "\\n\");\n"; | |||
| std::string print_str = "\t\t\t\tPrintTensor(&tensor,output_file," + std::to_string(is_input) + ");\n\t\t\t}\n"; | |||
| *code_block_str += ndim_str; | |||
| *code_block_str += type_str; | |||
| *code_block_str += format_str; | |||
| *code_block_str += data_str; | |||
| *code_block_str += fprint_str; | |||
| *code_block_str += print_str; | |||
| } | |||
| int MicroDebug::DumpNodeData(const std::unique_ptr<OperatorCoder> &op_coder, | |||
| const std::map<Tensor *, std::string> &tensor_addrs, std::string *code_block_str) { | |||
| auto config = Configurator::GetInstance(); | |||
| if (!config->debug_mode()) { | |||
| return RET_OK; | |||
| } | |||
| std::string node_name = op_coder->ID(); | |||
| std::string file_str = "\n\t\t{\n\t\t\tFILE *output_file = fopen( \"./" + node_name + | |||
| ".ir\", \"w\");\n\t\t\tfprintf(output_file, \"Node:" + op_coder->ID() + "\\n\");\n"; | |||
| *code_block_str += file_str; | |||
| auto runtime_tensor_iterator = [&op_coder, tensor_addrs, &code_block_str](const std::vector<Tensor *> &tensors, | |||
| bool dump_data) { | |||
| for (const auto &tensor : tensors) { | |||
| if (tensor->data_c() != nullptr) { | |||
| continue; | |||
| } | |||
| auto find_item = | |||
| std::find_if(tensor_addrs.begin(), tensor_addrs.end(), | |||
| [tensor](const std::pair<Tensor *, std::string> &item) { return item.first == tensor; }); | |||
| if (find_item != tensor_addrs.end()) { | |||
| DumpTensorData(tensor, find_item->second, code_block_str, dump_data); | |||
| } | |||
| } | |||
| return RET_OK; | |||
| }; | |||
| int status = runtime_tensor_iterator(op_coder->input_tensors(), true); | |||
| if (status != RET_OK) { | |||
| MS_LOG(ERROR) << "dump runtime input tensor failed!"; | |||
| return status; | |||
| } | |||
| status = runtime_tensor_iterator(op_coder->output_tensors(), false); | |||
| if (status != RET_OK) { | |||
| MS_LOG(ERROR) << "dump runtime input tensor failed!"; | |||
| return status; | |||
| } | |||
| std::string end_file_str = "\t\t\tfclose(output_file);\n\t\t}\n"; | |||
| *code_block_str += end_file_str; | |||
| return RET_OK; | |||
| } | |||
| } // namespace mindspore::lite::micro | |||
| @@ -0,0 +1,43 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_MICRO_LITE_CODER_DEBUG_H_ | |||
| #define MINDSPORE_MICRO_LITE_CODER_DEBUG_H_ | |||
| #include <fstream> | |||
| #include <sstream> | |||
| #include <string> | |||
| #include <map> | |||
| #include <memory> | |||
| #include "src/tensor.h" | |||
| #include "micro/coder/opcoders/op_coder.h" | |||
| namespace mindspore::lite::micro { | |||
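| // Static helpers used by the coder to inject tensor-dump statements into the | |||
| // generated inference sources; they only take effect in debug mode. | |||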
| class MicroDebug { | |||
| public: | |||
| MicroDebug() = default; | |||
| ~MicroDebug() = default; | |||
| static void DumpTensorData(Tensor *tensor, const std::string &tensor_addr, std::string *code_block_str, | |||
| bool is_input); | |||
| static int DumpNodeData(const std::unique_ptr<OperatorCoder> &op_coder, | |||
| const std::map<Tensor *, std::string> &tensor_addrs, std::string *code_block_str); | |||
| }; | |||
| } // namespace mindspore::lite::micro | |||
| #endif  // MINDSPORE_MICRO_LITE_CODER_DEBUG_H_ | |||
| @@ -0,0 +1,276 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MICRO_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_BENCH_DEBUG_UTILS_H_ | |||
| #define MICRO_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_BENCH_DEBUG_UTILS_H_ | |||
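| // debug_utils.h/.c are embedded as string constants and written verbatim into | |||
| // the generated project's benchmark directory by Generator::CodeStaticContent. | |||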
| static const char debug_utils_h[] = | |||
| "/**\n" | |||
| " * Copyright 2021 Huawei Technologies Co., Ltd\n" | |||
| " *\n" | |||
| " * Licensed under the Apache License, Version 2.0 (the \"License\");\n" | |||
| " * you may not use this file except in compliance with the License.\n" | |||
| " * You may obtain a copy of the License at\n" | |||
| " *\n" | |||
| " * http://www.apache.org/licenses/LICENSE-2.0\n" | |||
| " *\n" | |||
| " * Unless required by applicable law or agreed to in writing, software\n" | |||
| " * distributed under the License is distributed on an \"AS IS\" BASIS,\n" | |||
| " * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" | |||
| " * See the License for the specific language governing permissions and\n" | |||
| " * limitations under the License.\n" | |||
| " */\n" | |||
| "\n" | |||
| "#ifndef MINDSPORE_LITE_MICRO_MICRODEBUGUTIL_H_\n" | |||
| "#define MINDSPORE_LITE_MICRO_MICRODEBUGUTIL_H_\n" | |||
| "\n" | |||
| "#include <stdio.h>\n" | |||
| "#include <sys/time.h>\n" | |||
| "#include <time.h>\n" | |||
| "#include <stdint.h>\n" | |||
| "#include \"microtensor.h\"\n" | |||
| "\n" | |||
| "void PrintTensor(MicroTensor *tensor, FILE *output_file, int isInput);\n" | |||
| "\n" | |||
| "void PrintTensorData(MicroTensor *tensor);\n" | |||
| "\n" | |||
| "uint64_t GetTimeUs(void);\n" | |||
| "\n" | |||
| "#endif // MINDSPORE_LITE_MICRO_MICRODEBUGUTIL_H_\n"; | |||
| static const char debug_utils_c[] = | |||
| "/**\n" | |||
| " * Copyright 2021 Huawei Technologies Co., Ltd\n" | |||
| " *\n" | |||
| " * Licensed under the Apache License, Version 2.0 (the \"License\");\n" | |||
| " * you may not use this file except in compliance with the License.\n" | |||
| " * You may obtain a copy of the License at\n" | |||
| " *\n" | |||
| " * http://www.apache.org/licenses/LICENSE-2.0\n" | |||
| " *\n" | |||
| " * Unless required by applicable law or agreed to in writing, software\n" | |||
| " * distributed under the License is distributed on an \"AS IS\" BASIS,\n" | |||
| " * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" | |||
| " * See the License for the specific language governing permissions and\n" | |||
| " * limitations under the License.\n" | |||
| " */\n" | |||
| "\n" | |||
| "#include <inttypes.h>\n" | |||
| "#include \"debug_utils.h\"\n" | |||
| "\n" | |||
| "#define UP_DIV(x, y) (((x) + (y) - (1)) / (y))\n" | |||
| "\n" | |||
| "static const unsigned int kPrintNums = 20;\n" | |||
| "static const unsigned int kLineSplitNum = 44;\n" | |||
| "static const unsigned int kLineNum = 45;\n" | |||
| "unsigned int GetTensorElementSize(const MicroTensor *tensor) {\n" | |||
| " unsigned int ans = 1;\n" | |||
| " if (tensor->format == Format_NC4HW4) {\n" | |||
| " for (unsigned int i = 0; i < tensor->ndim; ++i) {\n" | |||
| " unsigned int dim = tensor->dim[i];\n" | |||
| " if (i == 1) {\n" | |||
| " dim = UP_DIV(dim, 4) * 4;\n" | |||
| " }\n" | |||
| " ans *= dim;\n" | |||
| " }\n" | |||
| " } else {\n" | |||
| " for (unsigned int i = 0; i < tensor->ndim; ++i) {\n" | |||
| " ans *= tensor->dim[i];\n" | |||
| " }\n" | |||
| " }\n" | |||
| " return ans;\n" | |||
| "}\n" | |||
| "\n" | |||
| "static const char *const TypeNames[] = {\"DT_FLOAT\", \"DT_FLOAT16\", \"DT_INT8\", \"DT_INT32\", \"DT_UINT8\", " | |||
| " \"DT_INT16\",\n" | |||
| " \"\", \"\", \"DT_UINT32\", \"DT_INT64\", \"DT_UINT16\", " | |||
| " \"\",\n" | |||
| " \"\", \"\", \"\", \"\", " | |||
| "\"DT_UNDEFINED\", \"\"};\n" | |||
| "\n" | |||
| "const char *EnumNameFormat(enum Format e) {\n" | |||
| " switch (e) {\n" | |||
| " case Format_NCHW:\n" | |||
| " return \"NCHW\";\n" | |||
| " case Format_NHWC:\n" | |||
| " return \"NHWC\";\n" | |||
| " case Format_HWKC:\n" | |||
| " return \"HWKC\";\n" | |||
| " case Format_HWCK:\n" | |||
| " return \"HWCK\";\n" | |||
| " case Format_KCHW:\n" | |||
| " return \"KCHW\";\n" | |||
| " case Format_CKHW:\n" | |||
| " return \"CKHW\";\n" | |||
| " case Format_KHWC:\n" | |||
| " return \"KHWC\";\n" | |||
| " case Format_CHWK:\n" | |||
| " return \"CHWK\";\n" | |||
| " case Format_NC4HW4:\n" | |||
| " return \"NC4HW4\";\n" | |||
| " case Format_NUM_OF_FORMAT:\n" | |||
| " return \"NUM_OF_FORMAT\";\n" | |||
| " default:\n" | |||
| " return \"\";\n" | |||
| " }\n" | |||
| "}\n" | |||
| "\n" | |||
| "void PrintTensorData(MicroTensor *tensor) {\n" | |||
| " void *data = tensor->data;\n" | |||
| " unsigned int elenums = GetTensorElementSize(tensor);\n" | |||
| " if (data == NULL || elenums == 0) {\n" | |||
| " MICRO_ERROR(\"print tensor data failed\");\n" | |||
| " return;\n" | |||
| " }\n" | |||
| " switch (tensor->type) {\n" | |||
| " case DataType_DT_FLOAT: {\n" | |||
| " float *addr = (float *)(data);\n" | |||
| " for (int i = 0; i < elenums && i < kPrintNums; ++i) {\n" | |||
| " printf(\"%f, \", addr[i]);\n" | |||
| " }\n" | |||
| " break;\n" | |||
| " }\n" | |||
| " case DataType_DT_INT32: {\n" | |||
| " int32_t *addr = (int32_t *)(data);\n" | |||
| " for (int i = 0; i < elenums && i < kPrintNums; ++i) {\n" | |||
| " printf(\"%d, \", addr[i]);\n" | |||
| " }\n" | |||
| " break;\n" | |||
| " }\n" | |||
| " case DataType_DT_INT8: {\n" | |||
| " int8_t *addr = (int8_t *)(data);\n" | |||
| " for (int i = 0; i < elenums && i < kPrintNums; ++i) {\n" | |||
| " printf(\"%d, \", addr[i]);\n" | |||
| " }\n" | |||
| " break;\n" | |||
| " }\n" | |||
| " case DataType_DT_UINT32: {\n" | |||
| " uint32_t *addr = (uint32_t *)(data);\n" | |||
| " for (int i = 0; i < elenums && i < kPrintNums; ++i) {\n" | |||
| " printf(\"%u, \", addr[i]);\n" | |||
| " }\n" | |||
| " break;\n" | |||
| " }\n" | |||
| " case DataType_DT_UINT8: {\n" | |||
| " uint8_t *addr = (uint8_t *)(data);\n" | |||
| " for (int i = 0; i < elenums && i < kPrintNums; ++i) {\n" | |||
| " printf(\"%u, \", addr[i]);\n" | |||
| " }\n" | |||
| " break;\n" | |||
| " }\n" | |||
| " default:\n" | |||
| " MICRO_ERROR(\"unsupported data type %d\", tensor->type);\n" | |||
| " }\n" | |||
| " printf(\"\\n\");\n" | |||
| "}\n" | |||
| "\n" | |||
| "void PrintDataToFile(const void *data, const size_t elenums, const enum DataType type, FILE *file) {\n" | |||
| " if (data == NULL || elenums == 0) {\n" | |||
| " MICRO_ERROR(\"print tensor data to file failed\");\n" | |||
| " return;\n" | |||
| " }\n" | |||
| " switch (type) {\n" | |||
| " case DataType_DT_FLOAT: {\n" | |||
| " float *addr = (float *)(data);\n" | |||
| " for (int i = 0; i < elenums; ++i) {\n" | |||
| " fprintf(file, \"%0.15f, \", addr[i]);\n" | |||
| " if (i % kLineNum == kLineSplitNum) {\n" | |||
| " fprintf(file, \"\\n\");\n" | |||
| " }\n" | |||
| " }\n" | |||
| " break;\n" | |||
| " }\n" | |||
| " case DataType_DT_INT32: {\n" | |||
| " int32_t *addr = (int32_t *)(data);\n" | |||
| " for (int i = 0; i < elenums; ++i) {\n" | |||
| " fprintf(file, \"%d, \", addr[i]);\n" | |||
| " if (i % kLineNum == kLineSplitNum) {\n" | |||
| " fprintf(file, \"\\n\");\n" | |||
| " }\n" | |||
| " }\n" | |||
| " break;\n" | |||
| " }\n" | |||
| " case DataType_DT_INT8: {\n" | |||
| " int8_t *addr = (int8_t *)(data);\n" | |||
| " for (int i = 0; i < elenums; ++i) {\n" | |||
| " fprintf(file, \"%d, \", addr[i]);\n" | |||
| " if (i % kLineNum == kLineSplitNum) {\n" | |||
| " fprintf(file, \"\\n\");\n" | |||
| " }\n" | |||
| " }\n" | |||
| " break;\n" | |||
| " }\n" | |||
| " case DataType_DT_UINT32: {\n" | |||
| " uint32_t *addr = (uint32_t *)(data);\n" | |||
| " for (int i = 0; i < elenums; ++i) {\n" | |||
| " fprintf(file, \"%u, \", addr[i]);\n" | |||
| " if (i % kLineNum == kLineSplitNum) {\n" | |||
| " fprintf(file, \"\\n\");\n" | |||
| " }\n" | |||
| " }\n" | |||
| " break;\n" | |||
| " }\n" | |||
| " case DataType_DT_UINT8: {\n" | |||
| " uint8_t *addr = (uint8_t *)(data);\n" | |||
| " for (int i = 0; i < elenums; ++i) {\n" | |||
| " fprintf(file, \"%u, \", addr[i]);\n" | |||
| " if (i % kLineNum == kLineSplitNum) {\n" | |||
| " fprintf(file, \"\\n\");\n" | |||
| " }\n" | |||
| " }\n" | |||
| " break;\n" | |||
| " }\n" | |||
| " default:\n" | |||
| " MICRO_ERROR(\"unsupported data type %d\", type);\n" | |||
| " }\n" | |||
| " fprintf(file, \"\\n\");\n" | |||
| "}\n" | |||
| "\n" | |||
| "void PrintTensor(MicroTensor *tensor, FILE *output_file, int isInput) {\n" | |||
| " if (output_file != NULL) {\n" | |||
| " const char *tips = NULL;\n" | |||
| " if (isInput) {\n" | |||
| " tips = \"input\";\n" | |||
| " } else {\n" | |||
| " tips = \"output\";\n" | |||
| " }\n" | |||
| " fprintf(output_file, \"%s \", tips);\n" | |||
| " for (int i = 0; i < tensor->ndim; ++i) {\n" | |||
| " fprintf(output_file, \"%u, \", tensor->dim[i]);\n" | |||
| " }\n" | |||
| " fprintf(output_file, \"\\n\");\n" | |||
| "\n" | |||
| " const char *type = TypeNames[tensor->type];\n" | |||
| " const char *format = EnumNameFormat(tensor->format);\n" | |||
| " unsigned int tensorSize = GetTensorElementSize(tensor);\n" | |||
| " fprintf(output_file, \"%s type:%s, format:%s, elementSize: %u\\n\", tips, type, format, tensorSize);\n" | |||
| " fprintf(output_file, \"%s Data:\\n\", tips);\n" | |||
| " PrintDataToFile(tensor->data, tensorSize, tensor->type, output_file);\n" | |||
| " (void)fflush(output_file);\n" | |||
| " }\n" | |||
| "}\n" | |||
| "\n" | |||
| "uint64_t GetTimeUs(void) {\n" | |||
| " const int USEC = 1000000;\n" | |||
| " const int MSEC = 1000;\n" | |||
| " struct timespec ts = {0, 0};\n" | |||
| " if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {\n" | |||
| " return 0;\n" | |||
| " }\n" | |||
| " uint64_t retval = (uint64_t)((ts.tv_sec * USEC) + (ts.tv_nsec / MSEC));\n" | |||
| " return retval;\n" | |||
| "}\n"; | |||
| #endif  // MICRO_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_BENCH_DEBUG_UTILS_H_ | |||
| @@ -0,0 +1,141 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MICRO_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_BENCH_LOAD_INPUT_H_ | |||
| #define MICRO_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_BENCH_LOAD_INPUT_H_ | |||
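| // Embedded sources of the benchmark's input-loading helpers; written verbatim | |||
| // to benchmark/load_input.h and benchmark/load_input.c in the generated project. | |||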
| static const char load_input_h[] = | |||
| "/**\n" | |||
| " * Copyright 2021 Huawei Technologies Co., Ltd\n" | |||
| " *\n" | |||
| " * Licensed under the Apache License, Version 2.0 (the \"License\");\n" | |||
| " * you may not use this file except in compliance with the License.\n" | |||
| " * You may obtain a copy of the License at\n" | |||
| " *\n" | |||
| " * http://www.apache.org/licenses/LICENSE-2.0\n" | |||
| " *\n" | |||
| " * Unless required by applicable law or agreed to in writing, software\n" | |||
| " * distributed under the License is distributed on an \"AS IS\" BASIS,\n" | |||
| " * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" | |||
| " * See the License for the specific language governing permissions and\n" | |||
| " * limitations under the License.\n" | |||
| " */\n" | |||
| "\n" | |||
| "#ifndef MICRO_EXAMPLE_LOAD_INPUT_LOAD_INPUT_H_\n" | |||
| "#define MICRO_EXAMPLE_LOAD_INPUT_LOAD_INPUT_H_\n" | |||
| "void *ReadInputData(const char *real_input_path, int *size);\n" | |||
| "\n" | |||
| "void SaveOutputData(char *final_name, unsigned char *output_data, unsigned int out_size);\n" | |||
| "\n" | |||
| "int ReadInputsFile(char *path, void **buffers, const int *inputs_size, int inputs_num);\n" | |||
| "\n" | |||
| "#endif // MICRO_EXAMPLE_LOAD_INPUT_LOAD_INPUT_H_\n"; | |||
| static const char load_input_c[] = | |||
| "/**\n" | |||
| " * Copyright 2021 Huawei Technologies Co., Ltd\n" | |||
| " *\n" | |||
| " * Licensed under the Apache License, Version 2.0 (the \"License\");\n" | |||
| " * you may not use this file except in compliance with the License.\n" | |||
| " * You may obtain a copy of the License at\n" | |||
| " *\n" | |||
| " * http://www.apache.org/licenses/LICENSE-2.0\n" | |||
| " *\n" | |||
| " * Unless required by applicable law or agreed to in writing, software\n" | |||
| " * distributed under the License is distributed on an \"AS IS\" BASIS,\n" | |||
| " * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" | |||
| " * See the License for the specific language governing permissions and\n" | |||
| " * limitations under the License.\n" | |||
| " */\n" | |||
| "\n" | |||
| "#include \"load_input.h\"\n" | |||
| "#include <stdlib.h>\n" | |||
| "#include <stdio.h>\n" | |||
| "#include <string.h>\n" | |||
| "\n" | |||
| "void *ReadInputData(const char *real_input_path, int *size) {\n" | |||
| " if (real_input_path == NULL) {\n" | |||
| " return NULL;\n" | |||
| " }\n" | |||
| " if (strstr(real_input_path, \".bin\") || strstr(real_input_path, \".net\")) {\n" | |||
| " FILE *file;\n" | |||
| " file = fopen(real_input_path, \"rb+\");\n" | |||
| " if (!file) {\n" | |||
| " printf(\"Can't find %s\\n\", real_input_path);\n" | |||
| " return NULL;\n" | |||
| " }\n" | |||
| " int curr_file_posi = ftell(file);\n" | |||
| " fseek(file, 0, SEEK_END);\n" | |||
| " *size = ftell(file);\n" | |||
| " unsigned char *buf = malloc((*size));\n" | |||
| " (void)memset(buf, 0, (*size));\n" | |||
| " fseek(file, curr_file_posi, SEEK_SET);\n" | |||
| " int read_size = (int)(fread(buf, 1, *size, file));\n" | |||
| " if (read_size != (*size)) {\n" | |||
| " printf(\"read file failed, total file size: %d, read_size: %d\\n\", (*size), read_size);\n" | |||
| " fclose(file);\n" | |||
| " free(buf);\n" | |||
| " return NULL;\n" | |||
| " }\n" | |||
| " fclose(file);\n" | |||
| " return (void *)buf;\n" | |||
| " } else {\n" | |||
| " printf(\"input data file should be .bin , .net\");\n" | |||
| " return NULL;\n" | |||
| " }\n" | |||
| "}\n" | |||
| "\n" | |||
| "void SaveOutputData(char *final_name, unsigned char *output_data, unsigned int out_size) {\n" | |||
| " FILE *output_file;\n" | |||
| " output_file = fopen(final_name, \"w\");\n" | |||
| " if (output_file == NULL) {\n" | |||
| " printf(\"fopen output file: %s failed\\n\", final_name);\n" | |||
| " return;\n" | |||
| " }\n" | |||
| " unsigned char str[out_size];\n" | |||
| " for (unsigned int i = 0; i < out_size; ++i) {\n" | |||
| " str[i] = output_data[i];\n" | |||
| " fprintf(output_file, \"%d\\t\", str[i]);\n" | |||
| " }\n" | |||
| " fclose(output_file);\n" | |||
| "}\n" | |||
| "\n" | |||
| "int ReadInputsFile(char *path, void **buffers, const int *inputs_size, int inputs_num) {\n" | |||
| " char *inputs_path[inputs_num];\n" | |||
| " char *delim = \",\";\n" | |||
| " char *token;\n" | |||
| " int i = 0;\n" | |||
| " while ((token = strtok_r(path, delim, &path))) {\n" | |||
| " if (i >= inputs_num) {\n" | |||
| " printf(\"inputs num is error, need: %d\\n\", inputs_num);\n" | |||
| " return -1;\n" | |||
| " }\n" | |||
| " inputs_path[i] = token;\n" | |||
| " printf(\"input %d: %s\\n\", i, inputs_path[i]);\n" | |||
| " i++;\n" | |||
| " }\n" | |||
| "\n" | |||
| " for (i = 0; i < inputs_num; ++i) {\n" | |||
| " int size = 0;\n" | |||
| " buffers[i] = ReadInputData(inputs_path[i], &size);\n" | |||
| " if (size != inputs_size[i] || buffers[i] == NULL) {\n" | |||
| " printf(\"size mismatch, %s, %d, %d\\n\", inputs_path[i], size, inputs_size[i]);\n" | |||
| " return -1;\n" | |||
| " }\n" | |||
| " }\n" | |||
| " return 0;\n" | |||
| "}\n"; | |||
| #endif // MICRO_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_BENCH_LOAD_INPUT_H_ | |||
| @@ -0,0 +1,115 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MICRO_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_CMAKE_LISTS_CODE_H_ | |||
| #define MICRO_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_CMAKE_LISTS_CODE_H_ | |||
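| // Embedded CMakeLists.txt templates: bench_cmake_lists_txt links the benchmark | |||
| // executable against the generated model library, src_cmake_lists_txt builds the | |||
| // generated net sources and repacks the operator objects into one static library. | |||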
| static const char bench_cmake_lists_txt[] = | |||
| "cmake_minimum_required(VERSION 3.14)\n" | |||
| "project(${PROJ_NAME})\n" | |||
| "\n" | |||
| "message(\"project name: ${PROJ_NAME}\")\n" | |||
| "message(\"project name: ${MODEL_LIB_PATH}\")\n" | |||
| "message(\"architecture cmake file path: ${ARCH_CMAKE_PATH}\")\n" | |||
| "\n" | |||
| "function(parse_lib_info lib_full_path lib_name lib_path)\n" | |||
| " string(FIND \"${lib_full_path}\" \"/\" POS REVERSE)\n" | |||
| " math(EXPR POS \"${POS} + 1\")\n" | |||
| " string(SUBSTRING ${lib_full_path} 0 ${POS} path)\n" | |||
| " set(${lib_path} ${path} PARENT_SCOPE)\n" | |||
| " string(SUBSTRING ${lib_full_path} \"${POS}\" \"-1\" name)\n" | |||
| " set(${lib_name} ${name} PARENT_SCOPE)\n" | |||
| "endfunction(parse_lib_info)\n" | |||
| "\n" | |||
| "parse_lib_info(${MODEL_LIB} MODEL_LIB_NAME MODEL_LIB_PATH)\n" | |||
| "\n" | |||
| "if (\"${ARCH_CMAKE_PATH}\" STREQUAL \"\")\n" | |||
| " message(\"arch is x86_64\")\n" | |||
| "else ()\n" | |||
| " include(${ARCH_CMAKE_PATH})\n" | |||
| "endif ()\n" | |||
| "\n" | |||
| "include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include)\n" | |||
| "\n" | |||
| "set(CMAKE_C_FLAGS \"${CMAKE_ENABLE_C99} ${CMAKE_C_FLAGS}\")\n" | |||
| "if (\"${CMAKE_BUILD_TYPE}\" STREQUAL \"Debug\")\n" | |||
| " message(\"*******************${CMAKE_BUILD_TYPE}**********\")\n" | |||
| " set(CMAKE_C_FLAGS \"-DDebug -g -fPIC -fPIE -fvisibility=default ${CMAKE_C_FLAGS}\")\n" | |||
| "else ()\n" | |||
| " set(CMAKE_C_FLAGS \"-fPIC -fPIE -O3 -fstack-protector-strong -fomit-frame-pointer ${CMAKE_C_FLAGS}\")\n" | |||
| " set(CMAKE_C_FLAGS_Release \"${CMAKE_C_FLAGS_Release} -O3 -ffunction-sections -fdata-sections\")\n" | |||
| "endif ()\n" | |||
| "link_directories(${MODEL_LIB_PATH})\n" | |||
| "include(benchmark.cmake)\n" | |||
| "add_executable(${PROJ_NAME}_bench ${SRC_FILES})\n" | |||
| "target_link_libraries(${PROJ_NAME}_bench ${MODEL_LIB_NAME} -lm)\n"; | |||
| static const char src_cmake_lists_txt[] = | |||
| "cmake_minimum_required(VERSION 3.14)\n" | |||
| "project(${PROJ_NAME})\n" | |||
| "\n" | |||
| "message(\"project name: ${PROJ_NAME}\")\n" | |||
| "message(\"architecture cmake file path: ${ARCH_CMAKE_PATH}\")\n" | |||
| "message(\"operator lib path: ${OP_LIB}\")\n" | |||
| "message(\"operator header path: ${OP_HEADER_PATH}\")\n" | |||
| "\n" | |||
| "include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include)\n" | |||
| "include_directories(${OP_HEADER_PATH})\n" | |||
| "\n" | |||
| "include(net.cmake)\n" | |||
| "\n" | |||
| "if(\"${ARCH_CMAKE_PATH}\" STREQUAL \"\")\n" | |||
| " message(\"arch is x86_64\")\n" | |||
| "else()\n" | |||
| " include(${ARCH_CMAKE_PATH})\n" | |||
| "endif()\n" | |||
| "\n" | |||
| "set(CMAKE_C_FLAGS \"${CMAKE_ENABLE_C99} ${CMAKE_C_FLAGS}\")\n" | |||
| "if(\"${CMAKE_BUILD_TYPE}\" STREQUAL \"Debug\")\n" | |||
| " set(CMAKE_C_FLAGS \"-DDebug -g -fPIC -fPIE -fvisibility=default ${CMAKE_C_FLAGS}\")\n" | |||
| "else()\n" | |||
| " set(CMAKE_C_FLAGS \"-fPIC -fPIE -O3 -Werror -fstack-protector-strong -fomit-frame-pointer ${CMAKE_C_FLAGS}\")\n" | |||
| " set(CMAKE_C_FLAGS_Release \"${CMAKE_C_FLAGS_Release} -O3 -ffunction-sections -Werror -fdata-sections\")\n" | |||
| "endif()\n" | |||
| "\n" | |||
| "function(create_library)\n" | |||
| " add_custom_command(TARGET ${PROJ_NAME}\n" | |||
| " POST_BUILD\n" | |||
| " COMMAND rm -rf tmp\n" | |||
| " COMMAND mkdir tmp\n" | |||
| " COMMAND cd tmp && ar -x ${OP_LIB}\n" | |||
| " COMMAND echo \"raw static library ${library_name} size:\"\n" | |||
| " COMMAND ls -lh ${library_name}\n" | |||
| " COMMAND mv ${library_name} ./tmp && cd tmp && ar -x ${library_name}\n" | |||
| " COMMENT \"unzip raw static library ${library_name}\"\n" | |||
| " )\n" | |||
| " foreach (object_file ${OP_SRC})\n" | |||
| " add_custom_command(TARGET ${PROJ_NAME} POST_BUILD COMMAND mv ./tmp/${object_file} .)\n" | |||
| " endforeach ()\n" | |||
| " add_custom_command(TARGET ${PROJ_NAME}\n" | |||
| " POST_BUILD\n" | |||
| " COMMAND ar cr ${library_name} *.o\n" | |||
| " COMMAND ranlib ${library_name}\n" | |||
| " COMMAND echo \"new static library ${library_name} size:\"\n" | |||
| " COMMAND ls -lh ${library_name}\n" | |||
| " COMMAND rm -rf tmp && rm -rf *.o\n" | |||
| " COMMENT \"generate specified static library ${library_name}\"\n" | |||
| " )\n" | |||
| "endfunction(create_library)\n" | |||
| "string(CONCAT library_name \"lib\" ${PROJ_NAME} \".a\")\n" | |||
| "create_library()\n"; | |||
| #endif // MICRO_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_CMAKE_LISTS_CODE_H_ | |||
| @@ -0,0 +1,40 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MICRO_GENERATOR_CONST_BLOCK_LICENSE_INFOS_H | |||
| #define MICRO_GENERATOR_CONST_BLOCK_LICENSE_INFOS_H | |||
| namespace mindspore::lite::micro { | |||
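| // Apache 2.0 license header prepended to every generated source file. | |||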
| const char g_hwLicense[] = | |||
| "/**\n" | |||
| " * Copyright 2021 Huawei Technologies Co., Ltd\n" | |||
| " *\n" | |||
| " * Licensed under the Apache License, Version 2.0 (the \"License\");\n" | |||
| " * you may not use this file except in compliance with the License.\n" | |||
| " * You may obtain a copy of the License at\n" | |||
| " *\n" | |||
| " * http://www.apache.org/licenses/LICENSE-2.0\n" | |||
| " *\n" | |||
| " * Unless required by applicable law or agreed to in writing, software\n" | |||
| " * distributed under the License is distributed on an \"AS IS\" BASIS,\n" | |||
| " * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" | |||
| " * See the License for the specific language governing permissions and\n" | |||
| " * limitations under the License.\n" | |||
| " */\n\n"; | |||
| } // namespace mindspore::lite::micro | |||
| #endif // MICRO_GENERATOR_CONST_BLOCK_LICENSE_INFOS_H | |||
| @@ -0,0 +1,118 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MICRO_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_MICRO_TENSOR_H_ | |||
| #define MICRO_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_MICRO_TENSOR_H_ | |||
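| // Embedded microtensor.h: the minimal C runtime header (logging macros, DataType, | |||
| // Format, MicroTensor and quant-arg definitions) shared by all generated sources. | |||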
| static const char micro_tensor_h[] = | |||
| "/**\n" | |||
| " * Copyright 2019 Huawei Technologies Co., Ltd\n" | |||
| " *\n" | |||
| " * Licensed under the Apache License, Version 2.0 (the \"License\");\n" | |||
| " * you may not use this file except in compliance with the License.\n" | |||
| " * You may obtain a copy of the License at\n" | |||
| " *\n" | |||
| " * http://www.apache.org/licenses/LICENSE-2.0\n" | |||
| " *\n" | |||
| " * Unless required by applicable law or agreed to in writing, software\n" | |||
| " * distributed under the License is distributed on an \"AS IS\" BASIS,\n" | |||
| " * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" | |||
| " * See the License for the specific language governing permissions and\n" | |||
| " * limitations under the License.\n" | |||
| " */\n" | |||
| "\n" | |||
| "#ifndef MSMICRO_TENSOR_H\n" | |||
| "#define MSMICRO_TENSOR_H\n" | |||
| "\n" | |||
| "#include <stdlib.h>\n" | |||
| "#include <string.h>\n" | |||
| "#include <stdio.h>\n" | |||
| "#include <stdbool.h>\n" | |||
| "#include <stdint.h>\n" | |||
| "\n" | |||
| "inline bool IsPrint() {\n" | |||
| " char *env = getenv(\"GLOG_v\");\n" | |||
| " if (env == NULL) {\n" | |||
| " return false;\n" | |||
| " }\n" | |||
| " return strcmp(env, \"1\") == 0;\n" | |||
| "}\n" | |||
| "\n" | |||
| "#define MICRO_INFO(content, args...) \\\n" | |||
| " { \\\n" | |||
| " if (IsPrint()) { \\\n" | |||
| " printf(\"[INFO] %s|%d: \" #content \"\\r\\n\", __func__, __LINE__, ##args); \\\n" | |||
| " } \\\n" | |||
| " }\n" | |||
| "#define MICRO_ERROR(content, args...) \\\n" | |||
| " { printf(\"[ERROR] %s|%d: \" #content \"\\r\\n\", __func__, __LINE__, ##args); }\n" | |||
| "\n" | |||
| "enum STATUS {\n" | |||
| " RET_OK = 0,\n" | |||
| " RET_ERROR = 1,\n" | |||
| "};\n" | |||
| "\n" | |||
| "enum DataType {\n" | |||
| " DataType_DT_FLOAT = 0,\n" | |||
| " DataType_DT_FLOAT16 = 1,\n" | |||
| " DataType_DT_INT8 = 2,\n" | |||
| " DataType_DT_INT32 = 3,\n" | |||
| " DataType_DT_UINT8 = 4,\n" | |||
| " DataType_DT_INT16 = 5,\n" | |||
| " DataType_DT_UINT32 = 8,\n" | |||
| " DataType_DT_INT64 = 9,\n" | |||
| " DataType_DT_UINT16 = 10,\n" | |||
| " DataType_DT_UNDEFINED = 16,\n" | |||
| " DataType_MIN = DataType_DT_FLOAT,\n" | |||
| " DataType_MAX = DataType_DT_UNDEFINED\n" | |||
| "};\n" | |||
| "\n" | |||
| "enum Format {\n" | |||
| " Format_NCHW = 0,\n" | |||
| " Format_NHWC = 1,\n" | |||
| " Format_HWKC = 2,\n" | |||
| " Format_HWCK = 3,\n" | |||
| " Format_KCHW = 4,\n" | |||
| " Format_CKHW = 5,\n" | |||
| " Format_KHWC = 6,\n" | |||
| " Format_CHWK = 7,\n" | |||
| " Format_NC4HW4 = 100,\n" | |||
| " Format_NUM_OF_FORMAT = 101,\n" | |||
| " Format_MIN = Format_NCHW,\n" | |||
| " Format_MAX = Format_NUM_OF_FORMAT\n" | |||
| "};\n" | |||
| "\n" | |||
| "typedef struct {\n" | |||
| " enum DataType type;\n" | |||
| " enum Format format;\n" | |||
| " int ndim;\n" | |||
| " int *dim;\n" | |||
| " void *data;\n" | |||
| "} MicroTensor;\n" | |||
| "\n" | |||
| "typedef struct {\n" | |||
| " int num;\n" | |||
| " MicroTensor *tensor;\n" | |||
| "} MicroTensorList;\n" | |||
| "\n" | |||
| "typedef struct {\n" | |||
| " float in_scale;\n" | |||
| " float out_scale;\n" | |||
| " int in_zero_point;\n" | |||
| " int out_zero_point;\n" | |||
| "} GraphQuantArgs;\n" | |||
| "\n" | |||
| "#endif // MSMICRO_TENSOR_H\n"; | |||
| #endif // MICRO_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_MICRO_TENSOR_H_ | |||
| @@ -0,0 +1,341 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "coder/generator/generator.h" | |||
| #include <sys/stat.h> | |||
| #include <map> | |||
| #include <set> | |||
| #include <fstream> | |||
| #include "coder/generator/utils/generator_utils.h" | |||
| #include "coder/generator/const_blocks/cmake_lists_code.h" | |||
| #include "coder/generator/const_blocks/bench_debug_utils.h" | |||
| #include "coder/generator/const_blocks/bench_load_input.h" | |||
| #include "coder/generator/const_blocks/micro_tensor.h" | |||
| #include "coder/generator/const_blocks/license.h" | |||
| #include "micro/coder/log.h" | |||
| namespace mindspore::lite::micro { | |||
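| // The constructor derives every output file name from the configured module name | |||
| // and splits the generated project into src/, include/ and benchmark/ directories. | |||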
| Generator::Generator(std::unique_ptr<CoderContext> ctx) { | |||
| ctx_ = std::move(ctx); | |||
| this->config_ = Configurator::GetInstance(); | |||
| std::string module_name = config_->module_name(); | |||
| this->net_inc_hfile_ = module_name + ".h"; | |||
| this->net_src_cfile_ = module_name + ".c"; | |||
| this->net_weight_hfile_ = module_name + "_weight.h"; | |||
| this->net_main_cfile_ = module_name + "_benchmark.c"; | |||
| this->net_src_file_path_ = config_->code_path() + "/src/"; | |||
| this->net_inc_file_path_ = config_->code_path() + "/include/"; | |||
| this->net_main_file_path_ = config_->code_path() + "/benchmark/"; | |||
| origin_umask_ = umask(user_umask_); | |||
| MS_LOG(DEBUG) << "origin umask: " << origin_umask_ << ", user umask: " << user_umask_; | |||
| } | |||
| Generator::~Generator() { (void)umask(origin_umask_); } | |||
| int Generator::CodeGraphInOutQuanArgs(std::ofstream &ofs) { | |||
| std::vector<Tensor *> graph_inputs = ctx_->graph_inputs(); | |||
| if (graph_inputs.empty()) { | |||
| MS_LOG(ERROR) << "this graph has no input tensor"; | |||
| return RET_ERROR; | |||
| } | |||
| Tensor *in_tensor = graph_inputs.at(kInputIndex); | |||
| MS_CHECK_PTR(in_tensor); | |||
| std::vector<Tensor *> graph_outputs = ctx_->graph_outputs(); | |||
| if (graph_outputs.empty()) { | |||
| MS_LOG(ERROR) << "this graph has no output tensor"; | |||
| return RET_ERROR; | |||
| } | |||
| Tensor *out_tensor = graph_outputs.at(kOutputIndex); | |||
| MS_CHECK_PTR(out_tensor); | |||
| std::vector<QuantArg> in_quant_args = in_tensor->quant_params(); | |||
| std::vector<QuantArg> out_quant_args = out_tensor->quant_params(); | |||
| if (in_quant_args.empty() || out_quant_args.empty()) { | |||
| MS_LOG(WARNING) << "in_quant_args or out_quant_args is empty"; | |||
| return RET_OK; | |||
| } | |||
| ofs << "GraphQuantArgs " << config_->module_name() << "_GetInOutQuantArgs() {\n" | |||
| << "\t\t" | |||
| << "GraphQuantArgs quan_args = { " << in_quant_args.at(0).scale << ", " << out_quant_args.at(0).scale << ", " | |||
| << in_quant_args.at(0).zeroPoint << ", " << out_quant_args.at(0).zeroPoint << "};\n" | |||
| << "\t\t" | |||
| << "return quan_args;\n" | |||
| << "}\n"; | |||
| return RET_OK; | |||
| } | |||
| int Generator::CodeNetFileInputOutput(std::ofstream &ofs) { | |||
| // input tensors | |||
| ofs << "\n// set input tensors\n"; | |||
| std::vector<Tensor *> inputs = ctx_->graph_inputs(); | |||
| for (size_t i = 0; i < inputs.size(); ++i) { | |||
| ofs << "\nstatic const unsigned char *" << ctx_->input_name() + std::to_string(i) << " = 0;\n"; | |||
| } | |||
| size_t size = inputs.size(); | |||
| ofs << "int " << config_->module_name() << "_SetInputs(const void **inputs, int num) {\n" | |||
| << "if (inputs == NULL) {\n" | |||
| "\treturn RET_ERROR;\n" | |||
| "\t}\n" | |||
| << "\tif (num !=" << size << ") { return RET_ERROR;}\n"; | |||
| for (size_t i = 0; i < size; ++i) { | |||
| ofs << "\t" << ctx_->input_name() + std::to_string(i) << " = inputs[" << i << "];\n"; | |||
| } | |||
| ofs << "\treturn RET_OK;\n}\n"; | |||
| // output tensors | |||
| ofs << "\n// output tensors\n"; | |||
| std::vector<Tensor *> outputs = ctx_->graph_outputs(); | |||
| size_t output_num = outputs.size(); | |||
| std::string output_name = ctx_->output_name(); | |||
| ofs << "const MicroTensorList* " << config_->module_name() << "_GetOutputs() {\n" | |||
| << " static MicroTensor " << output_name << "[" << output_num << "] ;\n"; | |||
| if (PrintMicroTensors(ofs, outputs, output_name, ctx_->tensors_map()) != RET_OK) { | |||
| return RET_ERROR; | |||
| } | |||
| ofs << " static MicroTensorList " << config_->module_name() << "_TensorArray;\n" | |||
| << " " << config_->module_name() << "_TensorArray.num = " << output_num << ";\n" | |||
| << " " << config_->module_name() << "_TensorArray.tensor = &" << output_name << "[0];\n" | |||
| << " return &" << config_->module_name() << "_TensorArray; \n}\n"; | |||
| return RET_OK; | |||
| } | |||
| void Generator::CodeNetFileMembuffer(std::ofstream &ofs) { | |||
| // memory buffer | |||
| ofs << "\n// Get MemBuffer Size\n" | |||
| << "unsigned int " << config_->module_name() << "_GetBufferSize() {\n" | |||
| << "\t return " << ctx_->total_buffer_size() << "; \n}\n"; | |||
| ofs << "\n// set Membuffer address\n"; | |||
| ofs << "int " << config_->module_name() << "_SetBuffer( void *buffer) { \n"; | |||
| ofs << "\tif (buffer == NULL) {\n" | |||
| "\t\tMICRO_ERROR(\"memory buffer is NULL\");\n" | |||
| "\t\treturn RET_ERROR;\n" | |||
| "\t}\n"; | |||
| ofs << "\t" << ctx_->buffer_name() | |||
| << "= buffer; \n" | |||
| "\treturn RET_OK;"; | |||
| ofs << "}\n"; | |||
| } | |||
| void Generator::CodeNetFileInclude(std::ofstream &ofs) { | |||
| ofs << g_hwLicense; | |||
| // the microtensor.h header must be copied to the generated include directory | |||
| ofs << "#include \"microtensor.h\"\n"; | |||
| // copy debug head files to cmake include files | |||
| ofs << "#include \"" << net_weight_hfile_ << "\"\n" | |||
| << "#include \"" << net_inc_hfile_ << "\"\n"; | |||
| if (config_->debug_mode()) { | |||
| ofs << "#include \"../benchmark/debug_utils.h\"\n"; | |||
| } | |||
| } | |||
| void Generator::CodeNetRunFunc(std::ofstream &ofs) { | |||
| // generate net predict code | |||
| ofs << "void " << config_->module_name() << "_Inference() {\n"; | |||
| if (config_->code_mode() == CodeMode::Code_Android) { | |||
| ofs << "int thread_num = GetCurrentThreadNum(THREAD_POOL_DEFAULT);\n"; | |||
| } | |||
| for (const auto &codeBlock : ctx_->code_blocks()) { | |||
| ofs << "\t{\n"; | |||
| ofs << codeBlock; | |||
| ofs << "\t}\n"; | |||
| } | |||
| ofs << "}\n"; | |||
| } | |||
| int Generator::CodeTestCMakeFile() { | |||
| std::string net_main_cmake_file_path = net_main_file_path_; | |||
| std::string test_cmake_file = net_main_cmake_file_path + "benchmark.cmake"; | |||
| std::ofstream of(test_cmake_file); | |||
| if (!of.is_open()) { | |||
| MS_LOG(ERROR) << "open file error " << test_cmake_file; | |||
| return RET_ERROR; | |||
| } | |||
| MS_LOG(INFO) << "write " << test_cmake_file; | |||
| of << "include_directories(${CMAKE_CURRENT_SOURCE_DIR})\n"; | |||
| of << "include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include/)\n"; | |||
| of << "set(SRC_FILES\n"; | |||
| of << "\t\t" << config_->module_name() + "_benchmark.c\n"; | |||
| of << "\t\tload_input.c\n"; | |||
| of << "\t\tdebug_utils.c\n"; | |||
| of << ")\n"; | |||
| of.close(); | |||
| return RET_OK; | |||
| } | |||
| int Generator::CodeCMakeExecutableFile(std::ofstream &ofs) const { | |||
| ofs << "include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include/)\n"; | |||
| if (config_->target() == kARM32M) { | |||
| IncludeCmsisDirectories(ofs); | |||
| } | |||
| ofs << "set(OP_SRC\n"; | |||
| for (const std::string &c_file : ctx_->c_files()) { | |||
| ofs << " " << c_file << ".o\n"; | |||
| } | |||
| ofs << " " << config_->module_name() << "_weight.c.o\n"; | |||
| ofs << " " << config_->module_name() << ".c.o\n"; | |||
| ofs << ")\n"; | |||
| std::set<std::string> kernel_cmake_asm_set_files = ctx_->asm_files(); | |||
| if (!kernel_cmake_asm_set_files.empty()) { | |||
| ofs << "set(ASSEMBLY_SRC\n"; | |||
| for (const std::string &asm_file : kernel_cmake_asm_set_files) { | |||
| ofs << " " << asm_file << ".o\n"; | |||
| } | |||
| ofs << ")\n"; | |||
| ofs << "set_property(SOURCE ${ASSEMBLY_SRC} PROPERTY LANGUAGE C)\n"; | |||
| ofs << "list(APPEND OP_SRC ${ASSEMBLY_SRC})\n"; | |||
| } | |||
| ofs << "file(GLOB NET_SRC ${CMAKE_CURRENT_SOURCE_DIR}/*.c)\n"; | |||
| ofs << "add_library(${PROJ_NAME} STATIC ${NET_SRC})\n"; | |||
| return RET_OK; | |||
| } | |||
| int Generator::CodeCMakeFile() { | |||
| std::string src_cmake_file = net_src_file_path_ + cmake_file_name_; | |||
| std::ofstream of(src_cmake_file); | |||
| if (!of.is_open()) { | |||
| MS_LOG(ERROR) << "open file error " << src_cmake_file; | |||
| return RET_ERROR; | |||
| } | |||
| MS_LOG(INFO) << "write " << src_cmake_file.c_str(); | |||
| if (CodeCMakeExecutableFile(of) != RET_OK) { | |||
| of.close(); | |||
| return RET_ERROR; | |||
| } | |||
| of.close(); | |||
| return RET_OK; | |||
| } | |||
| int Generator::CodeStaticContent() { | |||
| const std::vector<std::pair<std::string, std::string>> static_blocks = { | |||
| {net_inc_file_path_ + "microtensor.h", micro_tensor_h}, | |||
| {net_src_file_path_ + "CMakeLists.txt", src_cmake_lists_txt}, | |||
| {net_main_file_path_ + "debug_utils.h", debug_utils_h}, | |||
| {net_main_file_path_ + "debug_utils.c", debug_utils_c}, | |||
| {net_main_file_path_ + "load_input.h", load_input_h}, | |||
| {net_main_file_path_ + "load_input.c", load_input_c}, | |||
| {net_main_file_path_ + "CMakeLists.txt", bench_cmake_lists_txt}}; | |||
| for (const auto &static_block : static_blocks) { | |||
| std::string file_name = static_block.first; | |||
| std::string content = static_block.second; | |||
| if (WriteContentToFile(file_name, content) != RET_OK) { | |||
| return RET_ERROR; | |||
| } | |||
| } | |||
| return RET_OK; | |||
| } | |||
| void Generator::CodeWeightInitFunc(const std::map<std::string, Tensor *> &address_map, std::ofstream &ofs) { | |||
| ofs << "int " << config_->module_name() << "_Init(void *weight_buffer, int weight_size) {\n" | |||
| << "\tif (weight_buffer == NULL) {\n" | |||
| "\t\tMICRO_ERROR(\"weight buffer is NULL\");\n" | |||
| << "\t\treturn RET_ERROR;\n" | |||
| << "\t}\n"; | |||
| CodeReadModelParams(ctx_->saved_weights(), ctx_->tensors_map(), ofs); | |||
| for (const auto &block : ctx_->init_contents()) { | |||
| ofs << "{\n" << block << "\n}\n"; | |||
| } | |||
| ofs << "return RET_OK;"; | |||
| ofs << "}\n\n"; | |||
| } | |||
| void Generator::CodeFreeResource(const std::map<std::string, Tensor *> &address_map, std::ofstream &ofs) const { | |||
| ofs << "\tvoid *allocated[] = {"; | |||
| size_t num = 0; | |||
| for (const auto &item : address_map) { | |||
| std::string name = item.first; | |||
| Tensor *tensor = item.second; | |||
| if (tensor->data_c() != nullptr && tensor->category() != Tensor::Category::CONST_TENSOR) { | |||
| ofs << name << ", "; | |||
| num++; | |||
| } | |||
| } | |||
| ofs << "\t};\n"; | |||
| ofs << "\tfor (int i = 0; i < " << num << "; ++i) {\n"; | |||
| ofs << "\t\tfree(allocated[i]);\n"; | |||
| ofs << "\t\tallocated[i] = NULL;\n"; | |||
| ofs << "\t}\n"; | |||
| } | |||
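| // CodeWeightFile writes <module>_weight.h/.c; with a separate weight file the | |||
| // weights are serialized to <module>.net and restored at runtime by <module>_Init, | |||
| // otherwise they are emitted directly as C arrays. | |||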
| int Generator::CodeWeightFile() { | |||
| // weight header file | |||
| std::string hfile = net_src_file_path_ + net_weight_hfile_; | |||
| std::ofstream hofs(hfile); | |||
| if (!hofs.is_open()) { | |||
| MS_LOG(ERROR) << "open file error " << hfile; | |||
| return RET_ERROR; | |||
| } | |||
| hofs << g_hwLicense; | |||
| for (const auto &h_file : ctx_->h_files()) { | |||
| hofs << "#include \"" << h_file << "\"\n"; | |||
| } | |||
| hofs << "#include <stdlib.h>\n"; | |||
| hofs << "#include <string.h>\n"; | |||
| hofs << "#include \"microtensor.h\"\n\n"; | |||
| hofs << "extern unsigned char *" << ctx_->buffer_name() << ";\n"; | |||
| // weight source file | |||
| std::string cfile = net_src_file_path_ + config_->module_name() + "_weight.c"; | |||
| std::ofstream cofs(cfile); | |||
| if (!cofs.is_open()) { | |||
| MS_LOG(ERROR) << "open file error " << cfile; | |||
| return RET_ERROR; | |||
| } | |||
| cofs << g_hwLicense; | |||
| cofs << "#include \"" << net_weight_hfile_ << "\"\n\n"; | |||
| cofs << "unsigned char * " << ctx_->buffer_name() << " = 0 ; \n"; | |||
| // reverse key and value of tensors_map | |||
| std::map<std::string, Tensor *> address_map; | |||
| for (const auto &item : ctx_->tensors_map()) { | |||
| address_map.insert(std::make_pair(item.second, item.first)); | |||
| } | |||
| if (config_->is_weight_file()) { | |||
| std::string net_file = net_src_file_path_ + config_->module_name() + ".net"; | |||
| if (SaveDataToNet(ctx_->saved_weights(), net_file) != RET_OK) { | |||
| hofs.close(); | |||
| cofs.close(); | |||
| return RET_ERROR; | |||
| } | |||
| CodeModelParamsDefine(address_map, hofs, cofs); | |||
| CodeWeightInitFunc(address_map, cofs); | |||
| } else { | |||
| CodeModelParamsDefineAndData(ctx_->saved_weights(), hofs, cofs); | |||
| } | |||
| hofs.close(); | |||
| cofs.close(); | |||
| return RET_OK; | |||
| } | |||
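| // GenerateCode drives the whole pipeline: net header and source, weight files, | |||
| // the two CMake fragments, the benchmark main and the static support files. | |||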
| int Generator::GenerateCode() { | |||
| MS_CHECK_RET_CODE(CodeNetHFile(), "code net h file failed."); | |||
| MS_CHECK_RET_CODE(CodeNetCFile(), "code net c file failed."); | |||
| MS_CHECK_RET_CODE(CodeWeightFile(), "code weight file failed."); | |||
| MS_CHECK_RET_CODE(CodeCMakeFile(), "code net cmake file failed."); | |||
| MS_CHECK_RET_CODE(CodeTestFile(), "code test file failed."); | |||
| MS_CHECK_RET_CODE(CodeTestCMakeFile(), "code test cmake file failed."); | |||
| MS_CHECK_RET_CODE(CodeStaticContent(), "code static content failed."); | |||
| return RET_OK; | |||
| } | |||
| } // namespace mindspore::lite::micro | |||
| @@ -0,0 +1,87 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_MICRO_CODER_GENERATOR_H_ | |||
| #define MINDSPORE_MICRO_CODER_GENERATOR_H_ | |||
| #include <sys/stat.h> | |||
| #include <fstream> | |||
| #include <functional> | |||
| #include <map> | |||
| #include <memory> | |||
| #include <sstream> | |||
| #include <string> | |||
| #include <utility> | |||
| #include <vector> | |||
| #include "include/errorcode.h" | |||
| #include "src/tensor.h" | |||
| #include "coder/coder_config.h" | |||
| #include "coder/coder_context.h" | |||
| #include "coder/utils/print_utils.h" | |||
| namespace mindspore::lite::micro { | |||
| constexpr int kWarmUp = 3; | |||
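| // Abstract base of the code generators: subclasses such as InferenceGenerator | |||
| // supply the net header, net source and benchmark emitters. | |||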
| class Generator { | |||
| public: | |||
| explicit Generator(std::unique_ptr<CoderContext> ctx); | |||
| virtual ~Generator(); | |||
| int GenerateCode(); | |||
| protected: | |||
| virtual int CodeTestFile() = 0; | |||
| virtual int CodeNetHFile() = 0; | |||
| virtual int CodeNetCFile() = 0; | |||
| virtual int CodeCMakeFile(); | |||
| virtual int CodeWeightFile(); | |||
| void CodeNetFileInclude(std::ofstream &ofs); | |||
| int CodeNetFileInputOutput(std::ofstream &ofs); | |||
| void CodeNetFileMembuffer(std::ofstream &ofs); | |||
| void CodeNetRunFunc(std::ofstream &ofs); | |||
| int CodeGraphInOutQuanArgs(std::ofstream &ofs); | |||
| void CodeFreeResource(const std::map<std::string, Tensor *> &address_map, std::ofstream &ofs) const; | |||
| Configurator *config_{nullptr}; | |||
| std::unique_ptr<CoderContext> ctx_{nullptr}; | |||
| bool is_get_quant_args_{false}; | |||
| std::string net_inc_hfile_; | |||
| std::string net_src_cfile_; | |||
| std::string net_main_cfile_; | |||
| std::string net_weight_hfile_; | |||
| std::string net_inc_file_path_; | |||
| std::string net_src_file_path_; | |||
| std::string net_main_file_path_; | |||
| private: | |||
| int CodeTestCMakeFile(); | |||
| int CodeStaticContent(); | |||
| int CodeCMakeExecutableFile(std::ofstream &ofs) const; | |||
| void CodeWeightInitFunc(const std::map<std::string, Tensor *> &address_map, std::ofstream &ofs); | |||
| std::string cmake_file_name_{"net.cmake"}; | |||
| // umask applied while writing the generated files | |||
| mode_t user_umask_ = 0022; | |||
| // the process's original umask, restored in the destructor | |||
| mode_t origin_umask_ = 0000; | |||
| }; | |||
| } // namespace mindspore::lite::micro | |||
| #endif // MINDSPORE_MICRO_CODER_GENERATOR_H_ | |||
| @@ -0,0 +1,300 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "coder/generator/inference/inference_generator.h" | |||
| #include <vector> | |||
| #include <map> | |||
| #include <numeric> | |||
| #include <set> | |||
| #include <string> | |||
| #include "coder/generator/const_blocks/license.h" | |||
| namespace mindspore::lite::micro { | |||
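| // CodeNetHFile emits the generated model's public C API: input and buffer setters, | |||
| // output getters, optional quant-arg helpers and the inference entry point. | |||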
| int InferenceGenerator::CodeNetHFile() { | |||
| std::string net_include_file = net_inc_file_path_ + net_inc_hfile_; | |||
| std::ofstream ofs(net_include_file); | |||
| if (!ofs.is_open()) { | |||
| MS_LOG(ERROR) << "open file error " << net_include_file.c_str(); | |||
| return RET_ERROR; | |||
| } | |||
| ofs << g_hwLicense; | |||
| if (config_->code_mode() == CodeMode::Code_Android) { | |||
| ofs << "#include \"src/runtime/thread_pool.h\"\n"; | |||
| } | |||
| ofs << "#include \"microtensor.h\"\n\n"; | |||
| ofs << "/**\n" | |||
| << " * set input tensors\n" | |||
| << " * @param inputs, the input data ptr's array of the model, the tensors' count of input may be greater than " | |||
| "one.\n" | |||
| << " * @param num, the input data's number of the model.\n" | |||
| << " **/\n" | |||
| << "int " << config_->module_name() << "_SetInputs(const void **inputs, int num);\n\n"; | |||
| ofs << "/**\n" | |||
| << " * get output tensor of the model \n" | |||
| << " **/\n" | |||
| << "const MicroTensorList *" << config_->module_name() << "_GetOutputs();\n\n"; | |||
| if (is_get_quant_args_) { | |||
| std::vector<Tensor *> graph_inputs = ctx_->graph_inputs(); | |||
| if (graph_inputs.empty()) { | |||
| MS_LOG(ERROR) << "this graph has no input tensor"; | |||
| ofs.close(); | |||
| return RET_ERROR; | |||
| } | |||
| size_t total_input_size = | |||
| std::accumulate(graph_inputs.begin(), graph_inputs.end(), 0UL, | |||
| [](size_t total, const Tensor *tensor) { return total + tensor->Size(); }); | |||
| ofs << "/**\n"; | |||
| ofs << " * get input sizes of the model \n"; | |||
| ofs << " **/\n"; | |||
| ofs << "inline int " << config_->module_name() << "_GetInputSizes() {\n" | |||
| << "\t\t" | |||
| << "return " << total_input_size << ";\n" | |||
| << "}\n\n"; | |||
| ofs << "/**\n"; | |||
| ofs << " * get input and output QuantArgs of the model \n"; | |||
| ofs << " **/\n"; | |||
| ofs << "GraphQuantArgs " << config_->module_name() << "_GetInOutQuantArgs();\n\n"; | |||
| } | |||
| if (config_->is_weight_file()) { | |||
| ofs << "/**\n" | |||
| << " * @param weightBuffer, the ptr of the model's parameters\n" | |||
| << " * @param weightSize, the size of the model's parameters\n" | |||
| << " **/\n" | |||
| << "int " << config_->module_name() << "_Init(void *weightBuffer, int weightSize);\n\n"; | |||
| } | |||
| ofs << "/**\n" | |||
| << " * free the memory of packed weights and model's workspace buffer, input address\n" | |||
| << " **/\n" | |||
| << "void " << config_->module_name() << "_FreeResource();\n"; | |||
| ofs << "/**\n" | |||
| << " * get the memory space size of the inference.\n" | |||
| << " **/\n" | |||
| << "unsigned int " << config_->module_name() << "_GetBufferSize();\n"; | |||
| ofs << "/**\n" | |||
| << " * set the memory space for the inference\n" | |||
| << " **/\n" | |||
| << "int " << config_->module_name() << "_SetBuffer(void *buffer);\n\n"; | |||
| ofs << "/**\n" | |||
| << " * net inference function\n" | |||
| << " **/\n" | |||
| << "void " << config_->module_name() << "_Inference();\n\n"; | |||
| return RET_OK; | |||
| } | |||
| int InferenceGenerator::CodeNetCFile() { | |||
| std::string net_impl_file = net_src_file_path_ + net_src_cfile_; | |||
| std::ofstream ofs(net_impl_file); | |||
| if (!ofs.is_open()) { | |||
| MS_LOG(ERROR) << "open file error " << net_impl_file.c_str(); | |||
| return RET_ERROR; | |||
| } | |||
| MS_LOG(DEBUG) << "write " << net_impl_file.c_str(); | |||
| CodeNetFileInclude(ofs); | |||
| CodeNetFileMembuffer(ofs); | |||
| if (is_get_quant_args_) { | |||
| if (CodeGraphInOutQuanArgs(ofs) != RET_OK) { | |||
| MS_LOG(ERROR) << "CodeGraphInOutQuanArgs failed"; | |||
| ofs.close(); | |||
| return RET_ERROR; | |||
| } | |||
| } | |||
| if (CodeNetFileInputOutput(ofs) != RET_OK) { | |||
| ofs.close(); | |||
| return RET_ERROR; | |||
| } | |||
| ofs << "void " << config_->module_name() << "_FreeResource() {\n"; | |||
| ofs << "\t" << ctx_->buffer_name() << "= NULL;\n"; | |||
| std::vector<Tensor *> inputs = ctx_->graph_inputs(); | |||
| size_t size = inputs.size(); | |||
| for (size_t i = 0; i < size; ++i) { | |||
| ofs << "\t" << ctx_->input_name() + std::to_string(i) << " = NULL;\n"; | |||
| } | |||
| std::map<std::string, Tensor *> address_map; | |||
| for (const auto &item : ctx_->tensors_map()) { | |||
| address_map.insert(std::make_pair(item.second, item.first)); | |||
| } | |||
| if (config_->is_weight_file()) { | |||
| CodeFreeResource(address_map, ofs); | |||
| } | |||
| ofs << "}\n"; | |||
| CodeNetRunFunc(ofs); | |||
| ofs.close(); | |||
| return RET_OK; | |||
| } | |||
| void InferenceGenerator::CodeTestRelevantHeader(std::ofstream &code_test_ofs) { | |||
| code_test_ofs << g_hwLicense; | |||
| code_test_ofs << "#include <stdio.h>\n" | |||
| "#include <string.h>\n" | |||
| "#include <stdlib.h>\n" | |||
| "#include <stdint.h>\n" | |||
| "#include \"microtensor.h\"\n" | |||
| "#include \"load_input.h\"\n" | |||
| "#include \"debug_utils.h\"\n"; | |||
| code_test_ofs << "#include \"" << net_inc_hfile_ << "\"\n"; | |||
| code_test_ofs << "/**\n" | |||
| " * mindspore micro params usage:\n" | |||
| " * args[0]: executable file\n" | |||
| " * args[1]: inputs .bin file\n" | |||
| " * args[2]: model weight .net file\n" | |||
| " * args[3]: loop count for performance testing\n" | |||
| " * args[4]: runtime thread num\n" | |||
| " * args[5]: runtime thread bind mode\n" | |||
| " */\n"; | |||
| code_test_ofs << "\n// Warm up. \n" | |||
| << "void " << config_->module_name() << "_WarmUp() {\n" | |||
| << "\tfor (int i = 0; i < " << kWarmUp << "; ++i) {\n" | |||
| << "\t\t" << config_->module_name() << "_Inference();\n" | |||
| << "\t}\n" | |||
| << "}\n"; | |||
| } | |||
| void InferenceGenerator::CodeTestRelevantTile(std::ofstream &code_test_ofs) { | |||
| if (config_->code_mode() == Code_Android) { | |||
| code_test_ofs << " DestroyThreadPool(THREAD_POOL_DEFAULT);\n"; | |||
| } | |||
| code_test_ofs << " // print model outputs \n"; | |||
| code_test_ofs << " const MicroTensorList *outs = " << config_->module_name() << "_GetOutputs();\n"; | |||
| code_test_ofs << " for (int i = 0; i < outs->num; ++i) {\n" | |||
| " MicroTensor *tensor = outs->tensor + i;\n" | |||
| " PrintTensorData(tensor);\n" | |||
| " }\n"; | |||
| code_test_ofs << " printf(\"" << config_->module_name() << " inference End.\\n\");\n"; | |||
| code_test_ofs << " free(buffer);\n"; | |||
| code_test_ofs << " " << config_->module_name() << "_FreeResource();\n"; | |||
| std::vector<Tensor *> inputs = ctx_->graph_inputs(); | |||
| size_t inputs_num = inputs.size(); | |||
| code_test_ofs << " // this code_block can be ignore \n"; | |||
| code_test_ofs << " for (int i = 0; i < " << inputs_num | |||
| << "; ++i) {\n" | |||
| " free(inputs_binbuf[i]);\n" | |||
| " }\n"; | |||
| code_test_ofs << " return 0;\n"; | |||
| code_test_ofs << "}\n"; | |||
| } | |||
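| // CodeTestFile generates the benchmark main(): it loads the input binaries, binds | |||
| // the workspace buffer, optionally loads weights, then runs timed inference loops. | |||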
| int InferenceGenerator::CodeTestFile() { | |||
| std::string net_main_impl_file = net_main_file_path_ + net_main_cfile_; | |||
| std::ofstream ofs(net_main_impl_file); | |||
| if (!ofs.is_open()) { | |||
| MS_LOG(ERROR) << "open file error " << net_main_impl_file.c_str(); | |||
| return RET_ERROR; | |||
| } | |||
| MS_LOG(INFO) << "write " << net_main_impl_file.c_str(); | |||
| CodeTestRelevantHeader(ofs); | |||
| ofs << "int main(int argc, char **argv) {\n" | |||
| " if (argc < 2) { printf(\"There is not input and out file.\\n\"); }\n"; | |||
| ofs << " printf(\"" << config_->module_name() << " inference Start.\\n\");\n"; | |||
| std::vector<Tensor *> inputs = ctx_->graph_inputs(); | |||
| size_t inputs_num = inputs.size(); | |||
| for (size_t i = 0; i < inputs_num; ++i) { | |||
| Tensor *input = inputs[i]; | |||
| std::vector<int> shape = input->shape(); | |||
| ofs << " // model's input_shape is [ "; | |||
| for (int sh : shape) { | |||
| ofs << sh << ", "; | |||
| } | |||
| ofs << "];\n"; | |||
| } | |||
| ofs << " void *inputs_binbuf[" << inputs_num << "];\n"; | |||
| ofs << " int inputs_size[" << inputs_num << "] = {"; | |||
| for (size_t i = 0; i < inputs_num; ++i) { | |||
| Tensor *input = inputs[i]; | |||
| ofs << input->Size() << ", "; | |||
| } | |||
| ofs << "};\n"; | |||
| ofs << " int ret = ReadInputsFile(argv[1], inputs_binbuf, inputs_size, " << inputs_num | |||
| << ");\n" | |||
| " if (ret != RET_OK) {\n" | |||
| " MICRO_ERROR(\"read inputs file failed\");\n" | |||
| " return RET_ERROR;\n" | |||
| " }\n"; | |||
| ofs << " ret = " << config_->module_name() << "_SetInputs((const void **)inputs_binbuf, " << inputs_num | |||
| << ");\n" | |||
| " if (ret != RET_OK) {\n" | |||
| " MICRO_ERROR(\"set inputs failed\");\n" | |||
| " return RET_ERROR;\n" | |||
| " }\n"; | |||
| ofs << " unsigned int total_buffer_size = " << config_->module_name() << "_GetBufferSize();\n"; | |||
| ofs << " void *buffer = malloc(total_buffer_size);\n"; | |||
| ofs << " if (buffer == NULL ){\n" | |||
| " MICRO_ERROR(\"malloc memory buffer failed\");\n" | |||
| " return RET_ERROR;\n" | |||
| " }\n"; | |||
| ofs << " ret = " << config_->module_name() | |||
| << "_SetBuffer(buffer);\n" | |||
| " if (ret != RET_OK) {\n" | |||
| " MICRO_ERROR(\"set inputs failed\");\n" | |||
| " return RET_ERROR;" | |||
| " }\n"; | |||
| if (config_->is_weight_file()) { | |||
| ofs << " int weightSize = 0;\n"; | |||
| ofs << " void *weightBuffer = ReadInputData(argv[2], &weightSize); \n"; | |||
| ofs << " if(" << config_->module_name() << "_Init(weightBuffer, weightSize) != RET_OK) {\n"; | |||
| ofs << " printf(\"model init failed\");\n"; | |||
| ofs << " " << config_->module_name() << "_FreeResource();\n"; | |||
| ofs << " return RET_ERROR;\n"; | |||
| ofs << " }\n"; | |||
| ofs << " free(weightBuffer);\n"; | |||
| ofs << " weightBuffer = NULL;\n"; | |||
| } | |||
| if (config_->code_mode() == CodeMode::Code_Android) { | |||
| ofs << " int thread_num = 4;\n" | |||
| " BindMode bind_mode = NO_BIND_MODE;\n" | |||
| " if (argc >= 6) {\n" | |||
| " thread_num = atoi(argv[4]);\n" | |||
| " bind_mode = atoi(argv[5]);\n" | |||
| " }\n" | |||
| " ret = ConfigThreadPool(THREAD_POOL_DEFAULT, thread_num, bind_mode);\n" | |||
| " if (ret != 0) {\n" | |||
| " printf(\"create thread pool failed\");\n" | |||
| " }\n"; | |||
| } | |||
| ofs << " if (argc >= 4) {\n" | |||
| << " " << config_->module_name() << "_WarmUp();\n" | |||
| << " uint64_t timeAvg = 0;\n" | |||
| << " int loop_count = atoi(argv[3]);\n" | |||
| << " printf(\"\\n### begin to run %d\", loop_count);\n" | |||
| << " for (int i = 0; i < loop_count; i++) {\n" | |||
| << " uint64_t runBegin = GetTimeUs();\n" | |||
| << " " << config_->module_name() << "_Inference();\n" | |||
| << " uint64_t runEnd = GetTimeUs();\n" | |||
| << " uint64_t time = runEnd - runBegin;\n" | |||
| << " timeAvg += time;\n" | |||
| << " }\n" | |||
| << " float cunCost = (float)timeAvg / 1000.0f;\n" | |||
| << " printf(\"\\n###Run over, total time:\\t %5.5f ms.\\n\", cunCost);\n" | |||
| << " printf(\"\\n###Run over, predict per time:\\t %5.5f ms.\\n\", cunCost / loop_count);\n" | |||
| << " }\n"; | |||
| ofs << " " << config_->module_name() << "_Inference();\n"; | |||
| CodeTestRelevantTile(ofs); | |||
| ofs.close(); | |||
| return RET_OK; | |||
| } | |||
| } // namespace mindspore::lite::micro | |||
| @@ -0,0 +1,41 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_MICRO_CODER_INFERENCE_GENERATOR_H_ | |||
| #define MINDSPORE_MICRO_CODER_INFERENCE_GENERATOR_H_ | |||
| #include <utility> | |||
| #include <memory> | |||
| #include "micro/coder/generator/generator.h" | |||
| namespace mindspore::lite::micro { | |||
| class InferenceGenerator : public Generator { | |||
| public: | |||
| explicit InferenceGenerator(std::unique_ptr<CoderContext> ctx) : Generator(std::move(ctx)) {} | |||
| ~InferenceGenerator() override = default; | |||
| protected: | |||
| int CodeNetHFile() override; | |||
| int CodeNetCFile() override; | |||
| int CodeTestFile() override; | |||
| private: | |||
| void CodeTestRelevantHeader(std::ofstream &code_test_ofs); | |||
| void CodeTestRelevantTile(std::ofstream &code_test_ofs); | |||
| }; | |||
| } // namespace mindspore::lite::micro | |||
| #endif  // MINDSPORE_MICRO_CODER_INFERENCE_GENERATOR_H_ | |||
| @@ -0,0 +1,172 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "coder/generator/utils/generator_utils.h" | |||
| #include <map> | |||
| #include <fstream> | |||
| #include <vector> | |||
| #include <utility> | |||
| #include <string> | |||
| #include "include/errorcode.h" | |||
| #include "coder/log.h" | |||
| #include "coder/utils/print_utils.h" | |||
| #include "src/common/file_utils.h" | |||
| namespace mindspore::lite::micro { | |||
| int WriteContentToFile(const std::string &file, const std::string &content) { | |||
| std::ofstream of(file); | |||
| if (!of.is_open()) { | |||
| MS_LOG(ERROR) << "open file error " << file.c_str(); | |||
| return RET_ERROR; | |||
| } | |||
| MS_LOG(INFO) << "write " << file.c_str(); | |||
| of << content; | |||
| of.close(); | |||
| return RET_OK; | |||
| } | |||
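| // emits C code that range-checks each ModelParameter entry against weight_size before memcpying its weights out of the loaded buffer | |||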
| void CodeReadModelParams(const std::map<std::string, Tensor *> &saved_weights, | |||
| const std::map<Tensor *, std::string> &tensors_map, std::ofstream &ofs) { | |||
| ofs << "\n\tstruct ModelParameter {\n" | |||
| << "\t\tvoid *addr;\n" | |||
| << "\t\tsize_t size;\n" | |||
| << "\t\tsize_t offset;\n" | |||
| << "\t};\n"; | |||
| size_t params_num = 0; | |||
| size_t offset = 0; | |||
| ofs << "\n\tstruct ModelParameter model_params[] = {\n"; | |||
| for (const auto &item : saved_weights) { | |||
| std::string name = item.first; | |||
| Tensor *tensor = item.second; | |||
| if (tensor->category() == Tensor::Category::CONST_TENSOR) { | |||
| auto iter = std::find_if(tensors_map.begin(), tensors_map.end(), | |||
| [&tensor](const std::pair<Tensor *, std::string> &t) { return t.first == tensor; }); | |||
| if (iter != tensors_map.end()) { | |||
| ofs << "\t\t{" << name << ", " << tensor->Size() << ", " << offset << "},\n"; | |||
| params_num++; | |||
| } | |||
| offset += tensor->Size(); | |||
| } | |||
| } | |||
| ofs << "\t};\n"; | |||
| offset = 0; | |||
| for (const auto &item : saved_weights) { | |||
| std::string name = item.first; | |||
| Tensor *tensor = item.second; | |||
| if (tensor->category() == Tensor::Category::CONST_TENSOR) { | |||
| auto iter = std::find_if(tensors_map.begin(), tensors_map.end(), | |||
| [&tensor](const std::pair<Tensor *, std::string> &t) { return t.first == tensor; }); | |||
| if (iter == tensors_map.end()) { | |||
| TypeId data_type = tensor->data_type(); | |||
| ofs << "\t" << GetTensorDataType(data_type) << "*" << name << " = (weight_buffer + " << offset << ");\n"; | |||
| } | |||
| offset += tensor->Size(); | |||
| } | |||
| } | |||
| ofs << "\n"; | |||
| ofs << "\tfor(int i = 0; i < " << params_num << "; ++i) {\n" | |||
| << "\t\tif (model_params[i].offset + model_params[i].size > weight_size) {\n" | |||
| "\t\t\tMICRO_ERROR(\"buffer is invalid, size: %d, offset: %lu\", weight_size, model_params[i].offset);\n" | |||
| "\t\t\treturn RET_ERROR;\n" | |||
| "\t\t}\n" | |||
| << "\t\tmemcpy(model_params[i].addr, (weight_buffer + model_params[i].offset), model_params[i].size);\n" | |||
| << "\t}\n"; | |||
| } | |||
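| // serializes every const tensor, in map order, into the binary .net weight file; offsets must line up with CodeReadModelParams above | |||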
| int SaveDataToNet(const std::map<std::string, Tensor *> &tensors_map, const std::string &net_file) { | |||
| std::ofstream out(net_file, std::ios::out | std::ios::trunc | std::ios::binary); | |||
| MS_CHECK_TRUE(out.is_open(), "net file open failed!"); | |||
| for (auto &item : tensors_map) { | |||
| std::string name = item.first; | |||
| Tensor *tensor = item.second; | |||
| if (tensor->category() == Tensor::Category::CONST_TENSOR) { | |||
| if (tensor->data_c() == nullptr) { | |||
| continue; | |||
| } | |||
| out.write(reinterpret_cast<const char *>(tensor->data_c()), tensor->Size()); | |||
| } | |||
| } | |||
| out.close(); | |||
| return RET_OK; | |||
| } | |||
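| // declares weight symbols in the generated header and defines them in the source; VAR tensors become extern pointers initialized to NULL | |||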
| void CodeModelParamsDefine(const std::map<std::string, Tensor *> &address_map, std::ofstream &hfile, | |||
| std::ofstream &cfile) { | |||
| for (auto &item : address_map) { | |||
| std::string name = item.first; | |||
| Tensor *tensor = item.second; | |||
| if (tensor->data_c() == nullptr) { | |||
| continue; | |||
| } | |||
| if (tensor->category() == Tensor::Category::CONST_TENSOR) { | |||
| PrintTensorForNet(tensor, cfile, hfile, name); | |||
| } else if (tensor->category() == Tensor::Category::VAR) { | |||
| hfile << "extern " << GetTensorDataType(tensor->data_type()) << " *" << name << ";\n"; | |||
| cfile << GetTensorDataType(tensor->data_type()) << " *" << name << " = NULL;\n"; | |||
| } | |||
| } | |||
| cfile << "\n"; | |||
| } | |||
| void CodeModelParamsDefineAndData(const std::map<std::string, Tensor *> &address_map, std::ofstream &hfile, | |||
| std::ofstream &cfile) { | |||
| for (auto &item : address_map) { | |||
| std::string name = item.first; | |||
| Tensor *tensor = item.second; | |||
| if (tensor->category() == Tensor::Category::CONST_TENSOR) { | |||
| if (tensor->data_c() == nullptr) { | |||
| continue; | |||
| } | |||
| PrintTensor(tensor, cfile, hfile, name); | |||
| } | |||
| } | |||
| } | |||
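| // fills a MicroTensor array in generated C: shape, dtype, format and the data symbol recorded in tensors_map | |||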
| int PrintMicroTensors(std::ofstream &ofs, std::vector<Tensor *> tensors, const std::string &name, | |||
| const std::map<Tensor *, std::string> &tensors_map) { | |||
| for (size_t index = 0; index < tensors.size(); ++index) { | |||
| Tensor *tensor = tensors[index]; | |||
| auto item = tensors_map.find(tensor); | |||
| if (item == tensors_map.end()) { | |||
| MS_LOG(ERROR) << "nonexistent tensor"; | |||
| return RET_ERROR; | |||
| } | |||
| ofs << " static int dim[] = {"; | |||
| for (size_t i = 0; i < tensor->shape().size(); ++i) { | |||
| ofs << tensor->shape()[i] << ", "; | |||
| } | |||
| ofs << "};\n"; | |||
| ofs << " " << name << "[" << index << "].ndim = " << tensor->shape().size() << ";\n"; | |||
| ofs << " " << name << "[" << index << "].dim = dim;\n"; | |||
| ofs << " " << name << "[" << index << "].type = " << GetMicroTensorDataType(tensor->data_type()) << ";\n"; | |||
| ofs << " " << name << "[" << index << "].format = " << std::to_string(tensor->format()) << ";\n"; | |||
| ofs << " " << name << "[" << index << "].data =" << item->second << ";\n"; | |||
| } | |||
| return RET_OK; | |||
| } | |||
| void IncludeCmsisDirectories(std::ofstream &ofs) { | |||
| ofs << "include_directories(${OP_HEADER_PATH}/cmsis)\n"; | |||
| ofs << "include_directories(${OP_HEADER_PATH}/cmsis/CMSIS/NN/Include)\n"; | |||
| ofs << "include_directories(${OP_HEADER_PATH}/cmsis/CMSIS/DSP/Include)\n"; | |||
| ofs << "include_directories(${OP_HEADER_PATH}/cmsis/CMSIS/Core/Include)\n"; | |||
| } | |||
| } // namespace mindspore::lite::micro | |||
| @@ -0,0 +1,47 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_MICRO_CODER_GENERATOR_GENERATOR_UTILS_H_ | |||
| #define MINDSPORE_MICRO_CODER_GENERATOR_GENERATOR_UTILS_H_ | |||
| #include <map> | |||
| #include <string> | |||
| #include <vector> | |||
| #include "src/tensor.h" | |||
| namespace mindspore::lite::micro { | |||
| int WriteContentToFile(const std::string &file, const std::string &content); | |||
| void CodeReadModelParams(const std::map<std::string, Tensor *> &saved_weights, | |||
| const std::map<Tensor *, std::string> &tensors_map, std::ofstream &ofs); | |||
| int SaveDataToNet(const std::map<std::string, Tensor *> &tensors_map, const std::string &net_file); | |||
| void CodeModelParamsDefine(const std::map<std::string, Tensor *> &address_map, std::ofstream &hfile, | |||
| std::ofstream &cfile); | |||
| void CodeModelParamsDefineAndData(const std::map<std::string, Tensor *> &address_map, std::ofstream &hfile, | |||
| std::ofstream &cfile); | |||
| int PrintMicroTensors(std::ofstream &ofs, std::vector<Tensor *> tensors, const std::string &name, | |||
| const std::map<Tensor *, std::string> &tensors_map); | |||
| void IncludeCmsisDirectories(std::ofstream &ofs); | |||
| } // namespace mindspore::lite::micro | |||
| #endif // MINDSPORE_MICRO_CODER_GENERATOR_GENERATOR_UTILS_H_ | |||
| @@ -0,0 +1,105 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MICRO_LOG_H_ | |||
| #define MICRO_LOG_H_ | |||
| #include "src/common/log_adapter.h" | |||
| #include "include/errorcode.h" | |||
| #define MS_CHECK_PTR(ptr) \ | |||
| do { \ | |||
| if ((ptr) == nullptr) { \ | |||
| MS_LOG(ERROR) << ": The pointer[" << #ptr << "] is null."; \ | |||
| return mindspore::lite::RET_ERROR; \ | |||
| } \ | |||
| } while (0) | |||
| #define MS_CHECK_PTR_WITH_EXE(ptr, FUNC) \ | |||
| do { \ | |||
| if ((ptr) == nullptr) { \ | |||
| MS_LOG(ERROR) << ": The pointer[" << #ptr << "] is null."; \ | |||
| FUNC; \ | |||
| return mindspore::lite::RET_ERROR; \ | |||
| } \ | |||
| } while (0) | |||
| #define MS_CHECK_PTR_RET_NULL(ptr) \ | |||
| do { \ | |||
| if ((ptr) == nullptr) { \ | |||
| MS_LOG(ERROR) << ": The pointer[" << #ptr << "] is null."; \ | |||
| return nullptr; \ | |||
| } \ | |||
| } while (0) | |||
| #define MS_CHECK_PTR_IF_NULL(ptr) \ | |||
| do { \ | |||
| if ((ptr) == nullptr) { \ | |||
| MS_LOG(ERROR) << ": The pointer[" << #ptr << "] is null."; \ | |||
| return; \ | |||
| } \ | |||
| } while (0) | |||
| #define MS_CHECK_RET_CODE(code, msg) \ | |||
| do { \ | |||
| if ((code) != RET_OK) { \ | |||
| MS_LOG(ERROR) << msg; \ | |||
| return mindspore::lite::RET_ERROR; \ | |||
| } \ | |||
| } while (0) | |||
| #define MS_CHECK_RET_CODE_WITH_EXE(code, msg, FUNC) \ | |||
| do { \ | |||
| if ((code) != RET_OK) { \ | |||
| MS_LOG(ERROR) << msg; \ | |||
| FUNC; \ | |||
| return mindspore::lite::RET_ERROR; \ | |||
| } \ | |||
| } while (0) | |||
| #define MS_CHECK_RET_CODE_RET_NULL(code, msg) \ | |||
| do { \ | |||
| if ((code) != RET_OK) { \ | |||
| MS_LOG(ERROR) << msg; \ | |||
| return nullptr; \ | |||
| } \ | |||
| } while (0) | |||
| #define MS_CHECK_TRUE(code, msg) \ | |||
| do { \ | |||
| if (!(code)) { \ | |||
| MS_LOG(ERROR) << msg; \ | |||
| return mindspore::lite::RET_ERROR; \ | |||
| } \ | |||
| } while (0) | |||
| #define MS_CHECK_TRUE_RET_NULL(code, msg) \ | |||
| do { \ | |||
| if (!(code)) { \ | |||
| MS_LOG(ERROR) << msg; \ | |||
| return nullptr; \ | |||
| } \ | |||
| } while (0) | |||
| #define MS_CHECK_TRUE_RET_BOOL(code, msg) \ | |||
| do { \ | |||
| if (!(code)) { \ | |||
| MS_LOG(ERROR) << msg; \ | |||
| return false; \ | |||
| } \ | |||
| } while (0) | |||
| #endif // MICRO_LOG_H_ | |||
| @@ -0,0 +1,17 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "coder/coder.h" | |||
| int main(int argc, const char **argv) { return mindspore::lite::micro::RunCoder(argc, argv); } | |||
| @@ -0,0 +1,89 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include <algorithm> | |||
| #include <set> | |||
| #include <vector> | |||
| #include <string> | |||
| #include "micro/coder/opcoders/file_collector.h" | |||
| namespace mindspore::lite::micro { | |||
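| // Collector is a small RAII helper: files accumulate through operator+= and each subclass destructor flushes them into the CoderContext at the end of Collect() | |||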
| class Collector { | |||
| public: | |||
| explicit Collector(CoderContext *const ctx) : ctx_(ctx) {} | |||
| virtual ~Collector() = default; | |||
| virtual void operator+=(const std::string &file) = 0; | |||
| protected: | |||
| CoderContext *const ctx_{nullptr}; | |||
| }; | |||
| class HFileCollector : public Collector { | |||
| public: | |||
| HFileCollector() = delete; | |||
| explicit HFileCollector(CoderContext *const ctx) : Collector(ctx) {} | |||
| void operator+=(const std::string &file) override { this->files_.insert(file); } | |||
| ~HFileCollector() override { this->ctx_->set_h_files(files_); } | |||
| private: | |||
| std::set<std::string> files_; | |||
| }; | |||
| class CFileCollector : public Collector { | |||
| public: | |||
| CFileCollector() = delete; | |||
| explicit CFileCollector(CoderContext *const ctx) : Collector(ctx) {} | |||
| void operator+=(const std::string &file) override { this->files_.insert(file); } | |||
| ~CFileCollector() override { this->ctx_->set_c_files(this->files_); } | |||
| private: | |||
| std::set<std::string> files_; | |||
| }; | |||
| class ASMFileCollector : public Collector { | |||
| public: | |||
| ASMFileCollector() = delete; | |||
| explicit ASMFileCollector(CoderContext *const ctx) : Collector(ctx) {} | |||
| void operator+=(const std::string &file) override { this->files_.insert(file); } | |||
| ~ASMFileCollector() override { this->ctx_->set_asm_files(this->files_); } | |||
| private: | |||
| std::set<std::string> files_; | |||
| }; | |||
| void Collect(CoderContext *const ctx, const std::vector<std::string> &headers, const std::vector<std::string> &cFiles, | |||
| const std::vector<std::string> &asmFiles) { | |||
| auto collect = [](Collector &cc, const std::vector<std::string> &content) { | |||
| std::for_each(content.begin(), content.end(), [&cc](const std::string &s) { cc += s; }); | |||
| }; | |||
| HFileCollector hc(ctx); | |||
| collect(hc, headers); | |||
| CFileCollector cc(ctx); | |||
| collect(cc, cFiles); | |||
| ASMFileCollector ac(ctx); | |||
| collect(ac, asmFiles); | |||
| } | |||
| } // namespace mindspore::lite::micro | |||
| @@ -0,0 +1,30 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_LITE_MICRO_CODER_FILE_COLLECTOR_H_ | |||
| #define MINDSPORE_LITE_MICRO_CODER_FILE_COLLECTOR_H_ | |||
| #include <string> | |||
| #include <vector> | |||
| #include "micro/coder/coder_context.h" | |||
| namespace mindspore::lite::micro { | |||
| void Collect(CoderContext *const ctx, const std::vector<std::string> &headers, | |||
| const std::vector<std::string> &cFiles = {}, const std::vector<std::string> &asmFiles = {}); | |||
| } | |||
| // namespace mindspore::lite::micro | |||
| #endif // MINDSPORE_LITE_MICRO_CODER_FILE_COLLECTOR_H_ | |||
| @@ -0,0 +1,74 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include <cstdlib> | |||
| #include <vector> | |||
| #include "micro/coder/opcoders/op_coder.h" | |||
| namespace mindspore::lite::micro { | |||
| OperatorCoder::~OperatorCoder() { | |||
| node_ = nullptr; | |||
| if (parameter_ != nullptr) { | |||
| free(parameter_); | |||
| parameter_ = nullptr; | |||
| } | |||
| } | |||
| const std::vector<Tensor *> OperatorCoder::input_tensors() const { return input_tensors_; } | |||
| const std::vector<Tensor *> OperatorCoder::output_tensors() const { return output_tensors_; } | |||
| void OperatorCoder::set_input_tensor_indices(const std::vector<uint32_t> *input_indices) { | |||
| input_tensor_indices_ = *input_indices; | |||
| } | |||
| void OperatorCoder::set_output_tensor_indices(const std::vector<uint32_t> *output_indices) { | |||
| output_tensor_indices_ = *output_indices; | |||
| } | |||
| const std::vector<uint32_t> OperatorCoder::input_tensor_indices() const { return input_tensor_indices_; } | |||
| const std::vector<uint32_t> OperatorCoder::output_tensor_indices() const { return output_tensor_indices_; } | |||
| void OperatorCoder::AddInputNodeIndex(size_t input_node_index) { | |||
| if (!input_node_set_indices_.count(input_node_index)) { | |||
| input_node_set_indices_.insert(input_node_index); | |||
| input_node_indices_.push_back(input_node_index); | |||
| } | |||
| } | |||
| void OperatorCoder::AddOutputNodeIndex(size_t output_node_index) { | |||
| if (!output_node_set_indices_.count(output_node_index)) { | |||
| output_node_set_indices_.insert(output_node_index); | |||
| output_node_indices_.push_back(output_node_index); | |||
| } | |||
| } | |||
| const std::vector<size_t> OperatorCoder::input_node_indices() const { return this->input_node_indices_; } | |||
| const std::vector<size_t> OperatorCoder::output_node_indices() const { return this->output_node_indices_; } | |||
| void OperatorCoder::set_parameter(OpParameter *parameter) { this->parameter_ = parameter; } | |||
| size_t OperatorCoder::node_index() const { return node_index_; } | |||
| void OperatorCoder::set_thread_num(int thread_num) { | |||
| // only the multi-threaded (Code_Android) path passes 4; every other mode keeps the single-thread default | |||
| if (thread_num == 4) { | |||
| this->thread_num_ = thread_num; | |||
| this->thread_num_s_ = "thread_num"; | |||
| } | |||
| } | |||
| } // namespace mindspore::lite::micro | |||
| @@ -0,0 +1,128 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_LITE_MICRO_CODER_OPCODER_H_ | |||
| #define MINDSPORE_LITE_MICRO_CODER_OPCODER_H_ | |||
| #include <vector> | |||
| #include <set> | |||
| #include <string> | |||
| #include <memory> | |||
| #include "coder/coder_context.h" | |||
| #include "coder/coder_graph.h" | |||
| #include "coder/allocator/allocator.h" | |||
| #include "include/errorcode.h" | |||
| #include "src/lite_kernel.h" | |||
| #include "securec/include/securec.h" | |||
| #include "opcoders/op_coder_register.h" | |||
| #include "micro/coder/log.h" | |||
| namespace mindspore::lite::micro { | |||
| class CoderContext; | |||
| constexpr int kPrecision = 19; | |||
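| // expands to a generated-C statement that launches func on the default thread pool with the packed args struct | |||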
| #define CODE_PARALLEL_FUNC(func) code << "ParallelLaunch(THREAD_POOL_DEFAULT, " << func << ", &args, thread_num);\n" | |||
| class OperatorCoder { | |||
| public: | |||
| OperatorCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors, | |||
| const Model::Node *node, size_t node_index, Target target) | |||
| : input_tensors_(in_tensors), | |||
| output_tensors_(out_tensors), | |||
| node_(node), | |||
| target_(target), | |||
| node_index_(node_index) { | |||
| allocator_ = MemoryAllocator::GetInstance(); | |||
| // vectors checked not empty in OpCoderBuilder::build | |||
| input_tensor_ = input_tensors_.at(kInputIndex); | |||
| output_tensor_ = output_tensors_.at(kOutputIndex); | |||
| } | |||
| std::string ID() const { return node_->name_; } | |||
| void set_input_tensor_indices(const std::vector<uint32_t> *input_indices); | |||
| void set_output_tensor_indices(const std::vector<uint32_t> *output_indices); | |||
| const std::vector<uint32_t> input_tensor_indices() const; | |||
| const std::vector<uint32_t> output_tensor_indices() const; | |||
| void AddInputNodeIndex(size_t input_node_index); | |||
| void AddOutputNodeIndex(size_t output_node_index); | |||
| const std::vector<size_t> input_node_indices() const; | |||
| const std::vector<size_t> output_node_indices() const; | |||
| size_t node_index() const; | |||
| void set_parameter(OpParameter *parameter); | |||
| const std::vector<Tensor *> input_tensors() const; | |||
| const std::vector<Tensor *> output_tensors() const; | |||
| const PrimitiveC *primitive() const { return node_->primitive_; } | |||
| const Model::Node *node() const { return this->node_; } | |||
| void AddInitialParameters(Tensor *parameter) { initial_parameters_.push_back(parameter); } | |||
| const std::vector<Tensor *> initial_parameters() const { return initial_parameters_; } | |||
| // context | |||
| virtual int Prepare(CoderContext *const context) = 0; | |||
| virtual int DoCode(CoderContext *const context) = 0; | |||
| virtual ~OperatorCoder(); | |||
| void set_thread_num(int thread_num); | |||
| protected: | |||
| std::vector<Tensor *> input_tensors_; | |||
| std::vector<Tensor *> output_tensors_; | |||
| const Model::Node *node_{nullptr}; | |||
| Target target_{kTargetUnknown}; | |||
| Tensor *input_tensor_{nullptr}; | |||
| Tensor *output_tensor_{nullptr}; | |||
| OpParameter *parameter_{nullptr}; | |||
| MemoryAllocator *allocator_{nullptr}; | |||
| std::string thread_num_s_{"1"}; | |||
| int thread_num_{1}; | |||
| private: | |||
| size_t node_index_{0}; | |||
| std::vector<uint32_t> input_tensor_indices_; | |||
| std::vector<uint32_t> output_tensor_indices_; | |||
| std::vector<size_t> input_node_indices_; | |||
| std::vector<size_t> output_node_indices_; | |||
| std::set<size_t> input_node_set_indices_; | |||
| std::set<size_t> output_node_set_indices_; | |||
| std::vector<std::unique_ptr<OperatorCoder>> input_nodes_; | |||
| std::vector<std::unique_ptr<OperatorCoder>> output_nodes_; | |||
| std::vector<Tensor *> initial_parameters_; | |||
| }; | |||
| // a template func for normal op_coder creator | |||
| template <typename T> | |||
| std::unique_ptr<OperatorCoder> CPUOpCoderCreator(const std::vector<Tensor *> &in_tensors, | |||
| const std::vector<Tensor *> &out_tensors, const Model::Node *node, | |||
| size_t node_index, Target target) { | |||
| std::unique_ptr<T> coder = std::make_unique<T>(in_tensors, out_tensors, node, node_index, target); | |||
| return coder; | |||
| } | |||
| } // namespace mindspore::lite::micro | |||
| #endif // MINDSPORE_LITE_MICRO_CODER_OPCODER_H_ | |||
| @@ -0,0 +1,113 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "micro/coder/opcoders/op_coder_builder.h" | |||
| #include <vector> | |||
| #include <memory> | |||
| #include "micro/coder/allocator/allocator.h" | |||
| #include "src/ops/populate/populate_register.h" | |||
| namespace mindspore::lite::micro { | |||
| constexpr int kMaxThreadNumSupport = 4; | |||
| std::unique_ptr<OperatorCoder> OpCoderBuilder::build() { | |||
| if (node_->primitive_ == nullptr) { | |||
| return nullptr; | |||
| } | |||
| auto primitive_type = static_cast<schema::PrimitiveType>(node_->primitive_->Type()); | |||
| CoderKey coder_key(target_, data_type_, primitive_type); | |||
| CoderCreatorFunc creator_func = OpCoderFactory::GetInstance()->FindOpCoder(coder_key); | |||
| if (creator_func == nullptr) { | |||
| MS_LOG(ERROR) << "coderFactor create a null op_coder: " << node_->name_ << " primitive type: " | |||
| << mindspore::schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive_type)) | |||
| << " code_target: " << target_ << " data_type: " << EnumNameDataType(data_type_); | |||
| return nullptr; | |||
| } | |||
| if (inputs_.empty() || outputs_.empty()) { | |||
| MS_LOG(ERROR) << "coderFactor create a null op_coder: " << node_->name_ << " primitive type: " | |||
| << mindspore::schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive_type)) | |||
| << " code_target: " << target_ << " data_type: " << EnumNameDataType(data_type_); | |||
| MS_LOG(ERROR) << "input tensors or output tensors are empty"; | |||
| return nullptr; | |||
| } else { | |||
| MS_CHECK_PTR_RET_NULL(inputs_.at(kInputIndex)); | |||
| MS_CHECK_PTR_RET_NULL(outputs_.at(kOutputIndex)); | |||
| } | |||
| std::unique_ptr<OperatorCoder> op_coder = creator_func(inputs_, outputs_, node_, node_index_, target_); | |||
| if (!op_coder) { | |||
| MS_LOG(ERROR) << "coderFactor create a null op_coder: " << node_->name_ << " primitive type: " | |||
| << mindspore::schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive_type)) | |||
| << " code_target: " << target_ << " data_type: " << EnumNameDataType(data_type_); | |||
| return op_coder; | |||
| } | |||
| auto parameter_creator = PopulateRegistry::GetInstance()->GetParameterCreator(schema::PrimitiveType(primitive_type)); | |||
| MS_CHECK_PTR_RET_NULL(parameter_creator); | |||
| OpParameter *parameter = parameter_creator(node_->primitive_); | |||
| if (parameter == nullptr) { | |||
| MS_LOG(ERROR) << "PopulateParameter return nullptr, type: " | |||
| << schema::EnumNamePrimitiveType((schema::PrimitiveType)(primitive_type)); | |||
| return nullptr; | |||
| } | |||
| op_coder->set_input_tensor_indices(&input_indices_); | |||
| op_coder->set_output_tensor_indices(&output_indices_); | |||
| int thread_num = this->mode_ == CodeMode::Code_Android ? kMaxThreadNumSupport : 1; | |||
| op_coder->set_thread_num(thread_num); | |||
| parameter->thread_num_ = thread_num; | |||
| op_coder->set_parameter(parameter); | |||
| return op_coder; | |||
| } | |||
| OpCoderBuilder &OpCoderBuilder::inputs(const std::vector<Tensor *> &inputs) { | |||
| this->inputs_ = inputs; | |||
| return *this; | |||
| } | |||
| OpCoderBuilder &OpCoderBuilder::outputs(const std::vector<Tensor *> &outputs) { | |||
| this->outputs_ = outputs; | |||
| return *this; | |||
| } | |||
| OpCoderBuilder &OpCoderBuilder::node(const Model::Node *node) { | |||
| this->node_ = node; | |||
| return *this; | |||
| } | |||
| OpCoderBuilder &OpCoderBuilder::data_type(TypeId data_type) { | |||
| this->data_type_ = data_type; | |||
| return *this; | |||
| } | |||
| OpCoderBuilder &OpCoderBuilder::mode(CodeMode mode) { | |||
| this->mode_ = mode; | |||
| return *this; | |||
| } | |||
| OpCoderBuilder &OpCoderBuilder::input_indices(const std::vector<uint32_t> &indices) { | |||
| this->input_indices_ = indices; | |||
| return *this; | |||
| } | |||
| OpCoderBuilder &OpCoderBuilder::output_indices(const std::vector<uint32_t> &indices) { | |||
| this->output_indices_ = indices; | |||
| return *this; | |||
| } | |||
| OpCoderBuilder &OpCoderBuilder::target(Target target) { | |||
| this->target_ = target; | |||
| return *this; | |||
| } | |||
| void OpCoderBuilder::Reset() {} | |||
| } // namespace mindspore::lite::micro | |||
| @@ -0,0 +1,69 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_LITE_MICRO_OPCODERS_OP_CODER_BUILDER_H_ | |||
| #define MINDSPORE_LITE_MICRO_OPCODERS_OP_CODER_BUILDER_H_ | |||
| #include <vector> | |||
| #include <memory> | |||
| #include "micro/coder/opcoders/op_coder.h" | |||
| #include "micro/coder/allocator/allocator.h" | |||
| namespace mindspore::lite::micro { | |||
| class OpCoderBuilder { | |||
| public: | |||
| std::unique_ptr<OperatorCoder> build(); | |||
| OpCoderBuilder &inputs(const std::vector<Tensor *> &inputs); | |||
| OpCoderBuilder &outputs(const std::vector<Tensor *> &outputs); | |||
| OpCoderBuilder &node(const Model::Node *node); | |||
| OpCoderBuilder &data_type(TypeId data_type); | |||
| OpCoderBuilder &mode(CodeMode mode); | |||
| OpCoderBuilder &input_indices(const std::vector<uint32_t> &indices); | |||
| OpCoderBuilder &output_indices(const std::vector<uint32_t> &indices); | |||
| OpCoderBuilder &target(Target target); | |||
| void Reset(); | |||
| private: | |||
| std::vector<Tensor *> inputs_; | |||
| std::vector<Tensor *> outputs_; | |||
| const mindspore::lite::Model::Node *node_ = nullptr; | |||
| size_t node_index_{0}; | |||
| Target target_{kTargetUnknown}; | |||
| TypeId data_type_ = kTypeUnknown; | |||
| CodeMode mode_ = Code_Normal; | |||
| std::vector<uint32_t> input_indices_; | |||
| std::vector<uint32_t> output_indices_; | |||
| }; | |||
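| // Illustrative call chain (the tensor/node variables here are placeholders, not part of the API): | |||
| //   std::unique_ptr<OperatorCoder> coder = OpCoderBuilder() | |||
| //                                            .inputs(in_tensors).outputs(out_tensors).node(node) | |||
| //                                            .data_type(kNumberTypeFloat32).mode(Code_Normal) | |||
| //                                            .target(target).build(); | |||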
| } // namespace mindspore::lite::micro | |||
| #endif // MINDSPORE_LITE_MICRO_OPCODERS_OP_CODER_BUILDER_H_ | |||
| @@ -0,0 +1,61 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "opcoders/op_coder_register.h" | |||
| #include <utility> | |||
| namespace mindspore::lite::micro { | |||
| bool CoderKey::operator<(const CoderKey &rhs) const { | |||
| return std::tie(this->target_, this->data_type_, this->op_type_) < | |||
| std::tie(rhs.target_, rhs.data_type_, rhs.op_type_); | |||
| } | |||
| OpCoderFactory *OpCoderFactory::GetInstance() { | |||
| static OpCoderFactory reg; | |||
| return &reg; | |||
| } | |||
| int OpCoderFactory::RegistOpCoder(Target target, TypeId data_type, schema::PrimitiveType operator_type, | |||
| const CoderCreatorFunc &creator_func) { | |||
| // check key | |||
| CoderKey key(target, data_type, operator_type); | |||
| // insert pair to registry | |||
| if (this->opcoder_sets_.find(key) != this->opcoder_sets_.end()) { | |||
| MS_LOG(ERROR) << "coder has already exists!"; | |||
| return RET_ERROR; | |||
| } | |||
| this->opcoder_sets_.insert(std::pair<CoderKey, CoderCreatorFunc>(key, creator_func)); | |||
| return RET_OK; | |||
| } | |||
| CoderCreatorFunc OpCoderFactory::FindOpCoder(const CoderKey &key) { | |||
| auto iterator = this->opcoder_sets_.find(key); | |||
| if (iterator != this->opcoder_sets_.end()) { | |||
| return iterator->second; | |||
| } | |||
| // matching kAllTargets | |||
| iterator = this->opcoder_sets_.find(key.AllKey()); | |||
| if (iterator != this->opcoder_sets_.end()) { | |||
| return iterator->second; | |||
| } | |||
| return nullptr; | |||
| } | |||
| OpCoderRegister::OpCoderRegister(Target target, TypeId data_type, schema::PrimitiveType operator_type, | |||
| const CoderCreatorFunc &creatorFunc) { | |||
| OpCoderFactory::GetInstance()->RegistOpCoder(target, data_type, operator_type, creatorFunc); | |||
| } | |||
| } // namespace mindspore::lite::micro | |||
| @@ -0,0 +1,85 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MICRO_MICRO_CODER_OPCODERS_OP_CODER_REGISTER_H_ | |||
| #define MICRO_MICRO_CODER_OPCODERS_OP_CODER_REGISTER_H_ | |||
| #include <map> | |||
| #include <vector> | |||
| #include <memory> | |||
| #include "src/lite_kernel.h" | |||
| #include "include/model.h" | |||
| #include "coder/coder_config.h" | |||
| namespace mindspore::lite::micro { | |||
| class OperatorCoder; | |||
| using CoderCreatorFunc = std::function<std::unique_ptr<OperatorCoder>( | |||
| const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors, const Model::Node *node, | |||
| size_t node_index, Target target)>; | |||
| class CoderKey { | |||
| public: | |||
| CoderKey() = delete; | |||
| CoderKey(Target target, TypeId data_type, schema::PrimitiveType op_type) | |||
| : target_(target), data_type_(data_type), op_type_(op_type) {} | |||
| CoderKey AllKey() const { | |||
| CoderKey key(kAllTargets, data_type_, op_type_); | |||
| return key; | |||
| } | |||
| bool operator<(const CoderKey &rhs) const; | |||
| ~CoderKey() = default; | |||
| private: | |||
| Target target_ = kTargetUnknown; | |||
| TypeId data_type_ = kTypeUnknown; | |||
| schema::PrimitiveType op_type_ = schema::PrimitiveType_NONE; | |||
| }; | |||
| class OpCoderFactory { | |||
| public: | |||
| OpCoderFactory() = default; | |||
| static OpCoderFactory *GetInstance(); | |||
| int RegistOpCoder(Target target, TypeId data_type, schema::PrimitiveType operator_type, | |||
| const CoderCreatorFunc &creator_func); | |||
| CoderCreatorFunc FindOpCoder(const CoderKey &key); | |||
| ~OpCoderFactory() { opcoder_sets_.clear(); } | |||
| private: | |||
| // target || data type || primitive type | |||
| std::map<CoderKey, CoderCreatorFunc> opcoder_sets_; | |||
| }; | |||
| class OpCoderRegister { | |||
| public: | |||
| OpCoderRegister() = delete; | |||
| OpCoderRegister(Target target, TypeId data_type, schema::PrimitiveType operator_type, | |||
| const CoderCreatorFunc &creator_func); | |||
| ~OpCoderRegister() = default; | |||
| }; | |||
| #define REG_OPERATOR_CODER(target, data_type, operator_type, creator_func) \ | |||
| static OpCoderRegister g_##target##data_type##operator_type##Creator(target, data_type, operator_type, creator_func); | |||
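| // Illustrative registration at namespace scope (the coder class and primitive names here are placeholders): | |||
| //   REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, schema::PrimitiveType_Add, CPUOpCoderCreator<AddFP32Coder>) | |||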
| } // namespace mindspore::lite::micro | |||
| #endif // MICRO_MICRO_CODER_OPCODERS_OP_CODER_REGISTER_H_ | |||
| @@ -0,0 +1,472 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "coder/session_coder.h" | |||
| #include <algorithm> | |||
| #include <queue> | |||
| #include <utility> | |||
| #include "coder/allocator/allocator.h" | |||
| #include "coder/coder_context.h" | |||
| #include "coder/debug.h" | |||
| #include "coder/generator/generator.h" | |||
| #include "coder/generator/inference/inference_generator.h" | |||
| #include "coder/opcoders/op_coder_builder.h" | |||
| #include "coder/utils/coder_utils.h" | |||
| #include "coder/log.h" | |||
| #include "include/errorcode.h" | |||
| #include "src/common/file_utils.h" | |||
| namespace mindspore::lite::micro { | |||
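| // runs static shape inference node by node; RET_INFER_INVALID defers that node's shapes to runtime instead of failing the build | |||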
| int CoderSession::InferShape() { | |||
| const Model *model = coder_graph_->model(); | |||
| std::vector<lite::Tensor *> all_tensors = coder_graph_->all_tensors(); | |||
| size_t nodes_num = model->all_nodes_.size(); | |||
| for (size_t i = 0; i < nodes_num; ++i) { | |||
| auto curr_node = model->all_nodes_.at(i); | |||
| if (!curr_node) { | |||
| MS_LOG(ERROR) << "model's node is null, who's index is " << i << ". InferShape failed "; | |||
| return RET_ERROR; | |||
| } | |||
| std::vector<Tensor *> inputs; | |||
| std::vector<Tensor *> outputs; | |||
| size_t input_nums = curr_node->input_indices_.size(); | |||
| inputs.reserve(input_nums); | |||
| for (size_t j = 0; j < input_nums; ++j) { | |||
| inputs.push_back(all_tensors.at(curr_node->input_indices_.at(j))); | |||
| } | |||
| size_t output_nums = curr_node->output_indices_.size(); | |||
| outputs.reserve(output_nums); | |||
| for (size_t j = 0; j < output_nums; ++j) { | |||
| outputs.push_back(all_tensors.at(curr_node->output_indices_.at(j))); | |||
| } | |||
| PrimitiveC *primitive = curr_node->primitive_; | |||
| if (primitive == nullptr) { | |||
| MS_LOG(ERROR) << "Op " << curr_node->name_ << " should exist in model!"; | |||
| return RET_ERROR; | |||
| } | |||
| primitive->set_infer_flag(true); | |||
| int ret = primitive->InferShape(inputs, outputs); | |||
| if (ret == RET_INFER_INVALID) { | |||
| MS_LOG(INFO) << "InferShape shouldn't be done before runtime, name: " << curr_node->name_ | |||
| << ", type: " << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive->Type())) | |||
| << "flag set to false."; | |||
| primitive->set_infer_flag(false); | |||
| } else if (ret != RET_OK) { | |||
| MS_LOG(ERROR) << "InferShape failed, name: " << curr_node->name_ << ", type: " | |||
| << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive->Type())); | |||
| return RET_ERROR; | |||
| } | |||
| } | |||
| return RET_OK; | |||
| } | |||
| void CoderSession::EndCode() { | |||
| coder_context_->set_tensor_map(allocator_->tensors_map()); | |||
| coder_context_->set_saved_weights(allocator_->saved_weights()); | |||
| coder_context_->set_total_buffer_size(allocator_->total_buffer_size()); | |||
| std::vector<std::string> blocks; | |||
| for (size_t index = 0; index < coder_context_->code_blocks().size(); ++index) { | |||
| auto &curr_node = op_coders_.at(index); | |||
| std::string coder_block = coder_context_->code_blocks().at(index); | |||
| MicroDebug::DumpNodeData(curr_node, coder_context_->tensors_map(), &coder_block); | |||
| blocks.emplace_back(coder_block); | |||
| } | |||
| coder_context_->set_code_blocks(blocks); | |||
| coder_context_->set_graph_inputs(coder_graph_->input_tensors()); | |||
| coder_context_->set_graph_outputs(coder_graph_->output_tensors()); | |||
| } | |||
| int CoderSession::Run() { | |||
| MS_LOG(INFO) << "start run opcoders"; | |||
| // 1. assign memory | |||
| std::vector<lite::Tensor *> inputs = coder_graph_->input_tensors(); | |||
| int ret = allocator_->Assign(inputs, op_coders_); | |||
| if (ret != RET_OK) { | |||
| MS_LOG(ERROR) << "assign memory failed"; | |||
| return RET_ERROR; | |||
| } | |||
| // 2. prepare, init model parameters | |||
| for (const auto &op_coder : op_coders_) { | |||
| if (op_coder == nullptr) { | |||
| MS_LOG(ERROR) << "opcoder is nullptr"; | |||
| return RET_ERROR; | |||
| } | |||
| ret = op_coder->Prepare(coder_context_.get()); | |||
| if (ret != RET_OK) { | |||
| MS_LOG(ERROR) << "prepare coder " << op_coder->ID() << " failed"; | |||
| return RET_ERROR; | |||
| } | |||
| allocator_->enable_is_next(); | |||
| } | |||
| // 3. docode, write operator code | |||
| for (const auto &op_coder : op_coders_) { | |||
| if (op_coder == nullptr) { | |||
| MS_LOG(ERROR) << "opcoder is nullptr"; | |||
| return RET_ERROR; | |||
| } | |||
| ret = op_coder->DoCode(this->coder_context_.get()); | |||
| if (ret != RET_OK) { | |||
| MS_LOG(ERROR) << "do coder " << op_coder->ID() << " failed"; | |||
| return RET_ERROR; | |||
| } | |||
| } | |||
| this->EndCode(); | |||
| MS_LOG(INFO) << "run opcoders success"; | |||
| return RET_OK; | |||
| } | |||
| int CoderSession::GenerateCode() { | |||
| MS_LOG(INFO) << "CoderSession::GenerateCode start"; | |||
| std::shared_ptr<Generator> generator; | |||
| Configurator *config = Configurator::GetInstance(); | |||
| CodeMode code_mode = config->code_mode(); | |||
| switch (code_mode) { | |||
| case Code_Normal: | |||
| case Code_Android: | |||
| MS_LOG(INFO) << "generate code for Android"; | |||
| generator = std::make_shared<InferenceGenerator>(std::move(coder_context_)); | |||
| break; | |||
| default: | |||
| MS_LOG(ERROR) << "unsupported generator code mode, " << code_mode; | |||
| return RET_ERROR; | |||
| } | |||
| // when a weight file is used, the coder context needs to remove initial parameters from the tensors info; | |||
| // a temporary tensor list serves as storage | |||
| int ret = generator->GenerateCode(); | |||
| if (ret != RET_OK) { | |||
| MS_LOG(ERROR) << "generate code failed"; | |||
| } | |||
| MS_LOG(INFO) << "CoderSession::GenerateCode done"; | |||
| return ret; | |||
| } | |||
| int CoderSession::Init(const std::string &model_path) { | |||
| MS_LOG(INFO) << "CoderSession::Init start"; | |||
| // Load graph | |||
| MS_LOG(DEBUG) << "start reading model file"; | |||
| size_t size = 0; | |||
| char *graph_buf = ReadFile(model_path.c_str(), &size); | |||
| if (graph_buf == nullptr) { | |||
| MS_LOG(ERROR) << "the graphBuf is nullptr"; | |||
| return RET_ERROR; | |||
| } | |||
| if (size >= UINT_MAX) { | |||
| MS_LOG(ERROR) << "the size is invalid"; | |||
| delete[] graph_buf; | |||
| return RET_ERROR; | |||
| } | |||
| Model *model = lite::Model::Import(graph_buf, size); | |||
| delete[] graph_buf; | |||
| MS_CHECK_PTR(model); | |||
| coder_graph_ = std::make_unique<CoderGraph>(model); | |||
| // create a coder context for this session | |||
| coder_context_ = std::make_unique<CoderContext>(); | |||
| allocator_ = MemoryAllocator::GetInstance(); | |||
| allocator_->RecordRuntimeAddrs(coder_context_->input_name(), coder_context_->buffer_name(), | |||
| coder_context_->weight_name()); | |||
| MS_LOG(INFO) << "CoderSession::Init done"; | |||
| return RET_OK; | |||
| } | |||
| int CoderSession::Build() { | |||
| if (coder_graph_ == nullptr) { | |||
| return RET_ERROR; | |||
| } | |||
| int ret = this->CompileGraph(); | |||
| if (ret != RET_OK) { | |||
| MS_LOG(ERROR) << "CompileGraph failed: " << ret; | |||
| return ret; | |||
| } | |||
| return RET_OK; | |||
| } | |||
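| // builds node-level dataflow edges by matching every coder's input tensors against every other coder's outputs (quadratic over op coders) | |||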
| int CoderSession::InitNodesInputsAndOutputs() { | |||
| auto &op_coders = this->op_coders_; | |||
| for (const auto &op_coder : op_coders) { | |||
| for (const auto &search : op_coders) { | |||
| if (search.get() == op_coder.get()) { | |||
| continue; | |||
| } | |||
| for (const auto &tensor : op_coder->input_tensors()) { | |||
| std::vector<Tensor *> outputs = search->output_tensors(); | |||
| auto iter = std::find(outputs.begin(), outputs.end(), tensor); | |||
| if (iter != outputs.end()) { | |||
| op_coder->AddInputNodeIndex(search->node_index()); | |||
| } | |||
| } | |||
| for (const auto &tensor : op_coder->output_tensors()) { | |||
| auto inputs = search->input_tensors(); | |||
| auto iter = std::find(inputs.begin(), inputs.end(), tensor); | |||
| if (iter != inputs.end()) { | |||
| op_coder->AddOutputNodeIndex(search->node_index()); | |||
| } | |||
| } | |||
| } | |||
| } | |||
| return RET_OK; | |||
| } | |||
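| // a tensor's ref count is the number of op coders that consume it; the allocator relies on this to recycle buffers | |||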
| int CoderSession::InitTensorsRef() { | |||
| auto all_tensors = coder_graph_->all_tensors(); | |||
| for (auto &tensor : all_tensors) { | |||
| size_t refcount = 0; | |||
| for (const auto &node : this->op_coders_) { | |||
| auto inputs = node->input_tensors(); | |||
| auto iter = std::find(inputs.begin(), inputs.end(), tensor); | |||
| if (iter != inputs.end()) { | |||
| refcount++; | |||
| } | |||
| } | |||
| tensor->set_ref_count(refcount); | |||
| } | |||
| return RET_OK; | |||
| } | |||
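| // converts flatbuffer meta tensors into lite::Tensor objects, copying const (weight/bias) data and quant params; on failure all partially built tensors are freed | |||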
| int CoderSession::ConvertTensors() { | |||
| auto model = coder_graph_->model(); | |||
| if (model == nullptr) { | |||
| MS_LOG(ERROR) << "Graph model is nullptr"; | |||
| return RET_ERROR; | |||
| } | |||
| std::vector<Tensor *> all_tensors; | |||
| auto clear_tensors = [&all_tensors]() { | |||
| std::for_each(all_tensors.begin(), all_tensors.end(), [](Tensor *&t) { | |||
| delete t; | |||
| t = nullptr; | |||
| }); | |||
| all_tensors.clear(); | |||
| }; | |||
| auto check_dim = [](int dim) -> int { | |||
| MS_CHECK_TRUE(dim > 0, "invalid dim value!"); | |||
| return RET_OK; | |||
| }; | |||
| // deal with allTensors | |||
| uint32_t tensorCount = model->all_tensors_.size(); | |||
| for (uint32_t i = 0; i < tensorCount; ++i) { | |||
| auto *meta_tensor = model->all_tensors_.at(i); | |||
| MS_CHECK_PTR_WITH_EXE(meta_tensor, clear_tensors()); | |||
| // tensor dims | |||
| std::vector<int> shape; | |||
| if (meta_tensor->nodeType() == schema::NodeType_ValueNode) { | |||
| MS_CHECK_PTR_WITH_EXE(meta_tensor->dims(), clear_tensors()); | |||
| for (uint32_t j = 0; j < meta_tensor->dims()->size(); j++) { | |||
| MS_CHECK_PTR_WITH_EXE(meta_tensor->dims()->data(), clear_tensors()); | |||
| int dim = static_cast<int>(meta_tensor->dims()->data()[j]); | |||
| MS_CHECK_RET_CODE_WITH_EXE(check_dim(dim), "parse shape failed!", clear_tensors()); | |||
| shape.push_back(dim); | |||
| } | |||
| } | |||
| // tensor Datatype | |||
| int meta_data_type = static_cast<int>(meta_tensor->dataType()); | |||
| auto dstTensor = new (std::nothrow) | |||
| lite::Tensor(TypeId(meta_data_type), shape, meta_tensor->format(), TensorCategory(meta_tensor)); | |||
| MS_CHECK_PTR_WITH_EXE(dstTensor, clear_tensors()); | |||
| if (meta_tensor->nodeType() == schema::NodeType_ValueNode && meta_tensor->data() != nullptr && | |||
| meta_tensor->data()->size() > 0) { | |||
| if (shape.empty()) { | |||
| shape.push_back(1); | |||
| dstTensor->set_shape(shape); | |||
| } | |||
| // copy data, this is weight && bias | |||
| MS_CHECK_TRUE(meta_tensor->data()->size() > 0, "invalid meta_tensor data size"); | |||
| auto data_size = static_cast<size_t>(meta_tensor->data()->size()); | |||
| MS_CHECK_RET_CODE(dstTensor->MallocData(), "dst tensor malloc data failed!"); | |||
| void *dst_data = dstTensor->data_c(); | |||
| MS_CHECK_RET_CODE(memcpy_s(dst_data, data_size, meta_tensor->data()->data(), data_size), | |||
| "memcpy_s copy data failed!"); | |||
| dstTensor->set_data(dst_data); | |||
| } | |||
| auto quant_params = meta_tensor->quantParams(); | |||
| if (quant_params != nullptr) { | |||
| for (int j = 0; j < static_cast<int>(quant_params->size()); j++) { | |||
| QuantArg quant_arg{}; | |||
| quant_arg.scale = quant_params->Get(j)->scale(); | |||
| quant_arg.zeroPoint = quant_params->Get(j)->zeroPoint(); | |||
| dstTensor->AddQuantParam(quant_arg); | |||
| } | |||
| } | |||
| all_tensors.emplace_back(dstTensor); | |||
| } | |||
| coder_graph_->SetAllTensors(all_tensors); | |||
| return RET_OK; | |||
| } | |||
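| // builds one OperatorCoder per model node via OpCoderBuilder, wiring tensor indices and code mode, then links node inputs/outputs | |||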
| int CoderSession::CreateOpCoders() { | |||
| const Model *model = coder_graph_->model(); | |||
| if (model == nullptr) { | |||
| MS_LOG(ERROR) << "Graph model is nullptr"; | |||
| return RET_ERROR; | |||
| } | |||
| Configurator *config = Configurator::GetInstance(); | |||
| Target code_target = config->target(); | |||
| CodeMode code_mode = config->code_mode(); | |||
| uint32_t nodes_size = model->all_nodes_.size(); | |||
| OpCoderBuilder builder; | |||
| for (uint32_t i = 0; i < nodes_size; ++i) { | |||
| const auto *node = model->all_nodes_.at(i); | |||
| if (node == nullptr) { | |||
| MS_LOG(ERROR) << "node is nullptr"; | |||
| return RET_ERROR; | |||
| } | |||
| std::vector<lite::Tensor *> all_tensors = coder_graph_->all_tensors(); | |||
| if (all_tensors.empty()) { | |||
| MS_LOG(ERROR) << "coder_graph has no any tensors"; | |||
| return RET_ERROR; | |||
| } | |||
| // set op_coder's inputs && outputs info | |||
| std::vector<uint32_t> input_indices; | |||
| Uint32Vector node_input_indices = node->input_indices_; | |||
| input_indices.insert(input_indices.end(), node_input_indices.begin(), node_input_indices.end()); | |||
| std::vector<uint32_t> output_indices; | |||
| Uint32Vector node_output_indices = node->output_indices_; | |||
| output_indices.insert(output_indices.end(), node_output_indices.begin(), node_output_indices.end()); | |||
| std::vector<lite::Tensor *> inputs; | |||
| std::vector<lite::Tensor *> outputs; | |||
| for (auto in_index : input_indices) { | |||
| if (in_index >= all_tensors.size()) { | |||
| MS_LOG(ERROR) << "in_index is invalid"; | |||
| return RET_ERROR; | |||
| } | |||
| inputs.push_back(all_tensors.at(in_index)); | |||
| } | |||
| for (auto ou_index : output_indices) { | |||
| if (ou_index >= all_tensors.size()) { | |||
| MS_LOG(ERROR) << "ou_index is invalid"; | |||
| return RET_ERROR; | |||
| } | |||
| outputs.push_back(all_tensors.at(ou_index)); | |||
| } | |||
| if (inputs.empty()) { | |||
| MS_LOG(ERROR) << "node: " << node->name_ << "has no inputs tensor"; | |||
| return RET_ERROR; | |||
| } | |||
| if (outputs.empty()) { | |||
| MS_LOG(ERROR) << "node: " << node->name_ << "has no outputs tensor"; | |||
| return RET_ERROR; | |||
| } | |||
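| // the op coder's kernel data type is keyed off the first input tensor | |||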
| TypeId tensor_data_type = inputs.at(0)->data_type(); | |||
| std::unique_ptr<OperatorCoder> op_coder = builder.inputs(inputs) | |||
| .outputs(outputs) | |||
| .node(node) | |||
| .target(code_target) | |||
| .data_type(tensor_data_type) | |||
| .mode(code_mode) | |||
| .input_indices(input_indices) | |||
| .output_indices(output_indices) | |||
| .build(); | |||
| MS_CHECK_PTR(op_coder); | |||
| op_coders_.push_back(std::move(op_coder)); | |||
| builder.Reset(); | |||
| } | |||
| MS_CHECK_RET_CODE(InitNodesInputsAndOutputs(), "InitNodesInputsAndOutputs failed!"); | |||
| return RET_OK; | |||
| } | |||
| int CoderSession::InitGraphInOutTensors() { | |||
| const Model *model = coder_graph_->model(); | |||
| if (model == nullptr) { | |||
| MS_LOG(ERROR) << "Graph model is nullptr"; | |||
| return RET_ERROR; | |||
| } | |||
| std::vector<size_t> graph_input_node_indexes = lite::GetGraphInputNodes(model); | |||
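| // a tensor counts as a graph input only if it also appears in the first | |||
| // subgraph's input_indices; the code only consults sub_graphs_.at(0) | |||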
| std::vector<uint32_t> input_indices; | |||
| for (auto in_node_index : graph_input_node_indexes) { | |||
| auto *in_node = model->all_nodes_.at(in_node_index); | |||
| if (in_node == nullptr) { | |||
| MS_LOG(ERROR) << "in_node is nullptr"; | |||
| return RET_ERROR; | |||
| } | |||
| for (uint32_t i = 0; i < in_node->input_indices_.size(); i++) { | |||
| auto in_tensor_index = size_t(in_node->input_indices_.at(i)); | |||
| bool is_graph_input = false; | |||
| for (uint32_t j = 0; j < model->sub_graphs_.at(0)->input_indices_.size(); j++) { | |||
| if (in_tensor_index == size_t(model->sub_graphs_.at(0)->input_indices_.at(j))) { | |||
| input_indices.push_back(static_cast<uint32_t>(in_tensor_index)); | |||
| is_graph_input = true; | |||
| break; | |||
| } | |||
| } | |||
| if (!is_graph_input) { | |||
| continue; | |||
| } | |||
| if (in_tensor_index < coder_graph_->all_tensors().size()) { | |||
| lite::Tensor *in_tensor = this->coder_graph_->all_tensors().at(in_tensor_index); | |||
| coder_graph_->AddInputMap(in_node->name_, in_tensor); | |||
| } | |||
| } | |||
| } | |||
| coder_graph_->SetInputIndices(input_indices); | |||
| std::vector<uint32_t> output_indices; | |||
| auto graph_output_node_indexes = lite::GetGraphOutputNodes(model); | |||
| for (auto out_node_index : graph_output_node_indexes) { | |||
| auto *out_node = model->all_nodes_.at(out_node_index); | |||
| if (out_node == nullptr) { | |||
| MS_LOG(ERROR) << "out_node is nullptr"; | |||
| return RET_ERROR; | |||
| } | |||
| for (uint32_t i = 0; i < out_node->output_indices_.size(); i++) { | |||
| auto out_tensor_index = size_t(out_node->output_indices_.at(i)); | |||
| bool is_graph_output = false; | |||
| for (uint32_t j = 0; j < model->sub_graphs_.at(0)->output_indices_.size(); j++) { | |||
| if (out_tensor_index == size_t(model->sub_graphs_.at(0)->output_indices_.at(j))) { | |||
| output_indices.push_back(static_cast<uint32_t>(out_tensor_index)); | |||
| is_graph_output = true; | |||
| break; | |||
| } | |||
| } | |||
| if (!is_graph_output) { | |||
| continue; | |||
| } | |||
| if (out_tensor_index < coder_graph_->all_tensors().size()) { | |||
| lite::Tensor *out_tensor = this->coder_graph_->all_tensors().at(out_tensor_index); | |||
| if (out_tensor == nullptr) { | |||
| MS_LOG(ERROR) << "can not find any output tensor in all_tensors"; | |||
| return RET_ERROR; | |||
| } | |||
| coder_graph_->AddOutputMap(out_node->name_, out_tensor); | |||
| } | |||
| } | |||
| } | |||
| coder_graph_->SetOutputIndices(output_indices); | |||
| coder_graph_->InitInputs(); | |||
| coder_graph_->InitOutputs(); | |||
| return RET_OK; | |||
| } | |||
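| // CompileGraph is the top-level pipeline: convert tensors, wire graph IO, | |||
| // infer shapes, build op coders, then initialize tensor reference counts | |||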
| int CoderSession::CompileGraph() { | |||
| MS_CHECK_RET_CODE(ConvertTensors(), "ConvertTensors failed"); | |||
| MS_CHECK_RET_CODE(InitGraphInOutTensors(), "InitGraphInOutTensors failed"); | |||
| // InferShape | |||
| MS_CHECK_RET_CODE(InferShape(), "do infershape failed!"); | |||
| // create all op_coders | |||
| MS_CHECK_RET_CODE(CreateOpCoders(), "CreateOpCoders failed!"); | |||
| MS_CHECK_RET_CODE(InitTensorsRef(), "InitTensorsRefcount failed!"); | |||
| return RET_OK; | |||
| } | |||
| std::shared_ptr<CoderSession> CreateCoderSession() { | |||
| auto session = std::make_shared<CoderSession>(); | |||
| return session; | |||
| } | |||
| CoderSession::~CoderSession() { | |||
| if (allocator_ != nullptr) { | |||
| allocator_->Free(); | |||
| } | |||
| } | |||
| } // namespace mindspore::lite::micro | |||
| @@ -0,0 +1,64 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MICRO_CODER_SESSION_CODER_H_ | |||
| #define MICRO_CODER_SESSION_CODER_H_ | |||
| #include <map> | |||
| #include <string> | |||
| #include <vector> | |||
| #include <memory> | |||
| #include "schema/inner/model_generated.h" | |||
| #include "coder/coder_graph.h" | |||
| #include "coder/coder_context.h" | |||
| #include "coder/coder_config.h" | |||
| #include "coder/allocator/allocator.h" | |||
| #include "coder/opcoders/op_coder.h" | |||
| namespace mindspore::lite::micro { | |||
| class CoderSession { | |||
| public: | |||
| CoderSession() = default; | |||
| ~CoderSession(); | |||
| int Init(const std::string &model_path); | |||
| int Build(); | |||
| int Run(); | |||
| int GenerateCode(); | |||
| private: | |||
| int InitNodesInputsAndOutputs(); | |||
| int InitTensorsRef(); | |||
| int ConvertTensors(); | |||
| int CreateOpCoders(); | |||
| int InitGraphInOutTensors(); | |||
| int CompileGraph(); | |||
| int InferShape(); | |||
| void EndCode(); | |||
| std::unique_ptr<CoderGraph> coder_graph_{nullptr}; | |||
| std::unique_ptr<CoderContext> coder_context_{nullptr}; | |||
| MemoryAllocator *allocator_{nullptr}; | |||
| std::vector<std::unique_ptr<OperatorCoder>> op_coders_; | |||
| }; | |||
| std::shared_ptr<CoderSession> CreateCoderSession(); | |||
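| // A minimal usage sketch (illustrative only; the model path is a placeholder | |||
| // and error handling is elided): | |||
| //   std::shared_ptr<CoderSession> session = CreateCoderSession(); | |||
| //   if (session->Init("path/to/model.ms") != RET_OK) { return; } | |||
| //   if (session->Build() != RET_OK) { return; } | |||
| //   if (session->GenerateCode() != RET_OK) { return; } | |||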
| } // namespace mindspore::lite::micro | |||
| #endif // MICRO_CODER_SESSION_CODER_H_ | |||
| @@ -0,0 +1,44 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "coder/utils/coder_utils.h" | |||
| #include <string> | |||
| namespace mindspore::lite::micro { | |||
| std::string EnumNameDataType(TypeId type) { | |||
| switch (type) { | |||
| case kNumberTypeInt: | |||
| return "kNumberTypeInt"; | |||
| case kNumberTypeInt8: | |||
| return "kNumberTypeInt8"; | |||
| case kNumberTypeInt16: | |||
| return "kNumberTypeInt16"; | |||
| case kNumberTypeInt32: | |||
| return "kNumberTypeInt32"; | |||
| case kNumberTypeFloat32: | |||
| return "kNumberTypeFloat32"; | |||
| case kNumberTypeFloat16: | |||
| return "kNumberTypeFloat16"; | |||
| case kNumberTypeFloat64: | |||
| return "kNumberTypeFloat64"; | |||
| case kTypeUnknown: | |||
| return "kTypeUnknown"; | |||
| default: | |||
| return "unsupported type, " + std::to_string(type); | |||
| } | |||
| } | |||
| } // namespace mindspore::lite::micro | |||
| @@ -0,0 +1,35 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MICRO_CODER_CODER_UTILS_CODER_UTILS_H_ | |||
| #define MICRO_CODER_CODER_UTILS_CODER_UTILS_H_ | |||
| #include <limits> | |||
| #include <vector> | |||
| #include <string> | |||
| #include "include/errorcode.h" | |||
| #include "securec/include/securec.h" | |||
| #include "src/tensor.h" | |||
| namespace mindspore::lite::micro { | |||
| constexpr int kSubSize = 2; | |||
| constexpr int kDefaultDims = 4; | |||
| std::string EnumNameDataType(TypeId type); | |||
| } // namespace mindspore::lite::micro | |||
| #endif // MICRO_CODER_CODER_UTILS_CODER_UTILS_H_ | |||
| @@ -0,0 +1,121 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "utils/dir_utils.h" | |||
| #include <sys/stat.h> | |||
| #if defined(_WIN32) || defined(_WIN64) | |||
| #include <direct.h> | |||
| #endif | |||
| #include <array> | |||
| #include <string> | |||
| #include <fstream> | |||
| #include "include/errorcode.h" | |||
| #include "src/common/log_adapter.h" | |||
| namespace mindspore::lite::micro { | |||
| #if defined(_WIN32) || defined(_WIN64) | |||
| constexpr int kMicroDirMode = 0777; // unused by _mkdir(), kept for symmetry | |||
| #else | |||
| constexpr mode_t kMicroDirMode = 0777; // masked by the process umask at creation time | |||
| #endif | |||
| static const std::array<std::string, 3> kWorkDirs = {"src", "include", "benchmark"}; | |||
| bool DirExists(const std::string &dir_path) { | |||
| struct stat file_info; | |||
| if (stat(dir_path.c_str(), &file_info) != 0) { | |||
| return false; | |||
| } | |||
| return (file_info.st_mode & S_IFDIR) != 0; | |||
| } | |||
| bool FileExists(const std::string &path) { | |||
| std::ifstream file(path); | |||
| return file.good(); | |||
| } | |||
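| // Creates currentDir if it does not exist. Existence is probed by opening a | |||
| // marker file inside it on Windows, or the path itself elsewhere. | |||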
| static int MkMicroDir(const std::string ¤tDir) { | |||
| #if defined(_WIN32) || defined(_WIN64) | |||
| std::ofstream currentFile; | |||
| std::string readMeFile = currentDir + "\\readMe.txt"; | |||
| currentFile.open(readMeFile); | |||
| if (!currentFile.is_open()) { | |||
| if (_mkdir(currentDir.c_str()) != 0) { | |||
| MS_LOG(ERROR) << currentDir << ": mkdir failed, please check the file path!"; | |||
| currentFile.close(); | |||
| return RET_ERROR; | |||
| } | |||
| } else { | |||
| currentFile << "This is a directory for generated code files. Do not edit!\n"; | |||
| } | |||
| currentFile.close(); | |||
| #else | |||
| std::ifstream currentFile; | |||
| currentFile.open(currentDir); | |||
| if (!currentFile.is_open()) { | |||
| if (mkdir(currentDir.c_str(), kMicroDirMode) != 0) { | |||
| MS_LOG(ERROR) << currentDir << ": mkdir failed, please check the file path!"; | |||
| currentFile.close(); | |||
| return RET_ERROR; | |||
| } | |||
| } | |||
| currentFile.close(); | |||
| #endif | |||
| return RET_OK; | |||
| } | |||
| int InitProjDirs(const std::string &pro_root_dir, const std::string &module_name) { | |||
| #if defined(_WIN32) || defined(_WIN64) | |||
| std::ofstream pro_file; | |||
| std::string read_me_file = pro_root_dir + "\\readMe.txt"; | |||
| pro_file.open(read_me_file.c_str()); | |||
| pro_file << "This is a directory for generating coding files. Do not edit !!!\n"; | |||
| #else | |||
| std::ifstream pro_file; | |||
| pro_file.open(pro_root_dir.c_str()); | |||
| #endif | |||
| if (!pro_file.is_open()) { | |||
| MS_LOG(ERROR) << pro_root_dir << ": model's root dir does not exist or cannot be opened, please check it!"; | |||
| pro_file.close(); | |||
| return RET_ERROR; | |||
| } | |||
| // check other dirs && make them if not exists | |||
| // 1. coderDir 2.WorkRootDir 3. WorkChildDir | |||
| std::string current_dir; | |||
| std::string slashCh = std::string(kSlash); | |||
| if (pro_root_dir.back() == slashCh.back()) { | |||
| current_dir = pro_root_dir + module_name; | |||
| } else { | |||
| current_dir = pro_root_dir + slashCh + module_name; | |||
| } | |||
| std::string work_dir = current_dir; | |||
| STATUS ret = MkMicroDir(current_dir); | |||
| if (ret == RET_ERROR) { | |||
| pro_file.close(); | |||
| return ret; | |||
| } | |||
| for (const auto &work : kWorkDirs) { | |||
| current_dir = work_dir + slashCh + work; | |||
| ret = MkMicroDir(current_dir); | |||
| if (ret == RET_ERROR) { | |||
| pro_file.close(); | |||
| return ret; | |||
| } | |||
| } | |||
| pro_file.close(); | |||
| return RET_OK; | |||
| } | |||
| } // namespace mindspore::lite::micro | |||
| @@ -0,0 +1,34 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_PREDICT_MICRO_CODER_CODER_UTILS_INIT_PROJ_DIRS_H_ | |||
| #define MINDSPORE_PREDICT_MICRO_CODER_CODER_UTILS_INIT_PROJ_DIRS_H_ | |||
| #include <string> | |||
| namespace mindspore::lite::micro { | |||
| #if defined(_WIN32) || defined(_WIN64) | |||
| static const char kSlash[] = "\\"; | |||
| #else | |||
| static const char kSlash[] = "/"; | |||
| #endif | |||
| int InitProjDirs(const std::string &project_root_dir, const std::string &module_name); | |||
| bool DirExists(const std::string &dir_path); | |||
| bool FileExists(const std::string &path); | |||
| } // namespace mindspore::lite::micro | |||
| #endif // MINDSPORE_PREDICT_MICRO_CODER_CODER_UTILS_INIT_PROJ_DIRS_H_ | |||
| @@ -0,0 +1,294 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "coder/utils/print_utils.h" | |||
| namespace mindspore::lite::micro { | |||
| std::string GetPrintFormat(const lite::Tensor *tensor) { | |||
| switch (tensor->data_type()) { | |||
| case kNumberTypeFloat: { | |||
| return "%f"; | |||
| } | |||
| case kNumberTypeInt8: { | |||
| return "%c"; | |||
| } | |||
| case kNumberTypeInt32: { | |||
| return "%d"; | |||
| } | |||
| case kNumberTypeUInt8: { | |||
| return "%d"; | |||
| } | |||
| case kNumberTypeInt16: { | |||
| return "%d"; | |||
| } | |||
| case kNumberTypeUInt32: { | |||
| return "%ld"; | |||
| } | |||
| case kNumberTypeInt64: { | |||
| return "%l64d"; | |||
| } | |||
| case kNumberTypeUInt16: { | |||
| return "%f"; | |||
| } | |||
| case kNumberTypeFloat16: { | |||
| MS_LOG(WARNING) << "unsupported data type: kNumberTypeFloat16"; | |||
| return "float "; | |||
| } | |||
| default: | |||
| MS_LOG(WARNING) << "unsupported data type: " << tensor->data_type(); | |||
| return "%d"; | |||
| } | |||
| } | |||
| template <typename T> | |||
| void PrintTensorData(const lite::Tensor *tensor, std::ofstream &of, const std::string &left = "\t") { | |||
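| // NUM array elements are emitted per line to keep the generated initializer readable | |||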
| const int NUM = 20; | |||
| T *data = reinterpret_cast<T *>(tensor->data_c()); | |||
| of << "{\n" << left; | |||
| int len = tensor->ElementsNum(); | |||
| if (typeid(T) == typeid(float)) { | |||
| of.precision(kWeightPrecision); | |||
| for (int i = 0; i < len - 1; ++i) { | |||
| of << data[i] << ","; | |||
| if (i % NUM == NUM - 1) { | |||
| of << std::endl << left; | |||
| } | |||
| } | |||
| if (len > 0) { | |||
| of << data[len - 1]; | |||
| } | |||
| } else { | |||
| for (int i = 0; i < len - 1; ++i) { | |||
| of << std::to_string(data[i]) << ","; | |||
| if (i % NUM == NUM - 1) { | |||
| of << std::endl << left; | |||
| } | |||
| } | |||
| if (len > 0) { | |||
| of << std::to_string(data[len - 1]); | |||
| } | |||
| } | |||
| of << "\n" << left << "};\n\n"; | |||
| } | |||
| void PrintTensor(const lite::Tensor *tensor, std::ofstream &weightOf, std::ofstream &hOf, | |||
| const std::string &tensorName) { | |||
| switch (tensor->data_type()) { | |||
| case kNumberTypeFloat: { | |||
| weightOf << "const float " << tensorName << "[] = "; | |||
| hOf << "extern const float " << tensorName << "[];\n"; | |||
| PrintTensorData<float>(tensor, weightOf); | |||
| break; | |||
| } | |||
| case kNumberTypeFloat32: { | |||
| weightOf << "const float " << tensorName << "[] = "; | |||
| hOf << "extern const float " << tensorName << "[];\n"; | |||
| PrintTensorData<float>(tensor, weightOf); | |||
| break; | |||
| } | |||
| case kNumberTypeInt8: { | |||
| weightOf << "const signed char " << tensorName << "[] = "; | |||
| hOf << "extern const signed char " << tensorName << "[];\n"; | |||
| PrintTensorData<char>(tensor, weightOf); | |||
| break; | |||
| } | |||
| case kNumberTypeInt32: { | |||
| weightOf << "const int " << tensorName << "[] = "; | |||
| hOf << "extern const int " << tensorName << "[];\n"; | |||
| PrintTensorData<int>(tensor, weightOf); | |||
| break; | |||
| } | |||
| case kNumberTypeUInt8: { | |||
| weightOf << "const unsigned char " << tensorName << "[] = "; | |||
| hOf << "extern const unsigned char " << tensorName << "[];\n"; | |||
| PrintTensorData<unsigned char>(tensor, weightOf); | |||
| break; | |||
| } | |||
| case kNumberTypeInt16: { | |||
| weightOf << "const short " << tensorName << "[] = "; | |||
| hOf << "extern const short " << tensorName << "[];\n"; | |||
| PrintTensorData<int16_t>(tensor, weightOf); | |||
| break; | |||
| } | |||
| case kNumberTypeUInt32: { | |||
| weightOf << "const unsigned int " << tensorName << "[] = "; | |||
| hOf << "extern const unsigned int " << tensorName << "[];\n"; | |||
| PrintTensorData<unsigned int>(tensor, weightOf); | |||
| break; | |||
| } | |||
| case kNumberTypeInt64: { | |||
| weightOf << "const long " << tensorName << "[] = "; | |||
| hOf << "extern const long " << tensorName << "[];\n"; | |||
| PrintTensorData<int64_t>(tensor, weightOf); | |||
| break; | |||
| } | |||
| case kNumberTypeUInt16: { | |||
| weightOf << "const unsigned short " << tensorName << "[] = "; | |||
| hOf << "extern const unsigned short " << tensorName << "[];\n"; | |||
| PrintTensorData<uint16_t>(tensor, weightOf); | |||
| break; | |||
| } | |||
| case kNumberTypeFloat16: { | |||
| MS_LOG(WARNING) << "unsupported data type: kNumberTypeFloat16"; | |||
| break; | |||
| } | |||
| default: | |||
| MS_LOG(WARNING) << "unsupported data type: " << tensor->data_type(); | |||
| } | |||
| } | |||
| void PrintTensorForNet(const lite::Tensor *tensor, std::ofstream &weightOf, std::ofstream &hOf, | |||
| const std::string &tensorName) { | |||
| MS_LOG(DEBUG) << "PrintTensorForNet tensor dtype: " << tensor->data_type(); | |||
| switch (tensor->data_type()) { | |||
| case kNumberTypeFloat: { | |||
| weightOf << "float " << tensorName << "[" << tensor->ElementsNum() << "]={0};\n"; | |||
| hOf << "extern float " << tensorName << "[];\n"; | |||
| break; | |||
| } | |||
| case kNumberTypeFloat32: { | |||
| weightOf << "float " << tensorName << "[" << tensor->ElementsNum() << "]={0};\n"; | |||
| hOf << "extern float " << tensorName << "[];\n"; | |||
| break; | |||
| } | |||
| case kNumberTypeInt8: { | |||
| weightOf << "signed char " << tensorName << "[" << tensor->ElementsNum() << "]={0};\n"; | |||
| hOf << "extern signed char " << tensorName << "[];\n"; | |||
| break; | |||
| } | |||
| case kNumberTypeInt32: { | |||
| weightOf << "int " << tensorName << "[" << tensor->ElementsNum() << "]={0};\n"; | |||
| hOf << "extern int " << tensorName << "[];\n"; | |||
| break; | |||
| } | |||
| case kNumberTypeUInt8: { | |||
| weightOf << "unsigned char " << tensorName << "[" << tensor->ElementsNum() << "]={0};\n"; | |||
| hOf << "extern unsigned char " << tensorName << "[];\n"; | |||
| break; | |||
| } | |||
| case kNumberTypeInt16: { | |||
| weightOf << "short " << tensorName << "[" << tensor->ElementsNum() << "]={0};\n"; | |||
| hOf << "extern short " << tensorName << "[];\n"; | |||
| break; | |||
| } | |||
| case kNumberTypeUInt32: { | |||
| weightOf << "unsigned int " << tensorName << "[" << tensor->ElementsNum() << "]={0};\n"; | |||
| hOf << "extern unsigned int " << tensorName << "[];\n"; | |||
| break; | |||
| } | |||
| case kNumberTypeInt64: { | |||
| weightOf << "long " << tensorName << "[" << tensor->ElementsNum() << "]={0};\n"; | |||
| hOf << "extern long " << tensorName << "[];\n"; | |||
| break; | |||
| } | |||
| case kNumberTypeUInt16: { | |||
| weightOf << "unsigned short " << tensorName << "[" << tensor->ElementsNum() << "]={0};\n"; | |||
| hOf << "extern unsigned short " << tensorName << "[];\n"; | |||
| break; | |||
| } | |||
| case kNumberTypeFloat16: { | |||
| weightOf << "float " << tensorName << "[" << tensor->ElementsNum() << "]={0};\n"; | |||
| hOf << "extern float " << tensorName << "[];\n"; | |||
| break; | |||
| } | |||
| default: | |||
| MS_LOG(WARNING) << "Default DataType_DT not support. Tensor name: " << tensorName.c_str(); | |||
| } | |||
| } | |||
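| // The returned type names are concatenated directly with variable names in the | |||
| // generated C code, hence the trailing spaces. | |||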
| std::string GetTensorDataType(const TypeId typeId) { | |||
| switch (typeId) { | |||
| case kNumberTypeFloat32: { | |||
| return "float "; | |||
| } | |||
| case kNumberTypeFloat: { | |||
| return "float "; | |||
| } | |||
| case kNumberTypeInt8: { | |||
| return "char "; | |||
| } | |||
| case kNumberTypeInt: { | |||
| return "int "; | |||
| } | |||
| case kNumberTypeInt32: { | |||
| return "int "; | |||
| } | |||
| case kNumberTypeUInt8: { | |||
| return "unsigned char "; | |||
| } | |||
| case kNumberTypeInt16: { | |||
| return "short "; | |||
| } | |||
| case kNumberTypeUInt32: { | |||
| return "unsigned int "; | |||
| } | |||
| case kNumberTypeInt64: { | |||
| return "long "; | |||
| } | |||
| case kNumberTypeUInt16: { | |||
| return "unsigned short "; | |||
| } | |||
| case kNumberTypeFloat16: { | |||
| MS_LOG(WARNING) << "unsupported data type: kNumberTypeFloat16"; | |||
| return "float "; | |||
| } | |||
| default: | |||
| MS_LOG(WARNING) << "unsupported data type: " << typeId; | |||
| return "int"; | |||
| } | |||
| } | |||
| std::string GetMicroTensorDataType(TypeId type) { | |||
| switch (type) { | |||
| case kNumberTypeFloat: | |||
| case kNumberTypeFloat32: { | |||
| return "DataType_DT_FLOAT"; | |||
| } | |||
| case kNumberTypeInt8: { | |||
| return "DataType_DT_INT8"; | |||
| } | |||
| case kNumberTypeInt: | |||
| case kNumberTypeInt32: { | |||
| return "DataType_DT_INT32"; | |||
| } | |||
| case kNumberTypeUInt8: { | |||
| return "DataType_DT_UINT8"; | |||
| } | |||
| case kNumberTypeInt16: { | |||
| return "DataType_DT_INT16"; | |||
| } | |||
| case kNumberTypeUInt32: { | |||
| return "DataType_DT_UINT32"; | |||
| } | |||
| case kNumberTypeInt64: { | |||
| return "DataType_DT_INT64"; | |||
| } | |||
| case kNumberTypeUInt16: { | |||
| return "DataType_DT_UINT16"; | |||
| } | |||
| case kNumberTypeFloat16: { | |||
| MS_LOG(WARNING) << "unsupported data type: kNumberTypeFloat16"; | |||
| return "DataType_DT_FLOAT16"; | |||
| } | |||
| default: | |||
| MS_LOG(WARNING) << "unsupported data type: " << type << ", reference: " << kNumberTypeInt; | |||
| return "DataType_DT_UNDEFINED"; | |||
| } | |||
| } | |||
| } // namespace mindspore::lite::micro | |||
| @@ -0,0 +1,74 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_MICRO_PRINT_UTILS_H_ | |||
| #define MINDSPORE_MICRO_PRINT_UTILS_H_ | |||
| #include <fstream> | |||
| #include <sstream> | |||
| #include <string> | |||
| #include <map> | |||
| #include <typeindex> | |||
| #include <typeinfo> | |||
| #include "src/tensor.h" | |||
| #include "nnacl/int8/quantize.h" | |||
| namespace mindspore::lite::micro { | |||
| constexpr int kWeightPrecision = 9; | |||
| std::string GetPrintFormat(const lite::Tensor *tensor); | |||
| void PrintTensor(const lite::Tensor *tensor, std::ofstream &weightOf, std::ofstream &hOf, | |||
| const std::string &tensorName); | |||
| void PrintTensorForNet(const lite::Tensor *tensor, std::ofstream &weightOf, std::ofstream &hOf, | |||
| const std::string &tensorName); | |||
| std::string GetTensorDataType(const TypeId typeId); | |||
| std::string GetMicroTensorDataType(TypeId type); | |||
| /** | |||
| * @tparam T a basic data type, or a pointer to one | |||
| * @return the C type name for T, or an empty string if unsupported | |||
| */ | |||
| template <typename T> | |||
| std::string GetVariableTypeName() { | |||
| std::map<std::type_index, std::string> types_name = {{std::type_index(typeid(int)), "int"}, | |||
| {std::type_index(typeid(int32_t)), "int32_t"}, | |||
| {std::type_index(typeid(int16_t)), "int16_t"}, | |||
| {std::type_index(typeid(int8_t)), "int8_t"}, | |||
| {std::type_index(typeid(float)), "float"}, | |||
| {std::type_index(typeid(double)), "double"}, | |||
| {std::type_index(typeid(::QuantArg)), "QuantArg"}, | |||
| {std::type_index(typeid(int *)), "int *"}, | |||
| {std::type_index(typeid(int32_t *)), "int32_t *"}, | |||
| {std::type_index(typeid(int16_t *)), "int16_t *"}, | |||
| {std::type_index(typeid(int8_t *)), "int8_t *"}, | |||
| {std::type_index(typeid(float *)), "float *"}}; | |||
| auto item = types_name.find(std::type_index(typeid(T))); | |||
| if (item != types_name.end()) { | |||
| return item->second; | |||
| } | |||
| MS_LOG(ERROR) << "unsupported variable type"; | |||
| return ""; | |||
| } | |||
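| // e.g. GetVariableTypeName<int16_t *>() yields "int16_t *"; an unsupported type | |||
| // logs an error and yields an empty string. | |||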
| } // namespace mindspore::lite::micro | |||
| #endif // MINDSPORE_MICRO_PRINT_UTILS_H_ | |||