You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

coder_utils.cc 5.6 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173
  1. /**
  2. * Copyright 2021 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "coder/utils/coder_utils.h"
  17. #include <set>
  18. #include <queue>
  19. #include <string>
  20. #include <memory>
  21. #include <fstream>
  22. #include "coder/log.h"
  23. #include "coder/utils/type_cast.h"
  24. #include "coder/allocator/allocator.h"
  25. namespace mindspore::lite::micro {
  26. template <typename T>
  27. void TensorDataToFile(const lite::Tensor *tensor, std::ofstream &ofs) {
  28. const int NUM = 45;
  29. T *data = reinterpret_cast<T *>(tensor->data_c());
  30. if (data == nullptr) {
  31. MS_LOG(ERROR) << "data is nullptr";
  32. return;
  33. }
  34. ofs << "{\n";
  35. if (typeid(T) == typeid(float)) {
  36. ofs.precision(kWeightPrecision);
  37. }
  38. int len = tensor->ElementsNum();
  39. for (int i = 0; i < len; ++i) {
  40. ofs << data[i] << ", ";
  41. if (i % NUM == NUM - 1) {
  42. ofs << "\n";
  43. }
  44. }
  45. ofs << "\n};\n\n";
  46. }
  47. void PrintTensorData(const lite::Tensor *tensor, std::ofstream &ofs) {
  48. TypeId type = tensor->data_type();
  49. switch (tensor->data_type()) {
  50. case kNumberTypeFloat:
  51. case kNumberTypeFloat32:
  52. TensorDataToFile<float>(tensor, ofs);
  53. break;
  54. case kNumberTypeInt8:
  55. TensorDataToFile<int8_t>(tensor, ofs);
  56. break;
  57. case kNumberTypeInt:
  58. case kNumberTypeInt32:
  59. TensorDataToFile<int32_t>(tensor, ofs);
  60. case kNumberTypeInt64:
  61. TensorDataToFile<int64_t>(tensor, ofs);
  62. break;
  63. case kNumberTypeUInt8:
  64. TensorDataToFile<uint8_t>(tensor, ofs);
  65. break;
  66. case kNumberTypeUInt32:
  67. TensorDataToFile<uint32_t>(tensor, ofs);
  68. break;
  69. default:
  70. MS_LOG(ERROR) << "unsupported data type: " << EnumNameDataType(type);
  71. break;
  72. }
  73. }
  74. template <typename T>
  75. std::string ArrayToString(const std::vector<T> &array) {
  76. std::string result = "{";
  77. std::for_each(array.begin(), array.end(), [&result](const T &t) { result += std::to_string(t) + ", "; });
  78. return result + "}";
  79. }
  80. std::string TensorsToString(const std::vector<Tensor *> &tensors, const std::string &is_input) {
  81. MemoryAllocator *allocator = MemoryAllocator::GetInstance();
  82. std::string info;
  83. for (const auto &tensor : tensors) {
  84. if (tensor->category() == Tensor::Category::CONST_TENSOR) {
  85. continue;
  86. }
  87. info += " {\n";
  88. info += " int dim[] = " + ArrayToString(tensor->shape()) + ";\n";
  89. info += " MicroTensor tensor = {";
  90. info += EnumMicroTensorDataType(tensor->data_type()) + ", ";
  91. info += EnumMicroTensorFormat(tensor->format()) + ", ";
  92. info += std::to_string(tensor->shape().size()) + ", dim, ";
  93. info += allocator->GetRuntimeAddr(tensor) + "};\n";
  94. info += " fprintf(output_file, \"" + is_input + " Tensor: " + allocator->GetRuntimeAddr(tensor) + "\\n\");\n";
  95. info += " PrintTensor(&tensor, output_file, \"" + is_input + "\");\n";
  96. info += " }\n";
  97. }
  98. return info;
  99. }
  100. std::vector<std::string> AddDumpDataInfo(const std::vector<std::string> &blocks,
  101. const std::vector<std::unique_ptr<OperatorCoder>> &opcoders) {
  102. std::vector<std::string> results;
  103. if (blocks.size() != opcoders.size()) {
  104. MS_LOG(ERROR) << "error, coder blocks size is not equal to opcoders size";
  105. return results;
  106. }
  107. size_t num = opcoders.size();
  108. for (size_t i = 0; i < num; ++i) {
  109. auto &opcoder = opcoders.at(i);
  110. std::string code = blocks.at(i);
  111. std::string name = opcoder->ID();
  112. code += " {\n";
  113. code += " FILE *output_file = fopen(\"./" + name + ".ir\", \"w\");\n";
  114. code += " fprintf(output_file, \"Node:" + name + "\\n\");\n";
  115. code += TensorsToString(opcoder->input_tensors(), "input");
  116. code += TensorsToString(opcoder->output_tensors(), "output");
  117. code += " fclose(output_file);\n";
  118. code += " }\n";
  119. results.emplace_back(code);
  120. }
  121. return results;
  122. }
  123. std::vector<std::string> SplitString(std::string str, const std::string &pattern) {
  124. std::vector<std::string> results;
  125. if (str.empty()) {
  126. MS_LOG(ERROR) << "source string is empty";
  127. return results;
  128. }
  129. str += pattern;
  130. while (!str.empty()) {
  131. size_t size = str.size();
  132. size_t pos = str.find(pattern);
  133. std::string sub_string = str.substr(0, pos);
  134. results.push_back(sub_string);
  135. str = str.substr(pos + 1, size);
  136. }
  137. return results;
  138. }
  139. std::set<OperatorCoder *> FindInferenceOpcoders(OperatorCoder *edge) {
  140. std::set<OperatorCoder *> subgraph;
  141. std::queue<OperatorCoder *> to_visit;
  142. to_visit.push(edge);
  143. while (!to_visit.empty()) {
  144. size_t size = to_visit.size();
  145. for (size_t i = 0; i < size; ++i) {
  146. OperatorCoder *curr = to_visit.front();
  147. to_visit.pop();
  148. if (subgraph.find(curr) != subgraph.end()) {
  149. continue;
  150. }
  151. subgraph.insert(curr);
  152. for (const auto &op : curr->input_ops()) {
  153. to_visit.push(op);
  154. }
  155. }
  156. }
  157. auto item = subgraph.find(edge);
  158. if (item == subgraph.end()) {
  159. MS_LOG(ERROR) << "failed to find the edge in the subgraph";
  160. return subgraph;
  161. }
  162. // erase edge operator coder from subgraph
  163. subgraph.erase(item);
  164. return subgraph;
  165. }
  166. } // namespace mindspore::lite::micro