
utils.py 11 kB

# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Define common utils."""
import json
import os
import stat
from importlib import import_module
from importlib.util import find_spec
from typing import List, Tuple, Mapping

import numpy as np

from mindspore.train.serialization import save_checkpoint

from mindinsight.mindconverter.common.exceptions import ScriptGenerationError, ReportGenerationError, \
    UnknownModelError, CheckPointGenerationError, WeightMapGenerationError
from mindinsight.mindconverter.common.log import logger as log
from mindinsight.mindconverter.graph_based_converter.constant import SEPARATOR_IN_ONNX_OP, BINARY_HEADER_PYTORCH_BITS, \
    FrameworkType, BINARY_HEADER_PYTORCH_FILE, TENSORFLOW_MODEL_SUFFIX, THIRD_PART_VERSION


def is_converted(operation: str):
    """
    Check whether the operation has been converted successfully.

    Args:
        operation (str): Operation name.

    Returns:
        bool, true or false.
    """
    return operation and SEPARATOR_IN_ONNX_OP not in operation
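
# Usage sketch (assuming SEPARATOR_IN_ONNX_OP is the ONNX scope separator, e.g. "::"):
#
#   is_converted("onnx::Conv")  # -> False, the name still carries the ONNX separator
#   is_converted("Conv2d")      # -> True (non-empty and separator-free)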


def _add_outputs_of_onnx_model(model, output_nodes: List[str]):
    """
    Add output nodes of onnx model.

    Args:
        model (ModelProto): ONNX model.
        output_nodes (list[str]): Output nodes list.

    Returns:
        ModelProto, edited ONNX model.
    """
    onnx = import_module("onnx")
    for opt_name in output_nodes:
        intermediate_layer_value_info = onnx.helper.ValueInfoProto()
        intermediate_layer_value_info.name = opt_name
        model.graph.output.append(intermediate_layer_value_info)
    return model


def check_dependency_integrity(*packages):
    """Check dependency package integrity."""
    try:
        for pkg in packages:
            import_module(pkg)
        return True
    except ImportError:
        return False
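
# Usage sketch: the package names are the importable module names, e.g.
#
#   if not check_dependency_integrity("onnx", "onnxruntime", "tf2onnx"):
#       log.error("Required third-party packages are missing or broken.")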


def build_feed_dict(onnx_model, input_nodes: dict):
    """Build feed dict for onnxruntime."""
    dtype_mapping = getattr(import_module("tf2onnx.utils"), "ONNX_TO_NUMPY_DTYPE")
    input_nodes_types = {
        node.name: dtype_mapping[node.type.tensor_type.elem_type]
        for node in onnx_model.graph.input
    }
    feed_dict = {
        name: np.random.rand(*shape).astype(input_nodes_types[name])
        for name, shape in input_nodes.items()
    }
    return feed_dict
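
# Usage sketch (hypothetical model path, input name and shape; requires onnx and
# tf2onnx to be installed):
#
#   onnx = import_module("onnx")
#   onnx_model = onnx.load("model.onnx")
#   feed_dict = build_feed_dict(onnx_model, {"input_1:0": (1, 224, 224, 3)})
#   # feed_dict maps each input name to a random numpy array cast to the dtype
#   # declared for that input in the ONNX graph.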


def fetch_output_from_onnx_model(model, feed_dict: dict, output_nodes: List[str]):
    """
    Fetch the output of specific nodes from an ONNX model.

    Notes:
        Only supports getting output without the batch dimension.

    Args:
        model (ModelProto): ONNX model.
        feed_dict (dict): Feed forward inputs.
        output_nodes (list[str]): Output nodes list.

    Returns:
        dict, nodes' output value.
    """
    if not isinstance(feed_dict, dict) or not isinstance(output_nodes, list):
        raise TypeError("`feed_dict` should be type of dict, and `output_nodes` "
                        "should be type of List[str].")

    edit_model = _add_outputs_of_onnx_model(model, output_nodes)
    ort = import_module("onnxruntime")
    sess = ort.InferenceSession(path_or_bytes=bytes(edit_model.SerializeToString()))
    fetched_res = sess.run(output_names=output_nodes, input_feed=feed_dict)
    run_result = dict()
    for idx, opt in enumerate(output_nodes):
        run_result[opt] = fetched_res[idx]
    return run_result
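
# Usage sketch combining build_feed_dict and fetch_output_from_onnx_model
# (hypothetical model path and node names; onnx and onnxruntime must be installed):
#
#   onnx = import_module("onnx")
#   onnx_model = onnx.load("model.onnx")
#   feed_dict = build_feed_dict(onnx_model, {"input_1:0": (1, 224, 224, 3)})
#   outputs = fetch_output_from_onnx_model(onnx_model, feed_dict, ["conv1/Relu:0"])
#   # outputs["conv1/Relu:0"] holds the numpy value computed for that node.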


def save_code_file_and_report(model_name: str, code_lines: Mapping[str, Tuple],
                              out_folder: str, report_folder: str):
    """
    Save code file and report.

    Args:
        model_name (str): Model name.
        code_lines (dict): Code lines.
        out_folder (str): Output folder.
        report_folder (str): Report output folder.
    """
    flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
    modes = stat.S_IRUSR | stat.S_IWUSR
    modes_usr = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR

    out_folder = os.path.realpath(out_folder)
    if not report_folder:
        report_folder = out_folder
    else:
        report_folder = os.path.realpath(report_folder)

    if not os.path.exists(out_folder):
        os.makedirs(out_folder, modes_usr)
    if not os.path.exists(report_folder):
        os.makedirs(report_folder, modes_usr)

    for file_name in code_lines:
        code, report, trainable_weights, weight_map = code_lines[file_name]
        code_file_path = os.path.realpath(os.path.join(out_folder, f"{model_name}.py"))
        report_file_path = os.path.realpath(os.path.join(report_folder, f"report_of_{model_name}.txt"))
        try:
            if os.path.exists(code_file_path):
                raise ScriptGenerationError("Code file with the same name already exists.")
            with os.fdopen(os.open(code_file_path, flags, modes), 'w') as file:
                file.write(code)
        except (IOError, FileExistsError) as error:
            raise ScriptGenerationError(str(error))

        try:
            if os.path.exists(report_file_path):
                raise ReportGenerationError("Report file with the same name already exists.")
            with os.fdopen(os.open(report_file_path, flags, stat.S_IRUSR), "w") as rpt_f:
                rpt_f.write(report)
        except (IOError, FileExistsError) as error:
            raise ReportGenerationError(str(error))

        ckpt_file_path = os.path.realpath(os.path.join(out_folder, f"{model_name}.ckpt"))
        try:
            if os.path.exists(ckpt_file_path):
                raise CheckPointGenerationError("Checkpoint file with the same name already exists.")
            save_checkpoint(trainable_weights, ckpt_file_path)
        except TypeError as error:
            raise CheckPointGenerationError(str(error))

        weight_map_path = os.path.realpath(os.path.join(out_folder, f"weight_map_of_{model_name}.json"))
        try:
            if os.path.exists(weight_map_path):
                raise WeightMapGenerationError("Weight map file with the same name already exists.")
            with os.fdopen(os.open(weight_map_path, flags, stat.S_IRUSR), 'w') as map_f:
                weight_map_json = {f"{model_name}": weight_map}
                json.dump(weight_map_json, map_f)
        except (IOError, FileExistsError) as error:
            raise WeightMapGenerationError(str(error))
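
# Usage sketch (hypothetical values): each entry of `code_lines` is expected to be a
# tuple of (generated script text, conversion report text, trainable weights accepted
# by mindspore's save_checkpoint, weight-name mapping):
#
#   save_code_file_and_report(
#       model_name="resnet50",
#       code_lines={"resnet50": (code, report, trainable_weights, weight_map)},
#       out_folder="./output",
#       report_folder="./output/report",
#   )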


def onnx_satisfied():
    """Validate ONNX, ONNXRUNTIME, ONNXOPTIMIZER installation."""
    if not find_spec("onnx") or not find_spec("onnxruntime") or not find_spec("onnxoptimizer"):
        return False
    return True


def lib_version_satisfied(current_ver: str, mini_ver_limited: str,
                          newest_ver_limited: str = ""):
    """
    Check whether the python lib version is satisfied.

    Notes:
        Version number must be in the format of x.x.x, e.g. 1.1.0.

    Args:
        current_ver (str): Current lib version.
        mini_ver_limited (str): Minimum lib version.
        newest_ver_limited (str): Newest lib version.

    Returns:
        bool, true or false.
    """
    required_version_number_len = 3
    if len(list(current_ver.split("."))) != required_version_number_len or \
            len(list(mini_ver_limited.split("."))) != required_version_number_len or \
            (newest_ver_limited and len(newest_ver_limited.split(".")) != required_version_number_len):
        raise ValueError("Version number must be in the format of x.x.x.")
    if current_ver < mini_ver_limited or (newest_ver_limited and current_ver > newest_ver_limited):
        return False
    return True
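
# Usage sketch (note that versions are compared as plain strings, which is reliable
# for single-digit version components like those below):
#
#   lib_version_satisfied("1.8.0", "1.5.0", "1.9.0")  # -> True
#   lib_version_satisfied("1.4.0", "1.5.0")           # -> False, below the minimum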


def get_dict_key_by_value(val, dic):
    """
    Return the first key of a dictionary whose value equals the given value.

    Args:
        val (Any): Value of the key.
        dic (dict): Dictionary to be checked.

    Returns:
        Any, key of the given value.
    """
    for d_key, d_val in dic.items():
        if d_val == val:
            return d_key
    return None
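
# Usage sketch:
#
#   get_dict_key_by_value("Conv2d", {"onnx::Conv": "Conv2d"})  # -> "onnx::Conv"
#   get_dict_key_by_value("Gemm", {})                          # -> None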


def convert_bytes_string_to_string(bytes_str):
    """
    Convert a bytes string to a string using UTF-8.

    Args:
        bytes_str (bytes): A bytes string.

    Returns:
        str, a str with utf-8 encoding.
    """
    if isinstance(bytes_str, bytes):
        return bytes_str.decode('utf-8')
    return bytes_str


def get_framework_type(model_path):
    """Get framework type."""
    if model_path.endswith('.onnx'):
        return FrameworkType.PYTORCH.value

    try:
        with open(model_path, 'rb') as f:
            if f.read(BINARY_HEADER_PYTORCH_BITS) == BINARY_HEADER_PYTORCH_FILE:
                framework_type = FrameworkType.PYTORCH.value
            elif os.path.basename(model_path).split(".")[-1].lower() == TENSORFLOW_MODEL_SUFFIX:
                framework_type = FrameworkType.TENSORFLOW.value
            else:
                framework_type = FrameworkType.UNKNOWN.value
    except IOError:
        error_msg = "Get UNSUPPORTED model."
        error = UnknownModelError(error_msg)
        log.error(str(error))
        raise error

    return framework_type
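
# Usage sketch (hypothetical paths): ".onnx" files are routed to the PyTorch branch of
# the converter, files whose suffix matches TENSORFLOW_MODEL_SUFFIX (assumed here to
# be "pb") are detected as TensorFlow, and unreadable files raise UnknownModelError:
#
#   get_framework_type("model.onnx")  # -> FrameworkType.PYTORCH.value
#   get_framework_type("model.pb")    # -> FrameworkType.TENSORFLOW.value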


def reset_init_or_construct(template, variable_slot, new_data, scope):
    """Reset init statement."""
    template[variable_slot][scope].clear()
    template[variable_slot][scope] += new_data
    return template


def replace_string_in_list(str_list: list, original_str: str, target_str: str):
    """
    Replace a string in a list with the provided string.

    Args:
        str_list (list): A list containing the string to be replaced.
        original_str (str): The string to be replaced.
        target_str (str): The replacement string.

    Returns:
        list, the original list with replaced string.
    """
    return [s.replace(original_str, target_str) for s in str_list]
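
# Usage sketch:
#
#   replace_string_in_list(["opt.Adam", "opt.SGD"], "opt.", "nn.")
#   # -> ["nn.Adam", "nn.SGD"]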


def get_third_part_lib_validation_error_info(lib_list):
    """Get error info when third-party lib validation is not satisfied."""
    error_info = None
    link_str = ', '
    for idx, lib in enumerate(lib_list):
        if idx == len(lib_list) - 1:
            link_str = ' and '
        lib_version_required = THIRD_PART_VERSION[lib]
        if len(lib_version_required) == 2:
            lib_version_required_min = lib_version_required[0]
            lib_version_required_max = lib_version_required[1]
            if lib_version_required_min == lib_version_required_max:
                info = f"{lib}(=={lib_version_required_min})"
            else:
                info = f"{lib}(>={lib_version_required_min} and <{lib_version_required_max})"
        else:
            info = f"{lib}(>={lib_version_required[0]})"
        if not error_info:
            error_info = info
        else:
            error_info = link_str.join((error_info, info))
    return error_info
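
# Usage sketch (hypothetical THIRD_PART_VERSION content): with
# THIRD_PART_VERSION == {"onnx": ("1.8.0",), "onnxruntime": ("1.5.2", "1.6.0")},
# get_third_part_lib_validation_error_info(["onnx", "onnxruntime"]) would return
# "onnx(>=1.8.0) and onnxruntime(>=1.5.2 and <1.6.0)".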