You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

aicpu_data_parser.py 7.8 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """
  16. The parser for AI CPU preprocess data.
  17. """
  18. import os
  19. import stat
  20. from mindspore.profiler.common.util import fwrite_format, get_file_join_name
  21. from mindspore import log as logger
  22. class DataPreProcessParser:
  23. """
  24. The Parser for AI CPU preprocess data.
  25. Args:
  26. input_path(str): The profiling job path.
  27. output_filename(str): The output data path and name.
  28. """
  29. _source_file_target_old = 'DATA_PREPROCESS.dev.AICPU.'
  30. _source_file_target = 'DATA_PREPROCESS.AICPU.'
  31. _dst_file_title = 'title:DATA_PREPROCESS AICPU'
  32. _dst_file_column_title = ['serial_number', 'node_type_name', 'total_time(ms)',
  33. 'dispatch_time(ms)', 'execution_time(ms)', 'run_start',
  34. 'run_end']
  35. _ms_unit = 1000
  36. def __init__(self, input_path, output_filename):
  37. self._input_path = input_path
  38. self._output_filename = output_filename
  39. self._source_file_name = self._get_source_file()
  40. self._ms_kernel_flag = 3
  41. self._other_kernel_flag = 6
  42. self._thread_flag = 7
  43. self._ms_kernel_run_end_index = 2
  44. self._other_kernel_run_end_index = 5
  45. self._result_list = []
  46. self._min_cycle_counter = float('inf')
  47. def _get_source_file(self):
  48. """Get log file name, which was created by ada service."""
  49. file_name = get_file_join_name(self._input_path, self._source_file_target)
  50. if not file_name:
  51. file_name = get_file_join_name(self._input_path, self._source_file_target_old)
  52. if not file_name:
  53. data_path = os.path.join(self._input_path, "data")
  54. file_name = get_file_join_name(data_path, self._source_file_target)
  55. if not file_name:
  56. file_name = get_file_join_name(data_path, self._source_file_target_old)
  57. return file_name
  58. def _get_kernel_result(self, number, node_list, thread_list):
  59. """Get the profiling data form different aicpu kernel"""
  60. try:
  61. if len(node_list) == self._ms_kernel_flag and len(thread_list) == self._thread_flag:
  62. node_type_name = node_list[0].split(':')[-1]
  63. run_end_index = self._ms_kernel_run_end_index
  64. elif len(node_list) == self._other_kernel_flag and len(thread_list) == self._thread_flag:
  65. node_type_name = node_list[0].split(':')[-1].split('/')[-1].split('-')[0]
  66. run_end_index = self._other_kernel_run_end_index
  67. else:
  68. logger.warning("the data format can't support 'node_list':%s", str(node_list))
  69. return None
  70. us_unit = 100 # Convert 10ns to 1us.
  71. run_start_counter = float(node_list[1].split(':')[-1].split(' ')[1]) / us_unit
  72. run_end_counter = float(node_list[run_end_index].split(':')[-1].split(' ')[1]) / us_unit
  73. run_start = node_list[1].split(':')[-1].split(' ')[0]
  74. run_end = node_list[run_end_index].split(':')[-1].split(' ')[0]
  75. exe_time = (float(run_end) - float(run_start)) / self._ms_unit
  76. total_time = float(thread_list[-1].split('=')[-1].split()[0]) / self._ms_unit
  77. dispatch_time = float(thread_list[-2].split('=')[-1].split()[0]) / self._ms_unit
  78. return [number, node_type_name, total_time, dispatch_time, exe_time,
  79. run_start_counter, run_end_counter]
  80. except IndexError as e:
  81. logger.error(e)
  82. return None
  83. def execute(self):
  84. """Execute the parser, get result data, and write it to the output file."""
  85. if not os.path.exists(self._source_file_name):
  86. logger.info("Did not find the aicpu profiling source file")
  87. return
  88. with open(self._source_file_name, 'rb') as ai_cpu_data:
  89. ai_cpu_str = str(ai_cpu_data.read().replace(b'\n\x00', b' ___ ')
  90. .replace(b'\x00', b' ___ '))[2:-1]
  91. ai_cpu_lines = ai_cpu_str.split(" ___ ")
  92. os.chmod(self._source_file_name, stat.S_IREAD | stat.S_IWRITE)
  93. result_list = list()
  94. ai_cpu_total_time_summary = 0
  95. # Node serial number.
  96. serial_number = 1
  97. for i in range(len(ai_cpu_lines) - 1):
  98. node_line = ai_cpu_lines[i]
  99. thread_line = ai_cpu_lines[i + 1]
  100. if "Node" in node_line and "Thread" in thread_line:
  101. # Get the node data from node_line
  102. node_list = node_line.split(',')
  103. thread_list = thread_line.split(',')
  104. result = self._get_kernel_result(serial_number, node_list, thread_list)
  105. if result is None:
  106. continue
  107. result_list.append(result)
  108. # Calculate the total time.
  109. total_time = result[2]
  110. ai_cpu_total_time_summary += total_time
  111. # Increase node serial number.
  112. serial_number += 1
  113. elif "Node" in node_line and "Thread" not in thread_line:
  114. node_type_name = node_line.split(',')[0].split(':')[-1]
  115. logger.warning("The node type:%s cannot find thread data", node_type_name)
  116. if result_list:
  117. ai_cpu_total_time = format(ai_cpu_total_time_summary, '.6f')
  118. result_list.append(["AI CPU Total Time(ms):", ai_cpu_total_time])
  119. fwrite_format(self._output_filename, " ".join(self._dst_file_column_title), is_start=True, is_print=True)
  120. fwrite_format(self._output_filename, result_list, is_print=True)
  121. # For timeline display.
  122. self._result_list = result_list
  123. def query_aicpu_data(self):
  124. """
  125. Get execution time of AI CPU operator.
  126. Returns:
  127. a dict, the metadata of AI CPU operator execution time.
  128. """
  129. stream_id = 0 # Default stream id for AI CPU.
  130. pid = 9000 # Default pid for AI CPU.
  131. total_time = 0
  132. min_cycle_counter = float('inf')
  133. aicpu_info = []
  134. op_count_list = []
  135. for aicpu_item in self._result_list:
  136. if "AI CPU Total Time(ms):" in aicpu_item:
  137. total_time = aicpu_item[-1]
  138. continue
  139. op_name = aicpu_item[1]
  140. start_time = float(aicpu_item[5]) / self._ms_unit
  141. min_cycle_counter = min(min_cycle_counter, start_time)
  142. duration = aicpu_item[4]
  143. aicpu_info.append([op_name, stream_id, start_time, duration, pid])
  144. # Record the number of operator types.
  145. if op_name not in op_count_list:
  146. op_count_list.append(op_name)
  147. self._min_cycle_counter = min_cycle_counter
  148. aicpu_dict = {
  149. 'info': aicpu_info,
  150. 'total_time': float(total_time),
  151. 'op_exe_times': len(aicpu_info),
  152. 'num_of_ops': len(op_count_list),
  153. 'num_of_streams': 1
  154. }
  155. return aicpu_dict
  156. @property
  157. def min_cycle_counter(self):
  158. """Get minimum cycle counter in AI CPU."""
  159. return self._min_cycle_counter