
test_minddata_analyzer.py

# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Test MindData Profiling Analyzer Support
"""
import csv
import json
import os
import numpy as np

import mindspore.common.dtype as mstype
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as C
import mindspore._c_dataengine as cde
from mindspore.profiler.parser.minddata_analyzer import MinddataProfilingAnalyzer

# Add file name to rank id mapping to avoid file-writing crashes
file_name_map_rank_id = {"test_analyze_basic": "0",
                         "test_analyze_sequential_pipelines_invalid": "1"}


class TestMinddataProfilingAnalyzer:
    """
    Test the MinddataProfilingAnalyzer class
    """

    def setup_class(self):
        """
        Run once for the class
        """
        # Get instance pointer for MindData profiling manager
        self.md_profiler = cde.GlobalContext.profiling_manager()
        self._PIPELINE_FILE = "./pipeline_profiling"
        self._CPU_UTIL_FILE = "./minddata_cpu_utilization"
        self._DATASET_ITERATOR_FILE = "./dataset_iterator_profiling"
        self._SUMMARY_JSON_FILE = "./minddata_pipeline_summary"
        self._SUMMARY_CSV_FILE = "./minddata_pipeline_summary"
        self._ANALYZE_FILE_PATH = "./"
        # This is the set of keys for the success case
        self._EXPECTED_SUMMARY_KEYS_SUCCESS = \
            ['avg_cpu_pct', 'avg_cpu_pct_per_worker', 'children_ids', 'num_workers', 'op_ids', 'op_names',
             'parent_id', 'per_batch_time', 'per_pipeline_time', 'per_push_queue_time', 'pipeline_ops',
             'queue_average_size', 'queue_empty_freq_pct', 'queue_utilization_pct']

    def setup_method(self):
        """
        Run before each test function.
        """
        file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]
        file_id = file_name_map_rank_id[file_name]
        pipeline_file = self._PIPELINE_FILE + "_" + file_id + ".json"
        cpu_util_file = self._CPU_UTIL_FILE + "_" + file_id + ".json"
        dataset_iterator_file = self._DATASET_ITERATOR_FILE + "_" + file_id + ".txt"
        summary_json_file = self._SUMMARY_JSON_FILE + "_" + file_id + ".json"
        summary_csv_file = self._SUMMARY_CSV_FILE + "_" + file_id + ".csv"

        # Confirm MindData Profiling files do not yet exist
        assert os.path.exists(pipeline_file) is False
        assert os.path.exists(cpu_util_file) is False
        assert os.path.exists(dataset_iterator_file) is False
        # Confirm MindData Profiling analyze summary files do not yet exist
        assert os.path.exists(summary_json_file) is False
        assert os.path.exists(summary_csv_file) is False

        # Set the MindData Profiling related environment variables
        os.environ['RANK_ID'] = file_id
        os.environ['DEVICE_ID'] = file_id

        # Initialize MindData profiling manager
        self.md_profiler.init()

        # Start MindData Profiling
        self.md_profiler.start()

    def teardown_method(self):
        """
        Run after each test function.
        """
        file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]
        file_id = file_name_map_rank_id[file_name]
        pipeline_file = self._PIPELINE_FILE + "_" + file_id + ".json"
        cpu_util_file = self._CPU_UTIL_FILE + "_" + file_id + ".json"
        dataset_iterator_file = self._DATASET_ITERATOR_FILE + "_" + file_id + ".txt"
        summary_json_file = self._SUMMARY_JSON_FILE + "_" + file_id + ".json"
        summary_csv_file = self._SUMMARY_CSV_FILE + "_" + file_id + ".csv"

        # Delete MindData profiling files generated from the test
        os.remove(pipeline_file)
        os.remove(cpu_util_file)
        os.remove(dataset_iterator_file)
        # Delete MindData profiling analyze summary files generated from the test
        os.remove(summary_json_file)
        os.remove(summary_csv_file)

        # Disable MindData Profiling related environment variables
        del os.environ['RANK_ID']
        del os.environ['DEVICE_ID']

    def get_csv_result(self, file_pathname):
        """
        Get result from the CSV file.

        Args:
            file_pathname (str): The CSV file pathname.

        Returns:
            list[list], the parsed CSV information.
        """
        result = []
        with open(file_pathname, 'r') as csvfile:
            csv_reader = csv.reader(csvfile)
            for row in csv_reader:
                result.append(row)
        return result

    def verify_md_summary(self, md_summary_dict, EXPECTED_SUMMARY_KEYS):
        """
        Verify the content of the 3 variations of the MindData Profiling analyze summary output.
        """
        file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]
        file_id = file_name_map_rank_id[file_name]
        summary_json_file = self._SUMMARY_JSON_FILE + "_" + file_id + ".json"
        summary_csv_file = self._SUMMARY_CSV_FILE + "_" + file_id + ".csv"

        # Confirm MindData Profiling analyze summary files are created
        assert os.path.exists(summary_json_file) is True
        assert os.path.exists(summary_csv_file) is True

        # Build a list of the sorted returned keys
        summary_returned_keys = list(md_summary_dict.keys())
        summary_returned_keys.sort()

        # 1. Confirm expected keys are in returned keys
        for k in EXPECTED_SUMMARY_KEYS:
            assert k in summary_returned_keys

        # Read summary JSON file
        with open(summary_json_file) as f:
            summary_json_data = json.load(f)
        # Build a list of the sorted JSON keys
        summary_json_keys = list(summary_json_data.keys())
        summary_json_keys.sort()

        # 2a. Confirm expected keys are in JSON file keys
        for k in EXPECTED_SUMMARY_KEYS:
            assert k in summary_json_keys
        # 2b. Confirm returned dictionary keys are identical to JSON file keys
        np.testing.assert_array_equal(summary_returned_keys, summary_json_keys)

        # Read summary CSV file
        summary_csv_data = self.get_csv_result(summary_csv_file)
        # Build a list of the sorted CSV keys from the first column in the CSV file
        summary_csv_keys = []
        for x in summary_csv_data:
            summary_csv_keys.append(x[0])
        summary_csv_keys.sort()

        # 3a. Confirm expected keys are in the first column of the CSV file
        for k in EXPECTED_SUMMARY_KEYS:
            assert k in summary_csv_keys
        # 3b. Confirm returned dictionary keys are identical to CSV file first column keys
        np.testing.assert_array_equal(summary_returned_keys, summary_csv_keys)

    def mysource(self):
        """Source for data values"""
        for i in range(8000):
            yield (np.array([i]),)

    def test_analyze_basic(self):
        """
        Test MindData profiling analyze summary files exist with basic pipeline.
        Also test basic content (subset of keys and values) from the returned summary result.
        """
        file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]
        file_id = file_name_map_rank_id[file_name]
        pipeline_file = self._PIPELINE_FILE + "_" + file_id + ".json"
        cpu_util_file = self._CPU_UTIL_FILE + "_" + file_id + ".json"
        dataset_iterator_file = self._DATASET_ITERATOR_FILE + "_" + file_id + ".txt"

        # Create this basic and common linear pipeline
        # Generator -> Map -> Batch -> Repeat -> EpochCtrl
        data1 = ds.GeneratorDataset(self.mysource, ["col1"])
        type_cast_op = C.TypeCast(mstype.int32)
        data1 = data1.map(operations=type_cast_op, input_columns="col1")
        data1 = data1.batch(16)
        data1 = data1.repeat(2)

        num_iter = 0
        # Note: If create_tuple_iterator() is called with num_epochs>1, then EpochCtrlOp is added to the pipeline
        for _ in data1.create_dict_iterator(num_epochs=2):
            num_iter = num_iter + 1

        # Confirm number of rows returned
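        # (8000 source rows / batch size 16 = 500 batches; repeat(2) yields 1000 batches per epoch)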
        assert num_iter == 1000

        # Stop MindData Profiling and save output files to current working directory
        self.md_profiler.stop()
        self.md_profiler.save(os.getcwd())

        # Confirm MindData Profiling files are created
        assert os.path.exists(pipeline_file) is True
        assert os.path.exists(cpu_util_file) is True
        assert os.path.exists(dataset_iterator_file) is True

        # Call MindData Analyzer for generated MindData profiling files to generate MindData pipeline summary result
        md_analyzer = MinddataProfilingAnalyzer(self._ANALYZE_FILE_PATH, file_id, self._ANALYZE_FILE_PATH)
        md_summary_dict = md_analyzer.analyze()

        # Verify MindData Profiling Analyze Summary output
        # Note: MindData Analyzer returns the result in 3 formats:
        # 1. returned dictionary
        # 2. JSON file
        # 3. CSV file
        self.verify_md_summary(md_summary_dict, self._EXPECTED_SUMMARY_KEYS_SUCCESS)

        # 4. Verify non-variant values or number of values in the tested pipeline for certain keys
        # of the returned dictionary
        # Note: Values of num_workers are not tested since the default may change in the future
        # Note: Values related to queue metrics are not tested since they may vary across execution environments
        assert md_summary_dict["pipeline_ops"] == ["EpochCtrl(id=0)", "Repeat(id=1)", "Batch(id=2)", "Map(id=3)",
                                                   "Generator(id=4)"]
        assert md_summary_dict["op_names"] == ["EpochCtrl", "Repeat", "Batch", "Map", "Generator"]
        assert md_summary_dict["op_ids"] == [0, 1, 2, 3, 4]
        assert len(md_summary_dict["num_workers"]) == 5
        assert len(md_summary_dict["queue_average_size"]) == 5
        assert len(md_summary_dict["queue_utilization_pct"]) == 5
        assert len(md_summary_dict["queue_empty_freq_pct"]) == 5
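        # children_ids/parent_id encode the linear op tree: EpochCtrl(id=0) is the root
        # (parent_id -1), and each op's only child is the op with the next id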
        assert md_summary_dict["children_ids"] == [[1], [2], [3], [4], []]
        assert md_summary_dict["parent_id"] == [-1, 0, 1, 2, 3]
        assert len(md_summary_dict["avg_cpu_pct"]) == 5

    def test_analyze_sequential_pipelines_invalid(self):
        """
        Test invalid scenario in which MinddataProfilingAnalyzer is called for two sequential pipelines.
        """
        file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]
        file_id = file_name_map_rank_id[file_name]
        pipeline_file = self._PIPELINE_FILE + "_" + file_id + ".json"
        cpu_util_file = self._CPU_UTIL_FILE + "_" + file_id + ".json"
        dataset_iterator_file = self._DATASET_ITERATOR_FILE + "_" + file_id + ".txt"

        # Create the pipeline
        # Generator -> Map -> Batch -> EpochCtrl
        data1 = ds.GeneratorDataset(self.mysource, ["col1"])
        type_cast_op = C.TypeCast(mstype.int32)
        data1 = data1.map(operations=type_cast_op, input_columns="col1")
        data1 = data1.batch(64)

        # Phase 1 - For the pipeline, call create_tuple_iterator with num_epochs>1
        # Note: This pipeline has 4 ops: Generator -> Map -> Batch -> EpochCtrl
        num_iter = 0
        # Note: If create_tuple_iterator() is called with num_epochs>1, then EpochCtrlOp is added to the pipeline
        for _ in data1.create_dict_iterator(num_epochs=2):
            num_iter = num_iter + 1

        # Confirm number of rows returned
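        # (8000 source rows / batch size 64 = 125 batches per epoch)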
        assert num_iter == 125

        # Stop MindData Profiling and save output files to current working directory
        self.md_profiler.stop()
        self.md_profiler.save(os.getcwd())

        # Confirm MindData Profiling files are created
        assert os.path.exists(pipeline_file) is True
        assert os.path.exists(cpu_util_file) is True
        assert os.path.exists(dataset_iterator_file) is True

        # Phase 2 - For the pipeline, call create_tuple_iterator with num_epochs=1
        # Note: This pipeline has 3 ops: Generator -> Map -> Batch
        # Initialize and start MindData profiling manager
        self.md_profiler.init()
        self.md_profiler.start()

        num_iter = 0
        # Note: If create_tuple_iterator() is called with num_epochs=1, then EpochCtrlOp is NOT added to the pipeline
        for _ in data1.create_dict_iterator(num_epochs=1):
            num_iter = num_iter + 1

        # Confirm number of rows returned
        assert num_iter == 125

        # Stop MindData Profiling and save output files to current working directory
        self.md_profiler.stop()
        self.md_profiler.save(os.getcwd())

        # Confirm MindData Profiling files are created
        # Note: There is an MD bug in which the pipeline file is not recreated;
        # it still has 4 ops instead of 3 ops
        assert os.path.exists(pipeline_file) is True
        assert os.path.exists(cpu_util_file) is True
        assert os.path.exists(dataset_iterator_file) is True

        # Call MindData Analyzer for generated MindData profiling files to generate MindData pipeline summary result
        md_analyzer = MinddataProfilingAnalyzer(self._ANALYZE_FILE_PATH, file_id, self._ANALYZE_FILE_PATH)
        md_summary_dict = md_analyzer.analyze()

        # Verify MindData Profiling Analyze Summary output
        self.verify_md_summary(md_summary_dict, self._EXPECTED_SUMMARY_KEYS_SUCCESS)

        # Confirm pipeline data contains info for 3 ops
        assert md_summary_dict["pipeline_ops"] == ["Batch(id=0)", "Map(id=1)", "Generator(id=2)"]

        # Verify CPU util data contains info for 3 ops
        assert len(md_summary_dict["avg_cpu_pct"]) == 3
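
# Note: These tests are meant to be driven by pytest (e.g. "pytest test_minddata_analyzer.py"),
# since setup_method/teardown_method rely on the PYTEST_CURRENT_TEST environment variable
# that pytest sets. They must also run from a writable working directory, as profiling
# output is saved to and later removed from os.getcwd().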