
test_querier.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Test the querier module."""
from unittest import TestCase, mock

from google.protobuf.json_format import ParseDict

import mindinsight.datavisual.proto_files.mindinsight_lineage_pb2 as summary_pb2
from mindinsight.lineagemgr.common.exceptions.exceptions import (LineageParamTypeError, LineageQuerierParamException,
                                                                 LineageSummaryAnalyzeException,
                                                                 LineageSummaryParseException)
from mindinsight.lineagemgr.querier.querier import Querier
from mindinsight.lineagemgr.summary.lineage_summary_analyzer import LineageInfo

from . import event_data


def create_lineage_info(train_event_dict, eval_event_dict, dataset_event_dict):
    """
    Create parsed lineage info tuple.

    Args:
        train_event_dict (Union[dict, None]): The dict of train event.
        eval_event_dict (Union[dict, None]): The dict of evaluation event.
        dataset_event_dict (Union[dict, None]): The dict of dataset graph event.

    Returns:
        namedtuple, parsed lineage info.
    """
    if train_event_dict is not None:
        train_event = summary_pb2.LineageEvent()
        ParseDict(train_event_dict, train_event)
    else:
        train_event = None

    if eval_event_dict is not None:
        eval_event = summary_pb2.LineageEvent()
        ParseDict(eval_event_dict, eval_event)
    else:
        eval_event = None

    if dataset_event_dict is not None:
        dataset_event = summary_pb2.LineageEvent()
        ParseDict(dataset_event_dict, dataset_event)
    else:
        dataset_event = None

    lineage_info = LineageInfo(
        train_lineage=train_event,
        eval_lineage=eval_event,
        dataset_graph=dataset_event,
    )
    return lineage_info


def create_filtration_result(summary_dir, train_event_dict,
                             eval_event_dict, metric_dict, dataset_dict):
    """
    Create filtration result.

    Args:
        summary_dir (str): The summary dir.
        train_event_dict (dict): The dict of train event.
        eval_event_dict (dict): The dict of evaluation event.
        metric_dict (dict): The dict of metric.
        dataset_dict (dict): The dict of dataset graph.

    Returns:
        dict, the filtration result.
    """
    filtration_result = {
        "summary_dir": summary_dir,
        "model_lineage": {
            "loss_function": train_event_dict['train_lineage']['hyper_parameters']['loss_function'],
            "train_dataset_path": train_event_dict['train_lineage']['train_dataset']['train_dataset_path'],
            "train_dataset_count": train_event_dict['train_lineage']['train_dataset']['train_dataset_size'],
            "test_dataset_path": eval_event_dict['evaluation_lineage']['valid_dataset']['valid_dataset_path'],
            "test_dataset_count": eval_event_dict['evaluation_lineage']['valid_dataset']['valid_dataset_size'],
            "network": train_event_dict['train_lineage']['algorithm']['network'],
            "optimizer": train_event_dict['train_lineage']['hyper_parameters']['optimizer'],
            "learning_rate": train_event_dict['train_lineage']['hyper_parameters']['learning_rate'],
            "epoch": train_event_dict['train_lineage']['hyper_parameters']['epoch'],
            "batch_size": train_event_dict['train_lineage']['hyper_parameters']['batch_size'],
            "loss": train_event_dict['train_lineage']['algorithm']['loss'],
            "model_size": train_event_dict['train_lineage']['model']['size'],
            "metric": metric_dict,
            "dataset_mark": '2',
            "user_defined": {}
        },
        "dataset_graph": dataset_dict,
    }
    return filtration_result


def get_lineage_infos():
    """
    Get tuples of lineage info, simulating the function of the summary analyzer.

    Returns:
        list[namedtuple], tuples of lineage info.
    """
    train_events = [
        event_data.EVENT_TRAIN_DICT_0,
        event_data.EVENT_TRAIN_DICT_1,
        event_data.EVENT_TRAIN_DICT_2,
        event_data.EVENT_TRAIN_DICT_3,
        event_data.EVENT_TRAIN_DICT_4,
        event_data.EVENT_TRAIN_DICT_5,
        None
    ]
    eval_events = [
        event_data.EVENT_EVAL_DICT_0,
        event_data.EVENT_EVAL_DICT_1,
        event_data.EVENT_EVAL_DICT_2,
        event_data.EVENT_EVAL_DICT_3,
        event_data.EVENT_EVAL_DICT_4,
        None,
        event_data.EVENT_EVAL_DICT_5
    ]
    dataset_events = [
        event_data.EVENT_DATASET_DICT_0
    ] * 7

    lineage_infos = list(
        map(
            lambda event: create_lineage_info(event[0], event[1], event[2]),
            zip(train_events, eval_events, dataset_events)
        )
    )
    return lineage_infos


LINEAGE_INFO_0 = {
    'summary_dir': '/path/to/summary0',
    **event_data.EVENT_TRAIN_DICT_0['train_lineage'],
    'metric': event_data.METRIC_0,
    'valid_dataset': event_data.EVENT_EVAL_DICT_0['evaluation_lineage']['valid_dataset'],
    'dataset_graph': event_data.DATASET_DICT_0
}
LINEAGE_INFO_1 = {
    'summary_dir': '/path/to/summary1',
    **event_data.EVENT_TRAIN_DICT_1['train_lineage'],
    'metric': event_data.METRIC_1,
    'valid_dataset': event_data.EVENT_EVAL_DICT_1['evaluation_lineage']['valid_dataset'],
    'dataset_graph': event_data.DATASET_DICT_0
}
LINEAGE_FILTRATION_0 = create_filtration_result(
    '/path/to/summary0',
    event_data.EVENT_TRAIN_DICT_0,
    event_data.EVENT_EVAL_DICT_0,
    event_data.METRIC_0,
    event_data.DATASET_DICT_0
)
LINEAGE_FILTRATION_1 = create_filtration_result(
    '/path/to/summary1',
    event_data.EVENT_TRAIN_DICT_1,
    event_data.EVENT_EVAL_DICT_1,
    event_data.METRIC_1,
    event_data.DATASET_DICT_0
)
LINEAGE_FILTRATION_2 = create_filtration_result(
    '/path/to/summary2',
    event_data.EVENT_TRAIN_DICT_2,
    event_data.EVENT_EVAL_DICT_2,
    event_data.METRIC_2,
    event_data.DATASET_DICT_0
)
LINEAGE_FILTRATION_3 = create_filtration_result(
    '/path/to/summary3',
    event_data.EVENT_TRAIN_DICT_3,
    event_data.EVENT_EVAL_DICT_3,
    event_data.METRIC_3,
    event_data.DATASET_DICT_0
)
LINEAGE_FILTRATION_4 = create_filtration_result(
    '/path/to/summary4',
    event_data.EVENT_TRAIN_DICT_4,
    event_data.EVENT_EVAL_DICT_4,
    event_data.METRIC_4,
    event_data.DATASET_DICT_0
)
LINEAGE_FILTRATION_5 = {
    "summary_dir": '/path/to/summary5',
    "model_lineage": {
        "loss_function":
            event_data.EVENT_TRAIN_DICT_5['train_lineage']['hyper_parameters']['loss_function'],
        "train_dataset_path": None,
        "train_dataset_count":
            event_data.EVENT_TRAIN_DICT_5['train_lineage']['train_dataset']['train_dataset_size'],
        "test_dataset_path": None,
        "test_dataset_count": None,
        "network": event_data.EVENT_TRAIN_DICT_5['train_lineage']['algorithm']['network'],
        "optimizer": event_data.EVENT_TRAIN_DICT_5['train_lineage']['hyper_parameters']['optimizer'],
        "learning_rate":
            event_data.EVENT_TRAIN_DICT_5['train_lineage']['hyper_parameters']['learning_rate'],
        "epoch": event_data.EVENT_TRAIN_DICT_5['train_lineage']['hyper_parameters']['epoch'],
        "batch_size": event_data.EVENT_TRAIN_DICT_5['train_lineage']['hyper_parameters']['batch_size'],
        "loss": event_data.EVENT_TRAIN_DICT_5['train_lineage']['algorithm']['loss'],
        "model_size": event_data.EVENT_TRAIN_DICT_5['train_lineage']['model']['size'],
        "metric": {},
        "dataset_mark": '2',
        "user_defined": {}
    },
    "dataset_graph": event_data.DATASET_DICT_0
}
LINEAGE_FILTRATION_6 = {
    "summary_dir": '/path/to/summary6',
    "model_lineage": {
        "loss_function": None,
        "train_dataset_path": None,
        "train_dataset_count": None,
        "test_dataset_path":
            event_data.EVENT_EVAL_DICT_5['evaluation_lineage']['valid_dataset']['valid_dataset_path'],
        "test_dataset_count":
            event_data.EVENT_EVAL_DICT_5['evaluation_lineage']['valid_dataset']['valid_dataset_size'],
        "network": None,
        "optimizer": None,
        "learning_rate": None,
        "epoch": None,
        "batch_size": None,
        "loss": None,
        "model_size": None,
        "metric": event_data.METRIC_5,
        "dataset_mark": '2',
        "user_defined": {}
    },
    "dataset_graph": event_data.DATASET_DICT_0
}


class TestQuerier(TestCase):
    """Test the class of `Querier`."""

    @mock.patch('mindinsight.lineagemgr.querier.querier.LineageSummaryAnalyzer.get_user_defined_info')
    @mock.patch('mindinsight.lineagemgr.querier.querier.LineageSummaryAnalyzer.get_summary_infos')
    def setUp(self, *args):
        """Initialization before test case execution."""
        args[0].return_value = create_lineage_info(
            event_data.EVENT_TRAIN_DICT_0,
            event_data.EVENT_EVAL_DICT_0,
            event_data.EVENT_DATASET_DICT_0
        )
        args[1].return_value = []
        single_summary_path = '/path/to/summary0/log0'
        self.single_querier = Querier(single_summary_path)

        lineage_infos = get_lineage_infos()
        args[0].side_effect = lineage_infos
        summary_paths = [
            '/path/to/summary0/log0',
            '/path/to/summary1/log1',
            '/path/to/summary2/log2',
            '/path/to/summary3/log3',
            '/path/to/summary4/log4',
            '/path/to/summary5/log5',
            '/path/to/summary6/log6'
        ]
        self.multi_querier = Querier(summary_paths)

    def test_get_summary_lineage_success_1(self):
        """Test the success of get_summary_lineage."""
        expected_result = [LINEAGE_INFO_0]
        result = self.single_querier.get_summary_lineage()
        self.assertListEqual(expected_result, result)

    def test_get_summary_lineage_success_2(self):
        """Test the success of get_summary_lineage."""
        expected_result = [LINEAGE_INFO_0]
        result = self.single_querier.get_summary_lineage(
            summary_dir='/path/to/summary0'
        )
        self.assertListEqual(expected_result, result)

    def test_get_summary_lineage_success_3(self):
        """Test the success of get_summary_lineage."""
        expected_result = [
            {
                'summary_dir': '/path/to/summary0',
                'model': event_data.EVENT_TRAIN_DICT_0['train_lineage']['model'],
                'algorithm': event_data.EVENT_TRAIN_DICT_0['train_lineage']['algorithm']
            }
        ]
        result = self.single_querier.get_summary_lineage(
            filter_keys=['model', 'algorithm']
        )
        self.assertListEqual(expected_result, result)

    def test_get_summary_lineage_success_4(self):
        """Test the success of get_summary_lineage."""
        expected_result = [
            LINEAGE_INFO_0,
            LINEAGE_INFO_1,
            {
                'summary_dir': '/path/to/summary2',
                **event_data.EVENT_TRAIN_DICT_2['train_lineage'],
                'metric': event_data.METRIC_2,
                'valid_dataset': event_data.EVENT_EVAL_DICT_2['evaluation_lineage']['valid_dataset'],
                'dataset_graph': event_data.DATASET_DICT_0
            },
            {
                'summary_dir': '/path/to/summary3',
                **event_data.EVENT_TRAIN_DICT_3['train_lineage'],
                'metric': event_data.METRIC_3,
                'valid_dataset': event_data.EVENT_EVAL_DICT_3['evaluation_lineage']['valid_dataset'],
                'dataset_graph': event_data.DATASET_DICT_0
            },
            {
                'summary_dir': '/path/to/summary4',
                **event_data.EVENT_TRAIN_DICT_4['train_lineage'],
                'metric': event_data.METRIC_4,
                'valid_dataset': event_data.EVENT_EVAL_DICT_4['evaluation_lineage']['valid_dataset'],
                'dataset_graph': event_data.DATASET_DICT_0
            },
            {
                'summary_dir': '/path/to/summary5',
                **event_data.EVENT_TRAIN_DICT_5['train_lineage'],
                'metric': {},
                'valid_dataset': {},
                'dataset_graph': event_data.DATASET_DICT_0
            },
            {
                'summary_dir': '/path/to/summary6',
                'hyper_parameters': {},
                'algorithm': {},
                'model': {},
                'train_dataset': {},
                'metric': event_data.METRIC_5,
                'valid_dataset': event_data.EVENT_EVAL_DICT_5['evaluation_lineage']['valid_dataset'],
                'dataset_graph': event_data.DATASET_DICT_0
            }
        ]
        result = self.multi_querier.get_summary_lineage()
        self.assertListEqual(expected_result, result)

    def test_get_summary_lineage_success_5(self):
        """Test the success of get_summary_lineage."""
        expected_result = [LINEAGE_INFO_1]
        result = self.multi_querier.get_summary_lineage(
            summary_dir='/path/to/summary1'
        )
        self.assertListEqual(expected_result, result)

    def test_get_summary_lineage_success_6(self):
        """Test the success of get_summary_lineage."""
        expected_result = [
            {
                'summary_dir': '/path/to/summary0',
                'hyper_parameters': event_data.EVENT_TRAIN_DICT_0['train_lineage']['hyper_parameters'],
                'train_dataset': event_data.EVENT_TRAIN_DICT_0['train_lineage']['train_dataset'],
                'metric': event_data.METRIC_0,
                'valid_dataset': event_data.EVENT_EVAL_DICT_0['evaluation_lineage']['valid_dataset']
            }
        ]
        filter_keys = [
            'metric', 'hyper_parameters', 'train_dataset', 'valid_dataset'
        ]
        result = self.multi_querier.get_summary_lineage(
            summary_dir='/path/to/summary0', filter_keys=filter_keys
        )
        self.assertListEqual(expected_result, result)

    def test_get_summary_lineage_fail(self):
        """Test the function of get_summary_lineage with exception."""
        filter_keys = ['xxx']
        self.assertRaises(
            LineageQuerierParamException,
            self.multi_querier.get_summary_lineage,
            filter_keys=filter_keys
        )
        self.assertRaises(
            LineageQuerierParamException,
            self.multi_querier.get_summary_lineage,
            summary_dir='xxx'
        )

    def test_filter_summary_lineage_success_1(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'optimizer': {
                'in': [
                    'ApplyMomentum0',
                    'ApplyMomentum1',
                    'ApplyMomentum2',
                    'ApplyMomentum4'
                ]
            },
            'learning_rate': {
                'lt': 0.5,
                'gt': 0.2
            },
            'sorted_name': 'summary_dir'
        }
        expected_result = {
            'customized': event_data.CUSTOMIZED_0,
            'object': [
                LINEAGE_FILTRATION_1,
                LINEAGE_FILTRATION_2
            ],
            'count': 2,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_success_2(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'batch_size': {
                'le': 50,
                'ge': 35
            },
            'model_size': {
                'lt': 400716934,
                'gt': 400716931
            },
            'sorted_name': 'batch_size',
            'sorted_type': 'descending'
        }
        expected_result = {
            'customized': event_data.CUSTOMIZED_0,
            'object': [
                LINEAGE_FILTRATION_2,
                LINEAGE_FILTRATION_3
            ],
            'count': 2,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_success_3(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'limit': 2,
            'offset': 1
        }
        expected_result = {
            'customized': event_data.CUSTOMIZED_0,
            'object': [
                LINEAGE_FILTRATION_2,
                LINEAGE_FILTRATION_3
            ],
            'count': 7,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_success_4(self):
        """Test the success of filter_summary_lineage."""
        expected_result = {
            'customized': event_data.CUSTOMIZED_0,
            'object': [
                LINEAGE_FILTRATION_0,
                LINEAGE_FILTRATION_1,
                LINEAGE_FILTRATION_2,
                LINEAGE_FILTRATION_3,
                LINEAGE_FILTRATION_4,
                LINEAGE_FILTRATION_5,
                LINEAGE_FILTRATION_6
            ],
            'count': 7,
        }
        result = self.multi_querier.filter_summary_lineage()
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_success_5(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'optimizer': {
                'eq': 'ApplyMomentum4'
            }
        }
        expected_result = {
            'customized': event_data.CUSTOMIZED_0,
            'object': [LINEAGE_FILTRATION_4],
            'count': 1,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_success_6(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'sorted_name': 'metric/accuracy',
            'sorted_type': 'ascending'
        }
        expected_result = {
            'customized': event_data.CUSTOMIZED_0,
            'object': [
                LINEAGE_FILTRATION_0,
                LINEAGE_FILTRATION_5,
                LINEAGE_FILTRATION_1,
                LINEAGE_FILTRATION_2,
                LINEAGE_FILTRATION_3,
                LINEAGE_FILTRATION_4,
                LINEAGE_FILTRATION_6
            ],
            'count': 7,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_success_7(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'sorted_name': 'metric/accuracy',
            'sorted_type': 'descending'
        }
        expected_result = {
            'customized': event_data.CUSTOMIZED_1,
            'object': [
                LINEAGE_FILTRATION_6,
                LINEAGE_FILTRATION_4,
                LINEAGE_FILTRATION_3,
                LINEAGE_FILTRATION_2,
                LINEAGE_FILTRATION_1,
                LINEAGE_FILTRATION_0,
                LINEAGE_FILTRATION_5
            ],
            'count': 7,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_success_8(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'metric/accuracy': {
                'lt': 1.0000006,
                'gt': 1.0000004
            }
        }
        expected_result = {
            'customized': event_data.CUSTOMIZED_0,
            'object': [LINEAGE_FILTRATION_4],
            'count': 1,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_success_9(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'limit': 3,
            'offset': 3
        }
        expected_result = {
            'customized': {},
            'object': [],
            'count': 7,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_fail(self):
        """Test the function of filter_summary_lineage with exception."""
        condition = {
            'xxx': {
                'lt': 1.0000006,
                'gt': 1.0000004
            }
        }
        self.assertRaises(
            LineageQuerierParamException,
            self.multi_querier.filter_summary_lineage,
            condition=condition
        )

        condition = {
            'accuracy': {
                'xxx': 1
            }
        }
        self.assertRaises(
            LineageQuerierParamException,
            self.multi_querier.filter_summary_lineage,
            condition=condition
        )

        condition = {
            'sorted_name': 'xxx'
        }
        self.assertRaises(
            LineageQuerierParamException,
            self.multi_querier.filter_summary_lineage,
            condition=condition
        )

    @mock.patch('mindinsight.lineagemgr.querier.querier.LineageSummaryAnalyzer.get_summary_infos')
    def test_init_fail(self, *args):
        """Test the function of init with exception."""
        summary_path = {'xxx': 1}
        with self.assertRaises(LineageParamTypeError):
            Querier(summary_path)

        summary_path = None
        with self.assertRaises(LineageQuerierParamException):
            Querier(summary_path)

        args[0].side_effect = LineageSummaryAnalyzeException
        summary_path = '/path/to/summary0/log0'
        with self.assertRaises(LineageSummaryParseException):
            Querier(summary_path)

    @mock.patch('mindinsight.lineagemgr.querier.querier.LineageSummaryAnalyzer.get_user_defined_info')
    @mock.patch('mindinsight.lineagemgr.querier.querier.LineageSummaryAnalyzer.get_summary_infos')
    def test_parse_fail_summary_logs_1(self, *args):
        """Test the function of parsing failed summary logs."""
        lineage_infos = get_lineage_infos()
        args[0].side_effect = lineage_infos
        args[1].return_value = []
        summary_path = ['/path/to/summary0/log0']
        querier = Querier(summary_path)
        querier._parse_failed_paths.append('/path/to/summary1/log1')
        expected_result = [
            LINEAGE_INFO_0,
            LINEAGE_INFO_1
        ]
        result = querier.get_summary_lineage()
        self.assertListEqual(expected_result, result)
        self.assertListEqual([], querier._parse_failed_paths)

    @mock.patch('mindinsight.lineagemgr.querier.querier.LineageSummaryAnalyzer.get_user_defined_info')
    @mock.patch('mindinsight.lineagemgr.querier.querier.LineageSummaryAnalyzer.get_summary_infos')
    def test_parse_fail_summary_logs_2(self, *args):
        """Test the function of parsing failed summary logs."""
        args[0].return_value = create_lineage_info(
            event_data.EVENT_TRAIN_DICT_0,
            event_data.EVENT_EVAL_DICT_0,
            event_data.EVENT_DATASET_DICT_0,
        )
        args[1].return_value = []
        summary_path = ['/path/to/summary0/log0']
        querier = Querier(summary_path)
        querier._parse_failed_paths.append('/path/to/summary1/log1')

        args[0].return_value = create_lineage_info(None, None, None)
        expected_result = [LINEAGE_INFO_0]
        result = querier.get_summary_lineage()
        self.assertListEqual(expected_result, result)
        self.assertListEqual(
            ['/path/to/summary1/log1'], querier._parse_failed_paths
        )

MindInsight provides MindSpore with easy-to-use debugging and tuning capabilities. During training, data such as scalars, tensors, images, computational graphs, model hyperparameters, and training time can be recorded to a file and then viewed and analyzed through the MindInsight visualization page.
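For context, below is a minimal sketch of how such summary data is typically produced on the MindSpore side before MindInsight reads it. It is not part of the test file above; the SummaryRecord import path, its call signatures, and the './summary_dir' location are assumptions based on MindSpore 1.x and may differ across versions.

# Minimal sketch (assumptions as noted above): record a scalar per training step
# into a summary directory that MindInsight can later visualize.
import mindspore
from mindspore import Tensor
from mindspore.train.summary import SummaryRecord

with SummaryRecord('./summary_dir') as summary_record:
    for step in range(1, 4):
        # Stand-in for the training loss at the current step.
        loss = Tensor(0.1 / step, mindspore.float32)
        summary_record.add_value('scalar', 'loss', loss)  # queue the scalar value
        summary_record.record(step)                       # flush it for this step

The recorded summary files can then be browsed by starting the MindInsight service against that directory, e.g. with a command along the lines of `mindinsight start --summary-base-dir ./summary_dir`.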