diff --git a/mindinsight/datavisual/data_transform/data_manager.py b/mindinsight/datavisual/data_transform/data_manager.py
index 96ca5e89..12baa323 100644
--- a/mindinsight/datavisual/data_transform/data_manager.py
+++ b/mindinsight/datavisual/data_transform/data_manager.py
@@ -862,11 +862,10 @@ class DataManager:
         """
         logger.info("Start to load data")
         DataManager.check_reload_interval(reload_interval)
-        thread = threading.Thread(target=self._load_data_in_thread_wrapper,
+        thread = threading.Thread(target=self._load_data_in_thread,
                                   name='start_load_data_thread',
                                   args=(reload_interval,),
                                   daemon=True)
-        thread.daemon = True
         thread.start()
         return thread
 
@@ -884,7 +883,7 @@ class DataManager:
         if reload_interval < 0:
             raise ParamValueError("The value of reload interval should be >= 0.")
 
-    def _load_data_in_thread_wrapper(self, reload_interval):
+    def _load_data_in_thread(self, reload_interval):
         """Wrapper for load data in thread."""
         if self._load_data_lock.locked():
             return
@@ -911,8 +910,13 @@ class DataManager:
                 max_processes_cnt=settings.MAX_PROCESSES_COUNT) as computing_resource_mgr:
             with computing_resource_mgr.get_executor() as executor:
                 self._brief_cache.update_cache(executor)
+                brief_cache_update = time.time()
                 for _ in self._detail_cache.update_cache(executor):
-                    self._brief_cache.update_cache(executor)
+                    update_interval = time.time() - brief_cache_update
+                    logger.debug('Loading one round of detail cache taking %ss.', update_interval)
+                    if update_interval > 3:  # Use 3 seconds as threshold to avoid updating too often
+                        self._brief_cache.update_cache(executor)
+                        brief_cache_update += update_interval
                 executor.wait_all_tasks_finish()
         with self._status_mutex:
             if not self._brief_cache.has_content() and not self._detail_cache.has_content():
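
For context, and not part of the patch itself: the third hunk throttles the brief-cache refresh so it runs at most once every 3 seconds while detail-cache rounds keep iterating. Below is a minimal, standalone Python sketch of that time-based throttling pattern. Every name in it (refresh_brief, detail_rounds, load_all, THRESHOLD) is hypothetical, and it uses time.monotonic rather than the patch's time.time purely for illustration.

# Standalone sketch (not MindInsight code) of the throttling pattern introduced
# in the last hunk: refresh an expensive "brief" view at most once every
# THRESHOLD seconds while iterating over many cheaper "detail" rounds.
import time

THRESHOLD = 3  # seconds; mirrors the threshold chosen in the patch

def refresh_brief():
    """Stand-in for the expensive brief-cache refresh."""
    print("brief cache refreshed at %.1f" % time.monotonic())

def detail_rounds(count=10, round_cost=0.5):
    """Stand-in generator for the detail-cache update: yields once per round."""
    for i in range(count):
        time.sleep(round_cost)  # simulate the cost of one detail-loading round
        yield i

def load_all():
    refresh_brief()                  # initial refresh before the loop, as in the patch
    last_refresh = time.monotonic()
    for _ in detail_rounds():
        elapsed = time.monotonic() - last_refresh
        if elapsed > THRESHOLD:      # skip the refresh when the last one was recent
            refresh_brief()
            last_refresh += elapsed  # advance the reference point, like brief_cache_update

if __name__ == "__main__":
    load_all()

Adding elapsed back onto the reference time is equivalent to resetting it to "now", which matches the patch's brief_cache_update += update_interval and keeps the refresh cadence tied to the loop's actual pace.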