You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

_checkpoint.py 20 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """Checkpoint related classes and functions."""
  16. import os
  17. import stat
  18. import time
  19. import threading
  20. import mindspore.context as context
  21. from mindspore import log as logger
  22. from mindspore import nn
  23. from mindspore._checkparam import Validator
  24. from mindspore.train._utils import _make_directory
  25. from mindspore.train.serialization import save_checkpoint, _save_graph
  26. from mindspore.parallel._ps_context import _is_role_pserver, _get_ps_mode_rank
  27. from mindspore.parallel._cell_wrapper import destroy_allgather_cell
  28. from ._callback import Callback, set_cur_net
  29. from ...common.tensor import Tensor
# Directory the process was started from; used as the default save directory
# when ModelCheckpoint is constructed without an explicit `directory`.
_cur_dir = os.getcwd()
# Directory of the most recent checkpoint save; updated as a module-level
# global by ModelCheckpoint._save_ckpt on every successful save.
_save_dir = _cur_dir
  32. def _chg_ckpt_file_name_if_same_exist(directory, prefix):
  33. """Check if there is a file with the same name."""
  34. files = os.listdir(directory)
  35. suffix_num = 0
  36. pre_len = len(prefix)
  37. for filename in files:
  38. name_ext = os.path.splitext(filename)
  39. if name_ext[-1] != ".ckpt":
  40. continue
  41. # find same prefix file
  42. if filename.find(prefix) == 0 and not filename[pre_len].isalpha():
  43. # add the max suffix + 1
  44. index = filename[pre_len:].find("-")
  45. if index == 0:
  46. suffix_num = max(suffix_num, 1)
  47. elif index != -1:
  48. num = filename[pre_len+1:pre_len+index]
  49. if num.isdigit():
  50. suffix_num = max(suffix_num, int(num)+1)
  51. if suffix_num != 0:
  52. prefix = prefix + "_" + str(suffix_num)
  53. return prefix
  54. class CheckpointConfig:
  55. """
  56. The configuration of model checkpoint.
  57. Note:
  58. During the training process, if dataset is transmitted through the data channel,
  59. It is suggested to set 'save_checkpoint_steps' to an integer multiple of loop_size.
  60. Otherwise, the time to save the checkpoint may be biased.
  61. Args:
  62. save_checkpoint_steps (int): Steps to save checkpoint. Default: 1.
  63. save_checkpoint_seconds (int): Seconds to save checkpoint.
  64. Can't be used with save_checkpoint_steps at the same time. Default: 0.
  65. keep_checkpoint_max (int): Maximum number of checkpoint files can be saved. Default: 5.
  66. keep_checkpoint_per_n_minutes (int): Keep one checkpoint every n minutes.
  67. Can't be used with keep_checkpoint_max at the same time. Default: 0.
  68. integrated_save (bool): Whether to perform integrated save function in automatic model parallel scene.
  69. Integrated save function is only supported in automatic parallel scene, not supported
  70. in manual parallel. Default: True.
  71. async_save (bool): Whether asynchronous execution saves the checkpoint to a file. Default: False.
  72. saved_network (Cell): Network to be saved in checkpoint file. If the saved_network has no relation
  73. with the network in training, the initial value of saved_network will be saved. Default: None.
  74. enc_key (Union[None, bytes]): Byte type key used for encryption. If the value is None, the encryption
  75. is not required. Default: None.
  76. enc_mode (str): This parameter is valid only when enc_key is not set to None. Specifies the encryption
  77. mode, currently supports 'AES-GCM' and 'AES-CBC'. Default: 'AES-GCM'.
  78. Raises:
  79. ValueError: If the input_param is None or 0.
  80. Examples:
  81. >>> class LeNet5(nn.Cell):
  82. >>> def __init__(self, num_class=10, num_channel=1):
  83. >>> super(LeNet5, self).__init__()
  84. >>> self.conv1 = nn.Conv2d(num_channel, 6, 5, pad_mode='valid')
  85. >>> self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid')
  86. >>> self.fc1 = nn.Dense(16 * 5 * 5, 120, weight_init=Normal(0.02))
  87. >>> self.fc2 = nn.Dense(120, 84, weight_init=Normal(0.02))
  88. >>> self.fc3 = nn.Dense(84, num_class, weight_init=Normal(0.02))
  89. >>> self.relu = nn.ReLU()
  90. >>> self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
  91. >>> self.flatten = nn.Flatten()
  92. >>>
  93. >>> def construct(self, x):
  94. >>> x = self.max_pool2d(self.relu(self.conv1(x)))
  95. >>> x = self.max_pool2d(self.relu(self.conv2(x)))
  96. >>> x = self.flatten(x)
  97. >>> x = self.relu(self.fc1(x))
  98. >>> x = self.relu(self.fc2(x))
  99. >>> x = self.fc3(x)
  100. >>> return x
  101. >>>
  102. >>> net = LeNet5()
  103. >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
  104. >>> optim = nn.Momentum(net.trainable_params(), 0.01, 0.9)
  105. >>> model = Model(net, loss_fn=loss, optimizer=optim)
  106. >>> data_path = './MNIST_Data'
  107. >>> dataset = create_dataset(data_path)
  108. >>> config = CheckpointConfig(saved_network=net)
  109. >>> ckpoint_cb = ModelCheckpoint(prefix='LeNet5', directory='./checkpoint', config=config)
  110. >>> model.train(10, dataset, callbacks=ckpoint_cb)
  111. """
  112. def __init__(self,
  113. save_checkpoint_steps=1,
  114. save_checkpoint_seconds=0,
  115. keep_checkpoint_max=5,
  116. keep_checkpoint_per_n_minutes=0,
  117. integrated_save=True,
  118. async_save=False,
  119. saved_network=None,
  120. enc_key=None,
  121. enc_mode='AES-GCM'):
  122. if save_checkpoint_steps is not None:
  123. save_checkpoint_steps = Validator.check_non_negative_int(save_checkpoint_steps)
  124. if save_checkpoint_seconds is not None:
  125. save_checkpoint_seconds = Validator.check_non_negative_int(save_checkpoint_seconds)
  126. if keep_checkpoint_max is not None:
  127. keep_checkpoint_max = Validator.check_non_negative_int(keep_checkpoint_max)
  128. if keep_checkpoint_per_n_minutes is not None:
  129. keep_checkpoint_per_n_minutes = Validator.check_non_negative_int(keep_checkpoint_per_n_minutes)
  130. if saved_network is not None and not isinstance(saved_network, nn.Cell):
  131. raise TypeError(f"The type of saved_network must be None or Cell, but got {str(type(saved_network))}.")
  132. if not save_checkpoint_steps and not save_checkpoint_seconds and \
  133. not keep_checkpoint_max and not keep_checkpoint_per_n_minutes:
  134. raise ValueError("The input_param can't be all None or 0")
  135. self._save_checkpoint_steps = save_checkpoint_steps
  136. self._save_checkpoint_seconds = save_checkpoint_seconds
  137. if self._save_checkpoint_steps and self._save_checkpoint_steps > 0:
  138. self._save_checkpoint_seconds = None
  139. self._keep_checkpoint_max = keep_checkpoint_max
  140. self._keep_checkpoint_per_n_minutes = keep_checkpoint_per_n_minutes
  141. if self._keep_checkpoint_max and self._keep_checkpoint_max > 0:
  142. self._keep_checkpoint_per_n_minutes = None
  143. else:
  144. if not self._keep_checkpoint_per_n_minutes or self._keep_checkpoint_per_n_minutes == 0:
  145. self._keep_checkpoint_max = 1
  146. self._integrated_save = Validator.check_bool(integrated_save)
  147. self._async_save = Validator.check_bool(async_save)
  148. self._saved_network = saved_network
  149. self._enc_key = Validator.check_isinstance('enc_key', enc_key, (type(None), bytes))
  150. self._enc_mode = Validator.check_isinstance('enc_mode', enc_mode, str)
  151. @property
  152. def save_checkpoint_steps(self):
  153. """Get the value of _save_checkpoint_steps."""
  154. return self._save_checkpoint_steps
  155. @property
  156. def save_checkpoint_seconds(self):
  157. """Get the value of _save_checkpoint_seconds."""
  158. return self._save_checkpoint_seconds
  159. @property
  160. def keep_checkpoint_max(self):
  161. """Get the value of _keep_checkpoint_max."""
  162. return self._keep_checkpoint_max
  163. @property
  164. def keep_checkpoint_per_n_minutes(self):
  165. """Get the value of _keep_checkpoint_per_n_minutes."""
  166. return self._keep_checkpoint_per_n_minutes
  167. @property
  168. def integrated_save(self):
  169. """Get the value of _integrated_save."""
  170. return self._integrated_save
  171. @property
  172. def async_save(self):
  173. """Get the value of _async_save."""
  174. return self._async_save
  175. @property
  176. def saved_network(self):
  177. """Get the value of _saved_network"""
  178. return self._saved_network
  179. @property
  180. def enc_key(self):
  181. """Get the value of _enc_key"""
  182. return self._enc_key
  183. @property
  184. def enc_mode(self):
  185. """Get the value of _enc_mode"""
  186. return self._enc_mode
  187. def get_checkpoint_policy(self):
  188. """Get the policy of checkpoint."""
  189. checkpoint_policy = {'save_checkpoint_steps': self.save_checkpoint_steps,
  190. 'save_checkpoint_seconds': self.save_checkpoint_seconds,
  191. 'keep_checkpoint_max': self.keep_checkpoint_max,
  192. 'keep_checkpoint_per_n_minutes': self.keep_checkpoint_per_n_minutes,
  193. 'saved_network': self.saved_network}
  194. return checkpoint_policy
  195. class ModelCheckpoint(Callback):
  196. """
  197. The checkpoint callback class.
  198. It is called to combine with train process and save the model and network parameters after training.
  199. Note:
  200. In the distributed training scenario, please specify different directories for each training process
  201. to save the checkpoint file. Otherwise, the training may fail.
  202. Args:
  203. prefix (str): The prefix name of checkpoint files. Default: "CKP".
  204. directory (str): The path of the folder which will be saved in the checkpoint file. Default: None.
  205. config (CheckpointConfig): Checkpoint strategy configuration. Default: None.
  206. Raises:
  207. ValueError: If the prefix is invalid.
  208. TypeError: If the config is not CheckpointConfig type.
  209. """
  210. def __init__(self, prefix='CKP', directory=None, config=None):
  211. super(ModelCheckpoint, self).__init__()
  212. self._latest_ckpt_file_name = ""
  213. self._init_time = time.time()
  214. self._last_time = time.time()
  215. self._last_time_for_keep = time.time()
  216. self._last_triggered_step = 0
  217. if not isinstance(prefix, str) or prefix.find('/') >= 0:
  218. raise ValueError("Prefix {} for checkpoint file name invalid, "
  219. "please check and correct it and then continue.".format(prefix))
  220. self._prefix = prefix
  221. if directory is not None:
  222. self._directory = _make_directory(directory)
  223. else:
  224. self._directory = _cur_dir
  225. if config is None:
  226. self._config = CheckpointConfig()
  227. else:
  228. if not isinstance(config, CheckpointConfig):
  229. raise TypeError("config should be CheckpointConfig type.")
  230. self._config = config
  231. # get existing checkpoint files
  232. self._manager = CheckpointManager()
  233. self._prefix = _chg_ckpt_file_name_if_same_exist(self._directory, self._prefix)
  234. self._graph_saved = False
  235. self._need_flush_from_cache = True
  236. def step_end(self, run_context):
  237. """
  238. Save the checkpoint at the end of step.
  239. Args:
  240. run_context (RunContext): Context of the train running.
  241. """
  242. if _is_role_pserver():
  243. self._prefix = "PServer_" + str(_get_ps_mode_rank()) + "_" + self._prefix
  244. cb_params = run_context.original_args()
  245. _make_directory(self._directory)
  246. # save graph (only once)
  247. if not self._graph_saved:
  248. graph_file_name = os.path.join(self._directory, self._prefix + '-graph.meta')
  249. if os.path.isfile(graph_file_name) and context.get_context("mode") == context.GRAPH_MODE:
  250. os.remove(graph_file_name)
  251. _save_graph(cb_params.train_network, graph_file_name)
  252. self._graph_saved = True
  253. thread_list = threading.enumerate()
  254. for thread in thread_list:
  255. if thread.getName() == "asyn_save_ckpt":
  256. thread.join()
  257. self._save_ckpt(cb_params)
  258. def end(self, run_context):
  259. """
  260. Save the last checkpoint after training finished.
  261. Args:
  262. run_context (RunContext): Context of the train running.
  263. """
  264. cb_params = run_context.original_args()
  265. _to_save_last_ckpt = True
  266. self._save_ckpt(cb_params, _to_save_last_ckpt)
  267. thread_list = threading.enumerate()
  268. for thread in thread_list:
  269. if thread.getName() == "asyn_save_ckpt":
  270. thread.join()
  271. destroy_allgather_cell()
  272. def _check_save_ckpt(self, cb_params, force_to_save):
  273. """Check whether save checkpoint files or not."""
  274. if self._config.save_checkpoint_steps and self._config.save_checkpoint_steps > 0:
  275. if cb_params.cur_step_num >= self._last_triggered_step + self._config.save_checkpoint_steps \
  276. or force_to_save is True:
  277. return True
  278. elif self._config.save_checkpoint_seconds and self._config.save_checkpoint_seconds > 0:
  279. self._cur_time = time.time()
  280. if (self._cur_time - self._last_time) > self._config.save_checkpoint_seconds or force_to_save is True:
  281. self._last_time = self._cur_time
  282. return True
  283. return False
  284. def _save_ckpt(self, cb_params, force_to_save=False):
  285. """Save checkpoint files."""
  286. if cb_params.cur_step_num == self._last_triggered_step:
  287. return
  288. # if param is cache enable, flush data from cache to host before save_ckpt
  289. if self._need_flush_from_cache:
  290. self._flush_from_cache(cb_params)
  291. save_ckpt = self._check_save_ckpt(cb_params, force_to_save)
  292. step_num_in_epoch = int((cb_params.cur_step_num - 1) % cb_params.batch_num + 1)
  293. if save_ckpt:
  294. cur_ckpoint_file = self._prefix + "-" + str(cb_params.cur_epoch_num) + "_" \
  295. + str(step_num_in_epoch) + ".ckpt"
  296. # update checkpoint file list.
  297. self._manager.update_ckpoint_filelist(self._directory, self._prefix)
  298. # keep checkpoint files number equal max number.
  299. if self._config.keep_checkpoint_max and 0 < self._config.keep_checkpoint_max <= self._manager.ckpoint_num:
  300. self._manager.remove_oldest_ckpoint_file()
  301. elif self._config.keep_checkpoint_per_n_minutes and self._config.keep_checkpoint_per_n_minutes > 0:
  302. self._cur_time_for_keep = time.time()
  303. if (self._cur_time_for_keep - self._last_time_for_keep) \
  304. < self._config.keep_checkpoint_per_n_minutes * 60:
  305. self._manager.keep_one_ckpoint_per_minutes(self._config.keep_checkpoint_per_n_minutes,
  306. self._cur_time_for_keep)
  307. # generate the new checkpoint file and rename it.
  308. global _save_dir
  309. _save_dir = self._directory
  310. cur_file = os.path.join(self._directory, cur_ckpoint_file)
  311. self._last_time_for_keep = time.time()
  312. self._last_triggered_step = cb_params.cur_step_num
  313. if context.get_context("enable_ge"):
  314. set_cur_net(cb_params.train_network)
  315. cb_params.train_network.exec_checkpoint_graph()
  316. network = self._config.saved_network if self._config.saved_network is not None else cb_params.train_network
  317. save_checkpoint(network, cur_file, self._config.integrated_save,
  318. self._config.async_save, self._config.enc_key, self._config.enc_mode)
  319. self._latest_ckpt_file_name = cur_file
  320. def _flush_from_cache(self, cb_params):
  321. """Flush cache data to host if tensor is cache enable."""
  322. has_cache_params = False
  323. params = cb_params.train_network.get_parameters()
  324. for param in params:
  325. if param.cache_enable:
  326. has_cache_params = True
  327. Tensor(param).flush_from_cache()
  328. if not has_cache_params:
  329. self._need_flush_from_cache = False
  330. @property
  331. def latest_ckpt_file_name(self):
  332. """Return the latest checkpoint path and file name."""
  333. return self._latest_ckpt_file_name
  334. class CheckpointManager:
  335. """Manage checkpoint files according to train_config of checkpoint."""
  336. def __init__(self):
  337. self._ckpoint_filelist = []
  338. @property
  339. def ckpoint_filelist(self):
  340. """Get all the related checkpoint files managed here."""
  341. return self._ckpoint_filelist
  342. @property
  343. def ckpoint_num(self):
  344. """Get the number of the related checkpoint files managed here."""
  345. return len(self._ckpoint_filelist)
  346. def update_ckpoint_filelist(self, directory, prefix):
  347. """Update the checkpoint file list."""
  348. self._ckpoint_filelist = []
  349. files = os.listdir(directory)
  350. for filename in files:
  351. if os.path.splitext(filename)[-1] == ".ckpt" and filename.startswith(prefix):
  352. mid_name = filename[len(prefix):-5]
  353. flag = True
  354. for char in mid_name:
  355. if char.isalpha():
  356. flag = False
  357. if flag:
  358. self._ckpoint_filelist.append(directory + '/' + filename)
  359. def remove_ckpoint_file(self, file_name):
  360. """Remove the specified checkpoint file from this checkpoint manager and also from the directory."""
  361. try:
  362. os.chmod(file_name, stat.S_IWRITE)
  363. os.remove(file_name)
  364. self._ckpoint_filelist.remove(file_name)
  365. except OSError:
  366. logger.warning("OSError, failed to remove the older ckpt file %s.", file_name)
  367. except ValueError:
  368. logger.warning("ValueError, failed to remove the older ckpt file %s.", file_name)
  369. def remove_oldest_ckpoint_file(self):
  370. """Remove the oldest checkpoint file from this checkpoint manager and also from the directory."""
  371. ckpoint_files = sorted(self._ckpoint_filelist, key=os.path.getmtime)
  372. self.remove_ckpoint_file(ckpoint_files[0])
  373. def keep_one_ckpoint_per_minutes(self, minutes, cur_time):
  374. """Only keep the latest one ckpt file per minutes, remove other files generated in [last_time, cur_time]."""
  375. movs = []
  376. oldest_file = ''
  377. oldest_time = cur_time
  378. for ck_file in self._ckpoint_filelist:
  379. modify_time = os.path.getmtime(ck_file)
  380. if cur_time - modify_time < 60 * minutes:
  381. movs.append(ck_file)
  382. if modify_time < oldest_time:
  383. oldest_time = modify_time
  384. oldest_file = ck_file
  385. for mv_file in movs:
  386. if mv_file == oldest_file:
  387. continue
  388. self.remove_ckpoint_file(mv_file)