You can not select more than 25 topics Topics must start with a chinese character,a letter or number, can include dashes ('-') and can be up to 35 characters long.

_checkpoint.py 17 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
6 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """Checkpoint related classes and functions."""
  16. import os
  17. import stat
  18. import time
  19. import threading
  20. import mindspore.context as context
  21. from mindspore import log as logger
  22. from mindspore import nn
  23. from mindspore._checkparam import Validator
  24. from mindspore.train._utils import _make_directory
  25. from mindspore.train.serialization import save_checkpoint, _save_graph
  26. from mindspore.parallel._ps_context import _is_role_pserver, _get_ps_mode_rank
  27. from ._callback import Callback, set_cur_net
# Default checkpoint output locations: checkpoints go to the process's current
# working directory unless a ModelCheckpoint callback overrides _save_dir.
_cur_dir = os.getcwd()
_save_dir = _cur_dir
  30. def _check_file_name_prefix(file_name_prefix):
  31. """
  32. Check file name valid or not.
  33. File name can't include '/'. This file name naming convention only apply to Linux.
  34. """
  35. if not isinstance(file_name_prefix, str) or file_name_prefix.find('/') >= 0:
  36. return False
  37. return True
  38. def _chg_ckpt_file_name_if_same_exist(directory, prefix):
  39. """Check if there is a file with the same name."""
  40. files = os.listdir(directory)
  41. suffix_num = 0
  42. pre_len = len(prefix)
  43. for filename in files:
  44. name_ext = os.path.splitext(filename)
  45. if name_ext[-1] != ".ckpt":
  46. continue
  47. # find same prefix file
  48. if filename.find(prefix) == 0 and not filename[pre_len].isalpha():
  49. # add the max suffix + 1
  50. index = filename[pre_len:].find("-")
  51. if index == 0:
  52. suffix_num = max(suffix_num, 1)
  53. elif index != -1:
  54. num = filename[pre_len+1:pre_len+index]
  55. if num.isdigit():
  56. suffix_num = max(suffix_num, int(num)+1)
  57. if suffix_num != 0:
  58. prefix = prefix + "_" + str(suffix_num)
  59. return prefix
class CheckpointConfig:
    """
    The configuration of model checkpoint.

    Note:
        During the training process, if dataset is transmitted through the data channel,
        it is suggested to set 'save_checkpoint_steps' to an integer multiple of loop_size.
        Otherwise, the time to save the checkpoint may be biased.

    Args:
        save_checkpoint_steps (int): Steps to save checkpoint. Default: 1.
        save_checkpoint_seconds (int): Seconds to save checkpoint. Default: 0.
            Can't be used with save_checkpoint_steps at the same time.
        keep_checkpoint_max (int): Maximum number of checkpoint files can be saved. Default: 5.
        keep_checkpoint_per_n_minutes (int): Keep one checkpoint every n minutes. Default: 0.
            Can't be used with keep_checkpoint_max at the same time.
        integrated_save (bool): Whether to perform integrated save function in automatic model parallel scene.
            Default: True. Integrated save function is only supported in automatic parallel scene, not supported
            in manual parallel.
        async_save (bool): Whether asynchronous execution saves the checkpoint to a file. Default: False.
        saved_network (Cell): Network to be saved in checkpoint file. Default: None.

    Raises:
        ValueError: If the input_param is None or 0.

    Examples:
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.conv = nn.Conv2d(3, 64, 3, has_bias=False, weight_init='normal')
        >>>         self.bn = nn.BatchNorm2d(64)
        >>>         self.relu = nn.ReLU()
        >>>         self.flatten = nn.Flatten()
        >>>         self.fc = nn.Dense(64*224*224, 12)
        >>>
        >>>     def construct(self, x):
        >>>         x = self.conv(x)
        >>>         x = self.bn(x)
        >>>         x = self.relu(x)
        >>>         x = self.flatten(x)
        >>>         out = self.fc(x)
        >>>         return out
        >>>
        >>> net = Net()
        >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
        >>> optim = nn.Momentum(net.trainable_params(), 0.01, 0.9)
        >>> model = Model(net, loss_fn=loss, optimizer=optim)
        >>> dataset = get_dataset()
        >>> config = CheckpointConfig(saved_network=net)
        >>> ckpoint_cb = ModelCheckpoint(prefix="ck_prefix", directory='./', config=config)
        >>> model.train(10, dataset, callbacks=ckpoint_cb)
    """

    def __init__(self,
                 save_checkpoint_steps=1,
                 save_checkpoint_seconds=0,
                 keep_checkpoint_max=5,
                 keep_checkpoint_per_n_minutes=0,
                 integrated_save=True,
                 async_save=False,
                 saved_network=None):
        # Validate that each policy value, when provided, is a non-negative int.
        if save_checkpoint_steps is not None:
            save_checkpoint_steps = Validator.check_non_negative_int(save_checkpoint_steps)
        if save_checkpoint_seconds is not None:
            save_checkpoint_seconds = Validator.check_non_negative_int(save_checkpoint_seconds)
        if keep_checkpoint_max is not None:
            keep_checkpoint_max = Validator.check_non_negative_int(keep_checkpoint_max)
        if keep_checkpoint_per_n_minutes is not None:
            keep_checkpoint_per_n_minutes = Validator.check_non_negative_int(keep_checkpoint_per_n_minutes)

        if saved_network is not None and not isinstance(saved_network, nn.Cell):
            raise TypeError(f"The type of saved_network must be None or Cell, but got {str(type(saved_network))}.")

        # At least one save/keep policy must be enabled, otherwise the
        # callback would never do anything useful.
        if not save_checkpoint_steps and not save_checkpoint_seconds and \
                not keep_checkpoint_max and not keep_checkpoint_per_n_minutes:
            raise ValueError("The input_param can't be all None or 0")

        self._save_checkpoint_steps = save_checkpoint_steps
        self._save_checkpoint_seconds = save_checkpoint_seconds
        # Step-based saving takes precedence: a positive step count disables
        # the seconds-based trigger entirely.
        if self._save_checkpoint_steps and self._save_checkpoint_steps > 0:
            self._save_checkpoint_seconds = None

        self._keep_checkpoint_max = keep_checkpoint_max
        self._keep_checkpoint_per_n_minutes = keep_checkpoint_per_n_minutes
        # keep_checkpoint_max takes precedence over the per-n-minutes policy;
        # if neither policy is active, fall back to keeping exactly one file.
        if self._keep_checkpoint_max and self._keep_checkpoint_max > 0:
            self._keep_checkpoint_per_n_minutes = None
        else:
            if not self._keep_checkpoint_per_n_minutes or self._keep_checkpoint_per_n_minutes == 0:
                self._keep_checkpoint_max = 1

        self._integrated_save = Validator.check_bool(integrated_save)
        self._async_save = Validator.check_bool(async_save)
        self._saved_network = saved_network

    @property
    def save_checkpoint_steps(self):
        """Get the value of _save_checkpoint_steps."""
        return self._save_checkpoint_steps

    @property
    def save_checkpoint_seconds(self):
        """Get the value of _save_checkpoint_seconds."""
        return self._save_checkpoint_seconds

    @property
    def keep_checkpoint_max(self):
        """Get the value of _keep_checkpoint_max."""
        return self._keep_checkpoint_max

    @property
    def keep_checkpoint_per_n_minutes(self):
        """Get the value of _keep_checkpoint_per_n_minutes."""
        return self._keep_checkpoint_per_n_minutes

    @property
    def integrated_save(self):
        """Get the value of _integrated_save."""
        return self._integrated_save

    @property
    def async_save(self):
        """Get the value of _async_save."""
        return self._async_save

    @property
    def saved_network(self):
        """Get the value of _saved_network."""
        return self._saved_network

    def get_checkpoint_policy(self):
        """Get the policy of checkpoint as a plain dict (for logging/strategy)."""
        checkpoint_policy = {'save_checkpoint_steps': self.save_checkpoint_steps,
                             'save_checkpoint_seconds': self.save_checkpoint_seconds,
                             'keep_checkpoint_max': self.keep_checkpoint_max,
                             'keep_checkpoint_per_n_minutes': self.keep_checkpoint_per_n_minutes,
                             'saved_network': self.saved_network}
        return checkpoint_policy
  179. class ModelCheckpoint(Callback):
  180. """
  181. The checkpoint callback class.
  182. It is called to combine with train process and save the model and network parameters after traning.
  183. Args:
  184. prefix (str): The prefix name of checkpoint files. Default: "CKP".
  185. directory (str): The path of the folder which will be saved in the checkpoint file. Default: None.
  186. config (CheckpointConfig): Checkpoint strategy configuration. Default: None.
  187. Raises:
  188. ValueError: If the prefix is invalid.
  189. TypeError: If the config is not CheckpointConfig type.
  190. """
  191. def __init__(self, prefix='CKP', directory=None, config=None):
  192. super(ModelCheckpoint, self).__init__()
  193. self._latest_ckpt_file_name = ""
  194. self._init_time = time.time()
  195. self._last_time = time.time()
  196. self._last_time_for_keep = time.time()
  197. self._last_triggered_step = 0
  198. if _check_file_name_prefix(prefix):
  199. self._prefix = prefix
  200. else:
  201. raise ValueError("Prefix {} for checkpoint file name invalid, "
  202. "please check and correct it and then continue.".format(prefix))
  203. if directory is not None:
  204. self._directory = _make_directory(directory)
  205. else:
  206. self._directory = _cur_dir
  207. if config is None:
  208. self._config = CheckpointConfig()
  209. else:
  210. if not isinstance(config, CheckpointConfig):
  211. raise TypeError("config should be CheckpointConfig type.")
  212. self._config = config
  213. # get existing checkpoint files
  214. self._manager = CheckpointManager()
  215. self._prefix = _chg_ckpt_file_name_if_same_exist(self._directory, self._prefix)
  216. self._graph_saved = False
  217. def step_end(self, run_context):
  218. """
  219. Save the checkpoint at the end of step.
  220. Args:
  221. run_context (RunContext): Context of the train running.
  222. """
  223. if _is_role_pserver():
  224. self._prefix = "PServer_" + str(_get_ps_mode_rank()) + "_" + self._prefix
  225. cb_params = run_context.original_args()
  226. # save graph (only once)
  227. if not self._graph_saved:
  228. graph_file_name = os.path.join(self._directory, self._prefix + '-graph.meta')
  229. _save_graph(cb_params.train_network, graph_file_name)
  230. self._graph_saved = True
  231. self._save_ckpt(cb_params)
  232. def end(self, run_context):
  233. """
  234. Save the last checkpoint after training finished.
  235. Args:
  236. run_context (RunContext): Context of the train running.
  237. """
  238. cb_params = run_context.original_args()
  239. _to_save_last_ckpt = True
  240. self._save_ckpt(cb_params, _to_save_last_ckpt)
  241. thread_list = threading.enumerate()
  242. if len(thread_list) > 1:
  243. for thread in thread_list:
  244. if thread.getName() == "asyn_save_ckpt":
  245. thread.join()
  246. from mindspore.parallel._cell_wrapper import destroy_allgather_cell
  247. destroy_allgather_cell()
  248. def _check_save_ckpt(self, cb_params, force_to_save):
  249. """Check whether save checkpoint files or not."""
  250. if self._config.save_checkpoint_steps and self._config.save_checkpoint_steps > 0:
  251. if cb_params.cur_step_num >= self._last_triggered_step + self._config.save_checkpoint_steps \
  252. or force_to_save is True:
  253. return True
  254. elif self._config.save_checkpoint_seconds and self._config.save_checkpoint_seconds > 0:
  255. self._cur_time = time.time()
  256. if (self._cur_time - self._last_time) > self._config.save_checkpoint_seconds or force_to_save is True:
  257. self._last_time = self._cur_time
  258. return True
  259. return False
  260. def _save_ckpt(self, cb_params, force_to_save=False):
  261. """Save checkpoint files."""
  262. if cb_params.cur_step_num == self._last_triggered_step:
  263. return
  264. save_ckpt = self._check_save_ckpt(cb_params, force_to_save)
  265. step_num_in_epoch = int((cb_params.cur_step_num - 1) % cb_params.batch_num + 1)
  266. if save_ckpt:
  267. cur_ckpoint_file = self._prefix + "-" + str(cb_params.cur_epoch_num) + "_" \
  268. + str(step_num_in_epoch) + ".ckpt"
  269. # update checkpoint file list.
  270. self._manager.update_ckpoint_filelist(self._directory, self._prefix)
  271. # keep checkpoint files number equal max number.
  272. if self._config.keep_checkpoint_max and 0 < self._config.keep_checkpoint_max <= self._manager.ckpoint_num:
  273. self._manager.remove_oldest_ckpoint_file()
  274. elif self._config.keep_checkpoint_per_n_minutes and self._config.keep_checkpoint_per_n_minutes > 0:
  275. self._cur_time_for_keep = time.time()
  276. if (self._cur_time_for_keep - self._last_time_for_keep) \
  277. < self._config.keep_checkpoint_per_n_minutes * 60:
  278. self._manager.keep_one_ckpoint_per_minutes(self._config.keep_checkpoint_per_n_minutes,
  279. self._cur_time_for_keep)
  280. # generate the new checkpoint file and rename it.
  281. global _save_dir
  282. _save_dir = self._directory
  283. cur_file = os.path.join(self._directory, cur_ckpoint_file)
  284. self._last_time_for_keep = time.time()
  285. self._last_triggered_step = cb_params.cur_step_num
  286. if context.get_context("enable_ge"):
  287. set_cur_net(cb_params.train_network)
  288. cb_params.train_network.exec_checkpoint_graph()
  289. network = self._config.saved_network if self._config.saved_network is not None else cb_params.train_network
  290. save_checkpoint(network, cur_file, self._config.integrated_save,
  291. self._config.async_save)
  292. self._latest_ckpt_file_name = cur_file
  293. @property
  294. def latest_ckpt_file_name(self):
  295. """Return the latest checkpoint path and file name."""
  296. return self._latest_ckpt_file_name
  297. class CheckpointManager:
  298. """Manage checkpoint files according to train_config of checkpoint."""
  299. def __init__(self):
  300. self._ckpoint_filelist = []
  301. @property
  302. def ckpoint_filelist(self):
  303. """Get all the related checkpoint files managed here."""
  304. return self._ckpoint_filelist
  305. @property
  306. def ckpoint_num(self):
  307. """Get the number of the related checkpoint files managed here."""
  308. return len(self._ckpoint_filelist)
  309. def update_ckpoint_filelist(self, directory, prefix):
  310. """Update the checkpoint file list."""
  311. self._ckpoint_filelist = []
  312. files = os.listdir(directory)
  313. for filename in files:
  314. if os.path.splitext(filename)[-1] == ".ckpt" and filename.startswith(prefix):
  315. mid_name = filename[len(prefix):-5]
  316. flag = True
  317. for char in mid_name:
  318. if char.isalpha():
  319. flag = False
  320. if flag:
  321. self._ckpoint_filelist.append(directory + '/' + filename)
  322. def remove_ckpoint_file(self, file_name):
  323. """Remove the specified checkpoint file from this checkpoint manager and also from the directory."""
  324. try:
  325. os.chmod(file_name, stat.S_IWRITE)
  326. os.remove(file_name)
  327. self._ckpoint_filelist.remove(file_name)
  328. except OSError:
  329. logger.warning("OSError, failed to remove the older ckpt file %s.", file_name)
  330. except ValueError:
  331. logger.warning("ValueError, failed to remove the older ckpt file %s.", file_name)
  332. def remove_oldest_ckpoint_file(self):
  333. """Remove the oldest checkpoint file from this checkpoint manager and also from the directory."""
  334. ckpoint_files = sorted(self._ckpoint_filelist, key=os.path.getmtime)
  335. self.remove_ckpoint_file(ckpoint_files[0])
  336. def keep_one_ckpoint_per_minutes(self, minutes, cur_time):
  337. """Only keep the latest one ckpt file per minutes, remove other files generated in [last_time, cur_time]."""
  338. movs = []
  339. oldest_file = ''
  340. oldest_time = cur_time
  341. for ck_file in self._ckpoint_filelist:
  342. modify_time = os.path.getmtime(ck_file)
  343. if cur_time - modify_time < 60 * minutes:
  344. movs.append(ck_file)
  345. if modify_time < oldest_time:
  346. oldest_time = modify_time
  347. oldest_file = ck_file
  348. for mv_file in movs:
  349. if mv_file == oldest_file:
  350. continue
  351. self.remove_ckpoint_file(mv_file)