You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

_checkpoint.py 18 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
6 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """Checkpoint related classes and functions."""
  16. import os
  17. import stat
  18. import time
  19. import threading
  20. import mindspore.context as context
  21. from mindspore import log as logger
  22. from mindspore import nn
  23. from mindspore._checkparam import Validator
  24. from mindspore.train._utils import _make_directory
  25. from mindspore.train.serialization import save_checkpoint, _save_graph
  26. from mindspore.parallel._ps_context import _is_role_pserver, _get_ps_mode_rank
  27. from ._callback import Callback, set_cur_net
# Working directory at import time; used as the default checkpoint directory
# when ModelCheckpoint is constructed without an explicit `directory`.
_cur_dir = os.getcwd()
# Last directory a checkpoint was written to (updated by ModelCheckpoint._save_ckpt).
_save_dir = _cur_dir
  30. def _check_file_name_prefix(file_name_prefix):
  31. """
  32. Check file name valid or not.
  33. File name can't include '/'. This file name naming convention only apply to Linux.
  34. """
  35. if not isinstance(file_name_prefix, str) or file_name_prefix.find('/') >= 0:
  36. return False
  37. return True
  38. def _chg_ckpt_file_name_if_same_exist(directory, prefix):
  39. """Check if there is a file with the same name."""
  40. files = os.listdir(directory)
  41. suffix_num = 0
  42. pre_len = len(prefix)
  43. for filename in files:
  44. name_ext = os.path.splitext(filename)
  45. if name_ext[-1] != ".ckpt":
  46. continue
  47. # find same prefix file
  48. if filename.find(prefix) == 0 and not filename[pre_len].isalpha():
  49. # add the max suffix + 1
  50. index = filename[pre_len:].find("-")
  51. if index == 0:
  52. suffix_num = max(suffix_num, 1)
  53. elif index != -1:
  54. num = filename[pre_len+1:pre_len+index]
  55. if num.isdigit():
  56. suffix_num = max(suffix_num, int(num)+1)
  57. if suffix_num != 0:
  58. prefix = prefix + "_" + str(suffix_num)
  59. return prefix
  60. class CheckpointConfig:
  61. """
  62. The configuration of model checkpoint.
  63. Note:
  64. During the training process, if dataset is transmitted through the data channel,
  65. It is suggested to set 'save_checkpoint_steps' to an integer multiple of loop_size.
  66. Otherwise, the time to save the checkpoint may be biased.
  67. Args:
  68. save_checkpoint_steps (int): Steps to save checkpoint. Default: 1.
  69. save_checkpoint_seconds (int): Seconds to save checkpoint. Default: 0.
  70. Can't be used with save_checkpoint_steps at the same time.
  71. keep_checkpoint_max (int): Maximum number of checkpoint files can be saved. Default: 5.
  72. keep_checkpoint_per_n_minutes (int): Keep one checkpoint every n minutes. Default: 0.
  73. Can't be used with keep_checkpoint_max at the same time.
  74. integrated_save (bool): Whether to perform integrated save function in automatic model parallel scene.
  75. Default: True. Integrated save function is only supported in automatic parallel scene, not supported
  76. in manual parallel.
  77. async_save (bool): Whether asynchronous execution saves the checkpoint to a file. Default: False.
  78. saved_network (Cell): Network to be saved in checkpoint file. If the saved_network has no relation
  79. with the network in training, the initial value of saved_network will be saved. Default: None.
  80. Raises:
  81. ValueError: If the input_param is None or 0.
  82. Examples:
  83. >>> class LeNet5(nn.Cell):
  84. >>> def __init__(self, num_class=10, num_channel=1):
  85. >>> super(LeNet5, self).__init__()
  86. >>> self.conv1 = nn.Conv2d(num_channel, 6, 5, pad_mode='valid')
  87. >>> self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid')
  88. >>> self.fc1 = nn.Dense(16 * 5 * 5, 120, weight_init=Normal(0.02))
  89. >>> self.fc2 = nn.Dense(120, 84, weight_init=Normal(0.02))
  90. >>> self.fc3 = nn.Dense(84, num_class, weight_init=Normal(0.02))
  91. >>> self.relu = nn.ReLU()
  92. >>> self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
  93. >>> self.flatten = nn.Flatten()
  94. >>>
  95. >>> def construct(self, x):
  96. >>> x = self.max_pool2d(self.relu(self.conv1(x)))
  97. >>> x = self.max_pool2d(self.relu(self.conv2(x)))
  98. >>> x = self.flatten(x)
  99. >>> x = self.relu(self.fc1(x))
  100. >>> x = self.relu(self.fc2(x))
  101. >>> x = self.fc3(x)
  102. >>> return x
  103. >>>
  104. >>> net = LeNet5()
  105. >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
  106. >>> optim = nn.Momentum(net.trainable_params(), 0.01, 0.9)
  107. >>> model = Model(net, loss_fn=loss, optimizer=optim)
  108. >>> data_path = './MNIST_Data'
  109. >>> dataset = create_dataset(data_path)
  110. >>> config = CheckpointConfig(saved_network=net)
  111. >>> ckpoint_cb = ModelCheckpoint(prefix='LeNet5', directory='./checkpoint', config=config)
  112. >>> model.train(10, dataset, callbacks=ckpoint_cb)
  113. """
  114. def __init__(self,
  115. save_checkpoint_steps=1,
  116. save_checkpoint_seconds=0,
  117. keep_checkpoint_max=5,
  118. keep_checkpoint_per_n_minutes=0,
  119. integrated_save=True,
  120. async_save=False,
  121. saved_network=None):
  122. if save_checkpoint_steps is not None:
  123. save_checkpoint_steps = Validator.check_non_negative_int(save_checkpoint_steps)
  124. if save_checkpoint_seconds is not None:
  125. save_checkpoint_seconds = Validator.check_non_negative_int(save_checkpoint_seconds)
  126. if keep_checkpoint_max is not None:
  127. keep_checkpoint_max = Validator.check_non_negative_int(keep_checkpoint_max)
  128. if keep_checkpoint_per_n_minutes is not None:
  129. keep_checkpoint_per_n_minutes = Validator.check_non_negative_int(keep_checkpoint_per_n_minutes)
  130. if saved_network is not None and not isinstance(saved_network, nn.Cell):
  131. raise TypeError(f"The type of saved_network must be None or Cell, but got {str(type(saved_network))}.")
  132. if not save_checkpoint_steps and not save_checkpoint_seconds and \
  133. not keep_checkpoint_max and not keep_checkpoint_per_n_minutes:
  134. raise ValueError("The input_param can't be all None or 0")
  135. self._save_checkpoint_steps = save_checkpoint_steps
  136. self._save_checkpoint_seconds = save_checkpoint_seconds
  137. if self._save_checkpoint_steps and self._save_checkpoint_steps > 0:
  138. self._save_checkpoint_seconds = None
  139. self._keep_checkpoint_max = keep_checkpoint_max
  140. self._keep_checkpoint_per_n_minutes = keep_checkpoint_per_n_minutes
  141. if self._keep_checkpoint_max and self._keep_checkpoint_max > 0:
  142. self._keep_checkpoint_per_n_minutes = None
  143. else:
  144. if not self._keep_checkpoint_per_n_minutes or self._keep_checkpoint_per_n_minutes == 0:
  145. self._keep_checkpoint_max = 1
  146. self._integrated_save = Validator.check_bool(integrated_save)
  147. self._async_save = Validator.check_bool(async_save)
  148. self._saved_network = saved_network
  149. @property
  150. def save_checkpoint_steps(self):
  151. """Get the value of _save_checkpoint_steps."""
  152. return self._save_checkpoint_steps
  153. @property
  154. def save_checkpoint_seconds(self):
  155. """Get the value of _save_checkpoint_seconds."""
  156. return self._save_checkpoint_seconds
  157. @property
  158. def keep_checkpoint_max(self):
  159. """Get the value of _keep_checkpoint_max."""
  160. return self._keep_checkpoint_max
  161. @property
  162. def keep_checkpoint_per_n_minutes(self):
  163. """Get the value of _keep_checkpoint_per_n_minutes."""
  164. return self._keep_checkpoint_per_n_minutes
  165. @property
  166. def integrated_save(self):
  167. """Get the value of _integrated_save."""
  168. return self._integrated_save
  169. @property
  170. def async_save(self):
  171. """Get the value of _async_save."""
  172. return self._async_save
  173. @property
  174. def saved_network(self):
  175. """Get the value of _saved_network"""
  176. return self._saved_network
  177. def get_checkpoint_policy(self):
  178. """Get the policy of checkpoint."""
  179. checkpoint_policy = {'save_checkpoint_steps': self.save_checkpoint_steps,
  180. 'save_checkpoint_seconds': self.save_checkpoint_seconds,
  181. 'keep_checkpoint_max': self.keep_checkpoint_max,
  182. 'keep_checkpoint_per_n_minutes': self.keep_checkpoint_per_n_minutes,
  183. 'saved_network': self.saved_network}
  184. return checkpoint_policy
  185. class ModelCheckpoint(Callback):
  186. """
  187. The checkpoint callback class.
  188. It is called to combine with train process and save the model and network parameters after traning.
  189. Args:
  190. prefix (str): The prefix name of checkpoint files. Default: "CKP".
  191. directory (str): The path of the folder which will be saved in the checkpoint file. Default: None.
  192. config (CheckpointConfig): Checkpoint strategy configuration. Default: None.
  193. Raises:
  194. ValueError: If the prefix is invalid.
  195. TypeError: If the config is not CheckpointConfig type.
  196. """
  197. def __init__(self, prefix='CKP', directory=None, config=None):
  198. super(ModelCheckpoint, self).__init__()
  199. self._latest_ckpt_file_name = ""
  200. self._init_time = time.time()
  201. self._last_time = time.time()
  202. self._last_time_for_keep = time.time()
  203. self._last_triggered_step = 0
  204. if _check_file_name_prefix(prefix):
  205. self._prefix = prefix
  206. else:
  207. raise ValueError("Prefix {} for checkpoint file name invalid, "
  208. "please check and correct it and then continue.".format(prefix))
  209. if directory is not None:
  210. self._directory = _make_directory(directory)
  211. else:
  212. self._directory = _cur_dir
  213. if config is None:
  214. self._config = CheckpointConfig()
  215. else:
  216. if not isinstance(config, CheckpointConfig):
  217. raise TypeError("config should be CheckpointConfig type.")
  218. self._config = config
  219. # get existing checkpoint files
  220. self._manager = CheckpointManager()
  221. self._prefix = _chg_ckpt_file_name_if_same_exist(self._directory, self._prefix)
  222. self._graph_saved = False
  223. def step_end(self, run_context):
  224. """
  225. Save the checkpoint at the end of step.
  226. Args:
  227. run_context (RunContext): Context of the train running.
  228. """
  229. if _is_role_pserver():
  230. self._prefix = "PServer_" + str(_get_ps_mode_rank()) + "_" + self._prefix
  231. cb_params = run_context.original_args()
  232. _make_directory(self._directory)
  233. # save graph (only once)
  234. if not self._graph_saved:
  235. graph_file_name = os.path.join(self._directory, self._prefix + '-graph.meta')
  236. if os.path.isfile(graph_file_name) and context.get_context("mode") == context.GRAPH_MODE:
  237. os.remove(graph_file_name)
  238. _save_graph(cb_params.train_network, graph_file_name)
  239. self._graph_saved = True
  240. thread_list = threading.enumerate()
  241. for thread in thread_list:
  242. if thread.getName() == "asyn_save_ckpt":
  243. thread.join()
  244. self._save_ckpt(cb_params)
  245. def end(self, run_context):
  246. """
  247. Save the last checkpoint after training finished.
  248. Args:
  249. run_context (RunContext): Context of the train running.
  250. """
  251. cb_params = run_context.original_args()
  252. _to_save_last_ckpt = True
  253. self._save_ckpt(cb_params, _to_save_last_ckpt)
  254. thread_list = threading.enumerate()
  255. for thread in thread_list:
  256. if thread.getName() == "asyn_save_ckpt":
  257. thread.join()
  258. from mindspore.parallel._cell_wrapper import destroy_allgather_cell
  259. destroy_allgather_cell()
  260. def _check_save_ckpt(self, cb_params, force_to_save):
  261. """Check whether save checkpoint files or not."""
  262. if self._config.save_checkpoint_steps and self._config.save_checkpoint_steps > 0:
  263. if cb_params.cur_step_num >= self._last_triggered_step + self._config.save_checkpoint_steps \
  264. or force_to_save is True:
  265. return True
  266. elif self._config.save_checkpoint_seconds and self._config.save_checkpoint_seconds > 0:
  267. self._cur_time = time.time()
  268. if (self._cur_time - self._last_time) > self._config.save_checkpoint_seconds or force_to_save is True:
  269. self._last_time = self._cur_time
  270. return True
  271. return False
  272. def _save_ckpt(self, cb_params, force_to_save=False):
  273. """Save checkpoint files."""
  274. if cb_params.cur_step_num == self._last_triggered_step:
  275. return
  276. save_ckpt = self._check_save_ckpt(cb_params, force_to_save)
  277. step_num_in_epoch = int((cb_params.cur_step_num - 1) % cb_params.batch_num + 1)
  278. if save_ckpt:
  279. cur_ckpoint_file = self._prefix + "-" + str(cb_params.cur_epoch_num) + "_" \
  280. + str(step_num_in_epoch) + ".ckpt"
  281. # update checkpoint file list.
  282. self._manager.update_ckpoint_filelist(self._directory, self._prefix)
  283. # keep checkpoint files number equal max number.
  284. if self._config.keep_checkpoint_max and 0 < self._config.keep_checkpoint_max <= self._manager.ckpoint_num:
  285. self._manager.remove_oldest_ckpoint_file()
  286. elif self._config.keep_checkpoint_per_n_minutes and self._config.keep_checkpoint_per_n_minutes > 0:
  287. self._cur_time_for_keep = time.time()
  288. if (self._cur_time_for_keep - self._last_time_for_keep) \
  289. < self._config.keep_checkpoint_per_n_minutes * 60:
  290. self._manager.keep_one_ckpoint_per_minutes(self._config.keep_checkpoint_per_n_minutes,
  291. self._cur_time_for_keep)
  292. # generate the new checkpoint file and rename it.
  293. global _save_dir
  294. _save_dir = self._directory
  295. cur_file = os.path.join(self._directory, cur_ckpoint_file)
  296. self._last_time_for_keep = time.time()
  297. self._last_triggered_step = cb_params.cur_step_num
  298. if context.get_context("enable_ge"):
  299. set_cur_net(cb_params.train_network)
  300. cb_params.train_network.exec_checkpoint_graph()
  301. network = self._config.saved_network if self._config.saved_network is not None else cb_params.train_network
  302. save_checkpoint(network, cur_file, self._config.integrated_save,
  303. self._config.async_save)
  304. self._latest_ckpt_file_name = cur_file
  305. @property
  306. def latest_ckpt_file_name(self):
  307. """Return the latest checkpoint path and file name."""
  308. return self._latest_ckpt_file_name
  309. class CheckpointManager:
  310. """Manage checkpoint files according to train_config of checkpoint."""
  311. def __init__(self):
  312. self._ckpoint_filelist = []
  313. @property
  314. def ckpoint_filelist(self):
  315. """Get all the related checkpoint files managed here."""
  316. return self._ckpoint_filelist
  317. @property
  318. def ckpoint_num(self):
  319. """Get the number of the related checkpoint files managed here."""
  320. return len(self._ckpoint_filelist)
  321. def update_ckpoint_filelist(self, directory, prefix):
  322. """Update the checkpoint file list."""
  323. self._ckpoint_filelist = []
  324. files = os.listdir(directory)
  325. for filename in files:
  326. if os.path.splitext(filename)[-1] == ".ckpt" and filename.startswith(prefix):
  327. mid_name = filename[len(prefix):-5]
  328. flag = True
  329. for char in mid_name:
  330. if char.isalpha():
  331. flag = False
  332. if flag:
  333. self._ckpoint_filelist.append(directory + '/' + filename)
  334. def remove_ckpoint_file(self, file_name):
  335. """Remove the specified checkpoint file from this checkpoint manager and also from the directory."""
  336. try:
  337. os.chmod(file_name, stat.S_IWRITE)
  338. os.remove(file_name)
  339. self._ckpoint_filelist.remove(file_name)
  340. except OSError:
  341. logger.warning("OSError, failed to remove the older ckpt file %s.", file_name)
  342. except ValueError:
  343. logger.warning("ValueError, failed to remove the older ckpt file %s.", file_name)
  344. def remove_oldest_ckpoint_file(self):
  345. """Remove the oldest checkpoint file from this checkpoint manager and also from the directory."""
  346. ckpoint_files = sorted(self._ckpoint_filelist, key=os.path.getmtime)
  347. self.remove_ckpoint_file(ckpoint_files[0])
  348. def keep_one_ckpoint_per_minutes(self, minutes, cur_time):
  349. """Only keep the latest one ckpt file per minutes, remove other files generated in [last_time, cur_time]."""
  350. movs = []
  351. oldest_file = ''
  352. oldest_time = cur_time
  353. for ck_file in self._ckpoint_filelist:
  354. modify_time = os.path.getmtime(ck_file)
  355. if cur_time - modify_time < 60 * minutes:
  356. movs.append(ck_file)
  357. if modify_time < oldest_time:
  358. oldest_time = modify_time
  359. oldest_file = ck_file
  360. for mv_file in movs:
  361. if mv_file == oldest_file:
  362. continue
  363. self.remove_ckpoint_file(mv_file)