You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

_checkpoint.py 18 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
6 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """Checkpoint related classes and functions."""
  16. import os
  17. import stat
  18. import time
  19. import threading
  20. import mindspore.context as context
  21. from mindspore import log as logger
  22. from mindspore import nn
  23. from mindspore._checkparam import Validator
  24. from mindspore.train._utils import _make_directory
  25. from mindspore.train.serialization import save_checkpoint, _save_graph
  26. from mindspore.parallel._ps_context import _is_role_pserver, _get_ps_mode_rank
  27. from ._callback import Callback, set_cur_net
  28. _cur_dir = os.getcwd()
  29. _save_dir = _cur_dir
  30. def _check_file_name_prefix(file_name_prefix):
  31. """
  32. Check file name valid or not.
  33. File name can't include '/'. This file name naming convention only apply to Linux.
  34. """
  35. if not isinstance(file_name_prefix, str) or file_name_prefix.find('/') >= 0:
  36. return False
  37. return True
  38. def _chg_ckpt_file_name_if_same_exist(directory, prefix):
  39. """Check if there is a file with the same name."""
  40. files = os.listdir(directory)
  41. suffix_num = 0
  42. pre_len = len(prefix)
  43. for filename in files:
  44. name_ext = os.path.splitext(filename)
  45. if name_ext[-1] != ".ckpt":
  46. continue
  47. # find same prefix file
  48. if filename.find(prefix) == 0 and not filename[pre_len].isalpha():
  49. # add the max suffix + 1
  50. index = filename[pre_len:].find("-")
  51. if index == 0:
  52. suffix_num = max(suffix_num, 1)
  53. elif index != -1:
  54. num = filename[pre_len+1:pre_len+index]
  55. if num.isdigit():
  56. suffix_num = max(suffix_num, int(num)+1)
  57. if suffix_num != 0:
  58. prefix = prefix + "_" + str(suffix_num)
  59. return prefix
  60. class CheckpointConfig:
  61. """
  62. The configuration of model checkpoint.
  63. Note:
  64. During the training process, if dataset is transmitted through the data channel,
  65. It is suggested to set 'save_checkpoint_steps' to an integer multiple of loop_size.
  66. Otherwise, the time to save the checkpoint may be biased.
  67. Args:
  68. save_checkpoint_steps (int): Steps to save checkpoint. Default: 1.
  69. save_checkpoint_seconds (int): Seconds to save checkpoint. Default: 0.
  70. Can't be used with save_checkpoint_steps at the same time.
  71. keep_checkpoint_max (int): Maximum number of checkpoint files can be saved. Default: 5.
  72. keep_checkpoint_per_n_minutes (int): Keep one checkpoint every n minutes. Default: 0.
  73. Can't be used with keep_checkpoint_max at the same time.
  74. integrated_save (bool): Whether to perform integrated save function in automatic model parallel scene.
  75. Default: True. Integrated save function is only supported in automatic parallel scene, not supported
  76. in manual parallel.
  77. async_save (bool): Whether asynchronous execution saves the checkpoint to a file. Default: False.
  78. saved_network (Cell): Network to be saved in checkpoint file. Default: None.
  79. Raises:
  80. ValueError: If the input_param is None or 0.
  81. Examples:
  82. >>> class LeNet5(nn.Cell):
  83. >>> def __init__(self, num_class=10, num_channel=1):
  84. >>> super(LeNet5, self).__init__()
  85. >>> self.conv1 = nn.Conv2d(num_channel, 6, 5, pad_mode='valid')
  86. >>> self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid')
  87. >>> self.fc1 = nn.Dense(16 * 5 * 5, 120, weight_init=Normal(0.02))
  88. >>> self.fc2 = nn.Dense(120, 84, weight_init=Normal(0.02))
  89. >>> self.fc3 = nn.Dense(84, num_class, weight_init=Normal(0.02))
  90. >>> self.relu = nn.ReLU()
  91. >>> self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
  92. >>> self.flatten = nn.Flatten()
  93. >>>
  94. >>> def construct(self, x):
  95. >>> x = self.max_pool2d(self.relu(self.conv1(x)))
  96. >>> x = self.max_pool2d(self.relu(self.conv2(x)))
  97. >>> x = self.flatten(x)
  98. >>> x = self.relu(self.fc1(x))
  99. >>> x = self.relu(self.fc2(x))
  100. >>> x = self.fc3(x)
  101. >>> return x
  102. >>>
  103. >>> net = LeNet5()
  104. >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
  105. >>> optim = nn.Momentum(net.trainable_params(), 0.01, 0.9)
  106. >>> model = Model(net, loss_fn=loss, optimizer=optim)
  107. >>> data_path = './MNIST_Data'
  108. >>> dataset = create_dataset(data_path)
  109. >>> config = CheckpointConfig(saved_network=net)
  110. >>> ckpoint_cb = ModelCheckpoint(prefix='LeNet5', directory='./checkpoint', config=config)
  111. >>> model.train(10, dataset, callbacks=ckpoint_cb)
  112. """
  113. def __init__(self,
  114. save_checkpoint_steps=1,
  115. save_checkpoint_seconds=0,
  116. keep_checkpoint_max=5,
  117. keep_checkpoint_per_n_minutes=0,
  118. integrated_save=True,
  119. async_save=False,
  120. saved_network=None):
  121. if save_checkpoint_steps is not None:
  122. save_checkpoint_steps = Validator.check_non_negative_int(save_checkpoint_steps)
  123. if save_checkpoint_seconds is not None:
  124. save_checkpoint_seconds = Validator.check_non_negative_int(save_checkpoint_seconds)
  125. if keep_checkpoint_max is not None:
  126. keep_checkpoint_max = Validator.check_non_negative_int(keep_checkpoint_max)
  127. if keep_checkpoint_per_n_minutes is not None:
  128. keep_checkpoint_per_n_minutes = Validator.check_non_negative_int(keep_checkpoint_per_n_minutes)
  129. if saved_network is not None and not isinstance(saved_network, nn.Cell):
  130. raise TypeError(f"The type of saved_network must be None or Cell, but got {str(type(saved_network))}.")
  131. if not save_checkpoint_steps and not save_checkpoint_seconds and \
  132. not keep_checkpoint_max and not keep_checkpoint_per_n_minutes:
  133. raise ValueError("The input_param can't be all None or 0")
  134. self._save_checkpoint_steps = save_checkpoint_steps
  135. self._save_checkpoint_seconds = save_checkpoint_seconds
  136. if self._save_checkpoint_steps and self._save_checkpoint_steps > 0:
  137. self._save_checkpoint_seconds = None
  138. self._keep_checkpoint_max = keep_checkpoint_max
  139. self._keep_checkpoint_per_n_minutes = keep_checkpoint_per_n_minutes
  140. if self._keep_checkpoint_max and self._keep_checkpoint_max > 0:
  141. self._keep_checkpoint_per_n_minutes = None
  142. else:
  143. if not self._keep_checkpoint_per_n_minutes or self._keep_checkpoint_per_n_minutes == 0:
  144. self._keep_checkpoint_max = 1
  145. self._integrated_save = Validator.check_bool(integrated_save)
  146. self._async_save = Validator.check_bool(async_save)
  147. self._saved_network = saved_network
  148. @property
  149. def save_checkpoint_steps(self):
  150. """Get the value of _save_checkpoint_steps."""
  151. return self._save_checkpoint_steps
  152. @property
  153. def save_checkpoint_seconds(self):
  154. """Get the value of _save_checkpoint_seconds."""
  155. return self._save_checkpoint_seconds
  156. @property
  157. def keep_checkpoint_max(self):
  158. """Get the value of _keep_checkpoint_max."""
  159. return self._keep_checkpoint_max
  160. @property
  161. def keep_checkpoint_per_n_minutes(self):
  162. """Get the value of _keep_checkpoint_per_n_minutes."""
  163. return self._keep_checkpoint_per_n_minutes
  164. @property
  165. def integrated_save(self):
  166. """Get the value of _integrated_save."""
  167. return self._integrated_save
  168. @property
  169. def async_save(self):
  170. """Get the value of _async_save."""
  171. return self._async_save
  172. @property
  173. def saved_network(self):
  174. """Get the value of _saved_network"""
  175. return self._saved_network
  176. def get_checkpoint_policy(self):
  177. """Get the policy of checkpoint."""
  178. checkpoint_policy = {'save_checkpoint_steps': self.save_checkpoint_steps,
  179. 'save_checkpoint_seconds': self.save_checkpoint_seconds,
  180. 'keep_checkpoint_max': self.keep_checkpoint_max,
  181. 'keep_checkpoint_per_n_minutes': self.keep_checkpoint_per_n_minutes,
  182. 'saved_network': self.saved_network}
  183. return checkpoint_policy
  184. class ModelCheckpoint(Callback):
  185. """
  186. The checkpoint callback class.
  187. It is called to combine with train process and save the model and network parameters after traning.
  188. Args:
  189. prefix (str): The prefix name of checkpoint files. Default: "CKP".
  190. directory (str): The path of the folder which will be saved in the checkpoint file. Default: None.
  191. config (CheckpointConfig): Checkpoint strategy configuration. Default: None.
  192. Raises:
  193. ValueError: If the prefix is invalid.
  194. TypeError: If the config is not CheckpointConfig type.
  195. """
  196. def __init__(self, prefix='CKP', directory=None, config=None):
  197. super(ModelCheckpoint, self).__init__()
  198. self._latest_ckpt_file_name = ""
  199. self._init_time = time.time()
  200. self._last_time = time.time()
  201. self._last_time_for_keep = time.time()
  202. self._last_triggered_step = 0
  203. if _check_file_name_prefix(prefix):
  204. self._prefix = prefix
  205. else:
  206. raise ValueError("Prefix {} for checkpoint file name invalid, "
  207. "please check and correct it and then continue.".format(prefix))
  208. if directory is not None:
  209. self._directory = _make_directory(directory)
  210. else:
  211. self._directory = _cur_dir
  212. if config is None:
  213. self._config = CheckpointConfig()
  214. else:
  215. if not isinstance(config, CheckpointConfig):
  216. raise TypeError("config should be CheckpointConfig type.")
  217. self._config = config
  218. # get existing checkpoint files
  219. self._manager = CheckpointManager()
  220. self._prefix = _chg_ckpt_file_name_if_same_exist(self._directory, self._prefix)
  221. self._graph_saved = False
  222. def step_end(self, run_context):
  223. """
  224. Save the checkpoint at the end of step.
  225. Args:
  226. run_context (RunContext): Context of the train running.
  227. """
  228. if _is_role_pserver():
  229. self._prefix = "PServer_" + str(_get_ps_mode_rank()) + "_" + self._prefix
  230. cb_params = run_context.original_args()
  231. # save graph (only once)
  232. if not self._graph_saved:
  233. graph_file_name = os.path.join(self._directory, self._prefix + '-graph.meta')
  234. _save_graph(cb_params.train_network, graph_file_name)
  235. self._graph_saved = True
  236. self._save_ckpt(cb_params)
  237. def end(self, run_context):
  238. """
  239. Save the last checkpoint after training finished.
  240. Args:
  241. run_context (RunContext): Context of the train running.
  242. """
  243. cb_params = run_context.original_args()
  244. _to_save_last_ckpt = True
  245. self._save_ckpt(cb_params, _to_save_last_ckpt)
  246. thread_list = threading.enumerate()
  247. if len(thread_list) > 1:
  248. for thread in thread_list:
  249. if thread.getName() == "asyn_save_ckpt":
  250. thread.join()
  251. from mindspore.parallel._cell_wrapper import destroy_allgather_cell
  252. destroy_allgather_cell()
  253. def _check_save_ckpt(self, cb_params, force_to_save):
  254. """Check whether save checkpoint files or not."""
  255. if self._config.save_checkpoint_steps and self._config.save_checkpoint_steps > 0:
  256. if cb_params.cur_step_num >= self._last_triggered_step + self._config.save_checkpoint_steps \
  257. or force_to_save is True:
  258. return True
  259. elif self._config.save_checkpoint_seconds and self._config.save_checkpoint_seconds > 0:
  260. self._cur_time = time.time()
  261. if (self._cur_time - self._last_time) > self._config.save_checkpoint_seconds or force_to_save is True:
  262. self._last_time = self._cur_time
  263. return True
  264. return False
  265. def _save_ckpt(self, cb_params, force_to_save=False):
  266. """Save checkpoint files."""
  267. if cb_params.cur_step_num == self._last_triggered_step:
  268. return
  269. save_ckpt = self._check_save_ckpt(cb_params, force_to_save)
  270. step_num_in_epoch = int((cb_params.cur_step_num - 1) % cb_params.batch_num + 1)
  271. if save_ckpt:
  272. cur_ckpoint_file = self._prefix + "-" + str(cb_params.cur_epoch_num) + "_" \
  273. + str(step_num_in_epoch) + ".ckpt"
  274. # update checkpoint file list.
  275. self._manager.update_ckpoint_filelist(self._directory, self._prefix)
  276. # keep checkpoint files number equal max number.
  277. if self._config.keep_checkpoint_max and 0 < self._config.keep_checkpoint_max <= self._manager.ckpoint_num:
  278. self._manager.remove_oldest_ckpoint_file()
  279. elif self._config.keep_checkpoint_per_n_minutes and self._config.keep_checkpoint_per_n_minutes > 0:
  280. self._cur_time_for_keep = time.time()
  281. if (self._cur_time_for_keep - self._last_time_for_keep) \
  282. < self._config.keep_checkpoint_per_n_minutes * 60:
  283. self._manager.keep_one_ckpoint_per_minutes(self._config.keep_checkpoint_per_n_minutes,
  284. self._cur_time_for_keep)
  285. # generate the new checkpoint file and rename it.
  286. global _save_dir
  287. _save_dir = self._directory
  288. cur_file = os.path.join(self._directory, cur_ckpoint_file)
  289. self._last_time_for_keep = time.time()
  290. self._last_triggered_step = cb_params.cur_step_num
  291. if context.get_context("enable_ge"):
  292. set_cur_net(cb_params.train_network)
  293. cb_params.train_network.exec_checkpoint_graph()
  294. network = self._config.saved_network if self._config.saved_network is not None else cb_params.train_network
  295. save_checkpoint(network, cur_file, self._config.integrated_save,
  296. self._config.async_save)
  297. self._latest_ckpt_file_name = cur_file
  298. @property
  299. def latest_ckpt_file_name(self):
  300. """Return the latest checkpoint path and file name."""
  301. return self._latest_ckpt_file_name
  302. class CheckpointManager:
  303. """Manage checkpoint files according to train_config of checkpoint."""
  304. def __init__(self):
  305. self._ckpoint_filelist = []
  306. @property
  307. def ckpoint_filelist(self):
  308. """Get all the related checkpoint files managed here."""
  309. return self._ckpoint_filelist
  310. @property
  311. def ckpoint_num(self):
  312. """Get the number of the related checkpoint files managed here."""
  313. return len(self._ckpoint_filelist)
  314. def update_ckpoint_filelist(self, directory, prefix):
  315. """Update the checkpoint file list."""
  316. self._ckpoint_filelist = []
  317. files = os.listdir(directory)
  318. for filename in files:
  319. if os.path.splitext(filename)[-1] == ".ckpt" and filename.startswith(prefix):
  320. mid_name = filename[len(prefix):-5]
  321. flag = True
  322. for char in mid_name:
  323. if char.isalpha():
  324. flag = False
  325. if flag:
  326. self._ckpoint_filelist.append(directory + '/' + filename)
  327. def remove_ckpoint_file(self, file_name):
  328. """Remove the specified checkpoint file from this checkpoint manager and also from the directory."""
  329. try:
  330. os.chmod(file_name, stat.S_IWRITE)
  331. os.remove(file_name)
  332. self._ckpoint_filelist.remove(file_name)
  333. except OSError:
  334. logger.warning("OSError, failed to remove the older ckpt file %s.", file_name)
  335. except ValueError:
  336. logger.warning("ValueError, failed to remove the older ckpt file %s.", file_name)
  337. def remove_oldest_ckpoint_file(self):
  338. """Remove the oldest checkpoint file from this checkpoint manager and also from the directory."""
  339. ckpoint_files = sorted(self._ckpoint_filelist, key=os.path.getmtime)
  340. self.remove_ckpoint_file(ckpoint_files[0])
  341. def keep_one_ckpoint_per_minutes(self, minutes, cur_time):
  342. """Only keep the latest one ckpt file per minutes, remove other files generated in [last_time, cur_time]."""
  343. movs = []
  344. oldest_file = ''
  345. oldest_time = cur_time
  346. for ck_file in self._ckpoint_filelist:
  347. modify_time = os.path.getmtime(ck_file)
  348. if cur_time - modify_time < 60 * minutes:
  349. movs.append(ck_file)
  350. if modify_time < oldest_time:
  351. oldest_time = modify_time
  352. oldest_file = ck_file
  353. for mv_file in movs:
  354. if mv_file == oldest_file:
  355. continue
  356. self.remove_ckpoint_file(mv_file)