
run_pretrain.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
#################pre_train bert example on zh-wiki########################
python run_pretrain.py
"""
import os
import argparse
import mindspore.communication.management as D
from mindspore import context
from mindspore.train.model import Model
from mindspore.train.parallel_utils import ParallelMode
from mindspore.nn.wrap.loss_scale import DynamicLossScaleUpdateCell
from mindspore.train.callback import Callback, ModelCheckpoint, CheckpointConfig, TimeMonitor
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.nn.optim import Lamb, Momentum, AdamWeightDecayDynamicLR
from src import BertNetworkWithLoss, BertTrainOneStepCell, BertTrainOneStepWithLossScaleCell
from src.dataset import create_bert_dataset
from src.config import cfg, bert_net_cfg

_current_dir = os.path.dirname(os.path.realpath(__file__))
class LossCallBack(Callback):
    """
    Monitor the loss during training.

    If the loss is NAN or INF, terminate training.

    Note:
        If per_print_times is 0, the loss is not printed.

    Args:
        per_print_times (int): Print the loss every per_print_times steps. Default: 1.
    """
    def __init__(self, per_print_times=1):
        super(LossCallBack, self).__init__()
        if not isinstance(per_print_times, int) or per_print_times < 0:
            raise ValueError("per_print_times must be an int and >= 0")
        self._per_print_times = per_print_times

    def step_end(self, run_context):
        cb_params = run_context.original_args()
        # Print at the configured interval; per_print_times == 0 disables printing.
        if self._per_print_times != 0 and cb_params.cur_step_num % self._per_print_times == 0:
            print("epoch: {}, step: {}, outputs are {}".format(cb_params.cur_epoch_num, cb_params.cur_step_num,
                                                               str(cb_params.net_outputs)))
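# The docstring above calls for termination on NaN/Inf, while step_end only
# prints the loss. A minimal sketch of such a check, assuming net_outputs is a
# scalar loss Tensor (it is a tuple when loss scaling is enabled) and using
# MindSpore's RunContext.request_stop() to end training:
#
#     import math
#     loss = float(cb_params.net_outputs.asnumpy())
#     if math.isnan(loss) or math.isinf(loss):
#         run_context.request_stop()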
def run_pretrain():
    """pre-train bert_clue"""
    parser = argparse.ArgumentParser(description='bert pre_training')
    parser.add_argument("--distribute", type=str, default="false", help="Run distributed training, default is false.")
    parser.add_argument("--epoch_size", type=int, default=1, help="Epoch size, default is 1.")
    parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.")
    parser.add_argument("--device_num", type=int, default=1, help="Number of devices to use, default is 1.")
    parser.add_argument("--enable_save_ckpt", type=str, default="true", help="Enable save checkpoint, default is true.")
    parser.add_argument("--enable_lossscale", type=str, default="true", help="Use loss scaling or not, default is true.")
    parser.add_argument("--do_shuffle", type=str, default="true", help="Enable shuffle for dataset, default is true.")
    parser.add_argument("--enable_data_sink", type=str, default="true", help="Enable data sink, default is true.")
    parser.add_argument("--data_sink_steps", type=int, default=1, help="Sink steps for each epoch, default is 1.")
    parser.add_argument("--checkpoint_path", type=str, default="", help="Checkpoint file path")
    parser.add_argument("--save_checkpoint_steps", type=int, default=1000, help="Save checkpoint steps, "
                                                                                "default is 1000.")
    parser.add_argument("--save_checkpoint_num", type=int, default=1, help="Save checkpoint numbers, default is 1.")
    parser.add_argument("--data_dir", type=str, default="", help="Data path, it is better to use absolute path")
    parser.add_argument("--schema_dir", type=str, default="", help="Schema path, it is better to use absolute path")

    args_opt = parser.parse_args()
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=args_opt.device_id)
    context.set_context(reserve_class_name_in_scope=False)
    if args_opt.distribute == "true":
        # Data-parallel training: one process per device, gradients averaged across devices.
        device_num = args_opt.device_num
        context.reset_auto_parallel_context()
        context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, mirror_mean=True,
                                          device_num=device_num)
        from mindspore.parallel._auto_parallel_context import auto_parallel_context
        # Fuse the gradient all-reduce into a few large communication ops;
        # the split indices depend on the depth of the network.
        if bert_net_cfg.num_hidden_layers == 12:
            auto_parallel_context().set_all_reduce_fusion_split_indices([28, 55, 82, 109, 136, 163, 190, 205])
        elif bert_net_cfg.num_hidden_layers == 24:
            auto_parallel_context().set_all_reduce_fusion_split_indices([38, 93, 148, 203, 258, 313, 368, 397])
        D.init()
        rank = args_opt.device_id % device_num
    else:
        rank = 0
        device_num = 1

    ds, new_repeat_count = create_bert_dataset(args_opt.epoch_size, device_num, rank, args_opt.do_shuffle,
                                               args_opt.enable_data_sink, args_opt.data_sink_steps,
                                               args_opt.data_dir, args_opt.schema_dir)
    netwithloss = BertNetworkWithLoss(bert_net_cfg, True)

    # decay_steps covers the whole run (steps per epoch times the repeat count),
    # so the learning-rate schedule decays over all of training.
    if cfg.optimizer == 'Lamb':
        optimizer = Lamb(netwithloss.trainable_params(), decay_steps=ds.get_dataset_size() * ds.get_repeat_count(),
                         start_learning_rate=cfg.Lamb.start_learning_rate,
                         end_learning_rate=cfg.Lamb.end_learning_rate,
                         power=cfg.Lamb.power, warmup_steps=cfg.Lamb.warmup_steps, weight_decay=cfg.Lamb.weight_decay,
                         eps=cfg.Lamb.eps)
    elif cfg.optimizer == 'Momentum':
        optimizer = Momentum(netwithloss.trainable_params(), learning_rate=cfg.Momentum.learning_rate,
                             momentum=cfg.Momentum.momentum)
    elif cfg.optimizer == 'AdamWeightDecayDynamicLR':
        optimizer = AdamWeightDecayDynamicLR(netwithloss.trainable_params(),
                                             decay_steps=ds.get_dataset_size() * ds.get_repeat_count(),
                                             learning_rate=cfg.AdamWeightDecayDynamicLR.learning_rate,
                                             end_learning_rate=cfg.AdamWeightDecayDynamicLR.end_learning_rate,
                                             power=cfg.AdamWeightDecayDynamicLR.power,
                                             weight_decay=cfg.AdamWeightDecayDynamicLR.weight_decay,
                                             eps=cfg.AdamWeightDecayDynamicLR.eps,
                                             warmup_steps=cfg.AdamWeightDecayDynamicLR.warmup_steps)
    else:
        raise ValueError("Unsupported optimizer {}, only [Lamb, Momentum, AdamWeightDecayDynamicLR] "
                         "are supported".format(cfg.optimizer))
    callback = [TimeMonitor(ds.get_dataset_size()), LossCallBack()]
    if args_opt.enable_save_ckpt == "true":
        config_ck = CheckpointConfig(save_checkpoint_steps=args_opt.save_checkpoint_steps,
                                     keep_checkpoint_max=args_opt.save_checkpoint_num)
        ckpoint_cb = ModelCheckpoint(prefix='checkpoint_bert', config=config_ck)
        callback.append(ckpoint_cb)

    if args_opt.checkpoint_path:
        param_dict = load_checkpoint(args_opt.checkpoint_path)
        load_param_into_net(netwithloss, param_dict)

    if args_opt.enable_lossscale == "true":
        # Dynamic loss scaling for mixed precision: the scale grows by scale_factor
        # after scale_window overflow-free steps and shrinks again on overflow.
        update_cell = DynamicLossScaleUpdateCell(loss_scale_value=cfg.loss_scale_value,
                                                 scale_factor=cfg.scale_factor,
                                                 scale_window=cfg.scale_window)
        netwithgrads = BertTrainOneStepWithLossScaleCell(netwithloss, optimizer=optimizer,
                                                         scale_update_cell=update_cell)
    else:
        netwithgrads = BertTrainOneStepCell(netwithloss, optimizer=optimizer)

    model = Model(netwithgrads)
    model.train(new_repeat_count, ds, callbacks=callback, dataset_sink_mode=(args_opt.enable_data_sink == "true"))
if __name__ == '__main__':
    run_pretrain()
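# A multi-card run on Ascend is typically driven by a shell wrapper that sets up
# the HCCL rank table (e.g. exporting RANK_TABLE_FILE) and starts one process per
# device; a hypothetical per-process invocation, with placeholder paths:
#
#     python run_pretrain.py --distribute=true --device_num=8 --device_id=$DEVICE_ID \
#         --data_dir=/path/to/zh-wiki/mindrecord --schema_dir=/path/to/schema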