
run_pretrain.py

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
#################pre_train bert example on zh-wiki########################
python run_pretrain.py
"""

import os
import argparse
import mindspore.communication.management as D
from mindspore.communication.management import get_rank
import mindspore.common.dtype as mstype
from mindspore import context
from mindspore.train.model import Model
from mindspore.context import ParallelMode
from mindspore.nn.wrap.loss_scale import DynamicLossScaleUpdateCell
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.nn.optim import Lamb, Momentum, AdamWeightDecay
from mindspore import log as logger
from mindspore.common import set_seed
from src import BertNetworkWithLoss, BertTrainOneStepCell, BertTrainOneStepWithLossScaleCell, \
    BertTrainAccumulateStepsWithLossScaleCell
from src.dataset import create_bert_dataset
from src.config import cfg, bert_net_cfg
from src.utils import LossCallBack, BertLearningRate

_current_dir = os.path.dirname(os.path.realpath(__file__))

def _set_bert_all_reduce_split():
    """Set the BERT all_reduce fusion split; num_hidden_layers of 12 and 24 are supported."""
    if bert_net_cfg.num_hidden_layers == 12:
        if bert_net_cfg.use_relative_positions:
            context.set_auto_parallel_context(all_reduce_fusion_config=[29, 58, 87, 116, 145, 174, 203, 217])
        else:
            context.set_auto_parallel_context(all_reduce_fusion_config=[28, 55, 82, 109, 136, 163, 190, 205])
    elif bert_net_cfg.num_hidden_layers == 24:
        if bert_net_cfg.use_relative_positions:
            context.set_auto_parallel_context(all_reduce_fusion_config=[30, 90, 150, 210, 270, 330, 390, 421])
        else:
            context.set_auto_parallel_context(all_reduce_fusion_config=[38, 93, 148, 203, 258, 313, 368, 397])
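
# Note on the fusion indices above: all_reduce_fusion_config lists the
# parameter indices at which the data-parallel gradient all-reduce is split
# into fused groups, letting communication for early layers overlap with
# back-propagation of later ones; the index lists are presumably tuned for
# these two BERT depths.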

def _get_optimizer(args_opt, network):
    """Get the BERT optimizer; Lamb, Momentum and AdamWeightDecay are supported."""
    if cfg.optimizer == 'Lamb':
        lr_schedule = BertLearningRate(learning_rate=cfg.Lamb.learning_rate,
                                       end_learning_rate=cfg.Lamb.end_learning_rate,
                                       warmup_steps=cfg.Lamb.warmup_steps,
                                       decay_steps=args_opt.train_steps,
                                       power=cfg.Lamb.power)
        params = network.trainable_params()
        decay_params = list(filter(cfg.Lamb.decay_filter, params))
        other_params = list(filter(lambda x: not cfg.Lamb.decay_filter(x), params))
        group_params = [{'params': decay_params, 'weight_decay': cfg.Lamb.weight_decay},
                        {'params': other_params},
                        {'order_params': params}]
        optimizer = Lamb(group_params, learning_rate=lr_schedule, eps=cfg.Lamb.eps)
    elif cfg.optimizer == 'Momentum':
        optimizer = Momentum(network.trainable_params(), learning_rate=cfg.Momentum.learning_rate,
                             momentum=cfg.Momentum.momentum)
    elif cfg.optimizer == 'AdamWeightDecay':
        lr_schedule = BertLearningRate(learning_rate=cfg.AdamWeightDecay.learning_rate,
                                       end_learning_rate=cfg.AdamWeightDecay.end_learning_rate,
                                       warmup_steps=cfg.AdamWeightDecay.warmup_steps,
                                       decay_steps=args_opt.train_steps,
                                       power=cfg.AdamWeightDecay.power)
        params = network.trainable_params()
        decay_params = list(filter(cfg.AdamWeightDecay.decay_filter, params))
        other_params = list(filter(lambda x: not cfg.AdamWeightDecay.decay_filter(x), params))
        group_params = [{'params': decay_params, 'weight_decay': cfg.AdamWeightDecay.weight_decay},
                        {'params': other_params, 'weight_decay': 0.0},
                        {'order_params': params}]
        optimizer = AdamWeightDecay(group_params, learning_rate=lr_schedule, eps=cfg.AdamWeightDecay.eps)
    else:
        raise ValueError("Unsupported optimizer {}; only [Lamb, Momentum, AdamWeightDecay] are supported."
                         .format(cfg.optimizer))
    return optimizer
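
# A minimal sketch of the decay_filter referenced above, assuming the usual
# convention of excluding LayerNorm and bias parameters from weight decay
# (hypothetical; the real filter is defined in src/config.py):
#
#   decay_filter = lambda x: 'layernorm' not in x.name.lower() \
#                            and 'bias' not in x.name.lower()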

def run_pretrain():
    """pre-train bert_clue"""
    parser = argparse.ArgumentParser(description='bert pre_training')
    parser.add_argument('--device_target', type=str, default='Ascend', choices=['Ascend', 'GPU'],
                        help='device where the code will be implemented. (Default: Ascend)')
    parser.add_argument("--distribute", type=str, default="false", choices=["true", "false"],
                        help="Run distributed training, default is false.")
    parser.add_argument("--epoch_size", type=int, default=1, help="Epoch size, default is 1.")
    parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.")
    parser.add_argument("--device_num", type=int, default=1, help="Number of devices to use, default is 1.")
    parser.add_argument("--enable_save_ckpt", type=str, default="true", choices=["true", "false"],
                        help="Enable save checkpoint, default is true.")
    parser.add_argument("--enable_lossscale", type=str, default="true", choices=["true", "false"],
                        help="Use loss scale or not, default is true.")
    parser.add_argument("--do_shuffle", type=str, default="true", choices=["true", "false"],
                        help="Enable shuffle for dataset, default is true.")
    parser.add_argument("--enable_data_sink", type=str, default="true", choices=["true", "false"],
                        help="Enable data sink, default is true.")
    parser.add_argument("--data_sink_steps", type=int, default=1, help="Sink steps for each epoch, default is 1.")
    parser.add_argument("--accumulation_steps", type=int, default=1,
                        help="Accumulate gradients N times before weight update, default is 1.")
    parser.add_argument("--save_checkpoint_path", type=str, default="", help="Save checkpoint path")
    parser.add_argument("--load_checkpoint_path", type=str, default="", help="Load checkpoint file path")
    parser.add_argument("--save_checkpoint_steps", type=int, default=1000,
                        help="Save checkpoint steps, default is 1000.")
    parser.add_argument("--train_steps", type=int, default=-1,
                        help="Training steps, default is -1, meaning run all steps according to epoch number.")
    parser.add_argument("--save_checkpoint_num", type=int, default=1, help="Save checkpoint numbers, default is 1.")
    parser.add_argument("--data_dir", type=str, default="", help="Data path, it is better to use absolute path")
    parser.add_argument("--schema_dir", type=str, default="", help="Schema path, it is better to use absolute path")
    args_opt = parser.parse_args()
    context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target, device_id=args_opt.device_id)
    context.set_context(reserve_class_name_in_scope=False)
    ckpt_save_dir = args_opt.save_checkpoint_path
    if args_opt.distribute == "true":
        if args_opt.device_target == 'Ascend':
            D.init()
            device_num = args_opt.device_num
            rank = args_opt.device_id % device_num
        else:
            D.init()
            device_num = D.get_group_size()
            rank = D.get_rank()
        ckpt_save_dir = args_opt.save_checkpoint_path + 'ckpt_' + str(get_rank()) + '/'
        context.reset_auto_parallel_context()
        context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,
                                          device_num=device_num)
        _set_bert_all_reduce_split()
    else:
        rank = 0
        device_num = 1

    if args_opt.device_target == 'GPU' and bert_net_cfg.compute_type != mstype.float32:
        logger.warning('GPU only supports fp32 temporarily, running with fp32.')
        bert_net_cfg.compute_type = mstype.float32

    if args_opt.accumulation_steps > 1:
        logger.info("accumulation steps: {}".format(args_opt.accumulation_steps))
        logger.info("global batch size: {}".format(bert_net_cfg.batch_size * args_opt.accumulation_steps))
        if args_opt.enable_data_sink == "true":
            args_opt.data_sink_steps *= args_opt.accumulation_steps
            logger.info("data sink steps: {}".format(args_opt.data_sink_steps))
        if args_opt.enable_save_ckpt == "true":
            args_opt.save_checkpoint_steps *= args_opt.accumulation_steps
            logger.info("save checkpoint steps: {}".format(args_opt.save_checkpoint_steps))

    ds = create_bert_dataset(device_num, rank, args_opt.do_shuffle, args_opt.data_dir, args_opt.schema_dir)
    net_with_loss = BertNetworkWithLoss(bert_net_cfg, True)

    new_repeat_count = args_opt.epoch_size * ds.get_dataset_size() // args_opt.data_sink_steps
    if args_opt.train_steps > 0:
        train_steps = args_opt.train_steps * args_opt.accumulation_steps
        new_repeat_count = min(new_repeat_count, train_steps // args_opt.data_sink_steps)
    else:
        args_opt.train_steps = args_opt.epoch_size * ds.get_dataset_size() // args_opt.accumulation_steps
        logger.info("train steps: {}".format(args_opt.train_steps))

    optimizer = _get_optimizer(args_opt, net_with_loss)
    callback = [TimeMonitor(args_opt.data_sink_steps), LossCallBack(ds.get_dataset_size())]
    if args_opt.enable_save_ckpt == "true" and args_opt.device_id % min(8, device_num) == 0:
        config_ck = CheckpointConfig(save_checkpoint_steps=args_opt.save_checkpoint_steps,
                                     keep_checkpoint_max=args_opt.save_checkpoint_num)
        ckpoint_cb = ModelCheckpoint(prefix='checkpoint_bert',
                                     directory=None if ckpt_save_dir == "" else ckpt_save_dir,
                                     config=config_ck)
        callback.append(ckpoint_cb)

    if args_opt.load_checkpoint_path:
        param_dict = load_checkpoint(args_opt.load_checkpoint_path)
        load_param_into_net(net_with_loss, param_dict)

    if args_opt.enable_lossscale == "true":
        update_cell = DynamicLossScaleUpdateCell(loss_scale_value=cfg.loss_scale_value,
                                                 scale_factor=cfg.scale_factor,
                                                 scale_window=cfg.scale_window)
        if args_opt.accumulation_steps <= 1:
            net_with_grads = BertTrainOneStepWithLossScaleCell(net_with_loss, optimizer=optimizer,
                                                               scale_update_cell=update_cell)
        else:
            accumulation_steps = args_opt.accumulation_steps
            net_with_grads = BertTrainAccumulateStepsWithLossScaleCell(net_with_loss, optimizer=optimizer,
                                                                       scale_update_cell=update_cell,
                                                                       accumulation_steps=accumulation_steps,
                                                                       enable_global_norm=cfg.enable_global_norm)
    else:
        net_with_grads = BertTrainOneStepCell(net_with_loss, optimizer=optimizer)

    model = Model(net_with_grads)
    model.train(new_repeat_count, ds, callbacks=callback,
                dataset_sink_mode=(args_opt.enable_data_sink == "true"), sink_size=args_opt.data_sink_steps)

if __name__ == '__main__':
    set_seed(0)
    run_pretrain()
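
# Distributed-launch note (an assumption about the surrounding repo layout):
# on Ascend, D.init() expects the HCCL environment to be prepared per process,
# e.g. RANK_TABLE_FILE pointing at a rank-table json plus DEVICE_ID/RANK_ID;
# a helper such as scripts/run_distribute_pretrain.sh, if present, sets these.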