finetune.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''
Bert finetune script.
'''
import os
import argparse

import mindspore.common.dtype as mstype
import mindspore.dataset as de
import mindspore.dataset.transforms.c_transforms as C
from mindspore import context
from mindspore import log as logger
from mindspore.nn.wrap.loss_scale import DynamicLossScaleUpdateCell
from mindspore.nn.optim import AdamWeightDecayDynamicLR, Lamb, Momentum
from mindspore.train.model import Model
from mindspore.train.callback import Callback, CheckpointConfig, ModelCheckpoint
from mindspore.train.serialization import load_checkpoint, load_param_into_net

from src.utils import BertFinetuneCell, BertCLS, BertNER, BertSquad, BertSquadCell
from src.finetune_config import cfg, bert_net_cfg, tag_to_index


class LossCallBack(Callback):
    '''
    Monitor the loss in training.
    If the loss is NAN or INF, terminate training.
    Note:
        If per_print_times is 0, do not print loss.
    Args:
        per_print_times (int): Print the loss every given number of steps. Default: 1.
    '''
    def __init__(self, per_print_times=1):
        super(LossCallBack, self).__init__()
        if not isinstance(per_print_times, int) or per_print_times < 0:
            raise ValueError("per_print_times must be an int and >= 0.")
        self._per_print_times = per_print_times

    def step_end(self, run_context):
        # Append the current epoch, step and loss outputs to ./loss.log.
        cb_params = run_context.original_args()
        with open("./loss.log", "a+") as f:
            f.write("epoch: {}, step: {}, outputs are {}".format(cb_params.cur_epoch_num, cb_params.cur_step_num,
                                                                 str(cb_params.net_outputs)))
            f.write("\n")
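
# Usage sketch: the callback is passed to model.train() below, e.g.
#     model.train(epoch, dataset, callbacks=[LossCallBack(per_print_times=100)])
# (per_print_times=100 is an illustrative value.) Note that step_end writes to
# loss.log on every step; _per_print_times is stored but not consulted when logging.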


def get_dataset(batch_size=1, repeat_count=1, distribute_file=''):
    '''
    Get the dataset for classification and NER finetuning.
    '''
    # distribute_file is accepted for interface parity but is unused here.
    ds = de.TFRecordDataset([cfg.data_file], cfg.schema_file, columns_list=["input_ids", "input_mask",
                                                                            "segment_ids", "label_ids"])
    type_cast_op = C.TypeCast(mstype.int32)
    ds = ds.map(input_columns="segment_ids", operations=type_cast_op)
    ds = ds.map(input_columns="input_mask", operations=type_cast_op)
    ds = ds.map(input_columns="input_ids", operations=type_cast_op)
    ds = ds.map(input_columns="label_ids", operations=type_cast_op)
    ds = ds.repeat(repeat_count)
    # apply shuffle operation
    buffer_size = 960
    ds = ds.shuffle(buffer_size=buffer_size)
    # apply batch operations
    ds = ds.batch(batch_size, drop_remainder=True)
    return ds
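
# A minimal usage sketch (illustrative values; cfg.data_file and cfg.schema_file
# must point at real TFRecord and schema files):
#     ds = get_dataset(batch_size=32, repeat_count=1)
#     print(ds.get_dataset_size())  # number of batches per epoch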


def get_squad_dataset(batch_size=1, repeat_count=1, distribute_file=''):
    '''
    Get the SQuAD dataset for finetuning.
    '''
    ds = de.TFRecordDataset([cfg.data_file], cfg.schema_file, columns_list=["input_ids", "input_mask", "segment_ids",
                                                                            "start_positions", "end_positions",
                                                                            "unique_ids", "is_impossible"])
    type_cast_op = C.TypeCast(mstype.int32)
    ds = ds.map(input_columns="segment_ids", operations=type_cast_op)
    ds = ds.map(input_columns="input_ids", operations=type_cast_op)
    ds = ds.map(input_columns="input_mask", operations=type_cast_op)
    ds = ds.map(input_columns="start_positions", operations=type_cast_op)
    ds = ds.map(input_columns="end_positions", operations=type_cast_op)
    ds = ds.repeat(repeat_count)
    buffer_size = 960
    ds = ds.shuffle(buffer_size=buffer_size)
    ds = ds.batch(batch_size, drop_remainder=True)
    return ds
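
# Note: "unique_ids" and "is_impossible" are read but not cast above; they keep
# their on-disk dtypes from the TFRecord schema.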


def test_train():
    '''
    Finetune BERT on the task selected by cfg.task.
    '''
    target = args_opt.device_target
    if target == "Ascend":
        # DEVICE_ID must be set in the environment when targeting Ascend.
        devid = int(os.getenv('DEVICE_ID'))
        context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=devid)
    elif target == "GPU":
        context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
        if bert_net_cfg.compute_type != mstype.float32:
            logger.warning('GPU only supports fp32 for now; running with fp32.')
            bert_net_cfg.compute_type = mstype.float32
    else:
        raise Exception("Unsupported device target: only GPU and Ascend are supported.")
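    # Both branches select GRAPH_MODE, so the network is compiled into a static
    # computation graph before execution rather than run op by op.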
    # BertCLSTrain for classification
    # BertNERTrain for sequence labeling
    if cfg.task == 'NER':
        if cfg.use_crf:
            netwithloss = BertNER(bert_net_cfg, True, num_labels=len(tag_to_index), use_crf=True,
                                  tag_to_index=tag_to_index, dropout_prob=0.1)
        else:
            netwithloss = BertNER(bert_net_cfg, True, num_labels=cfg.num_labels, dropout_prob=0.1)
    elif cfg.task == 'SQUAD':
        netwithloss = BertSquad(bert_net_cfg, True, 2, dropout_prob=0.1)
    else:
        netwithloss = BertCLS(bert_net_cfg, True, num_labels=cfg.num_labels, dropout_prob=0.1)
    if cfg.task == 'SQUAD':
        dataset = get_squad_dataset(bert_net_cfg.batch_size, cfg.epoch_num)
    else:
        dataset = get_dataset(bert_net_cfg.batch_size, cfg.epoch_num)
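    # The dataset is repeated cfg.epoch_num times via repeat_count above, in line
    # with the epoch count passed to model.train() at the end of this function.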
    # optimizer
    steps_per_epoch = dataset.get_dataset_size()
    if cfg.optimizer == 'AdamWeightDecayDynamicLR':
        optimizer = AdamWeightDecayDynamicLR(netwithloss.trainable_params(),
                                             decay_steps=steps_per_epoch * cfg.epoch_num,
                                             learning_rate=cfg.AdamWeightDecayDynamicLR.learning_rate,
                                             end_learning_rate=cfg.AdamWeightDecayDynamicLR.end_learning_rate,
                                             power=cfg.AdamWeightDecayDynamicLR.power,
                                             warmup_steps=int(steps_per_epoch * cfg.epoch_num * 0.1),
                                             weight_decay=cfg.AdamWeightDecayDynamicLR.weight_decay,
                                             eps=cfg.AdamWeightDecayDynamicLR.eps)
    elif cfg.optimizer == 'Lamb':
        optimizer = Lamb(netwithloss.trainable_params(), decay_steps=steps_per_epoch * cfg.epoch_num,
                         start_learning_rate=cfg.Lamb.start_learning_rate, end_learning_rate=cfg.Lamb.end_learning_rate,
                         power=cfg.Lamb.power, weight_decay=cfg.Lamb.weight_decay,
                         warmup_steps=int(steps_per_epoch * cfg.epoch_num * 0.1), decay_filter=cfg.Lamb.decay_filter)
    elif cfg.optimizer == 'Momentum':
        optimizer = Momentum(netwithloss.trainable_params(), learning_rate=cfg.Momentum.learning_rate,
                             momentum=cfg.Momentum.momentum)
    else:
        raise Exception("Optimizer not supported.")
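    # Both decaying optimizers warm up over the first 10% of the total steps
    # (steps_per_epoch * cfg.epoch_num * 0.1) before the decay schedule applies.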
    # checkpoint saving: one checkpoint per epoch, keeping only the latest
    ckpt_config = CheckpointConfig(save_checkpoint_steps=steps_per_epoch, keep_checkpoint_max=1)
    ckpoint_cb = ModelCheckpoint(prefix=cfg.ckpt_prefix, directory=cfg.ckpt_dir, config=ckpt_config)
    # load the pretrained checkpoint into the network
    param_dict = load_checkpoint(cfg.pre_training_ckpt)
    load_param_into_net(netwithloss, param_dict)
    update_cell = DynamicLossScaleUpdateCell(loss_scale_value=2**32, scale_factor=2, scale_window=1000)
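    # Dynamic loss scaling: start from 2**32, halve the scale when an overflow is
    # detected, and double it again after 1000 consecutive overflow-free steps.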
    if cfg.task == 'SQUAD':
        netwithgrads = BertSquadCell(netwithloss, optimizer=optimizer, scale_update_cell=update_cell)
    else:
        netwithgrads = BertFinetuneCell(netwithloss, optimizer=optimizer, scale_update_cell=update_cell)
    model = Model(netwithgrads)
    model.train(cfg.epoch_num, dataset, callbacks=[LossCallBack(), ckpoint_cb])
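

# Command-line arguments are parsed at module import time so that test_train()
# can read the global args_opt.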
parser = argparse.ArgumentParser(description='Bert finetune')
parser.add_argument('--device_target', type=str, default='Ascend', help='Device target')
args_opt = parser.parse_args()


if __name__ == "__main__":
    test_train()
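
# Example invocation (illustrative; the data file, schema, and pretrained
# checkpoint come from src/finetune_config.py and must exist on disk):
#     DEVICE_ID=0 python finetune.py --device_target=Ascend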