
train.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
  15. """
  16. NEZHA (NEural contextualiZed representation for CHinese lAnguage understanding) is the Chinese pretrained language
  17. model currently based on BERT developed by Huawei.
  18. 1. Prepare data
  19. Following the data preparation as in BERT, run command as below to get dataset for training:
  20. python ./create_pretraining_data.py \
  21. --input_file=./sample_text.txt \
  22. --output_file=./examples.tfrecord \
  23. --vocab_file=./your/path/vocab.txt \
  24. --do_lower_case=True \
  25. --max_seq_length=128 \
  26. --max_predictions_per_seq=20 \
  27. --masked_lm_prob=0.15 \
  28. --random_seed=12345 \
  29. --dupe_factor=5
  30. 2. Pretrain
  31. First, prepare the distributed training environment, then adjust configurations in config.py, finally run train.py.
  32. """
import os
import numpy as np
from config import bert_train_cfg, bert_net_cfg
import mindspore.dataset.engine.datasets as de
import mindspore.dataset.transforms.c_transforms as C
import mindspore.common.dtype as mstype
from mindspore import context
from mindspore.common.tensor import Tensor
from mindspore.train.model import Model
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor
from mindspore.model_zoo.Bert_NEZHA import BertNetworkWithLoss, BertTrainOneStepCell
from mindspore.nn.optim import Lamb

_current_dir = os.path.dirname(os.path.realpath(__file__))
def create_train_dataset(batch_size):
    """create train dataset"""
    # apply repeat operations
    repeat_count = bert_train_cfg.epoch_size
    ds = de.StorageDataset([bert_train_cfg.DATA_DIR], bert_train_cfg.SCHEMA_DIR,
                           columns_list=["input_ids", "input_mask", "segment_ids", "next_sentence_labels",
                                         "masked_lm_positions", "masked_lm_ids", "masked_lm_weights"])
    # cast the id/label columns to int32, which is what the network expects
    type_cast_op = C.TypeCast(mstype.int32)
    ds = ds.map(input_columns="masked_lm_ids", operations=type_cast_op)
    ds = ds.map(input_columns="masked_lm_positions", operations=type_cast_op)
    ds = ds.map(input_columns="next_sentence_labels", operations=type_cast_op)
    ds = ds.map(input_columns="segment_ids", operations=type_cast_op)
    ds = ds.map(input_columns="input_mask", operations=type_cast_op)
    ds = ds.map(input_columns="input_ids", operations=type_cast_op)
    # apply batch operations
    ds = ds.batch(batch_size, drop_remainder=True)
    ds = ds.repeat(repeat_count)
    return ds
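
# A hedged usage sketch: build the pipeline and inspect one batch. The column
# shapes follow from the create_pretraining_data.py flags in the module
# docstring and are assumptions, not guarantees:
#
#   ds = create_train_dataset(batch_size=32)
#   for item in ds.create_dict_iterator():
#       print(item["input_ids"].shape)   # e.g. (32, 128)
#       break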
def weight_variable(shape):
    """weight variable"""
    # fixed seed for reproducible initialization; values are uniform in [-0.1, 0.1)
    np.random.seed(1)
    weights = np.random.uniform(-0.1, 0.1, size=shape).astype(np.float32)
    return Tensor(weights)
def train_bert():
    """train bert"""
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend",
                        enable_task_sink=True, enable_loop_sink=True, enable_mem_reuse=True)
    ds = create_train_dataset(bert_net_cfg.batch_size)
    netwithloss = BertNetworkWithLoss(bert_net_cfg, True)
    # Lamb with linear warmup followed by polynomial decay; decay_filter=lambda x: False
    # disables weight decay for all parameters
    optimizer = Lamb(netwithloss.trainable_params(), decay_steps=bert_train_cfg.decay_steps,
                     start_learning_rate=bert_train_cfg.start_learning_rate,
                     end_learning_rate=bert_train_cfg.end_learning_rate, power=bert_train_cfg.power,
                     warmup_steps=bert_train_cfg.num_warmup_steps, decay_filter=lambda x: False)
    netwithgrads = BertTrainOneStepCell(netwithloss, optimizer=optimizer)
    netwithgrads.set_train(True)
    model = Model(netwithgrads)
    config_ck = CheckpointConfig(save_checkpoint_steps=bert_train_cfg.save_checkpoint_steps,
                                 keep_checkpoint_max=bert_train_cfg.keep_checkpoint_max)
    ckpoint_cb = ModelCheckpoint(prefix=bert_train_cfg.checkpoint_prefix, config=config_ck)
    model.train(ds.get_repeat_count(), ds, callbacks=[LossMonitor(), ckpoint_cb], dataset_sink_mode=False)


if __name__ == '__main__':
    train_bert()
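
# To launch pretraining after generating examples.tfrecord and editing config.py,
# run (single-device launch is an assumption here; distributed runs additionally
# need the Ascend environment preparation mentioned in the module docstring):
#
#   python train.py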