config.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
  15. """
  16. network config setting, will be used in dataset.py, run_pretrain.py
  17. """
from easydict import EasyDict as edict
import mindspore.common.dtype as mstype
from .bert_model import BertConfig

cfg = edict({
    'bert_network': 'base',
    'loss_scale_value': 65536,
    'scale_factor': 2,
    'scale_window': 1000,
    'optimizer': 'Lamb',
    'enable_global_norm': False,
    'AdamWeightDecay': edict({
        'learning_rate': 3e-5,
        'end_learning_rate': 0.0,
        'power': 5.0,
        'weight_decay': 1e-5,
        'decay_filter': lambda x: 'layernorm' not in x.name.lower() and 'bias' not in x.name.lower(),
        'eps': 1e-6,
        'warmup_steps': 10000,
    }),
    'Lamb': edict({
        'learning_rate': 3e-5,
        'end_learning_rate': 0.0,
        'power': 10.0,
        'warmup_steps': 10000,
        'weight_decay': 0.01,
        'decay_filter': lambda x: 'layernorm' not in x.name.lower() and 'bias' not in x.name.lower(),
        'eps': 1e-6,
    }),
    'Momentum': edict({
        'learning_rate': 2e-5,
        'momentum': 0.9,
    }),
})
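# Note (illustrative sketch, not part of the original file): the three
# loss-scale fields above line up with the constructor of MindSpore's
# dynamic loss scale manager, which the pretraining script would
# typically build for mixed-precision training, e.g.:
#
#   from mindspore.train.loss_scale_manager import DynamicLossScaleManager
#   loss_scale_manager = DynamicLossScaleManager(cfg.loss_scale_value,
#                                                cfg.scale_factor,
#                                                cfg.scale_window)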
'''
Three kinds of network are included:
base: Google BERT-base (the base version of the BERT model).
nezha: BERT-NEZHA (a Chinese pretrained language model developed by Huawei, which introduced
Functional Relative Positional Encoding as an effective positional encoding scheme).
large: BERT-large (the large version of the BERT model).
'''
if cfg.bert_network == 'base':
    bert_net_cfg = BertConfig(
        batch_size=64,
        seq_length=128,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        use_relative_positions=False,
        input_mask_from_dataset=True,
        token_type_ids_from_dataset=True,
        dtype=mstype.float32,
        compute_type=mstype.float16
    )
elif cfg.bert_network == 'nezha':
    bert_net_cfg = BertConfig(
        batch_size=96,
        seq_length=128,
        vocab_size=21128,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        use_relative_positions=True,  # NEZHA uses functional relative positional encoding
        input_mask_from_dataset=True,
        token_type_ids_from_dataset=True,
        dtype=mstype.float32,
        compute_type=mstype.float16
    )
elif cfg.bert_network == 'large':
    bert_net_cfg = BertConfig(
        batch_size=24,
        seq_length=512,
        vocab_size=30522,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        use_relative_positions=False,
        input_mask_from_dataset=True,
        token_type_ids_from_dataset=True,
        dtype=mstype.float32,
        compute_type=mstype.float16
    )
else:
    # Fail fast on an unrecognized network name instead of leaving
    # bert_net_cfg undefined for downstream imports.
    raise ValueError("unsupported bert_network: {}".format(cfg.bert_network))
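# Illustrative sketch (an assumption, not part of the original file):
# run_pretrain.py is expected to select an optimizer according to
# cfg.optimizer, roughly along these lines (exact MindSpore optimizer
# signatures vary across versions; net_with_loss is hypothetical here):
#
#   from mindspore.nn import Lamb, AdamWeightDecay, Momentum
#
#   params = net_with_loss.trainable_params()
#   if cfg.optimizer == 'Lamb':
#       optimizer = Lamb(params, learning_rate=cfg.Lamb.learning_rate,
#                        eps=cfg.Lamb.eps, weight_decay=cfg.Lamb.weight_decay)
#   elif cfg.optimizer == 'AdamWeightDecay':
#       optimizer = AdamWeightDecay(params,
#                                   learning_rate=cfg.AdamWeightDecay.learning_rate,
#                                   eps=cfg.AdamWeightDecay.eps,
#                                   weight_decay=cfg.AdamWeightDecay.weight_decay)
#   else:  # 'Momentum'
#       optimizer = Momentum(params, learning_rate=cfg.Momentum.learning_rate,
#                            momentum=cfg.Momentum.momentum)
#
# dataset.py, in turn, would read bert_net_cfg.batch_size and
# bert_net_cfg.seq_length when building the pretraining dataset.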