
config.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
  15. """
  16. network config setting, will be used in dataset.py, run_pretrain.py
  17. """
from easydict import EasyDict as edict
import mindspore.common.dtype as mstype
from mindspore.model_zoo.Bert_NEZHA import BertConfig

cfg = edict({
    'bert_network': 'base',
    # Dynamic loss-scale settings for mixed-precision training.
    'loss_scale_value': 2**32,
    'scale_factor': 2,
    'scale_window': 1000,
    # Selected optimizer; the sub-dict of the same name below supplies its hyperparameters.
    'optimizer': 'Lamb',
    'AdamWeightDecayDynamicLR': edict({
        'learning_rate': 3e-5,
        'end_learning_rate': 1e-7,
        'power': 5.0,
        'weight_decay': 1e-5,
        'eps': 1e-6,
    }),
    'Lamb': edict({
        'start_learning_rate': 3e-5,
        'end_learning_rate': 1e-7,
        'power': 10.0,
        'warmup_steps': 10000,
        'weight_decay': 0.01,
        'eps': 1e-6,
    }),
    'Momentum': edict({
        'learning_rate': 2e-5,
        'momentum': 0.9,
    }),
})
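
# Illustrative sketch only (an assumption, not the MindSpore API): how the
# dynamic loss-scale settings above typically interact in mixed-precision
# training. The scale is reduced by scale_factor on gradient overflow and
# grown by scale_factor after scale_window consecutive overflow-free steps.
def _example_update_loss_scale(loss_scale, overflow, good_steps,
                               scale_factor=cfg.scale_factor,
                               scale_window=cfg.scale_window):
    """Hypothetical helper, for illustration; not used by the training code."""
    if overflow:
        # Overflow detected: shrink the scale and reset the counter.
        return max(loss_scale / scale_factor, 1), 0
    good_steps += 1
    if good_steps >= scale_window:
        # A full window of clean steps: grow the scale and reset the counter.
        return loss_scale * scale_factor, 0
    return loss_scale, good_steps
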
'''
Two kinds of network are included:
base: Google BERT-base (the base version of the BERT model).
nezha: BERT-NEZHA (a Chinese pretrained language model developed by Huawei, which introduces
Functional Relative Positional Encoding as an effective positional encoding scheme).
'''
if cfg.bert_network == 'base':
    bert_net_cfg = BertConfig(
        batch_size=32,
        seq_length=128,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        use_relative_positions=False,
        input_mask_from_dataset=True,
        token_type_ids_from_dataset=True,
        dtype=mstype.float32,
        compute_type=mstype.float16,
    )
if cfg.bert_network == 'nezha':
    bert_net_cfg = BertConfig(
        batch_size=32,
        seq_length=128,
        vocab_size=21128,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        use_relative_positions=True,
        input_mask_from_dataset=True,
        token_type_ids_from_dataset=True,
        dtype=mstype.float32,
        compute_type=mstype.float16,
    )
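
# Hypothetical usage sketch: how dataset.py or run_pretrain.py might read
# these settings (the attribute names on BertConfig are an assumption based
# on the constructor arguments above).
if __name__ == '__main__':
    print('network:', cfg.bert_network)
    print('optimizer:', cfg.optimizer, dict(cfg[cfg.optimizer]))
    print('hidden size:', bert_net_cfg.hidden_size)
    print('compute type:', bert_net_cfg.compute_type)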