
test_bert_train.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Bert test."""

# pylint: disable=missing-docstring, arguments-differ, W0612

import os

import mindspore.common.dtype as mstype
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.nn.optim import AdamWeightDecay
from mindspore.train.loss_scale_manager import DynamicLossScaleManager
from mindspore.nn import learning_rate_schedule as lr_schedules
from model_zoo.official.nlp.bert.src import BertConfig, BertNetworkWithLoss, \
    BertTrainOneStepCell, BertTrainOneStepWithLossScaleCell

from ...dataset_mock import MindData
from ...ops_common import nn, np, batch_tuple_tensor, build_construct_graph

_current_dir = os.path.dirname(os.path.realpath(__file__)) + "/../python/test_data"

context.set_context(mode=context.GRAPH_MODE)

def get_dataset(batch_size=1):
    dataset_types = (np.int32, np.int32, np.int32, np.int32, np.int32, np.int32, np.int32)
    dataset_shapes = ((batch_size, 128), (batch_size, 128), (batch_size, 128), (batch_size, 1),
                      (batch_size, 20), (batch_size, 20), (batch_size, 20))
    dataset = MindData(size=2, batch_size=batch_size,
                       np_types=dataset_types,
                       output_shapes=dataset_shapes,
                       input_indexs=(0, 1))
    return dataset


def load_test_data(batch_size=1):
    dataset = get_dataset(batch_size)
    ret = dataset.next()
    ret = batch_tuple_tensor(ret, batch_size)
    return ret

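For orientation: the seven mocked tensors match the conventional BERT pretraining inputs (token ids, input mask, token type ids, next-sentence label, then masked-LM positions/ids/weights). A quick, hypothetical shape check, assuming the MindData mock yields one array per declared shape:

    inputs = load_test_data(batch_size=2)
    roles = ("input_ids", "input_mask", "token_type_ids", "next_sentence_label",
             "masked_lm_positions", "masked_lm_ids", "masked_lm_weights")
    for role, tensor in zip(roles, inputs):
        print(role, tensor.shape)
    # Expect three (2, 128) tensors, one (2, 1), then three (2, 20); the role
    # names are illustrative only and do not come from MindData itself.
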
def get_config(version='base'):
    """
    Return a BertConfig for the requested model size: 'base', 'large',
    or the library defaults for any other value.
    """
    if version == 'base':
        return BertConfig(
            seq_length=128,
            vocab_size=21128,
            hidden_size=768,
            num_hidden_layers=12,
            num_attention_heads=12,
            intermediate_size=3072,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=2,
            initializer_range=0.02,
            use_relative_positions=True,
            dtype=mstype.float32,
            compute_type=mstype.float32)
    if version == 'large':
        return BertConfig(
            seq_length=128,
            vocab_size=21128,
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            intermediate_size=4096,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=2,
            initializer_range=0.02,
            use_relative_positions=True,
            dtype=mstype.float32,
            compute_type=mstype.float32)
    return BertConfig()

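The two presets differ only in scale (hidden size, depth, head count, feed-forward width); the 21128-entry vocabulary and all regularization settings are shared. A minimal check against the definitions above:

    base, large = get_config('base'), get_config('large')
    assert (base.hidden_size, base.num_hidden_layers) == (768, 12)
    assert (large.hidden_size, large.num_hidden_layers) == (1024, 24)
    assert base.vocab_size == large.vocab_size == 21128
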
class BertLearningRate(lr_schedules.LearningRateSchedule):
    def __init__(self, decay_steps, warmup_steps=100, learning_rate=0.1, end_learning_rate=0.0001, power=1.0):
        super(BertLearningRate, self).__init__()
        self.warmup_lr = lr_schedules.WarmUpLR(learning_rate, warmup_steps)
        self.decay_lr = lr_schedules.PolynomialDecayLR(learning_rate, end_learning_rate, decay_steps, power)
        self.warmup_steps = Tensor(np.array([warmup_steps]).astype(np.float32))
        self.greater = P.Greater()
        self.one = Tensor(np.array([1.0]).astype(np.float32))
        self.cast = P.Cast()

    def construct(self, global_step):
        is_warmup = self.cast(self.greater(self.warmup_steps, global_step), mstype.float32)
        warmup_lr = self.warmup_lr(global_step)
        decay_lr = self.decay_lr(global_step)
        lr = (self.one - is_warmup) * decay_lr + is_warmup * warmup_lr
        return lr

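The schedule selects the warmup branch while warmup_steps > global_step and the decay branch afterwards, using a Greater-based mask instead of control flow so the blend stays graph-friendly. A minimal NumPy sketch of the same arithmetic, assuming WarmUpLR implements linear warmup and PolynomialDecayLR the standard polynomial decay:

    import numpy as np

    def blended_lr(step, decay_steps, warmup_steps=100,
                   lr=0.1, end_lr=0.0001, power=1.0):
        warmup = lr * step / warmup_steps                    # linear warmup
        p = min(step, decay_steps) / decay_steps
        decay = (lr - end_lr) * (1.0 - p) ** power + end_lr  # polynomial decay
        is_warmup = float(step < warmup_steps)               # the Greater mask
        return (1.0 - is_warmup) * decay + is_warmup * warmup
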
def test_bert_train():
    """
    Build and compile the one-step BERT training graph without loss scaling.
    """
    class ModelBert(nn.Cell):
        """
        Wraps a loss network and optimizer in a BertTrainOneStepCell.
        """
        def __init__(self, network, optimizer=None):
            super(ModelBert, self).__init__()
            self.optimizer = optimizer
            self.train_network = BertTrainOneStepCell(network, self.optimizer)
            self.train_network.set_train()

        def construct(self, arg0, arg1, arg2, arg3, arg4, arg5, arg6):
            return self.train_network(arg0, arg1, arg2, arg3, arg4, arg5, arg6)

    version = os.getenv('VERSION', 'large')
    batch_size = int(os.getenv('BATCH_SIZE', '1'))
    inputs = load_test_data(batch_size)
    config = get_config(version=version)
    netwithloss = BertNetworkWithLoss(config, True)
    lr = BertLearningRate(10)
    optimizer = AdamWeightDecay(netwithloss.trainable_params(), lr)
    net = ModelBert(netwithloss, optimizer=optimizer)
    net.set_train()
    build_construct_graph(net, *inputs, execute=False)

def test_bert_withlossscale_train():
    class ModelBert(nn.Cell):
        def __init__(self, network, optimizer=None):
            super(ModelBert, self).__init__()
            self.optimizer = optimizer
            self.train_network = BertTrainOneStepWithLossScaleCell(network, self.optimizer)
            self.train_network.set_train()

        def construct(self, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7):
            return self.train_network(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)

    version = os.getenv('VERSION', 'base')
    batch_size = int(os.getenv('BATCH_SIZE', '1'))
    scaling_sens = Tensor(np.ones([1]).astype(np.float32))
    inputs = load_test_data(batch_size) + (scaling_sens,)
    config = get_config(version=version)
    netwithloss = BertNetworkWithLoss(config, True)
    lr = BertLearningRate(10)
    optimizer = AdamWeightDecay(netwithloss.trainable_params(), lr)
    net = ModelBert(netwithloss, optimizer=optimizer)
    net.set_train()
    build_construct_graph(net, *inputs, execute=True)

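The loss-scaled cell takes the current scale as an extra operand, so the test appends a one-element float32 tensor to the batch; with a scale of 1.0 the gradients are effectively unscaled. Feeding a larger initial scale would be a one-line change (the 1024.0 here is an arbitrary illustrative value):

    scaling_sens = Tensor(np.full([1], 1024.0, dtype=np.float32))  # e.g. 2**10
    inputs = load_test_data(batch_size) + (scaling_sens,)
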
def bert_withlossscale_manager_train():
    class ModelBert(nn.Cell):
        def __init__(self, network, optimizer=None):
            super(ModelBert, self).__init__()
            self.optimizer = optimizer
            manager = DynamicLossScaleManager()
            # The manager supplies its own update cell for adjusting the scale.
            update_cell = manager.get_update_cell()
            self.train_network = BertTrainOneStepWithLossScaleCell(network, self.optimizer,
                                                                   scale_update_cell=update_cell)
            self.train_network.set_train()

        def construct(self, arg0, arg1, arg2, arg3, arg4, arg5, arg6):
            return self.train_network(arg0, arg1, arg2, arg3, arg4, arg5, arg6)

    version = os.getenv('VERSION', 'base')
    batch_size = int(os.getenv('BATCH_SIZE', '1'))
    inputs = load_test_data(batch_size)
    config = get_config(version=version)
    netwithloss = BertNetworkWithLoss(config, True)
    lr = BertLearningRate(10)
    optimizer = AdamWeightDecay(netwithloss.trainable_params(), lr)
    net = ModelBert(netwithloss, optimizer=optimizer)
    net.set_train()
    build_construct_graph(net, *inputs, execute=True)

def bert_withlossscale_manager_train_feed():
    class ModelBert(nn.Cell):
        def __init__(self, network, optimizer=None):
            super(ModelBert, self).__init__()
            self.optimizer = optimizer
            manager = DynamicLossScaleManager()
            # The manager supplies its own update cell for adjusting the scale.
            update_cell = manager.get_update_cell()
            self.train_network = BertTrainOneStepWithLossScaleCell(network, self.optimizer,
                                                                   scale_update_cell=update_cell)
            self.train_network.set_train()

        def construct(self, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7):
            return self.train_network(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)

    version = os.getenv('VERSION', 'base')
    batch_size = int(os.getenv('BATCH_SIZE', '1'))
    scaling_sens = Tensor(np.ones([1]).astype(np.float32))
    inputs = load_test_data(batch_size) + (scaling_sens,)
    config = get_config(version=version)
    netwithloss = BertNetworkWithLoss(config, True)
    lr = BertLearningRate(10)
    optimizer = AdamWeightDecay(netwithloss.trainable_params(), lr)
    net = ModelBert(netwithloss, optimizer=optimizer)
    net.set_train()
    build_construct_graph(net, *inputs, execute=True)
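
Model size and batch size are read from the environment, so runs can be steered without editing the file. A sketch of driving the cases directly; note the manager-based variants lack the test_ prefix, so pytest does not collect them and they must be called explicitly:

    import os

    os.environ['VERSION'] = 'base'        # or 'large'
    os.environ['BATCH_SIZE'] = '2'

    test_bert_train()                     # compiles the graph only (execute=False)
    test_bert_withlossscale_train()       # compiles and runs one step
    bert_withlossscale_manager_train()    # dynamic loss scaling via the manager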