
test_bert_train.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Bert test."""

# pylint: disable=missing-docstring, arguments-differ, W0612

import os

import mindspore.common.dtype as mstype
import mindspore.context as context
from mindspore import Tensor
from mindspore.nn.optim import AdamWeightDecayDynamicLR
from mindspore.model_zoo.Bert_NEZHA import BertConfig, BertNetworkWithLoss, BertTrainOneStepCell, \
    BertTrainOneStepWithLossScaleCell
from mindspore.train.loss_scale_manager import DynamicLossScaleManager
from ...dataset_mock import MindData
from ...ops_common import nn, np, batch_tuple_tensor, build_construct_graph

_current_dir = os.path.dirname(os.path.realpath(__file__)) + "/../python/test_data"
context.set_context(mode=context.GRAPH_MODE)


def get_dataset(batch_size=1):
    dataset_types = (np.int32, np.int32, np.int32, np.int32, np.int32, np.int32, np.int32)
    dataset_shapes = ((batch_size, 128), (batch_size, 128), (batch_size, 128), (batch_size, 1),
                      (batch_size, 20), (batch_size, 20), (batch_size, 20))

    dataset = MindData(size=2, batch_size=batch_size,
                       np_types=dataset_types,
                       output_shapes=dataset_shapes,
                       input_indexs=(0, 1))
    return dataset


def load_test_data(batch_size=1):
    dataset = get_dataset(batch_size)
    ret = dataset.next()
    ret = batch_tuple_tensor(ret, batch_size)
    return ret
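
# A note on the mock data layout (an assumption based on the standard BERT
# pre-training inputs; these column names do not appear in this file): the
# three (batch_size, 128) tensors likely correspond to input_ids, input_mask
# and token_type_ids, the (batch_size, 1) tensor to next_sentence_labels, and
# the three (batch_size, 20) tensors to masked_lm_positions, masked_lm_ids
# and masked_lm_weights.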


def get_config(version='base', batch_size=1):
    """Return a BertConfig for the requested model version ('base' or 'large')."""
    if version == 'base':
        return BertConfig(
            batch_size=batch_size,
            seq_length=128,
            vocab_size=21128,
            hidden_size=768,
            num_hidden_layers=12,
            num_attention_heads=12,
            intermediate_size=3072,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=2,
            initializer_range=0.02,
            use_relative_positions=True,
            input_mask_from_dataset=True,
            token_type_ids_from_dataset=True,
            dtype=mstype.float32,
            compute_type=mstype.float32)
    if version == 'large':
        return BertConfig(
            batch_size=batch_size,
            seq_length=128,
            vocab_size=21128,
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            intermediate_size=4096,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=2,
            initializer_range=0.02,
            use_relative_positions=True,
            input_mask_from_dataset=True,
            token_type_ids_from_dataset=True,
            dtype=mstype.float32,
            compute_type=mstype.float32)
    # Any other version falls back to the default configuration.
    return BertConfig(batch_size=batch_size)
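
# For reference (an inference, not stated in this file): the 'base' and
# 'large' settings above mirror the usual BERT-base (12 layers, hidden size
# 768) and BERT-large (24 layers, hidden size 1024) hyper-parameters, with
# use_relative_positions=True selecting NEZHA-style relative position
# embeddings.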


def test_bert_train():
    """Build the BERT one-step training graph without executing it."""
    class ModelBert(nn.Cell):
        """Wraps a loss network and an optimizer into a one-step training cell."""

        def __init__(self, network, optimizer=None):
            super(ModelBert, self).__init__()
            self.optimizer = optimizer
            self.train_network = BertTrainOneStepCell(network, self.optimizer)
            self.train_network.set_train()

        def construct(self, arg0, arg1, arg2, arg3, arg4, arg5, arg6):
            return self.train_network(arg0, arg1, arg2, arg3, arg4, arg5, arg6)

    version = os.getenv('VERSION', 'large')
    batch_size = int(os.getenv('BATCH_SIZE', '1'))
    inputs = load_test_data(batch_size)
    config = get_config(version=version, batch_size=batch_size)
    netwithloss = BertNetworkWithLoss(config, True)
    optimizer = AdamWeightDecayDynamicLR(netwithloss.trainable_params(), 10)
    net = ModelBert(netwithloss, optimizer=optimizer)
    net.set_train()
    build_construct_graph(net, *inputs, execute=False)


def test_bert_withlossscale_train():
    """Build and execute the training graph with a loss-scale value fed as input."""
    class ModelBert(nn.Cell):
        def __init__(self, network, optimizer=None):
            super(ModelBert, self).__init__()
            self.optimizer = optimizer
            self.train_network = BertTrainOneStepWithLossScaleCell(network, self.optimizer)
            self.train_network.set_train()

        def construct(self, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7):
            return self.train_network(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)

    version = os.getenv('VERSION', 'base')
    batch_size = int(os.getenv('BATCH_SIZE', '1'))
    scaling_sens = Tensor(np.ones([1]).astype(np.float32))
    inputs = load_test_data(batch_size) + (scaling_sens,)
    config = get_config(version=version, batch_size=batch_size)
    netwithloss = BertNetworkWithLoss(config, True)
    optimizer = AdamWeightDecayDynamicLR(netwithloss.trainable_params(), 10)
    net = ModelBert(netwithloss, optimizer=optimizer)
    net.set_train()
    build_construct_graph(net, *inputs, execute=True)
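
# In the test above, scaling_sens is appended as an eighth network input; a
# plausible reading (not stated in this file) is that
# BertTrainOneStepWithLossScaleCell, when built without a scale_update_cell,
# takes the loss-scale value from this extra input.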


def bert_withlossscale_manager_train():
    class ModelBert(nn.Cell):
        def __init__(self, network, optimizer=None):
            super(ModelBert, self).__init__()
            self.optimizer = optimizer
            manager = DynamicLossScaleManager()
            # get_update_cell() yields the dynamic loss-scale update cell
            # maintained by DynamicLossScaleManager.
            update_cell = manager.get_update_cell()
            self.train_network = BertTrainOneStepWithLossScaleCell(
                network, self.optimizer, scale_update_cell=update_cell)
            self.train_network.set_train()

        def construct(self, arg0, arg1, arg2, arg3, arg4, arg5, arg6):
            return self.train_network(arg0, arg1, arg2, arg3, arg4, arg5, arg6)

    version = os.getenv('VERSION', 'base')
    batch_size = int(os.getenv('BATCH_SIZE', '1'))
    inputs = load_test_data(batch_size)
    config = get_config(version=version, batch_size=batch_size)
    netwithloss = BertNetworkWithLoss(config, True)
    optimizer = AdamWeightDecayDynamicLR(netwithloss.trainable_params(), 10)
    net = ModelBert(netwithloss, optimizer=optimizer)
    net.set_train()
    build_construct_graph(net, *inputs, execute=True)


def bert_withlossscale_manager_train_feed():
    class ModelBert(nn.Cell):
        def __init__(self, network, optimizer=None):
            super(ModelBert, self).__init__()
            self.optimizer = optimizer
            manager = DynamicLossScaleManager()
            update_cell = manager.get_update_cell()
            self.train_network = BertTrainOneStepWithLossScaleCell(
                network, self.optimizer, scale_update_cell=update_cell)
            self.train_network.set_train()

        def construct(self, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7):
            return self.train_network(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)

    version = os.getenv('VERSION', 'base')
    batch_size = int(os.getenv('BATCH_SIZE', '1'))
    scaling_sens = Tensor(np.ones([1]).astype(np.float32))
    inputs = load_test_data(batch_size) + (scaling_sens,)
    config = get_config(version=version, batch_size=batch_size)
    netwithloss = BertNetworkWithLoss(config, True)
    optimizer = AdamWeightDecayDynamicLR(netwithloss.trainable_params(), 10)
    net = ModelBert(netwithloss, optimizer=optimizer)
    net.set_train()
    build_construct_graph(net, *inputs, execute=True)
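

# A minimal standalone entry point -- a sketch that is not part of the original
# test suite. It assumes the relative imports above resolve, i.e. the module is
# executed as part of its package (for example via pytest or `python -m ...`).
if __name__ == "__main__":
    test_bert_train()
    test_bert_withlossscale_train()
    bert_withlossscale_manager_train()
    bert_withlossscale_manager_train_feed()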