
train.py

import sys
sys.path.append('../..')

import torch
from torch.optim import Adam

from fastNLP.core.callback import Callback, GradientClipCallback
from fastNLP.core.trainer import Trainer
from fastNLP.io.pipe.coreference import CoReferencePipe
from fastNLP.core.const import Const

from reproduction.coreference_resolution.model.config import Config
from reproduction.coreference_resolution.model.model_re import Model
from reproduction.coreference_resolution.model.softmax_loss import SoftmaxLoss
from reproduction.coreference_resolution.model.metric import CRMetric


class LRCallback(Callback):
    """Multiplies every parameter group's learning rate by (1 - decay_rate) every 100 steps."""

    def __init__(self, parameters, decay_rate=1e-3):
        super().__init__()
        self.paras = parameters
        self.decay_rate = decay_rate

    def on_step_end(self):
        if self.step % 100 == 0:
            for para in self.paras:
                para['lr'] = para['lr'] * (1 - self.decay_rate)


if __name__ == "__main__":
    config = Config()
    print(config)

    # Build the coreference data bundle from the raw train/dev/test files.
    def cache():
        bundle = CoReferencePipe(config).process_from_file(
            {'train': config.train_path, 'dev': config.dev_path, 'test': config.test_path})
        return bundle

    data_bundle = cache()
    print(data_bundle)

    # The model takes the vocabulary of the first input field built by the pipe, plus the config.
    model = Model(data_bundle.get_vocab(Const.INPUTS(0)), config)
    print(model)

    loss = SoftmaxLoss()
    metric = CRMetric()
    optim = Adam(model.parameters(), lr=config.lr)
    # Decay the optimizer's param-group learning rates in place during training.
    lr_decay_callback = LRCallback(optim.param_groups, config.lr_decay)

    trainer = Trainer(model=model,
                      train_data=data_bundle.datasets["train"],
                      dev_data=data_bundle.datasets["dev"],
                      loss=loss, metrics=metric, check_code_level=-1, sampler=None,
                      batch_size=1,
                      device=torch.device("cuda:" + config.cuda) if torch.cuda.is_available() else None,
                      metric_key='f', n_epochs=config.epoch,
                      optimizer=optim,
                      save_path=None,
                      callbacks=[lr_decay_callback, GradientClipCallback(clip_value=5)])
    print()

    trainer.train()
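
A note on usage: sys.path.append('../..') assumes the script is launched from reproduction/coreference_resolution/, i.e. python train.py from that directory. All paths and hyperparameters come from Config; a minimal stand-in with just the fields train.py actually reads (the real class lives in reproduction/coreference_resolution/model/config.py; the values below are illustrative placeholders, not the repo's defaults) might look like:

# Hypothetical minimal Config; field names are the ones train.py reads,
# values are placeholder assumptions.
class Config:
    train_path = "train.jsonlines"
    dev_path = "dev.jsonlines"
    test_path = "test.jsonlines"
    lr = 1e-3        # initial Adam learning rate
    lr_decay = 1e-3  # multiplicative decay applied by LRCallback every 100 steps
    epoch = 10       # n_epochs passed to Trainer
    cuda = "0"       # GPU index as a string, yielding torch.device("cuda:0")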