
train_dpcnn.py 4.3 kB

# First, the following paths need to be added to the environment variables; since this
# is currently only open to internal testing, the paths have to be declared manually.
import os

import torch.cuda
from torch.optim import SGD
from torch.optim.lr_scheduler import CosineAnnealingLR

from fastNLP import CrossEntropyLoss, AccuracyMetric, logger
from fastNLP.core import LRScheduler
from fastNLP.core.const import Const as C
from fastNLP.core.sampler import BucketSampler
from fastNLP.core.trainer import Trainer
from fastNLP.core.utils import cache_results
from fastNLP.core.vocabulary import VocabularyOption
from fastNLP.embeddings import StaticEmbedding
from fastNLP.io import YelpFullPipe, YelpPolarityPipe
from reproduction.text_classification.model.dpcnn import DPCNN
from utils.util_init import set_rng_seeds

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"

logger.add_file('log', 'INFO')

# hyper-parameters
class Config:
    seed = 12345
    model_dir_or_name = "dpcnn-yelp-f"
    embedding_grad = True
    train_epoch = 30
    batch_size = 100
    task = "yelp_f"
    # datadir = 'workdir/datasets/SST'
    # datadir = 'workdir/datasets/yelp_polarity'
    datadir = 'workdir/datasets/yelp_full'
    # datafile = {"train": "train.txt", "dev": "dev.txt", "test": "test.txt"}
    datafile = {"train": "train.csv", "test": "test.csv"}
    lr = 1e-3
    src_vocab_op = VocabularyOption(max_size=100000)
    embed_dropout = 0.3
    cls_dropout = 0.1
    weight_decay = 1e-5

    def __init__(self):
        self.datadir = os.path.join(os.environ['HOME'], self.datadir)
        self.datapath = {k: os.path.join(self.datadir, v)
                         for k, v in self.datafile.items()}


ops = Config()
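# Note: the hyper-parameters live as class attributes on Config; __init__ only resolves
# the dataset paths relative to $HOME, so edit `datadir`/`datafile` above to switch tasks.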
set_rng_seeds(ops.seed)
logger.info('RNG SEED %d' % ops.seed)


# 1. Task-related information: load the dataInfo through the data pipeline
@cache_results(ops.model_dir_or_name + '-data-cache')
def load_data():
    datainfo = YelpFullPipe(lower=True, tokenizer='raw').process_from_file(ops.datapath)
    for ds in datainfo.datasets.values():
        # record each sample's sequence length and mark the input/target fields
        ds.apply_field(len, C.INPUT, C.INPUT_LEN)
        ds.set_input(C.INPUT, C.INPUT_LEN)
        ds.set_target(C.TARGET)
    return datainfo


datainfo = load_data()
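# Note: @cache_results pickles the processed datainfo to ops.model_dir_or_name + '-data-cache'
# on the first run and reloads it on later runs; delete that file (or, in fastNLP versions
# that support it, call load_data(_refresh=True)) to force preprocessing to run again.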
embedding = StaticEmbedding(
    datainfo.vocabs['words'], model_dir_or_name='en-glove-6b-100d',
    requires_grad=ops.embedding_grad, normalize=False)
# rescale the pretrained weights so they have unit standard deviation
embedding.embedding.weight.data /= embedding.embedding.weight.data.std()
print(embedding.embedding.weight.data.mean(), embedding.embedding.weight.data.std())

# 2. Alternatively, reuse a model from fastNLP directly
# datainfo.datasets['train'] = datainfo.datasets['train'][:1000]  # for debug purposes
# datainfo.datasets['test'] = datainfo.datasets['test'][:1000]
logger.info(datainfo)

model = DPCNN(init_embed=embedding, num_cls=len(datainfo.vocabs[C.TARGET]),
              embed_dropout=ops.embed_dropout, cls_dropout=ops.cls_dropout)
# print(model)
# 3. Declare the loss, metric and optimizer
loss = CrossEntropyLoss(pred=C.OUTPUT, target=C.TARGET)
metric = AccuracyMetric(pred=C.OUTPUT, target=C.TARGET)
optimizer = SGD([param for param in model.parameters() if param.requires_grad],
                lr=ops.lr, momentum=0.9, weight_decay=ops.weight_decay)

callbacks = []
callbacks.append(LRScheduler(CosineAnnealingLR(optimizer, 5)))

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
# print(device)
logger.info(device)
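# Note: fastNLP's LRScheduler callback steps the wrapped scheduler once per epoch, so
# CosineAnnealingLR(optimizer, 5) anneals the learning rate over a 5-epoch cycle (this
# describes typical fastNLP behavior; check the installed LRScheduler if in doubt).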
# 4. Define the training procedure
# normal trainer
trainer = Trainer(datainfo.datasets['train'], model, optimizer=optimizer, loss=loss,
                  sampler=BucketSampler(num_buckets=50, batch_size=ops.batch_size),
                  metrics=[metric], use_tqdm=False, save_path='save',
                  dev_data=datainfo.datasets['test'], device=device,
                  check_code_level=-1, batch_size=ops.batch_size, callbacks=callbacks,
                  n_epochs=ops.train_epoch, num_workers=4)

# distributed trainer
# trainer = DistTrainer(datainfo.datasets['train'], model, optimizer=optimizer, loss=loss,
#                       metrics=[metric],
#                       dev_data=datainfo.datasets['test'], device='cuda',
#                       batch_size_per_gpu=ops.batch_size, callbacks_all=callbacks,
#                       n_epochs=ops.train_epoch, num_workers=4)

if __name__ == "__main__":
    print(trainer.train())
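# Usage (a minimal sketch, assuming the Yelp Full CSVs train.csv/test.csv already sit
# under $HOME/workdir/datasets/yelp_full and the GloVe 6B-100d vectors are available):
#     python train_dpcnn.py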