
train_char_cnn.py

# First add the following paths to the environment variables. Since this is
# currently only open to internal testing, the paths have to be declared manually.
import os
os.environ['FASTNLP_BASE_URL'] = 'http://10.141.222.118:8888/file/download/'
os.environ['FASTNLP_CACHE_DIR'] = '/remote-home/hyan01/fastnlp_caches'
import sys
sys.path.append('../..')
from fastNLP.core.const import Const as C
import torch.nn as nn
from fastNLP.io.data_loader import YelpLoader
from fastNLP.io.pipe.classification import YelpFullPipe, YelpPolarityPipe, SST2Pipe, IMDBPipe
#from data.sstLoader import sst2Loader
from model.char_cnn import CharacterLevelCNN
from fastNLP import CrossEntropyLoss, AccuracyMetric
from fastNLP.core.trainer import Trainer
from torch.optim import SGD
import torch
from torch.optim.lr_scheduler import LambdaLR
from fastNLP.core import LRScheduler
## hyperparameters
# TODO: add fastNLP's run logging here
class Config:
    # seed = 7777
    model_dir_or_name = "en-base-uncased"
    embedding_grad = False
    bert_embedding_layers = '4,-2,-1'
    train_epoch = 50
    num_classes = 2
    task = "yelp_p"
    # yelp_p
    datapath = {"train": "/remote-home/ygwang/yelp_polarity/train.csv",
                "test": "/remote-home/ygwang/yelp_polarity/test.csv"}
    # IMDB
    # datapath = {"train": "/remote-home/ygwang/IMDB_data/train.csv",
    #             "test": "/remote-home/ygwang/IMDB_data/test.csv"}
    # SST-2
    # datapath = {"train": "/remote-home/ygwang/workspace/GLUE/SST-2/train.tsv",
    #             "dev": "/remote-home/ygwang/workspace/GLUE/SST-2/dev.tsv"}
    lr = 0.01
    batch_size = 128
    model_size = "large"
    number_of_characters = 69
    extra_characters = ''
    max_length = 1014
    weight_decay = 1e-5
    to_lower = True
    tokenizer = 'spacy'  # tokenize with spaCy
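    # The conv/fc specs below follow the "small" and "large" architectures from
    # "Character-level Convolutional Networks for Text Classification"
    # (Zhang, Zhao & LeCun, 2015).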
    char_cnn_config = {
        "alphabet": {
            "en": {
                "lower": {
                    "alphabet": "abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{}",
                    "number_of_characters": 69
                },
                "both": {
                    "alphabet": "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{}",
                    "number_of_characters": 95
                }
            }
        },
        "model_parameters": {
            "small": {
                "conv": [
                    # each entry is [channels, kernel_size, max_pooling_size]
                    [256, 7, 3],
                    [256, 7, 3],
                    [256, 3, -1],
                    [256, 3, -1],
                    [256, 3, -1],
                    [256, 3, 3]
                ],
                "fc": [1024, 1024]
            },
            "large": {
                "conv": [
                    [1024, 7, 3],
                    [1024, 7, 3],
                    [1024, 3, -1],
                    [1024, 3, -1],
                    [1024, 3, -1],
                    [1024, 3, 3]
                ],
                "fc": [2048, 2048]
            }
        },
        "data": {
            "text_column": "SentimentText",
            "label_column": "Sentiment",
            "max_length": 1014,
            "num_of_classes": 2,
            "encoding": None,
            "chunksize": 50000,
            "max_rows": 100000,
            "preprocessing_steps": ["lower", "remove_hashtags", "remove_urls", "remove_user_mentions"]
        },
        "training": {
            "batch_size": 128,
            "learning_rate": 0.01,
            "epochs": 10,
            "optimizer": "sgd"
        }
    }
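
# Example of reading a conv spec from the config above (illustrative):
#   Config.char_cnn_config["model_parameters"]["large"]["conv"][0] == [1024, 7, 3]
#   i.e. 1024 output channels, kernel size 7, max-pooling size 3 (-1 means no pooling)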
ops = Config  # use the class itself as a plain namespace of hyperparameters
# set_rng_seeds(ops.seed)
# print('RNG SEED: {}'.format(ops.seed))

## 1. Task-related info: load the dataInfo with a dataloader
#dataloader=SST2Loader()
#dataloader=IMDBLoader()
# dataloader=YelpLoader(fine_grained=True)
# datainfo=dataloader.process(ops.datapath,char_level_op=True,split_dev_op=False)
char_vocab = ops.char_cnn_config["alphabet"]["en"]["lower"]["alphabet"]
ops.number_of_characters = len(char_vocab)
ops.embedding_dim = ops.number_of_characters
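# Because characters are one-hot encoded below, the embedding dimension equals
# the alphabet size.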

# load the data set
if ops.task == 'yelp_p':
    data_bundle = YelpPolarityPipe(lower=ops.to_lower, tokenizer=ops.tokenizer).process_from_file()
elif ops.task == 'yelp_f':
    data_bundle = YelpFullPipe(lower=ops.to_lower, tokenizer=ops.tokenizer).process_from_file()
elif ops.task == 'imdb':
    data_bundle = IMDBPipe(lower=ops.to_lower, tokenizer=ops.tokenizer).process_from_file()
elif ops.task == 'sst-2':
    data_bundle = SST2Pipe(lower=ops.to_lower, tokenizer=ops.tokenizer).process_from_file()
else:
    raise RuntimeError(f'Task {ops.task} is not supported yet!')
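# Each pipe returns a fastNLP DataBundle bundling the datasets and vocabularies;
# the raw text lives in the 'raw_words' field that the conversions below consume.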

def wordtochar(words):
    """Flatten a token sequence into a character sequence, separating words with ''."""
    chars = []
    for word in words:
        #word = word.lower()
        for char in word:
            chars.append(char)
        chars.append('')
    chars.pop()  # drop the trailing separator
    return chars
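# e.g. wordtochar(['hi', 'yo']) -> ['h', 'i', '', 'y', 'o']; the empty-string
# separator is not in char_vocab, so chartoindex below maps it to the padding index.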

# char to index
def chartoindex(chars):
    max_seq_len = ops.max_length
    zero_index = len(char_vocab)
    char_index_list = []
    for char in chars:
        if char in char_vocab:
            char_index_list.append(char_vocab.index(char))
        else:
            # both <unk> and <pad> use the last index as their embedding
            char_index_list.append(zero_index)
    # truncate or pad to exactly max_seq_len
    if len(char_index_list) > max_seq_len:
        char_index_list = char_index_list[:max_seq_len]
    elif 0 < len(char_index_list) < max_seq_len:
        char_index_list = char_index_list + [zero_index] * (max_seq_len - len(char_index_list))
    elif len(char_index_list) == 0:
        char_index_list = [zero_index] * max_seq_len
    return char_index_list
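# e.g. if ops.max_length were 4: chartoindex(['a', 'b']) -> [0, 1, 69, 69]; known
# characters map to their alphabet position, padding/<unk> to index 69.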

for dataset in data_bundle.datasets.values():
    dataset.apply_field(wordtochar, field_name="raw_words", new_field_name='chars')
    dataset.apply_field(chartoindex, field_name='chars', new_field_name='chars')
data_bundle.datasets['train'].set_input('chars')
data_bundle.datasets['test'].set_input('chars')
data_bundle.datasets['train'].set_target('target')
data_bundle.datasets['test'].set_target('target')
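# set_input/set_target tell the fastNLP Trainer which fields are fed to
# model.forward as keyword arguments and which are passed to the loss and metrics.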

## 2. Define/assemble the model. Anything goes here: for a model fastNLP already
## wraps, such as CNNText, just instantiate it directly. The class below is only a
## placeholder skeleton for building a model that follows fastNLP's I/O conventions.
class ModelFactory(nn.Module):
    """
    Assembles embedding, encoder and decoder, and defines the forward pass.

    :param embedding: embedding model
    :param encoder: encoder model
    :param decoder: decoder model
    """
    def __init__(self, embedding, encoder, decoder, **kwargs):
        super(ModelFactory, self).__init__()
        self.embedding = embedding
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, x):
        return {C.OUTPUT: None}
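# fastNLP expects forward() to return a dict; Const.OUTPUT ('pred') is the key
# from which the loss and metrics read the predictions.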

## 2. Or reuse one of fastNLP's models directly
#vocab=datainfo.vocabs['words']
vocab_label = data_bundle.vocabs['target']
'''
# embed_char=CNNCharEmbedding(vocab)
# embed_word = StaticEmbedding(vocab, model_dir_or_name='en-glove-6b-50', requires_grad=True)
# embedding=StackEmbedding([embed_char, embed_word])
# cnn_char_embed = CNNCharEmbedding(vocab)
# lstm_char_embed = LSTMCharEmbedding(vocab)
# embedding = StackEmbedding([cnn_char_embed, lstm_char_embed])
'''

# one-hot embedding: row i is the one-hot vector for character i; the extra last
# row stays all-zero and serves as both the padding and <unk> embedding
embedding_weight = torch.zeros(len(char_vocab) + 1, len(char_vocab))
for i in range(len(char_vocab)):
    embedding_weight[i][i] = 1
embedding = nn.Embedding(num_embeddings=len(char_vocab) + 1, embedding_dim=len(char_vocab),
                         padding_idx=len(char_vocab), _weight=embedding_weight)
for para in embedding.parameters():
    para.requires_grad = False
# CNNText is far too simple for this task
#model=CNNText(init_embed=embedding, num_classes=ops.num_classes)
model = CharacterLevelCNN(ops, embedding)
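# Sanity check (illustrative): embedding(torch.tensor([0])) yields the one-hot
# row for 'a', while index len(char_vocab) (the padding row) yields all zeros.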

## 3. Declare the loss, metric and optimizer
loss = CrossEntropyLoss
metric = AccuracyMetric
#optimizer= SGD([param for param in model.parameters() if param.requires_grad==True], lr=ops.lr)
optimizer = SGD([param for param in model.parameters() if param.requires_grad],
                lr=ops.lr, momentum=0.9, weight_decay=ops.weight_decay)
callbacks = []
# callbacks.append(LRScheduler(CosineAnnealingLR(optimizer, 5)))
# LambdaLR multiplies the base lr by the factor returned by the lambda, so return
# 1.0 (not ops.lr) to keep the initial lr and 0.1 to decay after 80% of the epochs.
callbacks.append(
    LRScheduler(LambdaLR(optimizer,
                         lambda epoch: 1.0 if epoch < ops.train_epoch * 0.8 else 0.1))
)
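# With train_epoch=50 and lr=0.01 this holds the lr at 0.01 for the first 40
# epochs, then drops it to 0.001 for the rest of training.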

## 4. Define the train method
def train(model, datainfo, loss, metrics, optimizer, num_epochs=100):
    trainer = Trainer(datainfo.datasets['train'], model, optimizer=optimizer,
                      loss=loss(target='target'), batch_size=ops.batch_size,
                      metrics=[metrics(target='target')], dev_data=datainfo.datasets['test'],
                      device=[0, 1, 2], check_code_level=-1,
                      n_epochs=num_epochs, callbacks=callbacks)
    print(trainer.train())


if __name__ == "__main__":
    #print(vocab_label)
    #print(datainfo.datasets["train"])
    train(model, data_bundle, loss, metric, optimizer, num_epochs=ops.train_epoch)