test_tutorials.py
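"""Smoke tests for the fastNLP tutorials: the 10-minute tutorial, the 1-minute
tutorial, and the advanced tutorial (DataSet, Instance, Vocabulary, Trainer,
and Tester)."""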
import unittest

from fastNLP import DataSet
from fastNLP import Instance
from fastNLP import Vocabulary
from fastNLP.core.losses import CrossEntropyLoss
from fastNLP.core.metrics import AccuracyMetric


class TestTutorial(unittest.TestCase):
    def test_fastnlp_10min_tutorial(self):
        # Read data from a CSV file into a DataSet
        sample_path = "test/data_for_tests/tutorial_sample_dataset.csv"
        dataset = DataSet.read_csv(sample_path, headers=('raw_sentence', 'label'),
                                   sep='\t')
        print(len(dataset))
        print(dataset[0])
        print(dataset[-3])

        dataset.append(Instance(raw_sentence='fake data', label='0'))
        # Lowercase all the text
        dataset.apply(lambda x: x['raw_sentence'].lower(), new_field_name='raw_sentence')
        # Convert the label to int
        dataset.apply(lambda x: int(x['label']), new_field_name='label')

        # Split sentences on whitespace
        def split_sent(ins):
            return ins['raw_sentence'].split()

        dataset.apply(split_sent, new_field_name='words')
        # Add sequence length information
        dataset.apply(lambda x: len(x['words']), new_field_name='seq_len')
        print(len(dataset))
        print(dataset[0])

        # Filter out examples with DataSet.drop(func)
        dataset.drop(lambda x: x['seq_len'] <= 3, inplace=True)
        print(len(dataset))

        # Declare which fields in the DataSet should be converted to tensors.
        # set target: the gold labels used when computing the loss and evaluating
        dataset.set_target("label")
        # set input: the fields passed to the model's forward()
        dataset.set_input("words", "seq_len")

        # Split off test and train sets
        test_data, train_data = dataset.split(0.5)
        print(len(test_data))
        print(len(train_data))

        # Build the vocabulary with Vocabulary.add(word)
        vocab = Vocabulary(min_freq=2)
        train_data.apply(lambda x: [vocab.add(word) for word in x['words']])
        vocab.build_vocab()

        # Index the sentences with Vocabulary.to_index(word)
        train_data.apply(lambda x: [vocab.to_index(word) for word in x['words']], new_field_name='words')
        test_data.apply(lambda x: [vocab.to_index(word) for word in x['words']], new_field_name='words')
        print(test_data[0])

        # These preprocessing utilities can also be used for projects such as
        # reinforcement learning or GANs
        from fastNLP.core.batch import Batch
        from fastNLP.core.sampler import RandomSampler

        batch_iterator = Batch(dataset=train_data, batch_size=2, sampler=RandomSampler())
        for batch_x, batch_y in batch_iterator:
            print("batch_x has: ", batch_x)
            print("batch_y has: ", batch_y)
            break

        from fastNLP.models import CNNText
        model = CNNText((len(vocab), 50), num_classes=5, padding=2, dropout=0.1)

        from fastNLP import Trainer
        from copy import deepcopy

        # Rename the corresponding DataSet fields to match the parameter names
        # of the model's forward()
        train_data.rename_field('label', 'label_seq')
        test_data.rename_field('label', 'label_seq')

        loss = CrossEntropyLoss(pred="output", target="label_seq")
        metric = AccuracyMetric(pred="predict", target="label_seq")

        # Instantiate a Trainer with the model and data, then train.
        # First overfit on test_data (a sanity check that the model is implemented correctly)
        copy_model = deepcopy(model)
        overfit_trainer = Trainer(model=copy_model, train_data=test_data, dev_data=test_data,
                                  loss=loss,
                                  metrics=metric,
                                  save_path=None,
                                  batch_size=32,
                                  n_epochs=5)
        overfit_trainer.train()

        # Train on train_data, validate on test_data
        trainer = Trainer(model=model, train_data=train_data, dev_data=test_data,
                          loss=CrossEntropyLoss(pred="output", target="label_seq"),
                          metrics=AccuracyMetric(pred="predict", target="label_seq"),
                          save_path=None,
                          batch_size=32,
                          n_epochs=5)
        trainer.train()
        print('Train finished!')

        # Use a Tester to evaluate on test_data
        from fastNLP import Tester
        tester = Tester(data=test_data, model=model, metrics=AccuracyMetric(pred="predict", target="label_seq"),
                        batch_size=4)
        acc = tester.test()
        print(acc)
    def test_fastnlp_1min_tutorial(self):
        # tutorials/fastnlp_1min_tutorial.ipynb
        data_path = "test/data_for_tests/tutorial_sample_dataset.csv"
        ds = DataSet.read_csv(data_path, headers=('raw_sentence', 'label'), sep='\t')
        print(ds[1])

        # Lowercase all the text
        ds.apply(lambda x: x['raw_sentence'].lower(), new_field_name='raw_sentence')
        # Convert the label to int
        ds.apply(lambda x: int(x['label']), new_field_name='target', is_target=True)

        # Split sentences on whitespace
        def split_sent(ins):
            return ins['raw_sentence'].split()

        ds.apply(split_sent, new_field_name='words', is_input=True)

        # Split into train/dev sets
        train_data, dev_data = ds.split(0.3)
        print("Train size: ", len(train_data))
        print("Test size: ", len(dev_data))

        from fastNLP import Vocabulary
        vocab = Vocabulary(min_freq=2)
        train_data.apply(lambda x: [vocab.add(word) for word in x['words']])
        # Index the sentences with Vocabulary.to_index(word)
        train_data.apply(lambda x: [vocab.to_index(word) for word in x['words']], new_field_name='words',
                         is_input=True)
        dev_data.apply(lambda x: [vocab.to_index(word) for word in x['words']], new_field_name='words',
                       is_input=True)

        from fastNLP.models import CNNText
        model = CNNText((len(vocab), 50), num_classes=5, padding=2, dropout=0.1)

        from fastNLP import Trainer, CrossEntropyLoss, AccuracyMetric, Adam
        trainer = Trainer(model=model,
                          train_data=train_data,
                          dev_data=dev_data,
                          loss=CrossEntropyLoss(),
                          optimizer=Adam(),
                          metrics=AccuracyMetric(target='target'))
        trainer.train()
        print('Train finished!')
    def test_fastnlp_advanced_tutorial(self):
        import os
        os.chdir("test/tutorials/fastnlp_advanced_tutorial")

        from fastNLP import DataSet
        from fastNLP import Instance
        from fastNLP import Vocabulary
        from fastNLP import Trainer
        from fastNLP import Tester

        # ### Instance
        # An Instance represents a single example, made up of one or more fields
        # (attributes/features), each with its own name and value.
        # When constructing an Instance, declare its fields as "field_name=field_value".

        # In[2]:
        # Build an Instance consisting of three fields: premise, hypothesis, and label
        instance = Instance(premise='an premise example .', hypothesis='an hypothesis example.', label=1)
        instance

        # In[3]:
        data_set = DataSet([instance] * 5)
        data_set.append(instance)
        data_set[-2:]

        # In[4]:
        # An instance whose field type differs from the dataset's corresponding
        # field type can still be appended
        instance2 = Instance(premise='the second premise example .', hypothesis='the second hypothesis example.',
                             label='1')
        try:
            data_set.append(instance2)
        except Exception:
            pass
        data_set[-2:]

        # In[5]:
        # If a field name does not match, the instance cannot be appended to the dataset
        instance3 = Instance(premises='the third premise example .', hypothesis='the third hypothesis example.',
                             label=1)
        try:
            data_set.append(instance3)
        except Exception:
            print('cannot append instance')
        data_set[-2:]

        # In[6]:
        # Besides text, a tensor can also be the value of a field
        import torch
        tensor_ins = Instance(image=torch.randn(5, 5), label=0)
        ds = DataSet()
        ds.append(tensor_ins)
        ds
        from fastNLP import DataSet
        from fastNLP import Instance

        # Read data from a CSV file into a DataSet.
        # Any CSV-like file, i.e. one example per line, can be read this way
        dataset = DataSet.read_csv('tutorial_sample_dataset.csv', headers=('raw_sentence', 'label'), sep='\t')

        # Check the size of the DataSet
        len(dataset)

        # In[8]:
        # Use integer indexing [k] to get the k-th example
        dataset[0]

        # In[9]:
        # The returned example is an Instance
        type(dataset[0])

        # In[10]:
        # Use slicing [a: b] to get examples a through b
        dataset[0: 3]

        # In[11]:
        # Indices can also be negative
        dataset[-1]

        data_path = ['premise', 'hypothesis', 'label']
        # Read the files
        with open(data_path[0]) as f:
            premise = f.readlines()
        with open(data_path[1]) as f:
            hypothesis = f.readlines()
        with open(data_path[2]) as f:
            label = f.readlines()

        assert len(premise) == len(hypothesis) and len(hypothesis) == len(label)

        # Assemble the DataSet
        data_set = DataSet()
        for p, h, l in zip(premise, hypothesis, label):
            p = p.strip()  # strip trailing whitespace
            h = h.strip()  # strip trailing whitespace
            data_set.append(Instance(premise=p, hypothesis=h, truth=l))
        data_set[0]

        # ### Other DataSet operations
        # After a DataSet has been built, its contents can still be modified;
        # the interface is DataSet.apply()

        # In[13]:
        # Lowercase all text in the premise field
        data_set.apply(lambda x: x['premise'].lower(), new_field_name='premise')
        data_set[-2:]

        # In[14]:
        # Convert the label to int
        data_set.apply(lambda x: int(x['truth']), new_field_name='truth')
        data_set[-2:]

        # In[15]:
        # Split sentences on whitespace
        def split_sent(ins):
            return ins['premise'].split()

        data_set.apply(split_sent, new_field_name='premise')
        data_set.apply(lambda x: x['hypothesis'].split(), new_field_name='hypothesis')
        data_set[-2:]

        # In[16]:
        # Filter the data
        origin_data_set_len = len(data_set)
        data_set.drop(lambda x: len(x['premise']) <= 6, inplace=True)
        origin_data_set_len, len(data_set)

        # In[17]:
        # Add length (mask) information
        data_set.apply(lambda x: [1] * len(x['premise']), new_field_name='premise_len')
        data_set.apply(lambda x: [1] * len(x['hypothesis']), new_field_name='hypothesis_len')
        data_set[-1]

        # In[18]:
        # Declare input (feature) and target (label) fields
        data_set.set_input("premise", "premise_len", "hypothesis", "hypothesis_len")
        data_set.set_target("truth")

        # In[19]:
        # Rename a field
        data_set.rename_field('truth', 'label')
        data_set[-1]

        # In[20]:
        # Split into train, dev, and test sets
        train_data, vad_data = data_set.split(0.5)
        dev_data, test_data = vad_data.split(0.4)
        len(train_data), len(dev_data), len(test_data)

        # In[21]:
        # Deep-copy the datasets
        import copy
        train_data_2, dev_data_2 = copy.deepcopy(train_data), copy.deepcopy(dev_data)
        del copy
        # Initialize the vocabulary: at most 10000 entries (max_size), each word
        # must appear at least twice (min_freq); '<unk>' marks unknown words and
        # '<pad>' marks the padding token.
        # Vocabulary's default parameters are max_size=None, min_freq=None,
        # unknown='<unk>', padding='<pad>'
        vocab = Vocabulary(max_size=10000, min_freq=2, unknown='<unk>', padding='<pad>')

        # Build the vocabulary
        train_data.apply(lambda x: [vocab.add(word) for word in x['premise']])
        train_data.apply(lambda x: [vocab.add(word) for word in x['hypothesis']])
        vocab.build_vocab()

        # In[23]:
        # Index the sentences with the vocabulary
        train_data.apply(lambda x: [vocab.to_index(word) for word in x['premise']], new_field_name='premise')
        train_data.apply(lambda x: [vocab.to_index(word) for word in x['hypothesis']], new_field_name='hypothesis')
        dev_data.apply(lambda x: [vocab.to_index(word) for word in x['premise']], new_field_name='premise')
        dev_data.apply(lambda x: [vocab.to_index(word) for word in x['hypothesis']], new_field_name='hypothesis')
        test_data.apply(lambda x: [vocab.to_index(word) for word in x['premise']], new_field_name='premise')
        test_data.apply(lambda x: [vocab.to_index(word) for word in x['hypothesis']], new_field_name='hypothesis')
        train_data[-1], dev_data[-1], test_data[-1]

        # Read in a vocab file
        with open('vocab.txt', encoding='utf-8') as f:
            lines = f.readlines()
        vocabs = []
        for line in lines:
            vocabs.append(line.strip())

        # Instantiate a Vocabulary without default unknown/padding entries
        vocab_bert = Vocabulary(unknown=None, padding=None)
        # Add the list of words to the Vocabulary
        vocab_bert.add_word_lst(vocabs)
        # Build the vocabulary
        vocab_bert.build_vocab()
        # Update the token strings used for unknown and padding
        vocab_bert.unknown = '[UNK]'
        vocab_bert.padding = '[PAD]'

        # In[25]:
        # Index the sentences with the new vocabulary
        train_data_2.apply(lambda x: [vocab_bert.to_index(word) for word in x['premise']], new_field_name='premise')
        train_data_2.apply(lambda x: [vocab_bert.to_index(word) for word in x['hypothesis']],
                           new_field_name='hypothesis')
        dev_data_2.apply(lambda x: [vocab_bert.to_index(word) for word in x['premise']], new_field_name='premise')
        dev_data_2.apply(lambda x: [vocab_bert.to_index(word) for word in x['hypothesis']], new_field_name='hypothesis')
        train_data_2[-1], dev_data_2[-1]
        # Rename fields to match the model's forward() parameter names
        for data in [train_data, dev_data, test_data]:
            data.rename_field('premise', 'words1')
            data.rename_field('hypothesis', 'words2')
            data.rename_field('premise_len', 'seq_len1')
            data.rename_field('hypothesis_len', 'seq_len2')
            data.set_input('words1', 'words2', 'seq_len1', 'seq_len2')

        # step 1: load the model hyperparameters (optional)
        from fastNLP.io.config_io import ConfigSection, ConfigLoader
        args = ConfigSection()
        ConfigLoader().load_config("./data/config", {"esim_model": args})
        args["vocab_size"] = len(vocab)
        args.data

        # In[27]:
        # step 2: load the ESIM model
        from fastNLP.models import ESIM
        model = ESIM(**args.data)
        model

        # In[28]:
        # Another example: load the CNN text classification model
        from fastNLP.models import CNNText
        cnn_text_model = CNNText((len(vocab), 50), num_classes=5, padding=2, dropout=0.1)

        from fastNLP import CrossEntropyLoss
        from fastNLP import Adam
        from fastNLP import AccuracyMetric
        trainer = Trainer(
            train_data=train_data,
            model=model,
            loss=CrossEntropyLoss(pred='pred', target='label'),
            metrics=AccuracyMetric(target='label'),
            n_epochs=3,
            batch_size=16,
            print_every=-1,
            validate_every=-1,
            dev_data=dev_data,
            optimizer=Adam(lr=1e-3, weight_decay=0),
            check_code_level=-1,
            metric_key='acc',
            use_tqdm=False,
        )
        trainer.train()

        tester = Tester(
            data=test_data,
            model=model,
            metrics=AccuracyMetric(target='label'),
            batch_size=args["batch_size"],
        )
        tester.test()
    def setUp(self):
        # Remember the working directory, since test_fastnlp_advanced_tutorial chdirs away
        import os
        self._init_wd = os.path.abspath(os.curdir)

    def tearDown(self):
        # Restore the original working directory
        import os
        os.chdir(self._init_wd)
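
# Assumed entry point, not shown in the file view above: a standard way to run
# these tests directly with `python test_tutorials.py` instead of via a test runner.
if __name__ == '__main__':
    unittest.main()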