fastnlp_tutorial_1203.ipynb
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"fastNLP Quick-Start Tutorial\n",
"-------\n",
"\n",
"fastNLP provides convenient utilities for data preprocessing and for training and testing models."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/yh/miniconda2/envs/python3/lib/python3.6/site-packages/tqdm/autonotebook/__init__.py:14: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n",
" \" (e.g. in jupyter console)\", TqdmExperimentalWarning)\n"
]
}
],
"source": [
"import sys\n",
"sys.path.append('/Users/yh/Desktop/fastNLP/fastNLP/')\n",
"\n",
"import fastNLP as fnlp"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"DataSet & Instance\n",
"------\n",
"\n",
"fastNLP stores and processes data with DataSet and Instance. A DataSet represents a dataset and an Instance represents a single sample; a DataSet holds many Instances, and each Instance can store arbitrary user-defined fields.\n",
"\n",
"A number of read_* methods make it easy to load data from files into a DataSet."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'raw_sentence': A series of escapades demonstrating the adage that what is good for the goose is also good for the gander , some of which occasionally amuses but none of which amounts to much of a story .,\n",
"'label': 1}\n"
]
}
],
"source": [
"from fastNLP import DataSet\n",
"from fastNLP import Instance\n",
"\n",
"# Read data from a CSV file into a DataSet\n",
"dataset = DataSet.read_csv('sentence.csv', headers=('raw_sentence', 'label'), sep='\\t')\n",
"print(dataset[0])"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'raw_sentence': fake data,\n",
"'label': 0}"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# DataSet.append(Instance) adds a new sample\n",
"\n",
"dataset.append(Instance(raw_sentence='fake data', label='0'))\n",
"dataset[-1]"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"# DataSet.apply(func, new_field_name) preprocesses the data\n",
"\n",
"# lowercase all sentences\n",
"dataset.apply(lambda x: x['raw_sentence'].lower(), new_field_name='raw_sentence')\n",
"# convert the labels to int\n",
"dataset.apply(lambda x: int(x['label']), new_field_name='label_seq', is_target=True)\n",
"# drop sentences that are empty after splitting on whitespace\n",
"dataset.drop(lambda x: len(x['raw_sentence'].split()) == 0)\n",
"# split each sentence on whitespace\n",
"def split_sent(ins):\n",
"    return ins['raw_sentence'].split()\n",
"dataset.apply(split_sent, new_field_name='words', is_input=True)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"# DataSet.drop(func) filters out samples for which func returns True,\n",
"# e.g. drop samples with three or fewer words:\n",
"# dataset.drop(lambda x: len(x['words']) <= 3)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Train size: 5971\n",
"Test size: 2558\n"
]
}
],
"source": [
"# split off a training set and a test set\n",
"\n",
"train_data, test_data = dataset.split(0.3)\n",
"print(\"Train size: \", len(train_data))\n",
"print(\"Test size: \", len(test_data))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Vocabulary\n",
"------\n",
"\n",
"fastNLP's Vocabulary makes it easy to build a vocabulary and map words to integer indices."
]
},
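{
"cell_type": "markdown",
"metadata": {},
"source": [
"Before applying it to the DataSet, here is a tiny standalone sketch of the Vocabulary calls used in the next cell (add, build_vocab, to_index); the toy words are purely illustrative."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# standalone sketch of the Vocabulary API; the toy words are illustrative\n",
"from fastNLP import Vocabulary\n",
"\n",
"toy_vocab = Vocabulary(min_freq=1)\n",
"for word in ['hello', 'world', 'hello']:\n",
"    toy_vocab.add(word)          # count every occurrence\n",
"toy_vocab.build_vocab()          # freeze the word-to-index mapping\n",
"print(toy_vocab.to_index('hello'), toy_vocab.to_index('world'))"
]
},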
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'raw_sentence': gussied up with so many distracting special effects and visual party tricks that it 's not clear whether we 're supposed to shriek or laugh .,\n",
"'label': 1,\n",
"'label_seq': 1,\n",
"'words': ['gussied', 'up', 'with', 'so', 'many', 'distracting', 'special', 'effects', 'and', 'visual', 'party', 'tricks', 'that', 'it', \"'s\", 'not', 'clear', 'whether', 'we', \"'re\", 'supposed', 'to', 'shriek', 'or', 'laugh', '.'],\n",
"'word_seq': [1, 65, 16, 43, 108, 1, 329, 433, 7, 319, 1313, 1, 12, 10, 11, 27, 1428, 567, 86, 134, 1949, 8, 1, 49, 506, 2]}\n"
]
}
],
"source": [
"from fastNLP import Vocabulary\n",
"\n",
"# build the vocabulary with Vocabulary.add(word)\n",
"vocab = Vocabulary(min_freq=2)\n",
"train_data.apply(lambda x: [vocab.add(word) for word in x['words']])\n",
"vocab.build_vocab()\n",
"\n",
"# index the sentences with Vocabulary.to_index(word)\n",
"train_data.apply(lambda x: [vocab.to_index(word) for word in x['words']], new_field_name='word_seq', is_input=True)\n",
"test_data.apply(lambda x: [vocab.to_index(word) for word in x['words']], new_field_name='word_seq', is_input=True)\n",
"\n",
"print(test_data[0])"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"batch_x has: {'words': array([list(['this', 'kind', 'of', 'hands-on', 'storytelling', 'is', 'ultimately', 'what', 'makes', 'shanghai', 'ghetto', 'move', 'beyond', 'a', 'good', ',', 'dry', ',', 'reliable', 'textbook', 'and', 'what', 'allows', 'it', 'to', 'rank', 'with', 'its', 'worthy', 'predecessors', '.']),\n",
" list(['the', 'entire', 'movie', 'is', 'filled', 'with', 'deja', 'vu', 'moments', '.'])],\n",
" dtype=object), 'word_seq': tensor([[ 19, 184, 6, 1, 481, 9, 206, 50, 91, 1210, 1609, 1330,\n",
" 495, 5, 63, 4, 1269, 4, 1, 1184, 7, 50, 1050, 10,\n",
" 8, 1611, 16, 21, 1039, 1, 2],\n",
" [ 3, 711, 22, 9, 1282, 16, 2482, 2483, 200, 2, 0, 0,\n",
" 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
" 0, 0, 0, 0, 0, 0, 0]])}\n",
"batch_y has: {'label_seq': tensor([3, 2])}\n"
]
}
],
"source": [
"# for projects such as reinforcement learning or GANs, you can also iterate over the data yourself with Batch\n",
"from fastNLP.core.batch import Batch\n",
"from fastNLP.core.sampler import RandomSampler\n",
"\n",
"batch_iterator = Batch(dataset=train_data, batch_size=2, sampler=RandomSampler())\n",
"for batch_x, batch_y in batch_iterator:\n",
"    print(\"batch_x has: \", batch_x)\n",
"    print(\"batch_y has: \", batch_y)\n",
"    break"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Model\n"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"CNNText(\n",
" (embed): Embedding(\n",
" (embed): Embedding(3470, 50, padding_idx=0)\n",
" (dropout): Dropout(p=0.0)\n",
" )\n",
" (conv_pool): ConvMaxpool(\n",
" (convs): ModuleList(\n",
" (0): Conv1d(50, 3, kernel_size=(3,), stride=(1,), padding=(2,))\n",
" (1): Conv1d(50, 4, kernel_size=(4,), stride=(1,), padding=(2,))\n",
" (2): Conv1d(50, 5, kernel_size=(5,), stride=(1,), padding=(2,))\n",
" )\n",
" )\n",
" (dropout): Dropout(p=0.1)\n",
" (fc): Linear(\n",
" (linear): Linear(in_features=12, out_features=5, bias=True)\n",
" )\n",
")"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# define a simple PyTorch model\n",
"\n",
"from fastNLP.models import CNNText\n",
"model = CNNText(embed_num=len(vocab), embed_dim=50, num_classes=5, padding=2, dropout=0.1)\n",
"model"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Trainer & Tester\n",
"------\n",
"\n",
"Use fastNLP's Trainer to train the model"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"from fastNLP import Trainer\n",
"from copy import deepcopy\n",
"from fastNLP.core.losses import CrossEntropyLoss\n",
"from fastNLP.core.metrics import AccuracyMetric"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"training epochs started 2018-12-05 15:37:15\n"
]
},
{
"data": {
"text/plain": [
"HBox(children=(IntProgress(value=0, layout=Layout(flex='2'), max=1870), HTML(value='')), layout=Layout(display…"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/10. Step:187/1870. AccuracyMetric: acc=0.351365\n",
"Epoch 2/10. Step:374/1870. AccuracyMetric: acc=0.470943\n",
"Epoch 3/10. Step:561/1870. AccuracyMetric: acc=0.600402\n",
"Epoch 4/10. Step:748/1870. AccuracyMetric: acc=0.702227\n",
"Epoch 5/10. Step:935/1870. AccuracyMetric: acc=0.79099\n",
"Epoch 6/10. Step:1122/1870. AccuracyMetric: acc=0.846424\n",
"Epoch 7/10. Step:1309/1870. AccuracyMetric: acc=0.874058\n",
"Epoch 8/10. Step:1496/1870. AccuracyMetric: acc=0.898844\n",
"Epoch 9/10. Step:1683/1870. AccuracyMetric: acc=0.910568\n",
"Epoch 10/10. Step:1870/1870. AccuracyMetric: acc=0.921286\n",
"\r"
]
}
],
"source": [
"# overfitting sanity check: train and validate on the same data\n",
"copy_model = deepcopy(model)\n",
"overfit_trainer = Trainer(model=copy_model,\n",
"                          train_data=test_data,\n",
"                          dev_data=test_data,\n",
"                          losser=CrossEntropyLoss(pred=\"output\", target=\"label_seq\"),\n",
"                          metrics=AccuracyMetric(),\n",
"                          n_epochs=10,\n",
"                          save_path=None)\n",
"overfit_trainer.train()"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"training epochs started 2018-12-05 15:37:41\n"
]
},
{
"data": {
"text/plain": [
"HBox(children=(IntProgress(value=0, layout=Layout(flex='2'), max=400), HTML(value='')), layout=Layout(display=…"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\r"
]
},
{
"ename": "AttributeError",
"evalue": "'NoneType' object has no attribute 'squeeze'",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-12-5603b8b11a82>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0mn_epochs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m5\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8\u001b[0m save_path='save/')\n\u001b[0;32m----> 9\u001b[0;31m \u001b[0mtrainer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 10\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Train finished!'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/Desktop/fastNLP/fastNLP/fastNLP/core/trainer.py\u001b[0m in \u001b[0;36mtrain\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 163\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_summary_writer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mSummaryWriter\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpath\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 164\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0muse_tqdm\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 165\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_tqdm_train\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 166\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 167\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_print_train\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/Desktop/fastNLP/fastNLP/fastNLP/core/trainer.py\u001b[0m in \u001b[0;36m_tqdm_train\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 206\u001b[0m \u001b[0mpbar\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwrite\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0meval_str\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 207\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalidate_every\u001b[0m \u001b[0;34m<\u001b[0m \u001b[0;36m0\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdev_data\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 208\u001b[0;31m \u001b[0meval_res\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_do_validation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 209\u001b[0m \u001b[0meval_str\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m\"Epoch {}/{}. Step:{}/{}. \"\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mepoch\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mn_epochs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtotal_steps\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m\\\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 210\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtester\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_format_eval_results\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0meval_res\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/Desktop/fastNLP/fastNLP/fastNLP/core/trainer.py\u001b[0m in \u001b[0;36m_do_validation\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 265\u001b[0m \u001b[0mres\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtester\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtest\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 266\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnum\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mres\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mitems\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 267\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_summary_writer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0madd_scalar\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"valid_{}\"\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnum\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mglobal_step\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 268\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msave_path\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_better_eval_result\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mres\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 269\u001b[0m \u001b[0mmetric_key\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmetric_key\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmetric_key\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0;34m\"None\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/miniconda2/envs/python3/lib/python3.6/site-packages/tensorboardX/writer.py\u001b[0m in \u001b[0;36madd_scalar\u001b[0;34m(self, tag, scalar_value, global_step, walltime)\u001b[0m\n\u001b[1;32m 332\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_check_caffe2\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mscalar_value\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 333\u001b[0m \u001b[0mscalar_value\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mworkspace\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mFetchBlob\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mscalar_value\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 334\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfile_writer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0madd_summary\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mscalar\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtag\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mscalar_value\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mglobal_step\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mwalltime\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 335\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 336\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0madd_scalars\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmain_tag\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtag_scalar_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mglobal_step\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mwalltime\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/miniconda2/envs/python3/lib/python3.6/site-packages/tensorboardX/summary.py\u001b[0m in \u001b[0;36mscalar\u001b[0;34m(name, scalar, collections)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0mname\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_clean_tag\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0mscalar\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmake_np\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mscalar\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32massert\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mscalar\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msqueeze\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mndim\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'scalar should be 0D'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0mscalar\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfloat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mscalar\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mSummary\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mSummary\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mValue\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtag\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msimple_value\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mscalar\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mAttributeError\u001b[0m: 'NoneType' object has no attribute 'squeeze'"
],
"output_type": "error"
}
],
"source": [
"# instantiate a Trainer with the model and data, then train\n",
"trainer = Trainer(model=model,\n",
"                  train_data=train_data,\n",
"                  dev_data=test_data,\n",
"                  losser=CrossEntropyLoss(pred=\"output\", target=\"label_seq\"),\n",
"                  metrics=AccuracyMetric(),\n",
"                  n_epochs=5,\n",
"                  save_path='save/')\n",
"trainer.train()\n",
"print('Train finished!')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from fastNLP import Tester\n",
"\n",
"# evaluate the trained model on the test set\n",
"tester = Tester(data=test_data, model=model, metrics=AccuracyMetric())\n",
"acc = tester.test()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# In summary\n",
"\n",
"## Pseudocode logic of the fastNLP Trainer\n",
"### 1. Prepare a DataSet; suppose it contains the following fields\n",
"    ['raw_sentence', 'word_seq1', 'word_seq2', 'raw_label', 'label']\n",
"    Mark 'word_seq1' and 'word_seq2' as input via\n",
"    DataSet.set_input('word_seq1', 'word_seq2', flag=True)\n",
"    Mark 'label' as target via\n",
"    DataSet.set_target('label', flag=True)\n",
"### 2. Initialize the model\n",
"    class Model(nn.Module):\n",
"        def __init__(self):\n",
"            xxx\n",
"        def forward(self, word_seq1, word_seq2):\n",
"            # (1) The parameter names used here must match the names of the input fields in the DataSet, because values are assigned by parameter name.\n",
"            # (2) There may be more input fields than parameters here, but not fewer.\n",
"            xxxx\n",
"            # the output must be a dict\n",
"### 3. The Trainer's training loop\n",
"    (1) Take a batch of batch_size samples from the DataSet and call Model.forward.\n",
"    (2) Pass the result of Model.forward together with the fields marked as target into the losser.\n",
"        Different models may key the output dict of Model.forward differently, e.g. {'pred': xxx} vs. {'output': xxx},\n",
"        and the target field may be named differently as well, e.g. 'label' vs. 'target'.\n",
"        To resolve this, the loss provides a mapping mechanism.\n",
"        For example, CrossEntropyLoss expects the inputs (pred, target); if forward outputs {'output': xxx} and the target field is 'label',\n",
"        simply initialize the loss as CrossEntropyLoss(pred='output', target='label').\n",
"    (3) The same applies to Metrics:\n",
"        a Metric also takes its values from the output of forward and from the fields marked as target, and the same mapping mechanism applies.\n",
"\n",
"## Some questions\n",
"### 1. Why do input and target need to be set on the DataSet?\n",
"    Only fields marked as input or target are fetched during training:\n",
"    (1.1) only the fields marked as input are searched when filling the parameters of Model.forward;\n",
"    (1.2) the values passed to the losser or metrics come from\n",
"        (a) the output of Model.forward and\n",
"        (b) the fields marked as target.\n",
"\n",
"### 2. Fields in the DataSet are bound to forward's parameters by name\n",
"    For example, if 'x' and 'seq_lens' are input fields in the DataSet, forward should be\n",
"    def forward(self, x, seq_lens):\n",
"        pass\n",
"    Fields are matched to parameters by parameter name.\n",
"\n",
"## Workflow recap\n",
"### 1. Load the data into a DataSet\n",
"### 2. Preprocess the DataSet with apply\n",
"    (2.1) During preprocessing, mark some fields as input and some as target.\n",
"### 3. Build the model (see the sketch after this summary)\n",
"    (3.1) The parameter names of the model's forward must match the names of the fields marked as input in the DataSet.\n",
"        For example, if 'x' and 'seq_lens' are input fields, forward should be\n",
"        def forward(self, x, seq_lens):\n",
"            pass\n",
"        Fields are matched to parameters by parameter name.\n",
"    (3.2) The output of the model's forward must be a dict;\n",
"        it is recommended to use {\"pred\": xx} as the output."
]
},
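{
"cell_type": "markdown",
"metadata": {},
"source": [
"Below is a minimal, hypothetical sketch of the workflow summarized above: a toy model whose forward parameter names match the input fields and whose output is a dict, wired to the Trainer through the pred/target mapping. ToyModel and its sizes are illustrative assumptions, not part of fastNLP, and the train() call is left commented out."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# minimal sketch of the summarized workflow; ToyModel is hypothetical\n",
"import torch.nn as nn\n",
"\n",
"class ToyModel(nn.Module):\n",
"    def __init__(self, vocab_size, embed_dim=50, num_classes=5):\n",
"        super().__init__()\n",
"        self.embed = nn.Embedding(vocab_size, embed_dim)\n",
"        self.fc = nn.Linear(embed_dim, num_classes)\n",
"\n",
"    def forward(self, word_seq):\n",
"        # the parameter name 'word_seq' matches the input field of the same name\n",
"        emb = self.embed(word_seq)          # [batch, seq_len, embed_dim]\n",
"        pooled = emb.mean(dim=1)            # average over the sequence\n",
"        return {\"output\": self.fc(pooled)}  # forward must return a dict\n",
"\n",
"# mark the fields explicitly, as described above\n",
"train_data.set_input(\"word_seq\", flag=True)\n",
"train_data.set_target(\"label_seq\", flag=True)\n",
"\n",
"# map the output key \"output\" and the target field \"label_seq\" for both loss and metric\n",
"toy_trainer = Trainer(model=ToyModel(len(vocab)),\n",
"                      train_data=train_data,\n",
"                      dev_data=test_data,\n",
"                      losser=CrossEntropyLoss(pred=\"output\", target=\"label_seq\"),\n",
"                      metrics=AccuracyMetric(pred=\"output\", target=\"label_seq\"),\n",
"                      n_epochs=1,\n",
"                      save_path=None)\n",
"# toy_trainer.train()"
]
}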
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}