
fastnlp_10tmin_tutorial.ipynb

{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"fastNLP getting-started tutorial\n",
"-------\n",
"\n",
"fastNLP provides convenient utilities for data preprocessing and for training and testing models."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"DataSet & Instance\n",
"------\n",
"\n",
"fastNLP stores and processes data with DataSet and Instance. A DataSet represents one dataset and an Instance represents one sample; a DataSet holds many Instances, and each Instance can store whatever fields you choose.\n",
"\n",
"A number of read_* methods make it easy to load data from a file into a DataSet."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'raw_sentence': A series of escapades demonstrating the adage that what is good for the goose is also good for the gander , some of which occasionally amuses but none of which amounts to much of a story .,\n'label': 1}\n"
]
}
],
"source": [
"from fastNLP import DataSet\n",
"from fastNLP import Instance\n",
"\n",
"# Read data from a CSV file into a DataSet\n",
"win_path = \"C:\\\\Users\\\\zyfeng\\\\Desktop\\\\FudanNLP\\\\fastNLP\\\\test\\\\data_for_tests\\\\tutorial_sample_dataset.csv\"\n",
"dataset = DataSet.read_csv(win_path, headers=('raw_sentence', 'label'), sep='\\t')\n",
"print(dataset[0])"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'raw_sentence': fake data,\n'label': 0}"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Add a new sample with DataSet.append(Instance)\n",
"\n",
"dataset.append(Instance(raw_sentence='fake data', label='0'))\n",
"dataset[-1]"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"# Preprocess data with DataSet.apply(func, new_field_name)\n",
"\n",
"# Lowercase every sentence\n",
"dataset.apply(lambda x: x['raw_sentence'].lower(), new_field_name='raw_sentence')\n",
"# Convert label to int\n",
"dataset.apply(lambda x: int(x['label']), new_field_name='label_seq', is_target=True)\n",
"# Drop empty sentences, then split each sentence on whitespace\n",
"dataset.drop(lambda x: len(x['raw_sentence'].split()) == 0)\n",
"def split_sent(ins):\n",
"    return ins['raw_sentence'].split()\n",
"dataset.apply(split_sent, new_field_name='words', is_input=True)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"# Filter out samples with DataSet.drop(func)\n",
"# Drop instances with 3 or fewer words\n",
"dataset.drop(lambda x: len(x['words']) <= 3)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Train size:  54\n",
"Test size: "
]
}
],
"source": [
"# Split into training and test sets\n",
"\n",
"train_data, test_data = dataset.split(0.3)\n",
"print(\"Train size: \", len(train_data))\n",
"print(\"Test size: \", len(test_data))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Vocabulary\n",
"------\n",
"\n",
"Vocabulary in fastNLP makes it easy to build a vocabulary and convert words to indices."
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'raw_sentence': the plot is romantic comedy boilerplate from start to finish .,\n'label': 2,\n'label_seq': 2,\n'words': ['the', 'plot', 'is', 'romantic', 'comedy', 'boilerplate', 'from', 'start', 'to', 'finish', '.'],\n'word_seq': [2, 13, 9, 24, 25, 26, 15, 27, 11, 28, 3]}\n"
]
}
],
"source": [
"from fastNLP import Vocabulary\n",
"\n",
"# Build the vocabulary with Vocabulary.add(word)\n",
"vocab = Vocabulary(min_freq=2)\n",
"train_data.apply(lambda x: [vocab.add(word) for word in x['words']])\n",
"vocab.build_vocab()\n",
"\n",
"# Index sentences with Vocabulary.to_index(word)\n",
"train_data.apply(lambda x: [vocab.to_index(word) for word in x['words']], new_field_name='word_seq', is_input=True)\n",
"test_data.apply(lambda x: [vocab.to_index(word) for word in x['words']], new_field_name='word_seq', is_input=True)\n",
"\n",
"print(test_data[0])"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"batch_x has: {'words': array([list(['this', 'kind', 'of', 'hands-on', 'storytelling', 'is', 'ultimately', 'what', 'makes', 'shanghai', 'ghetto', 'move', 'beyond', 'a', 'good', ',', 'dry', ',', 'reliable', 'textbook', 'and', 'what', 'allows', 'it', 'to', 'rank', 'with', 'its', 'worthy', 'predecessors', '.']),\n",
" list(['the', 'entire', 'movie', 'is', 'filled', 'with', 'deja', 'vu', 'moments', '.'])],\n",
" dtype=object), 'word_seq': tensor([[ 19, 184, 6, 1, 481, 9, 206, 50, 91, 1210, 1609, 1330,\n",
" 495, 5, 63, 4, 1269, 4, 1, 1184, 7, 50, 1050, 10,\n",
" 8, 1611, 16, 21, 1039, 1, 2],\n",
" [ 3, 711, 22, 9, 1282, 16, 2482, 2483, 200, 2, 0, 0,\n",
" 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
" 0, 0, 0, 0, 0, 0, 0]])}\n",
"batch_y has: {'label_seq': tensor([3, 2])}\n"
]
}
],
"source": [
"# If you are doing reinforcement learning, GANs, or similar projects, you may want to iterate over raw batches of the dataset like this\n",
"from fastNLP.core.batch import Batch\n",
"from fastNLP.core.sampler import RandomSampler\n",
"\n",
"batch_iterator = Batch(dataset=train_data, batch_size=2, sampler=RandomSampler())\n",
"for batch_x, batch_y in batch_iterator:\n",
"    print(\"batch_x has: \", batch_x)\n",
"    print(\"batch_y has: \", batch_y)\n",
"    break"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Model\n"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"CNNText(\n (embed): Embedding(\n (embed): Embedding(77, 50, padding_idx=0)\n (dropout): Dropout(p=0.0)\n )\n (conv_pool): ConvMaxpool(\n (convs): ModuleList(\n (0): Conv1d(50, 3, kernel_size=(3,), stride=(1,), padding=(2,))\n (1): Conv1d(50, 4, kernel_size=(4,), stride=(1,), padding=(2,))\n (2): Conv1d(50, 5, kernel_size=(5,), stride=(1,), padding=(2,))\n )\n )\n (dropout): Dropout(p=0.1)\n (fc): Linear(\n (linear): Linear(in_features=12, out_features=5, bias=True)\n )\n)"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Define a simple PyTorch model\n",
"\n",
"from fastNLP.models import CNNText\n",
"model = CNNText(embed_num=len(vocab), embed_dim=50, num_classes=5, padding=2, dropout=0.1)\n",
"model"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Trainer & Tester\n",
"------\n",
"\n",
"Use fastNLP's Trainer to train the model."
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"from fastNLP import Trainer\n",
"from copy import deepcopy\n",
"from fastNLP import CrossEntropyLoss\n",
"from fastNLP import AccuracyMetric"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"training epochs started 2018-12-07 14:07:20\n"
]
},
{
"data": {
"text/plain": [
"HBox(children=(IntProgress(value=0, layout=Layout(flex='2'), max=20), HTML(value='')), layout=Layout(display='…"
]
},
"execution_count": 0,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/10. Step:2/20. AccuracyMetric: acc=0.037037\n",
"Epoch 2/10. Step:4/20. AccuracyMetric: acc=0.296296\n",
"Epoch 3/10. Step:6/20. AccuracyMetric: acc=0.333333\n",
"Epoch 4/10. Step:8/20. AccuracyMetric: acc=0.555556\n",
"Epoch 5/10. Step:10/20. AccuracyMetric: acc=0.611111\n",
"Epoch 6/10. Step:12/20. AccuracyMetric: acc=0.481481\n",
"Epoch 7/10. Step:14/20. AccuracyMetric: acc=0.62963\n",
"Epoch 8/10. Step:16/20. AccuracyMetric: acc=0.685185\n",
"Epoch 9/10. Step:18/20. AccuracyMetric: acc=0.722222\n",
"Epoch 10/10. Step:20/20. AccuracyMetric: acc=0.777778\n"
]
}
],
  540. "source": [
  541. "# 进行overfitting测试\n",
  542. "copy_model = deepcopy(model)\n",
  543. "overfit_trainer = Trainer(model=copy_model, \n",
  544. " train_data=test_data, \n",
  545. " dev_data=test_data,\n",
  546. " loss=CrossEntropyLoss(pred=\"output\", target=\"label_seq\"),\n",
  547. " metrics=AccuracyMetric(),\n",
  548. " n_epochs=10,\n",
  549. " save_path=None)\n",
  550. "overfit_trainer.train()"
  551. ]
  552. },
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"training epochs started 2018-12-07 14:08:10\n"
]
},
{
"data": {
"text/plain": [
"HBox(children=(IntProgress(value=0, layout=Layout(flex='2'), max=5), HTML(value='')), layout=Layout(display='i…"
]
},
"execution_count": 0,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/5. Step:1/5. AccuracyMetric: acc=0.037037\n",
"Epoch 2/5. Step:2/5. AccuracyMetric: acc=0.037037\n",
"Epoch 3/5. Step:3/5. AccuracyMetric: acc=0.037037\n",
"Epoch 4/5. Step:4/5. AccuracyMetric: acc=0.185185\n",
"Epoch 5/5. Step:5/5. AccuracyMetric: acc=0.240741\n",
"Train finished!\n"
]
}
],
  709. "source": [
  710. "# 实例化Trainer,传入模型和数据,进行训练\n",
  711. "trainer = Trainer(model=model, \n",
  712. " train_data=train_data, \n",
  713. " dev_data=test_data,\n",
  714. " loss=CrossEntropyLoss(pred=\"output\", target=\"label_seq\"),\n",
  715. " metrics=AccuracyMetric(),\n",
  716. " n_epochs=5)\n",
  717. "trainer.train()\n",
  718. "print('Train finished!')"
  719. ]
  720. },
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[tester] \nAccuracyMetric: acc=0.240741\n"
]
}
],
"source": [
"from fastNLP import Tester\n",
"\n",
"tester = Tester(data=test_data, model=model, metrics=AccuracyMetric())\n",
"acc = tester.test()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# In summary\n",
"\n",
"## Pseudocode logic of the fastNLP Trainer\n",
"### 1. Prepare a DataSet; suppose it contains the following fields\n",
"    ['raw_sentence', 'word_seq1', 'word_seq2', 'raw_label', 'label']\n",
"    Use\n",
"        DataSet.set_input('word_seq1', 'word_seq2', flag=True) to mark 'word_seq1' and 'word_seq2' as input\n",
"    Use\n",
"        DataSet.set_target('label', flag=True) to mark 'label' as target\n",
"### 2. Initialize the model\n",
"    class Model(nn.Module):\n",
"        def __init__(self):\n",
"            xxx\n",
"        def forward(self, word_seq1, word_seq2):\n",
"            # (1) The parameter names here must match the names of the input fields in the DataSet, because values are bound by parameter name.\n",
"            # (2) There may be more input fields than parameters here, but never fewer.\n",
"            xxxx\n",
"            # The output must be a dict\n",
"### 3. The Trainer's training loop\n",
"    (1) Take one batch of batch_size samples from the DataSet and call Model.forward.\n",
"    (2) Pass the result of Model.forward together with the fields marked as target into the loss.\n",
"        Different models may use different keys in forward's output dict, e.g. {'pred': xxx} vs. {'output': xxx},\n",
"        and the target may also be named differently, e.g. 'label' vs. 'target'.\n",
"        To handle this, the loss provides a mapping mechanism.\n",
"        For example, CrossEntropyLoss needs a prediction and a target; if forward's output is {'output': xxx} and 'label' is the target field,\n",
"        simply initialize the loss as CrossEntropyLoss(pred='output', target='label').\n",
"    (3) Metrics work the same way:\n",
"        a Metric reads values from forward's output and from the fields marked as target, and the same name mapping applies.\n",
"\n",
"## Some questions\n",
"### 1. Why does a DataSet need input and target flags?\n",
"    Only fields marked as input or target are fetched during training.\n",
"    (1.1) Only fields marked as input are considered when binding arguments for Model.forward.\n",
"    (1.2) Values passed to the loss or a metric come from:\n",
"        (a) the output of Model.forward\n",
"        (b) the fields marked as target\n",
"\n",
"### 2. Fields in the DataSet are bound to forward's parameters by parameter name\n",
"    (2.1) When building the model, for example:\n",
"        if 'x' and 'seq_lens' are input fields in the DataSet, then forward should be\n",
"        def forward(self, x, seq_lens):\n",
"            pass\n",
"        because fields are matched by parameter name.\n",
"\n",
"## Workflow recap\n",
"### 1. Load data into a DataSet\n",
"### 2. Preprocess the DataSet with apply\n",
"    (2.1) During preprocessing, mark some fields as input and some as target.\n",
"### 3. Build the model\n",
"    (3.1) Note that forward's parameter names must match the names of the fields marked as input in the DataSet.\n",
"        For example:\n",
"        if 'x' and 'seq_lens' are input fields in the DataSet, then forward should be\n",
"        def forward(self, x, seq_lens):\n",
"            pass\n",
"        because fields are matched by parameter name.\n",
"    (3.2) forward's output must be a dict;\n",
"        {\"pred\": xx} is the recommended key.\n"
]
},
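{
"cell_type": "markdown",
"metadata": {},
"source": [
"The next cell is a minimal sketch of the name-matching and loss-mapping rules summarized above, assuming a toy model and made-up field names ('word_seq', 'label') rather than the tutorial's real data. It only shows how set_input/set_target, forward's parameter names, and CrossEntropyLoss(pred=..., target=...) fit together.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical sketch, not part of the original tutorial\n",
"import torch.nn as nn\n",
"from fastNLP import DataSet, Instance, CrossEntropyLoss\n",
"\n",
"# 1. Mark fields by name: 'word_seq' becomes input, 'label' becomes target\n",
"ds = DataSet()\n",
"ds.append(Instance(word_seq=[1, 2, 3], label=0))\n",
"ds.set_input('word_seq', flag=True)   # bound to forward(self, word_seq) by name\n",
"ds.set_target('label', flag=True)     # consumed by the loss and metrics\n",
"\n",
"# 2. forward's parameter name matches the input field; the output is a dict\n",
"class ToyModel(nn.Module):\n",
"    def __init__(self, vocab_size=10, num_classes=5):\n",
"        super().__init__()\n",
"        self.embed = nn.Embedding(vocab_size, 8)\n",
"        self.fc = nn.Linear(8, num_classes)\n",
"\n",
"    def forward(self, word_seq):\n",
"        return {'pred': self.fc(self.embed(word_seq).mean(dim=1))}\n",
"\n",
"# 3. Map forward's output key and the target field by name\n",
"loss = CrossEntropyLoss(pred='pred', target='label')"
]
},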
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}