You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

test_fastNLP.py 2.7 kB

7 years ago
7 years ago
1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374
  1. import sys
  2. sys.path.append("..")
  3. from fastNLP.fastnlp import FastNLP
  4. from fastNLP.fastnlp import interpret_word_seg_results, interpret_cws_pos_results
  5. PATH_TO_CWS_PICKLE_FILES = "/home/zyfeng/fastNLP/reproduction/chinese_word_segment/save/"
  6. PATH_TO_POS_TAG_PICKLE_FILES = "/home/zyfeng/data/crf_seg/"
  7. def word_seg():
  8. nlp = FastNLP(model_dir=PATH_TO_CWS_PICKLE_FILES)
  9. nlp.load("cws_basic_model", config_file="cws.cfg", section_name="POS_test")
  10. text = ["这是最好的基于深度学习的中文分词系统。",
  11. "大王叫我来巡山。",
  12. "我党多年来致力于改善人民生活水平。"]
  13. results = nlp.run(text)
  14. print(results)
  15. for example in results:
  16. words, labels = [], []
  17. for res in example:
  18. words.append(res[0])
  19. labels.append(res[1])
  20. print(interpret_word_seg_results(words, labels))
  21. def text_class():
  22. nlp = FastNLP("./data_for_tests/")
  23. nlp.load("text_class_model")
  24. text = "这是最好的基于深度学习的中文分词系统。"
  25. result = nlp.run(text)
  26. print(result)
  27. print("FastNLP finished!")
  28. def test_word_seg_interpret():
  29. foo = [[('这', 'S'), ('是', 'S'), ('最', 'S'), ('好', 'S'), ('的', 'S'), ('基', 'B'), ('于', 'E'), ('深', 'B'), ('度', 'E'),
  30. ('学', 'B'), ('习', 'E'), ('的', 'S'), ('中', 'B'), ('文', 'E'), ('分', 'B'), ('词', 'E'), ('系', 'B'), ('统', 'E'),
  31. ('。', 'S')]]
  32. chars = [x[0] for x in foo[0]]
  33. labels = [x[1] for x in foo[0]]
  34. print(interpret_word_seg_results(chars, labels))
  35. def test_interpret_cws_pos_results():
  36. foo = [
  37. [('这', 'S-r'), ('是', 'S-v'), ('最', 'S-d'), ('好', 'S-a'), ('的', 'S-u'), ('基', 'B-p'), ('于', 'E-p'), ('深', 'B-d'),
  38. ('度', 'E-d'), ('学', 'B-v'), ('习', 'E-v'), ('的', 'S-u'), ('中', 'B-nz'), ('文', 'E-nz'), ('分', 'B-vn'),
  39. ('词', 'E-vn'), ('系', 'B-n'), ('统', 'E-n'), ('。', 'S-w')]
  40. ]
  41. chars = [x[0] for x in foo[0]]
  42. labels = [x[1] for x in foo[0]]
  43. print(interpret_cws_pos_results(chars, labels))
  44. def pos_tag():
  45. nlp = FastNLP(model_dir=PATH_TO_POS_TAG_PICKLE_FILES)
  46. nlp.load("pos_tag_model", config_file="pos_tag.config", section_name="pos_tag_model")
  47. text = ["这是最好的基于深度学习的中文分词系统。",
  48. "大王叫我来巡山。",
  49. "我党多年来致力于改善人民生活水平。"]
  50. results = nlp.run(text)
  51. for example in results:
  52. words, labels = [], []
  53. for res in example:
  54. words.append(res[0])
  55. labels.append(res[1])
  56. print(interpret_cws_pos_results(words, labels))
if __name__ == "__main__":
    # Script entry point: only the POS-tagging demo runs by default;
    # the other demos (word_seg, text_class, ...) must be invoked manually.
    pos_tag()

一款轻量级的自然语言处理(NLP)工具包,目标是减少用户项目中的工程型代码,例如数据处理循环、训练循环、多卡运行等