
test_nlp_jieop.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import mindspore.dataset as ds
from mindspore.dataset.text import JiebaTokenizer
from mindspore.dataset.text import JiebaMode, to_str

DATA_FILE = "../data/dataset/testJiebaDataset/3.txt"
DATA_ALL_FILE = "../data/dataset/testJiebaDataset/*"
HMM_FILE = "../data/dataset/jiebadict/hmm_model.utf8"
MP_FILE = "../data/dataset/jiebadict/jieba.dict.utf8"
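# JiebaTokenizer needs both dictionary files: MP_FILE backs the
# max-probability segmenter and HMM_FILE the hidden-Markov fallback for
# out-of-vocabulary words; JiebaMode.MIX runs MP first, then HMM.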


def test_jieba_1():
    """Test jieba tokenizer with MP mode"""
    data = ds.TextFileDataset(DATA_FILE)
    jieba_op = JiebaTokenizer(HMM_FILE, MP_FILE, mode=JiebaMode.MP)
    data = data.map(input_columns=["text"],
                    operations=jieba_op, num_parallel_workers=1)
    expect = ['今天天气', '太好了', '我们', '一起', '去', '外面', '玩吧']
    ret = []
    for i in data.create_dict_iterator():
        ret = to_str(i["text"])
        for index, item in enumerate(ret):
            assert item == expect[index]


def test_jieba_1_1():
    """Test jieba tokenizer with HMM mode"""
    data = ds.TextFileDataset(DATA_FILE)
    jieba_op = JiebaTokenizer(HMM_FILE, MP_FILE, mode=JiebaMode.HMM)
    data = data.map(input_columns=["text"],
                    operations=jieba_op, num_parallel_workers=1)
    expect = ['今天', '天气', '太', '好', '了', '我们', '一起', '去', '外面', '玩', '吧']
    for i in data.create_dict_iterator():
        ret = to_str(i["text"])
        for index, item in enumerate(ret):
            assert item == expect[index]


def test_jieba_1_2():
    """Test jieba tokenizer with MIX mode"""
    data = ds.TextFileDataset(DATA_FILE)
    jieba_op = JiebaTokenizer(HMM_FILE, MP_FILE, mode=JiebaMode.MIX)
    data = data.map(input_columns=["text"],
                    operations=jieba_op, num_parallel_workers=1)
    expect = ['今天天气', '太好了', '我们', '一起', '去', '外面', '玩吧']
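    # Same expectation as the MP test: MIX only falls back to HMM for words
    # the dictionary does not cover, and none are missing here.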
    for i in data.create_dict_iterator():
        ret = to_str(i["text"])
        for index, item in enumerate(ret):
            assert item == expect[index]


def test_jieba_2():
    """Test add_word"""
    DATA_FILE4 = "../data/dataset/testJiebaDataset/4.txt"
    data = ds.TextFileDataset(DATA_FILE4)
    jieba_op = JiebaTokenizer(HMM_FILE, MP_FILE, mode=JiebaMode.MP)
    jieba_op.add_word("男默女泪")
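    # No freq given, so the tokenizer falls back to a default frequency
    # for the new word; test_jieba_2_1 passes one explicitly.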
    expect = ['男默女泪', '市', '长江大桥']
    data = data.map(input_columns=["text"],
                    operations=jieba_op, num_parallel_workers=2)
    for i in data.create_dict_iterator():
        ret = to_str(i["text"])
        for index, item in enumerate(ret):
            assert item == expect[index]


def test_jieba_2_1():
    """Test add_word with freq"""
    DATA_FILE4 = "../data/dataset/testJiebaDataset/4.txt"
    data = ds.TextFileDataset(DATA_FILE4)
    jieba_op = JiebaTokenizer(HMM_FILE, MP_FILE, mode=JiebaMode.MP)
    jieba_op.add_word("男默女泪", 10)
    data = data.map(input_columns=["text"],
                    operations=jieba_op, num_parallel_workers=2)
    expect = ['男默女泪', '市', '长江大桥']
    for i in data.create_dict_iterator():
        ret = to_str(i["text"])
        for index, item in enumerate(ret):
            assert item == expect[index]


def test_jieba_2_2():
    """Test add_word with invalid None input"""
    jieba_op = JiebaTokenizer(HMM_FILE, MP_FILE, mode=JiebaMode.MP)
    try:
        jieba_op.add_word(None)
        assert False, "add_word(None) should raise ValueError"
    except ValueError:
        pass


def test_jieba_2_3():
    """Test add_word with freq; the freq value affects the segmentation result"""
    DATA_FILE4 = "../data/dataset/testJiebaDataset/6.txt"
    data = ds.TextFileDataset(DATA_FILE4)
    jieba_op = JiebaTokenizer(HMM_FILE, MP_FILE, mode=JiebaMode.MP)
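    # A high enough frequency for "江大桥" outweighs the dictionary entry
    # "长江大桥", so MP mode produces the "市长" / "江大桥" split expected below.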
    jieba_op.add_word("江大桥", 20000)
    data = data.map(input_columns=["text"],
                    operations=jieba_op, num_parallel_workers=2)
    expect = ['江州', '市长', '江大桥', '参加', '了', '长江大桥', '的', '通车', '仪式']
    for i in data.create_dict_iterator():
        ret = to_str(i["text"])
        for index, item in enumerate(ret):
            assert item == expect[index]


def test_jieba_3():
    """Test add_dict with dict"""
    DATA_FILE4 = "../data/dataset/testJiebaDataset/4.txt"
    user_dict = {
        "男默女泪": 10
    }
    data = ds.TextFileDataset(DATA_FILE4)
    jieba_op = JiebaTokenizer(HMM_FILE, MP_FILE, mode=JiebaMode.MP)
    jieba_op.add_dict(user_dict)
    data = data.map(input_columns=["text"],
                    operations=jieba_op, num_parallel_workers=1)
    expect = ['男默女泪', '市', '长江大桥']
    for i in data.create_dict_iterator():
        ret = to_str(i["text"])
        for index, item in enumerate(ret):
            assert item == expect[index]


def test_jieba_3_1():
    """Test add_dict with a dict containing multiple words"""
    DATA_FILE4 = "../data/dataset/testJiebaDataset/4.txt"
    user_dict = {
        "男默女泪": 10,
        "江大桥": 20000
    }
    data = ds.TextFileDataset(DATA_FILE4)
    jieba_op = JiebaTokenizer(HMM_FILE, MP_FILE, mode=JiebaMode.MP)
    jieba_op.add_dict(user_dict)
    data = data.map(input_columns=["text"],
                    operations=jieba_op, num_parallel_workers=1)
    expect = ['男默女泪', '市长', '江大桥']
    for i in data.create_dict_iterator():
        ret = to_str(i["text"])
        for index, item in enumerate(ret):
            assert item == expect[index]


def test_jieba_4():
    """Test add_dict with a user dict file"""
    DATA_FILE4 = "../data/dataset/testJiebaDataset/3.txt"
    DICT_FILE = "../data/dataset/testJiebaDataset/user_dict.txt"
    data = ds.TextFileDataset(DATA_FILE4)
    jieba_op = JiebaTokenizer(HMM_FILE, MP_FILE, mode=JiebaMode.MP)
    jieba_op.add_dict(DICT_FILE)
    data = data.map(input_columns=["text"],
                    operations=jieba_op, num_parallel_workers=1)
    expect = ['今天天气', '太好了', '我们', '一起', '去', '外面', '玩吧']
    for i in data.create_dict_iterator():
        ret = to_str(i["text"])
        for index, item in enumerate(ret):
            assert item == expect[index]


def test_jieba_4_1():
    """Test add_dict with an invalid (empty) file path"""
    DICT_FILE = ""
    jieba_op = JiebaTokenizer(HMM_FILE, MP_FILE, mode=JiebaMode.MP)
    try:
        jieba_op.add_dict(DICT_FILE)
        assert False, "add_dict with an empty path should raise ValueError"
    except ValueError:
        pass


def test_jieba_5():
    """Test add_word with freq on a single worker (same case as test_jieba_2_3)"""
    DATA_FILE4 = "../data/dataset/testJiebaDataset/6.txt"
    data = ds.TextFileDataset(DATA_FILE4)
    jieba_op = JiebaTokenizer(HMM_FILE, MP_FILE, mode=JiebaMode.MP)
    jieba_op.add_word("江大桥", 20000)
    data = data.map(input_columns=["text"],
                    operations=jieba_op, num_parallel_workers=1)
    expect = ['江州', '市长', '江大桥', '参加', '了', '长江大桥', '的', '通车', '仪式']
    for i in data.create_dict_iterator():
        ret = to_str(i["text"])
        for index, item in enumerate(ret):
            assert item == expect[index]


def gen():
    """Yield one Chinese sentence as a numpy bytes array."""
    text = np.array("今天天气太好了我们一起去外面玩吧".encode("UTF8"), dtype='S')
    yield (text,)
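

# map() also accepts a plain Python callable: pytoken_op below slices the
# sentence into fixed five-character chunks and returns a numpy bytes array,
# which is enough to stand in for a tokenizer in test_jieba_6.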
def pytoken_op(input_data):
    """Split the input into three fixed-width chunks."""
    te = str(to_str(input_data))
    tokens = []
    tokens.append(te[:5].encode("UTF8"))
    tokens.append(te[5:10].encode("UTF8"))
    tokens.append(te[10:].encode("UTF8"))
    return np.array(tokens, dtype='S')


def test_jieba_6():
    """Test a user-defined Python tokenizer passed to map"""
    data = ds.GeneratorDataset(gen, column_names=["text"])
    data = data.map(input_columns=["text"],
                    operations=pytoken_op, num_parallel_workers=1)
    expect = ['今天天气太', '好了我们一', '起去外面玩吧']
    for i in data.create_dict_iterator():
        ret = to_str(i["text"])
        for index, item in enumerate(ret):
            assert item == expect[index]


if __name__ == "__main__":
    test_jieba_1()
    test_jieba_1_1()
    test_jieba_1_2()
    test_jieba_2()
    test_jieba_2_1()
    test_jieba_2_2()
    test_jieba_2_3()
    test_jieba_3()
    test_jieba_3_1()
    test_jieba_4()
    test_jieba_4_1()
    test_jieba_5()
    test_jieba_6()
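# The test_jieba_* functions above are also collectable by pytest
# (e.g. `pytest -q test_nlp_jieop.py`); running the file directly just
# executes them in sequence.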