
test_text_bert_tokenizer.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing BertTokenizer op in DE
"""
import numpy as np
import pytest

import mindspore.dataset as ds
from mindspore import log as logger
import mindspore.dataset.text as text

BERT_TOKENIZER_FILE = "../data/dataset/testTokenizerData/bert_tokenizer.txt"

vocab_bert = [
    "床", "前", "明", "月", "光", "疑", "是", "地", "上", "霜", "举", "头", "望", "低", "思", "故", "乡",
    "繁", "體", "字", "嘿", "哈", "大", "笑", "嘻",
    "i", "am", "mak", "make", "small", "mistake", "##s", "during", "work", "##ing", "hour",
    "😀", "😃", "😄", "😁", "+", "/", "-", "=", "12", "28", "40", "16", " ", "I",
    "[CLS]", "[SEP]", "[UNK]", "[PAD]", "[MASK]", "[unused1]", "[unused10]"
]
pad = '<pad>'
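
# Each entry in test_paras drives one check: `first` and `last` are 1-based line numbers
# selecting which lines of bert_tokenizer.txt to tokenize, `expect_str` lists the expected
# tokens per line, and `expected_offsets_start`/`expected_offsets_limit` are the expected
# start/end byte offsets (UTF-8) of each token when with_offsets=True. Any remaining keys
# override the BertTokenizer defaults accepted by the check functions below.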
test_paras = [
    # test chinese text
    dict(
        first=1,
        last=4,
        expect_str=[['床', '前', '明', '月', '光'],
                    ['疑', '是', '地', '上', '霜'],
                    ['举', '头', '望', '明', '月'],
                    ['低', '头', '思', '故', '乡']],
        expected_offsets_start=[[0, 3, 6, 9, 12],
                                [0, 3, 6, 9, 12],
                                [0, 3, 6, 9, 12],
                                [0, 3, 6, 9, 12]],
        expected_offsets_limit=[[3, 6, 9, 12, 15],
                                [3, 6, 9, 12, 15],
                                [3, 6, 9, 12, 15],
                                [3, 6, 9, 12, 15]],
        vocab_list=vocab_bert
    ),
    # test english text
    dict(
        first=5,
        last=5,
        expect_str=[['i', 'am', 'mak', '##ing', 'small', 'mistake', '##s', 'during', 'work', '##ing', 'hour', '##s']],
        expected_offsets_start=[[0, 2, 5, 8, 12, 18, 25, 27, 34, 38, 42, 46]],
        expected_offsets_limit=[[1, 4, 8, 11, 17, 25, 26, 33, 38, 41, 46, 47]],
        lower_case=True,
        vocab_list=vocab_bert
    ),
    dict(
        first=5,
        last=5,
        expect_str=[['I', "am", 'mak', '##ing', 'small', 'mistake', '##s', 'during', 'work', '##ing', 'hour', '##s']],
        expected_offsets_start=[[0, 2, 5, 8, 12, 18, 25, 27, 34, 38, 42, 46]],
        expected_offsets_limit=[[1, 4, 8, 11, 17, 25, 26, 33, 38, 41, 46, 47]],
        lower_case=False,
        vocab_list=vocab_bert
    ),
    # test emoji tokens
    dict(
        first=6,
        last=7,
        expect_str=[
            ['😀', '嘿', '嘿', '😃', '哈', '哈', '😄', '大', '笑', '😁', '嘻', '嘻'],
            ['繁', '體', '字']],
        expected_offsets_start=[[0, 4, 7, 10, 14, 17, 20, 24, 27, 30, 34, 37], [0, 3, 6]],
        expected_offsets_limit=[[4, 7, 10, 14, 17, 20, 24, 27, 30, 34, 37, 40], [3, 6, 9]],
        normalization_form=text.utils.NormalizeForm.NFKC,
        vocab_list=vocab_bert
    ),
    # test preserved tokens
    dict(
        first=8,
        last=14,
        expect_str=[
            ['[UNK]', '[CLS]'],
            ['[UNK]', '[SEP]'],
            ['[UNK]', '[UNK]'],
            ['[UNK]', '[PAD]'],
            ['[UNK]', '[MASK]'],
            ['[unused1]'],
            ['[unused10]']
        ],
        expected_offsets_start=[[0, 7], [0, 7], [0, 7], [0, 7], [0, 7], [0], [0]],
        expected_offsets_limit=[[6, 12], [6, 12], [6, 12], [6, 12], [6, 13], [9], [10]],
        lower_case=False,
        vocab_list=vocab_bert,
        preserve_unused_token=True,
    ),
    dict(
        first=8,
        last=14,
        expect_str=[
            ['[UNK]', '[CLS]'],
            ['[UNK]', '[SEP]'],
            ['[UNK]', '[UNK]'],
            ['[UNK]', '[PAD]'],
            ['[UNK]', '[MASK]'],
            ['[unused1]'],
            ['[unused10]']
        ],
        expected_offsets_start=[[0, 7], [0, 7], [0, 7], [0, 7], [0, 7], [0], [0]],
        expected_offsets_limit=[[6, 12], [6, 12], [6, 12], [6, 12], [6, 13], [9], [10]],
        lower_case=True,
        vocab_list=vocab_bert,
        preserve_unused_token=True,
    ),
    # test special symbol
    dict(
        first=15,
        last=15,
        expect_str=[['12', '+', '/', '-', '28', '=', '40', '/', '-', '16']],
        expected_offsets_start=[[0, 2, 3, 4, 5, 7, 8, 10, 11, 12]],
        expected_offsets_limit=[[2, 3, 4, 5, 7, 8, 10, 11, 12, 14]],
        preserve_unused_token=True,
        vocab_list=vocab_bert
    ),
    # test non-default params
    dict(
        first=8,
        last=8,
        expect_str=[['[UNK]', ' ', '[CLS]']],
        expected_offsets_start=[[0, 6, 7]],
        expected_offsets_limit=[[6, 7, 12]],
        lower_case=False,
        vocab_list=vocab_bert,
        preserve_unused_token=True,
        keep_whitespace=True
    ),
    dict(
        first=8,
        last=8,
        expect_str=[['unused', ' ', '[CLS]']],
        expected_offsets_start=[[0, 6, 7]],
        expected_offsets_limit=[[6, 7, 12]],
        lower_case=False,
        vocab_list=vocab_bert,
        preserve_unused_token=True,
        keep_whitespace=True,
        unknown_token=''
    ),
    dict(
        first=8,
        last=8,
        expect_str=[['unused', ' ', '[', 'CLS', ']']],
        expected_offsets_start=[[0, 6, 7, 8, 11]],
        expected_offsets_limit=[[6, 7, 8, 11, 12]],
        lower_case=False,
        vocab_list=vocab_bert,
        preserve_unused_token=False,
        keep_whitespace=True,
        unknown_token=''
    ),
]


def check_bert_tokenizer_default(first, last, expect_str,
                                 expected_offsets_start, expected_offsets_limit,
                                 vocab_list, suffix_indicator='##',
                                 max_bytes_per_token=100, unknown_token='[UNK]',
                                 lower_case=False, keep_whitespace=False,
                                 normalization_form=text.utils.NormalizeForm.NONE,
                                 preserve_unused_token=False):
    # Read the selected lines [first, last] from the test data file.
    dataset = ds.TextFileDataset(BERT_TOKENIZER_FILE, shuffle=False)
    if first > 1:
        dataset = dataset.skip(first - 1)
    if last >= first:
        dataset = dataset.take(last - first + 1)
    vocab = text.Vocab.from_list(vocab_list)
    tokenizer_op = text.BertTokenizer(
        vocab=vocab, suffix_indicator=suffix_indicator,
        max_bytes_per_token=max_bytes_per_token, unknown_token=unknown_token,
        lower_case=lower_case, keep_whitespace=keep_whitespace,
        normalization_form=normalization_form,
        preserve_unused_token=preserve_unused_token)
    dataset = dataset.map(operations=tokenizer_op)
    count = 0
    for i in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
        token = text.to_str(i['text'])
        logger.info("Out: %s", token)
        logger.info("Exp: %s", expect_str[count])
        np.testing.assert_array_equal(token, expect_str[count])
        count = count + 1


def check_bert_tokenizer_with_offsets(first, last, expect_str,
                                      expected_offsets_start, expected_offsets_limit,
                                      vocab_list, suffix_indicator='##',
                                      max_bytes_per_token=100, unknown_token='[UNK]',
                                      lower_case=False, keep_whitespace=False,
                                      normalization_form=text.utils.NormalizeForm.NONE,
                                      preserve_unused_token=False):
    # Read the selected lines [first, last] from the test data file.
    dataset = ds.TextFileDataset(BERT_TOKENIZER_FILE, shuffle=False)
    if first > 1:
        dataset = dataset.skip(first - 1)
    if last >= first:
        dataset = dataset.take(last - first + 1)
    vocab = text.Vocab.from_list(vocab_list)
    tokenizer_op = text.BertTokenizer(
        vocab=vocab, suffix_indicator=suffix_indicator, max_bytes_per_token=max_bytes_per_token,
        unknown_token=unknown_token, lower_case=lower_case, keep_whitespace=keep_whitespace,
        normalization_form=normalization_form, preserve_unused_token=preserve_unused_token, with_offsets=True)
    dataset = dataset.map(operations=tokenizer_op, input_columns=['text'],
                          output_columns=['token', 'offsets_start', 'offsets_limit'],
                          column_order=['token', 'offsets_start', 'offsets_limit'])
    count = 0
    for i in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
        token = text.to_str(i['token'])
        logger.info("Out: %s", token)
        logger.info("Exp: %s", expect_str[count])
        np.testing.assert_array_equal(token, expect_str[count])
        np.testing.assert_array_equal(i['offsets_start'], expected_offsets_start[count])
        np.testing.assert_array_equal(i['offsets_limit'], expected_offsets_limit[count])
        count = count + 1


def test_bert_tokenizer_default():
    """
    Test BertTokenizer when with_offsets=False
    """
    for paras in test_paras:
        check_bert_tokenizer_default(**paras)


def test_bert_tokenizer_with_offsets():
    """
    Test BertTokenizer when with_offsets=True
    """
    for paras in test_paras:
        check_bert_tokenizer_with_offsets(**paras)


def test_bert_tokenizer_callable_invalid_input():
    """
    Test BertTokenizer in eager mode with invalid input (a dict cannot be converted into a tensor)
    """
    data = {'张三': 18, '王五': 20}
    vocab = text.Vocab.from_list(vocab_bert)
    tokenizer_op = text.BertTokenizer(vocab=vocab)
    with pytest.raises(TypeError) as info:
        _ = tokenizer_op(data)
    assert "Invalid user input. Got <class 'dict'>: {'张三': 18, '王五': 20}, cannot be converted into tensor." in str(info)


if __name__ == '__main__':
    test_bert_tokenizer_callable_invalid_input()
    test_bert_tokenizer_default()
    test_bert_tokenizer_with_offsets()
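
# The tests above can also be collected with pytest, e.g. `pytest test_text_bert_tokenizer.py`.
# As an illustrative sketch only (assumed usage, not part of this test), the tokenizer can be
# called eagerly on a single string, mirroring the eager call in the invalid-input test:
#     vocab = text.Vocab.from_list(vocab_bert)
#     tokens = text.to_str(text.BertTokenizer(vocab=vocab)("床前明月光"))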