test_sentencepiece_tokenizer.py
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
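"""Tests for SentencePieceTokenizer: building a SentencePieceVocab from a file
or from a dataset, tokenizing to string pieces or to token ids, and using the
tokenizer together with zip and concat dataset ops."""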
import copy

import mindspore.dataset as ds
import mindspore.dataset.text as text
from mindspore.dataset.text import SentencePieceModel, SPieceTokenizerOutType, to_str

VOCAB_FILE = "../data/dataset/test_sentencepiece/botchan.txt"
DATA_FILE = "../data/dataset/testTokenizerData/sentencepiece_tokenizer.txt"


def test_from_vocab_to_str():
    """Build a UNIGRAM vocab in memory and tokenize to string pieces."""
    vocab = text.SentencePieceVocab.from_file([VOCAB_FILE], 5000, 0.9995, SentencePieceModel.UNIGRAM, {})
    tokenizer = text.SentencePieceTokenizer(vocab, out_type=SPieceTokenizerOutType.STRING)
    dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    dataset = dataset.map(operations=tokenizer)
    expect = ['▁I', '▁sa', 'w', '▁a', '▁girl', '▁with', '▁a', '▁te', 'les', 'co', 'pe', '.']
    for i in dataset.create_dict_iterator():
        ret = to_str(i["text"])
        for key, value in enumerate(ret):
            assert value == expect[key]


def test_from_vocab_to_int():
    """Build a UNIGRAM vocab in memory and tokenize to token ids."""
    vocab = text.SentencePieceVocab.from_file([VOCAB_FILE], 5000, 0.9995, SentencePieceModel.UNIGRAM, {})
    tokenizer = text.SentencePieceTokenizer(vocab, out_type=SPieceTokenizerOutType.INT)
    dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    dataset = dataset.map(operations=tokenizer)
    expect = [6, 329, 183, 8, 945, 23, 8, 3783, 4382, 4641, 1405, 4]
    for i in dataset.create_dict_iterator():
        ret = i["text"]
        for key, value in enumerate(ret):
            assert value == expect[key]


def test_from_file_to_str():
    """Save the vocab to a SentencePiece model file, then tokenize to string pieces from it."""
    vocab = text.SentencePieceVocab.from_file([VOCAB_FILE], 5000, 0.9995, SentencePieceModel.UNIGRAM, {})
    text.SentencePieceVocab.save_model(vocab, "./", "m.model")
    tokenizer = text.SentencePieceTokenizer("./m.model", out_type=SPieceTokenizerOutType.STRING)
    dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    dataset = dataset.map(operations=tokenizer)
    expect = ['▁I', '▁sa', 'w', '▁a', '▁girl', '▁with', '▁a', '▁te', 'les', 'co', 'pe', '.']
    for i in dataset.create_dict_iterator():
        ret = to_str(i["text"])
        for key, value in enumerate(ret):
            assert value == expect[key]


def test_from_file_to_int():
    """Save the vocab to a SentencePiece model file, then tokenize to token ids from it."""
    vocab = text.SentencePieceVocab.from_file([VOCAB_FILE], 5000, 0.9995, SentencePieceModel.UNIGRAM, {})
    text.SentencePieceVocab.save_model(vocab, "./", "m.model")
    tokenizer = text.SentencePieceTokenizer("./m.model", out_type=SPieceTokenizerOutType.INT)
    dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    dataset = dataset.map(operations=tokenizer)
    expect = [6, 329, 183, 8, 945, 23, 8, 3783, 4382, 4641, 1405, 4]
    for i in dataset.create_dict_iterator():
        ret = i["text"]
        for key, value in enumerate(ret):
            assert value == expect[key]


def test_build_from_dataset():
    """Build the vocab from a TextFileDataset instead of a raw file."""
    data = ds.TextFileDataset(VOCAB_FILE, shuffle=False)
    # "text" is the output column of TextFileDataset
    vocab = text.SentencePieceVocab.from_dataset(data, ["text"], 5000, 0.9995, SentencePieceModel.UNIGRAM, {})
    tokenizer = text.SentencePieceTokenizer(vocab, out_type=SPieceTokenizerOutType.STRING)
    dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    dataset = dataset.map(operations=tokenizer)
    expect = ['▁I', '▁sa', 'w', '▁a', '▁girl', '▁with', '▁a', '▁te', 'les', 'co', 'pe', '.']
    for i in dataset.create_dict_iterator():
        ret = to_str(i["text"])
        for key, value in enumerate(ret):
            assert value == expect[key]


def apply_func(dataset):
    """Rename the 'text' column to 'text2'; used by zip_test to avoid a column-name clash."""
    input_columns = ['text']
    output_columns = ['text2']
    dataset = dataset.rename(input_columns, output_columns)
    return dataset


def zip_test(dataset):
    """Zip a renamed copy with the original and check the tokens are unchanged."""
    dataset_1 = copy.deepcopy(dataset)
    dataset_2 = copy.deepcopy(dataset)
    dataset_1 = dataset_1.apply(apply_func)
    dataset_zip = ds.zip((dataset_1, dataset_2))
    expect = ['▁I', '▁sa', 'w', '▁a', '▁girl', '▁with', '▁a', '▁te', 'les', 'co', 'pe', '.']
    for i in dataset_zip.create_dict_iterator():
        ret = to_str(i["text"])
        for key, value in enumerate(ret):
            assert value == expect[key]


def concat_test(dataset):
    """Concatenate the dataset with a copy of itself and check the tokens are unchanged."""
    dataset_1 = copy.deepcopy(dataset)
    dataset = dataset.concat(dataset_1)
    expect = ['▁I', '▁sa', 'w', '▁a', '▁girl', '▁with', '▁a', '▁te', 'les', 'co', 'pe', '.']
    for i in dataset.create_dict_iterator():
        ret = to_str(i["text"])
        for key, value in enumerate(ret):
            assert value == expect[key]


def test_with_zip_concat():
    """Tokenize with num_parallel_workers=2, then run the zip and concat checks."""
    data = ds.TextFileDataset(VOCAB_FILE, shuffle=False)
    # "text" is the output column of TextFileDataset
    vocab = text.SentencePieceVocab.from_dataset(data, ["text"], 5000, 0.9995, SentencePieceModel.UNIGRAM, {})
    tokenizer = text.SentencePieceTokenizer(vocab, out_type=SPieceTokenizerOutType.STRING)
    dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    dataset = dataset.map(operations=tokenizer, num_parallel_workers=2)
    zip_test(dataset)
    concat_test(dataset)


if __name__ == "__main__":
    test_from_vocab_to_str()
    test_from_vocab_to_int()
    test_from_file_to_str()
    test_from_file_to_int()
    test_build_from_dataset()
    test_with_zip_concat()
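
# Usage sketch (assumption: run from the repository's dataset test directory,
# so the relative VOCAB_FILE/DATA_FILE paths resolve):
#   pytest test_sentencepiece_tokenizer.py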