test_tokenizer.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing UnicodeCharTokenizer op in DE
"""
import mindspore.dataset as ds
from mindspore import log as logger
import mindspore.dataset.transforms.text.c_transforms as nlp
import mindspore.dataset.transforms.text.utils as nlp_util

DATA_FILE = "../data/dataset/testTokenizerData/1.txt"


def split_by_unicode_char(input_strs):
    """
    Split UTF-8 strings into lists of unicode characters
    """
    out = []
    for s in input_strs:
        out.append([c for c in s])
    return out


def test_unicode_char_tokenizer():
    """
    Test UnicodeCharTokenizer
    """
    # Expected contents of DATA_FILE, one string per line
    input_strs = ("Welcome to Beijing!", "北京欢迎您!", "我喜欢English!", " ")
    dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
    tokenizer = nlp.UnicodeCharTokenizer()
    dataset = dataset.map(operations=tokenizer)
    tokens = []
    for i in dataset.create_dict_iterator():
        # Convert the raw tensor in the 'text' column back to python strings
        text = nlp_util.as_text(i['text']).tolist()
        tokens.append(text)
    logger.info("The output tokens are: {}".format(tokens))
    assert split_by_unicode_char(input_strs) == tokens


if __name__ == '__main__':
    test_unicode_char_tokenizer()
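
Since the test compares the tokenizer output against a plain-Python character split, the assertion only holds if DATA_FILE contains exactly the four lines in input_strs. A minimal sketch of the expected per-line behavior, runnable without MindSpore (the sample string is taken from input_strs above):

# UnicodeCharTokenizer splits each line into its individual unicode
# characters, which split_by_unicode_char() reproduces with a plain
# list comprehension.
assert [c for c in "北京欢迎您!"] == ['北', '京', '欢', '迎', '您', '!']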