You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

test_datasets_textfileop.py 3.2 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ==============================================================================
  15. import mindspore.dataset as ds
  16. import mindspore.dataset.transforms.text.utils as nlp
  17. from mindspore import log as logger
  18. DATA_FILE = "../data/dataset/testTextFileDataset/1.txt"
  19. DATA_ALL_FILE = "../data/dataset/testTextFileDataset/*"
  20. def test_textline_dataset_one_file():
  21. data = ds.TextFileDataset(DATA_FILE)
  22. count = 0
  23. for i in data.create_dict_iterator():
  24. logger.info("{}".format(i["text"]))
  25. count += 1
  26. assert (count == 3)
  27. def test_textline_dataset_all_file():
  28. data = ds.TextFileDataset(DATA_ALL_FILE)
  29. count = 0
  30. for i in data.create_dict_iterator():
  31. logger.info("{}".format(i["text"]))
  32. count += 1
  33. assert (count == 5)
  34. def test_textline_dataset_totext():
  35. ds.config.set_num_parallel_workers(4)
  36. data = ds.TextFileDataset(DATA_ALL_FILE, shuffle=False)
  37. count = 0
  38. line = ["This is a text file.", "Another file.",
  39. "Be happy every day.", "End of file.", "Good luck to everyone."]
  40. for i in data.create_dict_iterator():
  41. str = i["text"].item().decode("utf8")
  42. assert (str == line[count])
  43. count += 1
  44. assert (count == 5)
  45. def test_textline_dataset_num_samples():
  46. data = ds.TextFileDataset(DATA_FILE, num_samples=2)
  47. count = 0
  48. for i in data.create_dict_iterator():
  49. count += 1
  50. assert (count == 2)
  51. def test_textline_dataset_distribution():
  52. data = ds.TextFileDataset(DATA_ALL_FILE, num_shards=2, shard_id=1)
  53. count = 0
  54. for i in data.create_dict_iterator():
  55. count += 1
  56. assert (count == 3)
  57. def test_textline_dataset_repeat():
  58. data = ds.TextFileDataset(DATA_FILE, shuffle=False)
  59. data = data.repeat(3)
  60. count = 0
  61. line = ["This is a text file.", "Be happy every day.", "Good luck to everyone.",
  62. "This is a text file.", "Be happy every day.", "Good luck to everyone.",
  63. "This is a text file.", "Be happy every day.", "Good luck to everyone."]
  64. for i in data.create_dict_iterator():
  65. str = i["text"].item().decode("utf8")
  66. assert (str == line[count])
  67. count += 1
  68. assert (count == 9)
  69. def test_textline_dataset_get_datasetsize():
  70. data = ds.TextFileDataset(DATA_FILE)
  71. size = data.get_dataset_size()
  72. assert (size == 3)
  73. if __name__ == "__main__":
  74. test_textline_dataset_one_file()
  75. test_textline_dataset_all_file()
  76. test_textline_dataset_totext()
  77. test_textline_dataset_num_samples()
  78. test_textline_dataset_distribution()
  79. test_textline_dataset_repeat()
  80. test_textline_dataset_get_datasetsize()