
test_readdir.py 2.4 kB

# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import mindspore.dataset as ds
from mindspore import log as logger

DATA_DIR = "../data/dataset/test_tf_file_3_images/data"
SCHEMA = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
COLUMNS = ["label"]
GENERATE_GOLDEN = False


def test_case_0():
    logger.info("Test 0 readdir")
    # apply dataset operations
    data1 = ds.engine.Dataset.read_dir(DATA_DIR, SCHEMA, columns_list=None, num_parallel_workers=None,
                                       deterministic_output=True, prefetch_size=None, shuffle=False, seed=None)
    i = 0
    for item in data1.create_dict_iterator():  # each data is a dictionary
        logger.info("item[label] is {}".format(item["label"]))
        i = i + 1
    assert i == 3


def test_case_1():
    logger.info("Test 1 readdir")
    # apply dataset operations
    data1 = ds.engine.Dataset.read_dir(DATA_DIR, SCHEMA, COLUMNS, num_parallel_workers=None,
                                       deterministic_output=True, prefetch_size=None, shuffle=True, seed=None)
    i = 0
    for item in data1.create_dict_iterator():  # each data is a dictionary
        logger.info("item[label] is {}".format(item["label"]))
        i = i + 1
    assert i == 3


def test_case_2():
    logger.info("Test 2 readdir")
    # apply dataset operations
    data1 = ds.engine.Dataset.read_dir(DATA_DIR, SCHEMA, columns_list=None, num_parallel_workers=2,
                                       deterministic_output=False, prefetch_size=16, shuffle=True, seed=10)
    i = 0
    for item in data1.create_dict_iterator():  # each data is a dictionary
        logger.info("item[label] is {}".format(item["label"]))
        i = i + 1
    assert i == 3


if __name__ == "__main__":
    test_case_0()
    test_case_1()
    test_case_2()
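
The three cases above differ only in the arguments passed to read_dir and count the same three items each time. As a refactoring sketch (not part of the original file; it assumes the same imports, constants, and ds.engine.Dataset.read_dir signature used above, and the helper name run_readdir_case is hypothetical), the repeated body could be factored into a single helper:

# Sketch only: a shared helper the three cases above could call.
# Assumes the same ds, logger, DATA_DIR, and SCHEMA defined in this file.
def run_readdir_case(tag, **read_dir_kwargs):
    logger.info("Test {} readdir".format(tag))
    data = ds.engine.Dataset.read_dir(DATA_DIR, SCHEMA, **read_dir_kwargs)
    count = sum(1 for _ in data.create_dict_iterator())  # each item is a dictionary
    assert count == 3

# Example: test_case_0 expressed through the helper.
def test_case_0_via_helper():
    run_readdir_case(0, columns_list=None, num_parallel_workers=None,
                     deterministic_output=True, prefetch_size=None,
                     shuffle=False, seed=None)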