
test_random_dataset.py

# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from pathlib import Path

import mindspore.common.dtype as mstype
import mindspore.dataset as ds
from mindspore import log as logger


# just a basic test with parallel random data op
def test_randomdataset_basic1():
    logger.info("Test randomdataset basic")

    schema = ds.Schema()
    schema.add_column('image', de_type=mstype.uint8, shape=[2])
    schema.add_column('label', de_type=mstype.uint8, shape=[1])

    # apply dataset operations
    ds1 = ds.RandomDataset(schema=schema, num_samples=50, num_parallel_workers=4)
    ds1 = ds1.repeat(4)

    num_iter = 0
    for data in ds1.create_dict_iterator():  # each data is a dictionary
        # in this example, each dictionary has keys "image" and "label"
        logger.info("{} image: {}".format(num_iter, data["image"]))
        logger.info("{} label: {}".format(num_iter, data["label"]))
        num_iter += 1

    logger.info("Number of data in ds1: {}".format(num_iter))
    assert num_iter == 200


# Another simple test
def test_randomdataset_basic2():
    logger.info("Test randomdataset basic 2")

    schema = ds.Schema()
    schema.add_column('image', de_type=mstype.uint8,
                      shape=[640, 480, 3])  # 921600 bytes (a bit less than 1 MB per image)
    schema.add_column('label', de_type=mstype.uint8, shape=[1])

    # Make up about 10 samples
    ds1 = ds.RandomDataset(schema=schema, num_samples=10, num_parallel_workers=1)

    # cache size allows for about 4 images since each image is just a bit less than 1 MB;
    # after that we will have to spill
    ds1 = ds1.repeat(4)

    num_iter = 0
    for data in ds1.create_dict_iterator():  # each data is a dictionary
        # in this example, each dictionary has keys "image" and "label"
        # logger.info(data["image"])
        logger.info("printing the label: {}".format(data["label"]))
        num_iter += 1

    logger.info("Number of data in ds1: {}".format(num_iter))
    assert num_iter == 40


if __name__ == '__main__':
    test_randomdataset_basic1()
    test_randomdataset_basic2()
    logger.info('test_randomdataset_basic Ended.\n')
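
Both functions follow the pytest naming convention, so besides the __main__ block above the file can also be collected by a test runner. A minimal sketch, assuming pytest is installed (the runner call below is an illustration and not part of the original file):

import pytest

# Collect and run this test module verbosely via pytest's programmatic entry point.
pytest.main(['-v', 'test_random_dataset.py'])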