
dataset.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Create the train or eval dataset."""
import os
from functools import partial

import mindspore.common.dtype as mstype
import mindspore.dataset.engine as de
import mindspore.dataset.transforms.vision.c_transforms as C
import mindspore.dataset.transforms.c_transforms as C2
import mindspore.dataset.transforms.vision.py_transforms as P
from mindspore.communication.management import init, get_rank, get_group_size

from src.config import quant_set, config_quant, config_noquant

config = config_quant if quant_set.quantization_aware else config_noquant


def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32, target="Ascend"):
    """
    Create a train or eval dataset using c_transforms operators.

    Args:
        dataset_path (str): Path of the dataset.
        do_train (bool): Whether the dataset is used for training or evaluation.
        repeat_num (int): Number of times to repeat the dataset. Default: 1.
        batch_size (int): Batch size of the dataset. Default: 32.
        target (str): Device target. Default: "Ascend".

    Returns:
        Dataset object.
    """
    if target == "Ascend":
        # On Ascend, rank information comes from environment variables set by
        # the launch script; fall back to single-device values when unset.
        device_num = int(os.getenv("RANK_SIZE", "1"))
        rank_id = int(os.getenv("RANK_ID", "0"))
    else:
        # On GPU, initialize NCCL and query rank info from the collective.
        init("nccl")
        rank_id = get_rank()
        device_num = get_group_size()

    columns_list = ['image', 'label']
    if config.data_load_mode == "mindrecord":
        load_func = partial(de.MindDataset, dataset_path, columns_list)
    else:
        load_func = partial(de.ImageFolderDatasetV2, dataset_path)
    if device_num == 1:
        ds = load_func(num_parallel_workers=8, shuffle=True)
    else:
        # Shard the dataset across devices for distributed training.
        ds = load_func(num_parallel_workers=8, shuffle=True,
                       num_shards=device_num, shard_id=rank_id)

    image_size = config.image_height
    mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
    std = [0.229 * 255, 0.224 * 255, 0.225 * 255]

    # define map operations
    if do_train:
        trans = [
            C.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
            C.RandomHorizontalFlip(prob=0.5),
            C.Normalize(mean=mean, std=std),
            C.HWC2CHW()
        ]
    else:
        trans = [
            C.Decode(),
            C.Resize(256),
            C.CenterCrop(image_size),
            C.Normalize(mean=mean, std=std),
            C.HWC2CHW()
        ]
    type_cast_op = C2.TypeCast(mstype.int32)

    ds = ds.map(input_columns="image", num_parallel_workers=8, operations=trans)
    ds = ds.map(input_columns="label", num_parallel_workers=8, operations=type_cast_op)

    # apply batch operations
    ds = ds.batch(batch_size, drop_remainder=True)

    # apply dataset repeat operation
    ds = ds.repeat(repeat_num)

    return ds
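
# A minimal usage sketch (an assumption for illustration, not part of the
# original file): this is how create_dataset would typically be called from a
# training script. The dataset path is a placeholder, and RANK_SIZE/RANK_ID
# are assumed to be exported by the launch script on multi-device Ascend runs.
#
#     dataset = create_dataset("/path/to/imagenet/train", do_train=True,
#                              repeat_num=1, batch_size=32, target="Ascend")
#     step_size = dataset.get_dataset_size()  # number of batches per epoch
#     for batch in dataset.create_dict_iterator():
#         images, labels = batch["image"], batch["label"]
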
def create_dataset_py(dataset_path, do_train, repeat_num=1, batch_size=32, target="Ascend"):
    """
    Create a train or eval dataset using py_transforms operators.

    Args:
        dataset_path (str): Path of the dataset.
        do_train (bool): Whether the dataset is used for training or evaluation.
        repeat_num (int): Number of times to repeat the dataset. Default: 1.
        batch_size (int): Batch size of the dataset. Default: 32.
        target (str): Device target. Default: "Ascend".

    Returns:
        Dataset object.
    """
    if target == "Ascend":
        # Fall back to single-device values when the env vars are unset.
        device_num = int(os.getenv("RANK_SIZE", "1"))
        rank_id = int(os.getenv("RANK_ID", "0"))
    else:
        init("nccl")
        rank_id = get_rank()
        device_num = get_group_size()

    if do_train:
        if device_num == 1:
            ds = de.ImageFolderDatasetV2(dataset_path, num_parallel_workers=8, shuffle=True)
        else:
            ds = de.ImageFolderDatasetV2(dataset_path, num_parallel_workers=8, shuffle=True,
                                         num_shards=device_num, shard_id=rank_id)
    else:
        ds = de.ImageFolderDatasetV2(dataset_path, num_parallel_workers=8, shuffle=False)

    image_size = 224

    # define map operations
    decode_op = P.Decode()
    resize_crop_op = P.RandomResizedCrop(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333))
    horizontal_flip_op = P.RandomHorizontalFlip(prob=0.5)
    resize_op = P.Resize(256)
    center_crop = P.CenterCrop(image_size)
    to_tensor = P.ToTensor()
    normalize_op = P.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    if do_train:
        trans = [decode_op, resize_crop_op, horizontal_flip_op, to_tensor, normalize_op]
    else:
        trans = [decode_op, resize_op, center_crop, to_tensor, normalize_op]
    compose = P.ComposeOp(trans)

    ds = ds.map(input_columns="image", operations=compose(), num_parallel_workers=8,
                python_multiprocessing=True)

    # apply batch operations
    ds = ds.batch(batch_size, drop_remainder=True)

    # apply dataset repeat operation
    ds = ds.repeat(repeat_num)

    return ds
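
# A hedged usage sketch (illustrative only, not part of the original file):
# create_dataset_py is the py_transforms counterpart of create_dataset above.
# For evaluation it disables shuffling and uses a fixed 224x224 center crop;
# the dataset path below is a placeholder.
#
#     eval_ds = create_dataset_py("/path/to/imagenet/val", do_train=False,
#                                 batch_size=32, target="Ascend")
#     print("eval batches:", eval_ds.get_dataset_size())
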