dataset.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
create train or eval dataset.
"""
import os

import mindspore.common.dtype as mstype
import mindspore.dataset.engine as de
import mindspore.dataset.transforms.vision.c_transforms as C
import mindspore.dataset.transforms.vision.py_transforms as P
import mindspore.dataset.transforms.c_transforms as C2
from mindspore.dataset.transforms.vision import Inter


def create_dataset(dataset_path, do_train, config, platform, repeat_num=1, batch_size=100, model='ghostnet'):
    """
    Create a train or eval dataset.

    Args:
        dataset_path (string): the path of the dataset.
        do_train (bool): whether the dataset is used for training or evaluation.
        config: configuration object; only `image_height` is read here.
        platform (string): run platform, "Ascend" or "GPU".
        repeat_num (int): the repeat times of the dataset. Default: 1.
        batch_size (int): the batch size of the dataset. Default: 100.
        model (string): model variant; 'ghostnet-600' uses larger eval sizes.

    Returns:
        dataset
    """
    if platform == "Ascend":
        rank_size = int(os.getenv("RANK_SIZE"))
        rank_id = int(os.getenv("RANK_ID"))
        if rank_size == 1:
            ds = de.MindDataset(
                dataset_path, num_parallel_workers=8, shuffle=True)
        else:
            ds = de.MindDataset(dataset_path, num_parallel_workers=8, shuffle=True,
                                num_shards=rank_size, shard_id=rank_id)
    elif platform == "GPU":
        if do_train:
            from mindspore.communication.management import get_rank, get_group_size
            ds = de.MindDataset(dataset_path, num_parallel_workers=8, shuffle=True,
                                num_shards=get_group_size(), shard_id=get_rank())
        else:
            ds = de.MindDataset(
                dataset_path, num_parallel_workers=8, shuffle=True)
    else:
        raise ValueError("Unsupported platform.")
    resize_height = config.image_height
    buffer_size = 1000

    # define map operations
    resize_crop_op = C.RandomCropDecodeResize(
        resize_height, scale=(0.08, 1.0), ratio=(0.75, 1.333))
    horizontal_flip_op = C.RandomHorizontalFlip(prob=0.5)
    color_op = C.RandomColorAdjust(
        brightness=0.4, contrast=0.4, saturation=0.4)
    rescale_op = C.Rescale(1 / 255.0, 0)
    normalize_op = C.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    change_swap_op = C.HWC2CHW()
    # define python operations (used for the eval pipeline)
    decode_p = P.Decode()
    if model == 'ghostnet-600':
        # the ghostnet-600 variant evaluates at a larger resolution
        s = 274
        c = 240
    else:
        s = 256
        c = 224
    resize_p = P.Resize(s, interpolation=Inter.BICUBIC)
    center_crop_p = P.CenterCrop(c)
    totensor = P.ToTensor()
    normalize_p = P.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    composeop = P.ComposeOp(
        [decode_p, resize_p, center_crop_p, totensor, normalize_p])
    if do_train:
        trans = [resize_crop_op, horizontal_flip_op, color_op,
                 rescale_op, normalize_op, change_swap_op]
    else:
        trans = composeop()
    type_cast_op = C2.TypeCast(mstype.int32)

    ds = ds.map(input_columns="image", operations=trans,
                num_parallel_workers=8)
    ds = ds.map(input_columns="label_list",
                operations=type_cast_op, num_parallel_workers=8)

    # apply shuffle operations
    ds = ds.shuffle(buffer_size=buffer_size)

    # apply batch operations
    ds = ds.batch(batch_size, drop_remainder=True)

    # apply dataset repeat operation
    ds = ds.repeat(repeat_num)

    return ds
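
A minimal usage sketch (not part of the original file): the `config` object, the MindRecord path, and the environment-variable setup below are assumptions for illustration; only the `image_height` attribute of `config` is actually read by `create_dataset`.

    import os
    from types import SimpleNamespace

    # Hypothetical single-device Ascend setup; RANK_SIZE=1 selects the
    # unsharded MindDataset branch in create_dataset.
    os.environ.setdefault("RANK_SIZE", "1")
    os.environ.setdefault("RANK_ID", "0")

    config = SimpleNamespace(image_height=224)  # only attribute used here

    ds = create_dataset(
        dataset_path="/path/to/train.mindrecord",  # placeholder path
        do_train=True,
        config=config,
        platform="Ascend",
        repeat_num=1,
        batch_size=128,
    )

    for batch in ds.create_dict_iterator():
        # each batch is a dict keyed by the MindRecord columns
        print(batch["image"].shape, batch["label_list"].shape)
        break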