You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

test_ms_dataset.py 5.3 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141
  1. # Copyright (c) Alibaba, Inc. and its affiliates.
  2. import unittest
  3. from modelscope.models import Model
  4. from modelscope.msdatasets import MsDataset
  5. from modelscope.preprocessors import SequenceClassificationPreprocessor
  6. from modelscope.preprocessors.base import Preprocessor
  7. from modelscope.utils.constant import DEFAULT_DATASET_NAMESPACE, DownloadMode
  8. from modelscope.utils.test_utils import require_tf, require_torch, test_level
  9. class ImgPreprocessor(Preprocessor):
  10. def __init__(self, *args, **kwargs):
  11. super().__init__(*args, **kwargs)
  12. self.path_field = kwargs.pop('image_path', 'image_path')
  13. self.width = kwargs.pop('width', 'width')
  14. self.height = kwargs.pop('height', 'width')
  15. def __call__(self, data):
  16. import cv2
  17. image_path = data.get(self.path_field)
  18. if not image_path:
  19. return None
  20. img = cv2.imread(image_path)
  21. return {
  22. 'image':
  23. cv2.resize(img,
  24. (data.get(self.height, 128), data.get(self.width, 128)))
  25. }
class MsDatasetTest(unittest.TestCase):
    """Integration tests for MsDataset: hub loading and conversion to
    PyTorch / TensorFlow datasets.

    NOTE(review): every test downloads from the ModelScope hub, so all are
    gated by ``test_level()`` and, where relevant, by the torch/tf
    availability decorators.
    """

    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    def test_movie_scene_seg_toydata(self):
        """Smoke-test loading the train split of a toy video dataset."""
        ms_ds_train = MsDataset.load('movie_scene_seg_toydata', split='train')
        # NOTE(review): prints the private _hf_ds.config_kwargs but asserts
        # on the public config_kwargs — presumably equivalent; confirm.
        print(ms_ds_train._hf_ds.config_kwargs)
        # The first split-config entry being truthy implies the download
        # produced a usable split mapping.
        assert next(iter(ms_ds_train.config_kwargs['split_config'].values()))

    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    def test_coco(self):
        """Force a fresh download of a small COCO-style dataset split."""
        ms_ds_train = MsDataset.load(
            'pets_small',
            namespace=DEFAULT_DATASET_NAMESPACE,
            download_mode=DownloadMode.FORCE_REDOWNLOAD,
            split='train')
        print(ms_ds_train.config_kwargs)
        assert next(iter(ms_ds_train.config_kwargs['split_config'].values()))

    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
    def test_ms_csv_basic(self):
        """Load a CSV-backed dataset from a user namespace and iterate once."""
        ms_ds_train = MsDataset.load(
            'afqmc_small', namespace='userxiaoming', split='train')
        print(next(iter(ms_ds_train)))

    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
    def test_ds_basic(self):
        """Load a subset both as a full dict of splits and as a single split."""
        ms_ds_full = MsDataset.load(
            'xcopa', subset_name='translation-et', namespace='damotest')
        ms_ds = MsDataset.load(
            'xcopa',
            subset_name='translation-et',
            namespace='damotest',
            split='test')
        # Full load is indexable by split name; single-split load iterates
        # records directly.
        print(next(iter(ms_ds_full['test'])))
        print(next(iter(ms_ds)))

    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
    @require_torch
    def test_to_torch_dataset_text(self):
        """Convert a text dataset to a torch Dataset via an NLP preprocessor."""
        model_id = 'damo/bert-base-sst2'
        nlp_model = Model.from_pretrained(model_id)
        # padding='max_length' keeps tensor shapes uniform for batching.
        preprocessor = SequenceClassificationPreprocessor(
            nlp_model.model_dir,
            first_sequence='premise',
            second_sequence=None,
            padding='max_length')
        ms_ds_train = MsDataset.load(
            'xcopa',
            subset_name='translation-et',
            namespace='damotest',
            split='test')
        pt_dataset = ms_ds_train.to_torch_dataset(preprocessors=preprocessor)
        import torch
        dataloader = torch.utils.data.DataLoader(pt_dataset, batch_size=5)
        print(next(iter(dataloader)))

    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
    @require_tf
    def test_to_tf_dataset_text(self):
        """Convert a text dataset to a tf.data.Dataset via an NLP preprocessor."""
        import tensorflow as tf
        # Eager execution is required so next(iter(...)) yields concrete
        # tensors under TF1-compat graphs.
        tf.compat.v1.enable_eager_execution()
        model_id = 'damo/bert-base-sst2'
        nlp_model = Model.from_pretrained(model_id)
        preprocessor = SequenceClassificationPreprocessor(
            nlp_model.model_dir,
            first_sequence='premise',
            second_sequence=None)
        ms_ds_train = MsDataset.load(
            'xcopa',
            subset_name='translation-et',
            namespace='damotest',
            split='test')
        tf_dataset = ms_ds_train.to_tf_dataset(
            batch_size=5,
            shuffle=True,
            preprocessors=preprocessor,
            drop_remainder=True)
        print(next(iter(tf_dataset)))

    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
    @require_torch
    def test_to_torch_dataset_img(self):
        """Convert an image dataset to a torch Dataset via ImgPreprocessor."""
        ms_image_train = MsDataset.load(
            'fixtures_image_utils', namespace='damotest', split='test')
        # The fixture stores image paths under the 'file' field.
        pt_dataset = ms_image_train.to_torch_dataset(
            preprocessors=ImgPreprocessor(image_path='file'))
        import torch
        dataloader = torch.utils.data.DataLoader(pt_dataset, batch_size=5)
        print(next(iter(dataloader)))

    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
    @require_tf
    def test_to_tf_dataset_img(self):
        """Convert an image dataset to a tf.data.Dataset via ImgPreprocessor."""
        import tensorflow as tf
        tf.compat.v1.enable_eager_execution()
        ms_image_train = MsDataset.load(
            'fixtures_image_utils', namespace='damotest', split='test')
        tf_dataset = ms_image_train.to_tf_dataset(
            batch_size=5,
            shuffle=True,
            preprocessors=ImgPreprocessor(image_path='file'),
            drop_remainder=True,
        )
        print(next(iter(tf_dataset)))
# Allow running this test module directly with `python test_ms_dataset.py`.
if __name__ == '__main__':
    unittest.main()