You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

test_datasets_mnist.py 11 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ==============================================================================
  15. """
  16. Test Mnist dataset operators
  17. """
  18. import os
  19. import pytest
  20. import numpy as np
  21. import matplotlib.pyplot as plt
  22. import mindspore.dataset as ds
  23. import mindspore.dataset.vision.c_transforms as vision
  24. from mindspore import log as logger
  25. DATA_DIR = "../data/dataset/testMnistData"
  26. def load_mnist(path):
  27. """
  28. load Mnist data
  29. """
  30. labels_path = os.path.join(path, 't10k-labels-idx1-ubyte')
  31. images_path = os.path.join(path, 't10k-images-idx3-ubyte')
  32. with open(labels_path, 'rb') as lbpath:
  33. lbpath.read(8)
  34. labels = np.fromfile(lbpath, dtype=np.uint8)
  35. with open(images_path, 'rb') as imgpath:
  36. imgpath.read(16)
  37. images = np.fromfile(imgpath, dtype=np.uint8)
  38. images = images.reshape(-1, 28, 28, 1)
  39. images[images > 0] = 255 # Perform binarization to maintain consistency with our API
  40. return images, labels
  41. def visualize_dataset(images, labels):
  42. """
  43. Helper function to visualize the dataset samples
  44. """
  45. num_samples = len(images)
  46. for i in range(num_samples):
  47. plt.subplot(1, num_samples, i + 1)
  48. plt.imshow(images[i].squeeze(), cmap=plt.cm.gray)
  49. plt.title(labels[i])
  50. plt.show()
  51. def test_mnist_content_check():
  52. """
  53. Validate MnistDataset image readings
  54. """
  55. logger.info("Test MnistDataset Op with content check")
  56. data1 = ds.MnistDataset(DATA_DIR, num_samples=100, shuffle=False)
  57. images, labels = load_mnist(DATA_DIR)
  58. num_iter = 0
  59. # in this example, each dictionary has keys "image" and "label"
  60. image_list, label_list = [], []
  61. for i, data in enumerate(data1.create_dict_iterator(num_epochs=1, output_numpy=True)):
  62. image_list.append(data["image"])
  63. label_list.append("label {}".format(data["label"]))
  64. np.testing.assert_array_equal(data["image"], images[i])
  65. np.testing.assert_array_equal(data["label"], labels[i])
  66. num_iter += 1
  67. assert num_iter == 100
  68. def test_mnist_basic():
  69. """
  70. Validate MnistDataset
  71. """
  72. logger.info("Test MnistDataset Op")
  73. # case 1: test loading whole dataset
  74. data1 = ds.MnistDataset(DATA_DIR)
  75. num_iter1 = 0
  76. for _ in data1.create_dict_iterator(num_epochs=1):
  77. num_iter1 += 1
  78. assert num_iter1 == 10000
  79. # case 2: test num_samples
  80. data2 = ds.MnistDataset(DATA_DIR, num_samples=500)
  81. num_iter2 = 0
  82. for _ in data2.create_dict_iterator(num_epochs=1):
  83. num_iter2 += 1
  84. assert num_iter2 == 500
  85. # case 3: test repeat
  86. data3 = ds.MnistDataset(DATA_DIR, num_samples=200)
  87. data3 = data3.repeat(5)
  88. num_iter3 = 0
  89. for _ in data3.create_dict_iterator(num_epochs=1):
  90. num_iter3 += 1
  91. assert num_iter3 == 1000
  92. # case 4: test batch with drop_remainder=False
  93. data4 = ds.MnistDataset(DATA_DIR, num_samples=100)
  94. assert data4.get_dataset_size() == 100
  95. assert data4.get_batch_size() == 1
  96. data4 = data4.batch(batch_size=7) # drop_remainder is default to be False
  97. assert data4.get_dataset_size() == 15
  98. assert data4.get_batch_size() == 7
  99. num_iter4 = 0
  100. for _ in data4.create_dict_iterator(num_epochs=1):
  101. num_iter4 += 1
  102. assert num_iter4 == 15
  103. # case 5: test batch with drop_remainder=True
  104. data5 = ds.MnistDataset(DATA_DIR, num_samples=100)
  105. assert data5.get_dataset_size() == 100
  106. assert data5.get_batch_size() == 1
  107. data5 = data5.batch(batch_size=7, drop_remainder=True) # the rest of incomplete batch will be dropped
  108. assert data5.get_dataset_size() == 14
  109. assert data5.get_batch_size() == 7
  110. num_iter5 = 0
  111. for _ in data5.create_dict_iterator(num_epochs=1):
  112. num_iter5 += 1
  113. assert num_iter5 == 14
  114. def test_mnist_pk_sampler():
  115. """
  116. Test MnistDataset with PKSampler
  117. """
  118. logger.info("Test MnistDataset Op with PKSampler")
  119. golden = [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4,
  120. 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9]
  121. sampler = ds.PKSampler(3)
  122. data = ds.MnistDataset(DATA_DIR, sampler=sampler)
  123. num_iter = 0
  124. label_list = []
  125. for item in data.create_dict_iterator(num_epochs=1, output_numpy=True):
  126. label_list.append(item["label"])
  127. num_iter += 1
  128. np.testing.assert_array_equal(golden, label_list)
  129. assert num_iter == 30
  130. def test_mnist_sequential_sampler():
  131. """
  132. Test MnistDataset with SequentialSampler
  133. """
  134. logger.info("Test MnistDataset Op with SequentialSampler")
  135. num_samples = 50
  136. sampler = ds.SequentialSampler(num_samples=num_samples)
  137. data1 = ds.MnistDataset(DATA_DIR, sampler=sampler)
  138. data2 = ds.MnistDataset(DATA_DIR, shuffle=False, num_samples=num_samples)
  139. label_list1, label_list2 = [], []
  140. num_iter = 0
  141. for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1), data2.create_dict_iterator(num_epochs=1)):
  142. label_list1.append(item1["label"].asnumpy())
  143. label_list2.append(item2["label"].asnumpy())
  144. num_iter += 1
  145. np.testing.assert_array_equal(label_list1, label_list2)
  146. assert num_iter == num_samples
  147. def test_mnist_exception():
  148. """
  149. Test error cases for MnistDataset
  150. """
  151. logger.info("Test error cases for MnistDataset")
  152. error_msg_1 = "sampler and shuffle cannot be specified at the same time"
  153. with pytest.raises(RuntimeError, match=error_msg_1):
  154. ds.MnistDataset(DATA_DIR, shuffle=False, sampler=ds.PKSampler(3))
  155. error_msg_2 = "sampler and sharding cannot be specified at the same time"
  156. with pytest.raises(RuntimeError, match=error_msg_2):
  157. ds.MnistDataset(DATA_DIR, sampler=ds.PKSampler(3), num_shards=2, shard_id=0)
  158. error_msg_3 = "num_shards is specified and currently requires shard_id as well"
  159. with pytest.raises(RuntimeError, match=error_msg_3):
  160. ds.MnistDataset(DATA_DIR, num_shards=10)
  161. error_msg_4 = "shard_id is specified but num_shards is not"
  162. with pytest.raises(RuntimeError, match=error_msg_4):
  163. ds.MnistDataset(DATA_DIR, shard_id=0)
  164. error_msg_5 = "Input shard_id is not within the required interval"
  165. with pytest.raises(ValueError, match=error_msg_5):
  166. ds.MnistDataset(DATA_DIR, num_shards=5, shard_id=-1)
  167. with pytest.raises(ValueError, match=error_msg_5):
  168. ds.MnistDataset(DATA_DIR, num_shards=5, shard_id=5)
  169. with pytest.raises(ValueError, match=error_msg_5):
  170. ds.MnistDataset(DATA_DIR, num_shards=2, shard_id=5)
  171. error_msg_6 = "num_parallel_workers exceeds"
  172. with pytest.raises(ValueError, match=error_msg_6):
  173. ds.MnistDataset(DATA_DIR, shuffle=False, num_parallel_workers=0)
  174. with pytest.raises(ValueError, match=error_msg_6):
  175. ds.MnistDataset(DATA_DIR, shuffle=False, num_parallel_workers=256)
  176. with pytest.raises(ValueError, match=error_msg_6):
  177. ds.MnistDataset(DATA_DIR, shuffle=False, num_parallel_workers=-2)
  178. error_msg_7 = "Argument shard_id"
  179. with pytest.raises(TypeError, match=error_msg_7):
  180. ds.MnistDataset(DATA_DIR, num_shards=2, shard_id="0")
  181. def exception_func(item):
  182. raise Exception("Error occur!")
  183. error_msg_8 = "The corresponding data files"
  184. with pytest.raises(RuntimeError, match=error_msg_8):
  185. data = ds.MnistDataset(DATA_DIR)
  186. data = data.map(operations=exception_func, input_columns=["image"], num_parallel_workers=1)
  187. for _ in data.__iter__():
  188. pass
  189. with pytest.raises(RuntimeError, match=error_msg_8):
  190. data = ds.MnistDataset(DATA_DIR)
  191. data = data.map(operations=vision.Decode(), input_columns=["image"], num_parallel_workers=1)
  192. data = data.map(operations=exception_func, input_columns=["image"], num_parallel_workers=1)
  193. for _ in data.__iter__():
  194. pass
  195. with pytest.raises(RuntimeError, match=error_msg_8):
  196. data = ds.MnistDataset(DATA_DIR)
  197. data = data.map(operations=exception_func, input_columns=["label"], num_parallel_workers=1)
  198. for _ in data.__iter__():
  199. pass
  200. def test_mnist_visualize(plot=False):
  201. """
  202. Visualize MnistDataset results
  203. """
  204. logger.info("Test MnistDataset visualization")
  205. data1 = ds.MnistDataset(DATA_DIR, num_samples=10, shuffle=False)
  206. num_iter = 0
  207. image_list, label_list = [], []
  208. for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
  209. image = item["image"]
  210. label = item["label"]
  211. image_list.append(image)
  212. label_list.append("label {}".format(label))
  213. assert isinstance(image, np.ndarray)
  214. assert image.shape == (28, 28, 1)
  215. assert image.dtype == np.uint8
  216. assert label.dtype == np.uint32
  217. num_iter += 1
  218. assert num_iter == 10
  219. if plot:
  220. visualize_dataset(image_list, label_list)
  221. def test_mnist_usage():
  222. """
  223. Validate MnistDataset image readings
  224. """
  225. logger.info("Test MnistDataset usage flag")
  226. def test_config(usage, mnist_path=None):
  227. mnist_path = DATA_DIR if mnist_path is None else mnist_path
  228. try:
  229. data = ds.MnistDataset(mnist_path, usage=usage, shuffle=False)
  230. num_rows = 0
  231. for _ in data.create_dict_iterator(num_epochs=1, output_numpy=True):
  232. num_rows += 1
  233. except (ValueError, TypeError, RuntimeError) as e:
  234. return str(e)
  235. return num_rows
  236. assert test_config("test") == 10000
  237. assert test_config("all") == 10000
  238. assert " no valid data matching the dataset API MnistDataset" in test_config("train")
  239. assert "usage is not within the valid set of ['train', 'test', 'all']" in test_config("invalid")
  240. assert "Argument usage with value ['list'] is not of type [<class 'str'>]" in test_config(["list"])
  241. # change this directory to the folder that contains all mnist files
  242. all_files_path = None
  243. # the following tests on the entire datasets
  244. if all_files_path is not None:
  245. assert test_config("train", all_files_path) == 60000
  246. assert test_config("test", all_files_path) == 10000
  247. assert test_config("all", all_files_path) == 70000
  248. assert ds.MnistDataset(all_files_path, usage="train").get_dataset_size() == 60000
  249. assert ds.MnistDataset(all_files_path, usage="test").get_dataset_size() == 10000
  250. assert ds.MnistDataset(all_files_path, usage="all").get_dataset_size() == 70000
  251. if __name__ == '__main__':
  252. test_mnist_content_check()
  253. test_mnist_basic()
  254. test_mnist_pk_sampler()
  255. test_mnist_sequential_sampler()
  256. test_mnist_exception()
  257. test_mnist_visualize(plot=True)
  258. test_mnist_usage()