You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

test_dataset_numpy_slices.py 8.8 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ==============================================================================
  15. import sys
  16. import pytest
  17. import numpy as np
  18. import pandas as pd
  19. import mindspore.dataset as de
  20. from mindspore import log as logger
  21. import mindspore.dataset.vision.c_transforms as vision
  22. def test_numpy_slices_list_1():
  23. logger.info("Test Slicing a 1D list.")
  24. np_data = [1, 2, 3]
  25. ds = de.NumpySlicesDataset(np_data, shuffle=False)
  26. for i, data in enumerate(ds):
  27. assert data[0].asnumpy() == np_data[i]
  28. def test_numpy_slices_list_2():
  29. logger.info("Test Slicing a 2D list into 1D list.")
  30. np_data = [[1, 2], [3, 4]]
  31. ds = de.NumpySlicesDataset(np_data, column_names=["col1"], shuffle=False)
  32. for i, data in enumerate(ds):
  33. assert np.equal(data[0].asnumpy(), np_data[i]).all()
  34. def test_numpy_slices_list_3():
  35. logger.info("Test Slicing list in the first dimension.")
  36. np_data = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
  37. ds = de.NumpySlicesDataset(np_data, column_names=["col1"], shuffle=False)
  38. for i, data in enumerate(ds):
  39. assert np.equal(data[0].asnumpy(), np_data[i]).all()
  40. def test_numpy_slices_list_append():
  41. logger.info("Test reading data of image list.")
  42. DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
  43. resize_height, resize_width = 2, 2
  44. data1 = de.TFRecordDataset(DATA_DIR)
  45. resize_op = vision.Resize((resize_height, resize_width))
  46. data1 = data1.map(operations=[vision.Decode(True), resize_op], input_columns=["image"])
  47. res = []
  48. for data in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
  49. res.append(data["image"])
  50. ds = de.NumpySlicesDataset(res, column_names=["col1"], shuffle=False)
  51. for i, data in enumerate(ds.create_tuple_iterator(output_numpy=True)):
  52. assert np.equal(data, res[i]).all()
  53. def test_numpy_slices_dict_1():
  54. logger.info("Test Dictionary structure data.")
  55. np_data = {"a": [1, 2], "b": [3, 4]}
  56. ds = de.NumpySlicesDataset(np_data, shuffle=False)
  57. res = [[1, 3], [2, 4]]
  58. for i, data in enumerate(ds):
  59. assert data[0].asnumpy() == res[i][0]
  60. assert data[1].asnumpy() == res[i][1]
  61. def test_numpy_slices_tuple_1():
  62. logger.info("Test slicing a list of tuple.")
  63. np_data = [([1, 2], [3, 4]), ([11, 12], [13, 14]), ([21, 22], [23, 24])]
  64. ds = de.NumpySlicesDataset(np_data, shuffle=False)
  65. for i, data in enumerate(ds.create_tuple_iterator(output_numpy=True)):
  66. assert np.equal(data, np_data[i]).all()
  67. assert sum([1 for _ in ds]) == 3
  68. def test_numpy_slices_tuple_2():
  69. logger.info("Test slicing a tuple of list.")
  70. np_data = ([1, 2], [3, 4], [5, 6])
  71. expected = [[1, 3, 5], [2, 4, 6]]
  72. ds = de.NumpySlicesDataset(np_data, shuffle=False)
  73. for i, data in enumerate(ds.create_tuple_iterator(output_numpy=True)):
  74. assert np.equal(data, expected[i]).all()
  75. assert sum([1 for _ in ds]) == 2
  76. def test_numpy_slices_tuple_3():
  77. logger.info("Test reading different dimension of tuple data.")
  78. features, labels = np.random.sample((5, 2)), np.random.sample((5, 1))
  79. data = (features, labels)
  80. ds = de.NumpySlicesDataset(data, column_names=["col1", "col2"], shuffle=False)
  81. for i, data in enumerate(ds):
  82. assert np.equal(data[0].asnumpy(), features[i]).all()
  83. assert data[1].asnumpy() == labels[i]
  84. def test_numpy_slices_csv_value():
  85. logger.info("Test loading value of csv file.")
  86. csv_file = "../data/dataset/testNumpySlicesDataset/heart.csv"
  87. df = pd.read_csv(csv_file)
  88. target = df.pop("target")
  89. df.pop("state")
  90. np_data = (df.values, target.values)
  91. ds = de.NumpySlicesDataset(np_data, column_names=["col1", "col2"], shuffle=False)
  92. for i, data in enumerate(ds):
  93. assert np.equal(np_data[0][i], data[0].asnumpy()).all()
  94. assert np.equal(np_data[1][i], data[1].asnumpy()).all()
  95. def test_numpy_slices_csv_dict():
  96. logger.info("Test loading csv file as dict.")
  97. csv_file = "../data/dataset/testNumpySlicesDataset/heart.csv"
  98. df = pd.read_csv(csv_file)
  99. df.pop("state")
  100. res = df.values
  101. ds = de.NumpySlicesDataset(dict(df), shuffle=False)
  102. for i, data in enumerate(ds.create_tuple_iterator(output_numpy=True)):
  103. assert np.equal(data, res[i]).all()
  104. def test_numpy_slices_num_samplers():
  105. logger.info("Test num_samplers.")
  106. np_data = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]
  107. ds = de.NumpySlicesDataset(np_data, shuffle=False, num_samples=2)
  108. for i, data in enumerate(ds):
  109. assert np.equal(data[0].asnumpy(), np_data[i]).all()
  110. assert sum([1 for _ in ds]) == 2
  111. def test_numpy_slices_distributed_sampler():
  112. logger.info("Test distributed sampler.")
  113. np_data = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]
  114. ds = de.NumpySlicesDataset(np_data, shuffle=False, shard_id=0, num_shards=4)
  115. for i, data in enumerate(ds):
  116. assert np.equal(data[0].asnumpy(), np_data[i * 4]).all()
  117. assert sum([1 for _ in ds]) == 2
  118. def test_numpy_slices_distributed_shard_limit():
  119. logger.info("Test Slicing a 1D list.")
  120. np_data = [1, 2, 3]
  121. num = sys.maxsize
  122. with pytest.raises(ValueError) as err:
  123. de.NumpySlicesDataset(np_data, num_shards=num, shard_id=0, shuffle=False)
  124. assert "Input num_shards is not within the required interval of (1 to 2147483647)." in str(err.value)
  125. def test_numpy_slices_distributed_zero_shard():
  126. logger.info("Test Slicing a 1D list.")
  127. np_data = [1, 2, 3]
  128. with pytest.raises(ValueError) as err:
  129. de.NumpySlicesDataset(np_data, num_shards=0, shard_id=0, shuffle=False)
  130. assert "Input num_shards is not within the required interval of (1 to 2147483647)." in str(err.value)
  131. def test_numpy_slices_sequential_sampler():
  132. logger.info("Test numpy_slices_dataset with SequentialSampler and repeat.")
  133. np_data = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]
  134. ds = de.NumpySlicesDataset(np_data, sampler=de.SequentialSampler()).repeat(2)
  135. for i, data in enumerate(ds):
  136. assert np.equal(data[0].asnumpy(), np_data[i % 8]).all()
  137. def test_numpy_slices_invalid_column_names_type():
  138. logger.info("Test incorrect column_names input")
  139. np_data = [1, 2, 3]
  140. with pytest.raises(TypeError) as err:
  141. de.NumpySlicesDataset(np_data, column_names=[1], shuffle=False)
  142. assert "Argument column_names[0] with value 1 is not of type (<class 'str'>,)." in str(err.value)
  143. def test_numpy_slices_invalid_column_names_string():
  144. logger.info("Test incorrect column_names input")
  145. np_data = [1, 2, 3]
  146. with pytest.raises(ValueError) as err:
  147. de.NumpySlicesDataset(np_data, column_names=[""], shuffle=False)
  148. assert "column_names[0] should not be empty" in str(err.value)
  149. def test_numpy_slices_invalid_empty_column_names():
  150. logger.info("Test incorrect column_names input")
  151. np_data = [1, 2, 3]
  152. with pytest.raises(ValueError) as err:
  153. de.NumpySlicesDataset(np_data, column_names=[], shuffle=False)
  154. assert "column_names should not be empty" in str(err.value)
  155. def test_numpy_slices_invalid_empty_data_column():
  156. logger.info("Test incorrect column_names input")
  157. np_data = []
  158. with pytest.raises(ValueError) as err:
  159. de.NumpySlicesDataset(np_data, shuffle=False)
  160. assert "Argument data cannot be empty" in str(err.value)
  161. if __name__ == "__main__":
  162. test_numpy_slices_list_1()
  163. test_numpy_slices_list_2()
  164. test_numpy_slices_list_3()
  165. test_numpy_slices_list_append()
  166. test_numpy_slices_dict_1()
  167. test_numpy_slices_tuple_1()
  168. test_numpy_slices_tuple_2()
  169. test_numpy_slices_tuple_3()
  170. test_numpy_slices_csv_value()
  171. test_numpy_slices_csv_dict()
  172. test_numpy_slices_num_samplers()
  173. test_numpy_slices_distributed_sampler()
  174. test_numpy_slices_distributed_shard_limit()
  175. test_numpy_slices_distributed_zero_shard()
  176. test_numpy_slices_sequential_sampler()
  177. test_numpy_slices_invalid_column_names_type()
  178. test_numpy_slices_invalid_column_names_string()
  179. test_numpy_slices_invalid_empty_column_names()
  180. test_numpy_slices_invalid_empty_data_column()