
test_minddataset_padded.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
This is the test module for mindrecord padded samples.
"""
import collections
import os
import re
import string
import numpy as np
import pytest

import mindspore.dataset as ds
from mindspore import log as logger
from mindspore.mindrecord import FileWriter

FILES_NUM = 4
CV_DIR_NAME = "../data/mindrecord/testImageNetData"
NLP_FILE_POS = "../data/mindrecord/testAclImdbData/pos"
NLP_FILE_VOCAB = "../data/mindrecord/testAclImdbData/vocab.txt"
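
# The tests below exercise MindDataset's padded-sample support: num_padded extra
# rows (copies of padded_sample) are appended so the total row count divides
# evenly across num_shards, and padded rows are recognized by a sentinel value
# such as label == -1 or -2. A minimal usage sketch (the file path here is
# hypothetical, not one of the files generated by the fixtures):
#
#     padded_sample = {"id": -1, "file_name": "dummy.jpg", "label": -1, "data": b""}
#     data_set = ds.MindDataset("imagenet.mindrecord0",
#                               columns_list=["data", "file_name", "label"],
#                               num_shards=4, shard_id=0,
#                               padded_sample=padded_sample, num_padded=2)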


@pytest.fixture
def add_and_remove_cv_file():
    """add/remove cv file"""
    file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]
    paths = ["{}{}".format(file_name, str(x).rjust(1, '0'))
             for x in range(FILES_NUM)]
    try:
        for x in paths:
            if os.path.exists("{}".format(x)):
                os.remove("{}".format(x))
            if os.path.exists("{}.db".format(x)):
                os.remove("{}.db".format(x))
        writer = FileWriter(file_name, FILES_NUM)
        data = get_data(CV_DIR_NAME)
        cv_schema_json = {"id": {"type": "int32"},
                          "file_name": {"type": "string"},
                          "label": {"type": "int32"},
                          "data": {"type": "bytes"}}
        writer.add_schema(cv_schema_json, "img_schema")
        writer.add_index(["file_name", "label"])
        writer.write_raw_data(data)
        writer.commit()
        yield "yield_cv_data"
    except Exception as error:
        for x in paths:
            os.remove("{}".format(x))
            os.remove("{}.db".format(x))
        raise error
    else:
        for x in paths:
            os.remove("{}".format(x))
            os.remove("{}.db".format(x))
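
# The fixture above writes FILES_NUM (4) shard files named "<test name>0" ..
# "<test name>3", each with a companion "<test name>N.db" index file, and
# removes them again once the test finishes.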


@pytest.fixture
def add_and_remove_nlp_file():
    """add/remove nlp file"""
    file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]
    paths = ["{}{}".format(file_name, str(x).rjust(1, '0'))
             for x in range(FILES_NUM)]
    try:
        for x in paths:
            if os.path.exists("{}".format(x)):
                os.remove("{}".format(x))
            if os.path.exists("{}.db".format(x)):
                os.remove("{}.db".format(x))
        writer = FileWriter(file_name, FILES_NUM)
        data = [x for x in get_nlp_data(NLP_FILE_POS, NLP_FILE_VOCAB, 10)]
        nlp_schema_json = {"id": {"type": "string"}, "label": {"type": "int32"},
                           "rating": {"type": "float32"},
                           "input_ids": {"type": "int64",
                                         "shape": [-1]},
                           "input_mask": {"type": "int64",
                                          "shape": [1, -1]},
                           "segment_ids": {"type": "int64",
                                           "shape": [2, -1]}
                           }
        writer.set_header_size(1 << 14)
        writer.set_page_size(1 << 15)
        writer.add_schema(nlp_schema_json, "nlp_schema")
        writer.add_index(["id", "rating"])
        writer.write_raw_data(data)
        writer.commit()
        yield "yield_nlp_data"
    except Exception as error:
        for x in paths:
            os.remove("{}".format(x))
            os.remove("{}.db".format(x))
        raise error
    else:
        for x in paths:
            os.remove("{}".format(x))
            os.remove("{}.db".format(x))
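
# Note on the fixture above: set_header_size(1 << 14) and set_page_size(1 << 15)
# configure a 16 KB header and 32 KB pages for the generated MindRecord files,
# presumably to keep the small test files compact while still holding the
# variable-shape "input_ids"/"input_mask"/"segment_ids" fields.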


def test_cv_minddataset_reader_basic_padded_samples(add_and_remove_cv_file):
    """Read a MindDataset with num_padded=5 and verify the padded rows by their -1 label."""
    columns_list = ["label", "file_name", "data"]
    data = get_data(CV_DIR_NAME)
    padded_sample = data[0]
    padded_sample['label'] = -1
    padded_sample['file_name'] = 'dummy.jpg'
    num_readers = 4
    file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]
    data_set = ds.MindDataset(file_name + "0", columns_list, num_readers, padded_sample=padded_sample, num_padded=5)
    assert data_set.get_dataset_size() == 15
    num_iter = 0
    num_padded_iter = 0
    for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
        logger.info("-------------- cv reader basic: {} ------------------------".format(num_iter))
        logger.info("-------------- item[file_name]: {} ------------------------".format(item["file_name"]))
        logger.info("-------------- item[label]: {} ----------------------------".format(item["label"]))
        if item['label'] == -1:
            num_padded_iter += 1
            assert item['file_name'] == bytes(padded_sample['file_name'],
                                              encoding='utf8')
            assert item['label'] == padded_sample['label']
            assert (item['data'] == np.array(list(padded_sample['data']))).all()
        num_iter += 1
    assert num_padded_iter == 5
    assert num_iter == 15


def test_cv_minddataset_reader_basic_padded_samples_type_cast(add_and_remove_cv_file):
    """Read a MindDataset where padded_sample['file_name'] is an int that gets cast to a string."""
    columns_list = ["label", "file_name", "data"]
    data = get_data(CV_DIR_NAME)
    padded_sample = data[0]
    padded_sample['label'] = -1
    padded_sample['file_name'] = 99999
    num_readers = 4
    file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]
    data_set = ds.MindDataset(file_name + "0", columns_list, num_readers, padded_sample=padded_sample, num_padded=5)
    assert data_set.get_dataset_size() == 15
    num_iter = 0
    num_padded_iter = 0
    for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
        logger.info("-------------- cv reader basic: {} ------------------------".format(num_iter))
        logger.info("-------------- item[file_name]: {} ------------------------".format(item["file_name"]))
        logger.info("-------------- item[label]: {} ----------------------------".format(item["label"]))
        if item['label'] == -1:
            num_padded_iter += 1
            assert item['file_name'] == bytes(str(padded_sample['file_name']),
                                              encoding='utf8')
            assert item['label'] == padded_sample['label']
            assert (item['data'] == np.array(list(padded_sample['data']))).all()
        num_iter += 1
    assert num_padded_iter == 5
    assert num_iter == 15


def test_cv_minddataset_partition_padded_samples(add_and_remove_cv_file):
    """Read a sharded MindDataset with padded samples and verify the padded rows in every shard."""
    columns_list = ["data", "file_name", "label"]
    data = get_data(CV_DIR_NAME)
    padded_sample = data[0]
    padded_sample['label'] = -2
    padded_sample['file_name'] = 'dummy.jpg'
    num_readers = 4
    file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]

    def partitions(num_shards, num_padded, dataset_size):
        num_padded_iter = 0
        num_iter = 0
        for partition_id in range(num_shards):
            data_set = ds.MindDataset(file_name + "0", columns_list, num_readers,
                                      num_shards=num_shards,
                                      shard_id=partition_id,
                                      padded_sample=padded_sample,
                                      num_padded=num_padded)
            assert data_set.get_dataset_size() == dataset_size
            for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
                logger.info("-------------- partition : {} ------------------------".format(partition_id))
                logger.info("-------------- len(item[data]): {} ------------------------".format(len(item["data"])))
                logger.info("-------------- item[data]: {} -----------------------------".format(item["data"]))
                logger.info("-------------- item[file_name]: {} ------------------------".format(item["file_name"]))
                logger.info("-------------- item[label]: {} -----------------------".format(item["label"]))
                if item['label'] == -2:
                    num_padded_iter += 1
                    assert item['file_name'] == bytes(padded_sample['file_name'], encoding='utf8')
                    assert item['label'] == padded_sample['label']
                    assert (item['data'] == np.array(list(padded_sample['data']))).all()
                num_iter += 1
        assert num_padded_iter == num_padded
        return num_iter == dataset_size * num_shards

    partitions(4, 2, 3)
    partitions(5, 5, 3)
    partitions(9, 8, 2)
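
# Shard arithmetic for the three calls above, given the 10 real samples written
# by the fixture: (10 + 2) / 4 = 3, (10 + 5) / 5 = 3, and (10 + 8) / 9 = 2 rows
# per shard, which is exactly the dataset_size each call asserts.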


def test_cv_minddataset_partition_padded_samples_multi_epoch(add_and_remove_cv_file):
    """Repeat a sharded MindDataset with padded samples for 5 epochs and verify per-epoch reshuffling."""
    columns_list = ["data", "file_name", "label"]
    data = get_data(CV_DIR_NAME)
    padded_sample = data[0]
    padded_sample['label'] = -2
    padded_sample['file_name'] = 'dummy.jpg'
    num_readers = 4
    file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]

    def partitions(num_shards, num_padded, dataset_size):
        repeat_size = 5
        num_padded_iter = 0
        num_iter = 0
        for partition_id in range(num_shards):
            epoch1_shuffle_result = []
            epoch2_shuffle_result = []
            epoch3_shuffle_result = []
            epoch4_shuffle_result = []
            epoch5_shuffle_result = []
            data_set = ds.MindDataset(file_name + "0", columns_list, num_readers,
                                      num_shards=num_shards,
                                      shard_id=partition_id,
                                      padded_sample=padded_sample,
                                      num_padded=num_padded)
            assert data_set.get_dataset_size() == dataset_size
            data_set = data_set.repeat(repeat_size)
            local_index = 0
            for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
                logger.info("-------------- partition : {} ------------------------".format(partition_id))
                logger.info("-------------- len(item[data]): {} ------------------------".format(len(item["data"])))
                logger.info("-------------- item[data]: {} -----------------------------".format(item["data"]))
                logger.info("-------------- item[file_name]: {} ------------------------".format(item["file_name"]))
                logger.info("-------------- item[label]: {} -----------------------".format(item["label"]))
                if item['label'] == -2:
                    num_padded_iter += 1
                    assert item['file_name'] == bytes(padded_sample['file_name'], encoding='utf8')
                    assert item['label'] == padded_sample['label']
                    assert (item['data'] == np.array(list(padded_sample['data']))).all()
                if local_index < dataset_size:
                    epoch1_shuffle_result.append(item["file_name"])
                elif local_index < dataset_size * 2:
                    epoch2_shuffle_result.append(item["file_name"])
                elif local_index < dataset_size * 3:
                    epoch3_shuffle_result.append(item["file_name"])
                elif local_index < dataset_size * 4:
                    epoch4_shuffle_result.append(item["file_name"])
                elif local_index < dataset_size * 5:
                    epoch5_shuffle_result.append(item["file_name"])
                local_index += 1
                num_iter += 1
            assert len(epoch1_shuffle_result) == dataset_size
            assert len(epoch2_shuffle_result) == dataset_size
            assert len(epoch3_shuffle_result) == dataset_size
            assert len(epoch4_shuffle_result) == dataset_size
            assert len(epoch5_shuffle_result) == dataset_size
            assert local_index == dataset_size * repeat_size
            # When dataset_size is only 2, the shuffled order is too likely to repeat across epochs.
            if dataset_size > 2:
                assert epoch1_shuffle_result != epoch2_shuffle_result
                assert epoch2_shuffle_result != epoch3_shuffle_result
                assert epoch3_shuffle_result != epoch4_shuffle_result
                assert epoch4_shuffle_result != epoch5_shuffle_result
        assert num_padded_iter == num_padded * repeat_size
        assert num_iter == dataset_size * num_shards * repeat_size

    partitions(4, 2, 3)
    partitions(5, 5, 3)
    partitions(9, 8, 2)


def test_cv_minddataset_partition_padded_samples_no_dividsible(add_and_remove_cv_file):
    """A num_padded that does not make the row count divisible by num_shards raises RuntimeError."""
    columns_list = ["data", "file_name", "label"]
    data = get_data(CV_DIR_NAME)
    padded_sample = data[0]
    padded_sample['label'] = -2
    padded_sample['file_name'] = 'dummy.jpg'
    num_readers = 4
    file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]

    def partitions(num_shards, num_padded):
        for partition_id in range(num_shards):
            data_set = ds.MindDataset(file_name + "0", columns_list, num_readers,
                                      num_shards=num_shards,
                                      shard_id=partition_id,
                                      padded_sample=padded_sample,
                                      num_padded=num_padded)
            num_iter = 0
            for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
                num_iter += 1
        return num_iter

    with pytest.raises(RuntimeError):
        partitions(4, 1)


def test_cv_minddataset_partition_padded_samples_dataset_size_no_divisible(add_and_remove_cv_file):
    """get_dataset_size raises RuntimeError when the padded row count is not divisible by num_shards."""
    columns_list = ["data", "file_name", "label"]
    data = get_data(CV_DIR_NAME)
    padded_sample = data[0]
    padded_sample['label'] = -2
    padded_sample['file_name'] = 'dummy.jpg'
    num_readers = 4
    file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]

    def partitions(num_shards, num_padded):
        for partition_id in range(num_shards):
            data_set = ds.MindDataset(file_name + "0", columns_list, num_readers,
                                      num_shards=num_shards,
                                      shard_id=partition_id,
                                      padded_sample=padded_sample,
                                      num_padded=num_padded)
            with pytest.raises(RuntimeError):
                data_set.get_dataset_size() == 3

    partitions(4, 1)


def test_cv_minddataset_partition_padded_samples_no_equal_column_list(add_and_remove_cv_file):
    """A padded_sample that is missing a column from columns_list raises an error."""
    columns_list = ["data", "file_name", "label"]
    data = get_data(CV_DIR_NAME)
    padded_sample = data[0]
    padded_sample.pop('label', None)
    padded_sample['file_name'] = 'dummy.jpg'
    num_readers = 4
    file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]

    def partitions(num_shards, num_padded):
        for partition_id in range(num_shards):
            data_set = ds.MindDataset(file_name + "0", columns_list, num_readers,
                                      num_shards=num_shards,
                                      shard_id=partition_id,
                                      padded_sample=padded_sample,
                                      num_padded=num_padded)
            for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
                logger.info("-------------- partition : {} ------------------------".format(partition_id))
                logger.info("-------------- len(item[data]): {} ------------------------".format(len(item["data"])))
                logger.info("-------------- item[data]: {} -----------------------------".format(item["data"]))
                logger.info("-------------- item[file_name]: {} ------------------------".format(item["file_name"]))

    with pytest.raises(Exception, match="padded_sample cannot match columns_list."):
        partitions(4, 2)


def test_cv_minddataset_partition_padded_samples_no_column_list(add_and_remove_cv_file):
    """Specifying padded_sample without columns_list raises an error."""
    data = get_data(CV_DIR_NAME)
    padded_sample = data[0]
    padded_sample['label'] = -2
    padded_sample['file_name'] = 'dummy.jpg'
    num_readers = 4
    file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]

    def partitions(num_shards, num_padded):
        for partition_id in range(num_shards):
            data_set = ds.MindDataset(file_name + "0", None, num_readers,
                                      num_shards=num_shards,
                                      shard_id=partition_id,
                                      padded_sample=padded_sample,
                                      num_padded=num_padded)
            for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
                logger.info("-------------- partition : {} ------------------------".format(partition_id))
                logger.info("-------------- len(item[data]): {} ------------------------".format(len(item["data"])))
                logger.info("-------------- item[data]: {} -----------------------------".format(item["data"]))
                logger.info("-------------- item[file_name]: {} ------------------------".format(item["file_name"]))

    with pytest.raises(Exception, match="padded_sample is specified and requires columns_list as well."):
        partitions(4, 2)


def test_cv_minddataset_partition_padded_samples_no_num_padded(add_and_remove_cv_file):
    """Specifying padded_sample without num_padded raises an error."""
    columns_list = ["data", "file_name", "label"]
    data = get_data(CV_DIR_NAME)
    padded_sample = data[0]
    padded_sample['file_name'] = 'dummy.jpg'
    num_readers = 4
    file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]

    def partitions(num_shards, num_padded):
        for partition_id in range(num_shards):
            data_set = ds.MindDataset(file_name + "0", None, num_readers,
                                      num_shards=num_shards,
                                      shard_id=partition_id,
                                      padded_sample=padded_sample)
            for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
                logger.info("-------------- partition : {} ------------------------".format(partition_id))
                logger.info("-------------- len(item[data]): {} ------------------------".format(len(item["data"])))
                logger.info("-------------- item[data]: {} -----------------------------".format(item["data"]))
                logger.info("-------------- item[file_name]: {} ------------------------".format(item["file_name"]))

    with pytest.raises(Exception, match="padded_sample is specified and requires num_padded as well."):
        partitions(4, 2)


def test_cv_minddataset_partition_padded_samples_no_padded_samples(add_and_remove_cv_file):
    """Specifying num_padded without padded_sample raises an error."""
    columns_list = ["data", "file_name", "label"]
    data = get_data(CV_DIR_NAME)
    padded_sample = data[0]
    padded_sample['file_name'] = 'dummy.jpg'
    num_readers = 4
    file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]

    def partitions(num_shards, num_padded):
        for partition_id in range(num_shards):
            data_set = ds.MindDataset(file_name + "0", None, num_readers,
                                      num_shards=num_shards,
                                      shard_id=partition_id,
                                      num_padded=num_padded)
            for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
                logger.info("-------------- partition : {} ------------------------".format(partition_id))
                logger.info("-------------- len(item[data]): {} ------------------------".format(len(item["data"])))
                logger.info("-------------- item[data]: {} -----------------------------".format(item["data"]))
                logger.info("-------------- item[file_name]: {} ------------------------".format(item["file_name"]))

    with pytest.raises(Exception, match="num_padded is specified but padded_sample is not."):
        partitions(4, 2)


def test_nlp_minddataset_reader_basic_padded_samples(add_and_remove_nlp_file):
    """Read a sharded NLP MindDataset with padded samples and verify the padded rows in every shard."""
    columns_list = ["input_ids", "id", "rating"]
    data = [x for x in get_nlp_data(NLP_FILE_POS, NLP_FILE_VOCAB, 10)]
    padded_sample = data[0]
    padded_sample['id'] = "-1"
    padded_sample['input_ids'] = np.array([-1, -1, -1, -1], dtype=np.int64)
    padded_sample['rating'] = 1.0
    num_readers = 4
    file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]

    def partitions(num_shards, num_padded, dataset_size):
        num_padded_iter = 0
        num_iter = 0
        for partition_id in range(num_shards):
            data_set = ds.MindDataset(file_name + "0", columns_list, num_readers,
                                      num_shards=num_shards,
                                      shard_id=partition_id,
                                      padded_sample=padded_sample,
                                      num_padded=num_padded)
            assert data_set.get_dataset_size() == dataset_size
            for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
                logger.info("-------------- item[id]: {} ------------------------".format(item["id"]))
                logger.info("-------------- item[rating]: {} --------------------".format(item["rating"]))
                logger.info("-------------- item[input_ids]: {}, shape: {} -----------------".format(
                    item["input_ids"],
                    item["input_ids"].shape))
                if item['id'] == bytes('-1', encoding='utf-8'):
                    num_padded_iter += 1
                    assert item['id'] == bytes(padded_sample['id'], encoding='utf-8')
                    assert (item['input_ids'] == padded_sample['input_ids']).all()
                    assert (item['rating'] == padded_sample['rating']).all()
                num_iter += 1
        assert num_padded_iter == num_padded
        assert num_iter == dataset_size * num_shards

    partitions(4, 6, 4)
    partitions(5, 5, 3)
    partitions(9, 8, 2)


def test_nlp_minddataset_reader_basic_padded_samples_multi_epoch(add_and_remove_nlp_file):
    """Repeat a sharded NLP MindDataset with padded samples for 3 epochs and verify per-epoch reshuffling."""
    columns_list = ["input_ids", "id", "rating"]
    data = [x for x in get_nlp_data(NLP_FILE_POS, NLP_FILE_VOCAB, 10)]
    padded_sample = data[0]
    padded_sample['id'] = "-1"
    padded_sample['input_ids'] = np.array([-1, -1, -1, -1], dtype=np.int64)
    padded_sample['rating'] = 1.0
    num_readers = 4
    repeat_size = 3
    file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]

    def partitions(num_shards, num_padded, dataset_size):
        num_padded_iter = 0
        num_iter = 0
        for partition_id in range(num_shards):
            epoch1_shuffle_result = []
            epoch2_shuffle_result = []
            epoch3_shuffle_result = []
            data_set = ds.MindDataset(file_name + "0", columns_list, num_readers,
                                      num_shards=num_shards,
                                      shard_id=partition_id,
                                      padded_sample=padded_sample,
                                      num_padded=num_padded)
            assert data_set.get_dataset_size() == dataset_size
            data_set = data_set.repeat(repeat_size)
            local_index = 0
            for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
                logger.info("-------------- item[id]: {} ------------------------".format(item["id"]))
                logger.info("-------------- item[rating]: {} --------------------".format(item["rating"]))
                logger.info("-------------- item[input_ids]: {}, shape: {} -----------------".format(
                    item["input_ids"],
                    item["input_ids"].shape))
                if item['id'] == bytes('-1', encoding='utf-8'):
                    num_padded_iter += 1
                    assert item['id'] == bytes(padded_sample['id'], encoding='utf-8')
                    assert (item['input_ids'] == padded_sample['input_ids']).all()
                    assert (item['rating'] == padded_sample['rating']).all()
                if local_index < dataset_size:
                    epoch1_shuffle_result.append(item['id'])
                elif local_index < dataset_size * 2:
                    epoch2_shuffle_result.append(item['id'])
                elif local_index < dataset_size * 3:
                    epoch3_shuffle_result.append(item['id'])
                local_index += 1
                num_iter += 1
            assert len(epoch1_shuffle_result) == dataset_size
            assert len(epoch2_shuffle_result) == dataset_size
            assert len(epoch3_shuffle_result) == dataset_size
            assert local_index == dataset_size * repeat_size
            # When dataset_size is only 2, the shuffled order is too likely to repeat across epochs.
            if dataset_size > 2:
                assert epoch1_shuffle_result != epoch2_shuffle_result
                assert epoch2_shuffle_result != epoch3_shuffle_result
        assert num_padded_iter == num_padded * repeat_size
        assert num_iter == dataset_size * num_shards * repeat_size

    partitions(4, 6, 4)
    partitions(5, 5, 3)
    partitions(9, 8, 2)


def test_nlp_minddataset_reader_basic_padded_samples_check_whole_reshuffle_result_per_epoch(add_and_remove_nlp_file):
    """Check that each shard's per-epoch order differs when repeating a padded NLP MindDataset."""
    columns_list = ["input_ids", "id", "rating"]
    padded_sample = {}
    padded_sample['id'] = "-1"
    padded_sample['input_ids'] = np.array([-1, -1, -1, -1], dtype=np.int64)
    padded_sample['rating'] = 1.0
    num_readers = 4
    repeat_size = 3
    file_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]

    def partitions(num_shards, num_padded, dataset_size):
        num_padded_iter = 0
        num_iter = 0
        epoch_result = [[["" for i in range(dataset_size)] for i in range(repeat_size)] for i in range(num_shards)]
        for partition_id in range(num_shards):
            data_set = ds.MindDataset(file_name + "0", columns_list, num_readers,
                                      num_shards=num_shards,
                                      shard_id=partition_id,
                                      padded_sample=padded_sample,
                                      num_padded=num_padded)
            assert data_set.get_dataset_size() == dataset_size
            data_set = data_set.repeat(repeat_size)
            inner_num_iter = 0
            for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
                logger.info("-------------- item[id]: {} ------------------------".format(item["id"]))
                logger.info("-------------- item[rating]: {} --------------------".format(item["rating"]))
                logger.info("-------------- item[input_ids]: {}, shape: {} -----------------"
                            .format(item["input_ids"], item["input_ids"].shape))
                if item['id'] == bytes('-1', encoding='utf-8'):
                    num_padded_iter += 1
                    assert item['id'] == bytes(padded_sample['id'], encoding='utf-8')
                    assert (item['input_ids'] == padded_sample['input_ids']).all()
                    assert (item['rating'] == padded_sample['rating']).all()
                # save epoch result
                epoch_result[partition_id][int(inner_num_iter / dataset_size)][inner_num_iter % dataset_size] = item["id"]
                num_iter += 1
                inner_num_iter += 1
            assert epoch_result[partition_id][0] not in (epoch_result[partition_id][1], epoch_result[partition_id][2])
            assert epoch_result[partition_id][1] not in (epoch_result[partition_id][0], epoch_result[partition_id][2])
            assert epoch_result[partition_id][2] not in (epoch_result[partition_id][1], epoch_result[partition_id][0])
            if dataset_size > 2:
                epoch_result[partition_id][0].sort()
                epoch_result[partition_id][1].sort()
                epoch_result[partition_id][2].sort()
                assert epoch_result[partition_id][0] != epoch_result[partition_id][1]
                assert epoch_result[partition_id][1] != epoch_result[partition_id][2]
                assert epoch_result[partition_id][2] != epoch_result[partition_id][0]
        assert num_padded_iter == num_padded * repeat_size
        assert num_iter == dataset_size * num_shards * repeat_size

    partitions(4, 6, 4)
    partitions(5, 5, 3)
    partitions(9, 8, 2)


def get_data(dir_name):
    """
    Get raw data from the ImageNet test dataset.

    Args:
        dir_name (str): Directory containing the images folder and annotation file.
    """
    if not os.path.isdir(dir_name):
        raise IOError("Directory {} does not exist".format(dir_name))
    img_dir = os.path.join(dir_name, "images")
    ann_file = os.path.join(dir_name, "annotation.txt")
    with open(ann_file, "r") as file_reader:
        lines = file_reader.readlines()

    data_list = []
    for i, line in enumerate(lines):
        try:
            filename, label = line.split(",")
            label = label.strip("\n")
            with open(os.path.join(img_dir, filename), "rb") as file_reader:
                img = file_reader.read()
            data_json = {"id": i,
                         "file_name": filename,
                         "data": img,
                         "label": int(label)}
            data_list.append(data_json)
        except FileNotFoundError:
            continue
    return data_list


def get_nlp_data(dir_name, vocab_file, num):
    """
    Yield raw data of the aclImdb dataset.

    Args:
        dir_name (str): Path of the aclImdb dataset.
        vocab_file (str): Path of the vocabulary file.
        num (int): Number of samples to yield.

    Yields:
        dict, one raw sample per text file.
    """
    if not os.path.isdir(dir_name):
        raise IOError("Directory {} does not exist".format(dir_name))
    for root, dirs, files in os.walk(dir_name):
        for index, file_name_extension in enumerate(files):
            if index < num:
                file_path = os.path.join(root, file_name_extension)
                file_name, _ = file_name_extension.split('.', 1)
                id_, rating = file_name.split('_', 1)
                with open(file_path, 'r') as f:
                    raw_content = f.read()
                dictionary = load_vocab(vocab_file)
                vectors = [dictionary.get('[CLS]')]
                vectors += [dictionary.get(i) if i in dictionary
                            else dictionary.get('[UNK]')
                            for i in re.findall(r"[\w']+|[{}]"
                                                .format(string.punctuation),
                                                raw_content)]
                vectors += [dictionary.get('[SEP]')]
                input_, mask, segment = inputs(vectors)
                input_ids = np.reshape(np.array(input_), [-1])
                input_mask = np.reshape(np.array(mask), [1, -1])
                segment_ids = np.reshape(np.array(segment), [2, -1])
                data = {
                    "label": 1,
                    "id": id_,
                    "rating": float(rating),
                    "input_ids": input_ids,
                    "input_mask": input_mask,
                    "segment_ids": segment_ids
                }
                yield data


def convert_to_uni(text):
    if isinstance(text, str):
        return text
    if isinstance(text, bytes):
        return text.decode('utf-8', 'ignore')
    raise Exception("The type %s cannot be converted!" % type(text))


def load_vocab(vocab_file):
    """Load the vocabulary used to translate statements into token ids."""
    vocab = collections.OrderedDict()
    vocab.setdefault('blank', 2)
    index = 0
    with open(vocab_file) as reader:
        while True:
            tmp = reader.readline()
            if not tmp:
                break
            token = convert_to_uni(tmp)
            token = token.strip()
            vocab[token] = index
            index += 1
    return vocab


def inputs(vectors, maxlen=50):
    length = len(vectors)
    if length > maxlen:
        return vectors[0:maxlen], [1] * maxlen, [0] * maxlen
    input_ = vectors + [0] * (maxlen - length)
    mask = [1] * length + [0] * (maxlen - length)
    segment = [0] * maxlen
    return input_, mask, segment
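
# A worked example of inputs() with a hypothetical maxlen of 5:
# inputs([5, 6, 7], maxlen=5) returns ([5, 6, 7, 0, 0], [1, 1, 1, 0, 0], [0, 0, 0, 0, 0]),
# i.e. token ids zero-padded to maxlen, a mask marking the real tokens, and
# all-zero segment ids.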


if __name__ == '__main__':
    test_cv_minddataset_reader_basic_padded_samples(add_and_remove_cv_file)
    test_cv_minddataset_partition_padded_samples(add_and_remove_cv_file)
    test_cv_minddataset_partition_padded_samples_multi_epoch(add_and_remove_cv_file)
    test_cv_minddataset_partition_padded_samples_no_dividsible(add_and_remove_cv_file)
    test_cv_minddataset_partition_padded_samples_dataset_size_no_divisible(add_and_remove_cv_file)
    test_cv_minddataset_partition_padded_samples_no_equal_column_list(add_and_remove_cv_file)
    test_cv_minddataset_partition_padded_samples_no_column_list(add_and_remove_cv_file)
    test_cv_minddataset_partition_padded_samples_no_num_padded(add_and_remove_cv_file)
    test_cv_minddataset_partition_padded_samples_no_padded_samples(add_and_remove_cv_file)
    test_nlp_minddataset_reader_basic_padded_samples(add_and_remove_nlp_file)
    test_nlp_minddataset_reader_basic_padded_samples_multi_epoch(add_and_remove_nlp_file)
    test_nlp_minddataset_reader_basic_padded_samples_check_whole_reshuffle_result_per_epoch(add_and_remove_nlp_file)