|
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730 |
- # Copyright 2019 Huawei Technologies Co., Ltd
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # ==============================================================================
- """
- This is the test module for mindrecord
- """
- import collections
- import json
- import math
- import os
- import re
- import string
- import pytest
- import numpy as np
-
- import mindspore.dataset as ds
- import mindspore.dataset.vision.c_transforms as vision
- from mindspore import log as logger
- from mindspore.dataset.vision import Inter
- from mindspore.mindrecord import FileWriter
-
- FILES_NUM = 4
- CV_FILE_NAME = "../data/mindrecord/imagenet.mindrecord"
- CV1_FILE_NAME = "../data/mindrecord/imagenet1.mindrecord"
- CV2_FILE_NAME = "../data/mindrecord/imagenet2.mindrecord"
- CV_DIR_NAME = "../data/mindrecord/testImageNetData"
- NLP_FILE_NAME = "../data/mindrecord/aclImdb.mindrecord"
- OLD_NLP_FILE_NAME = "../data/mindrecord/testOldVersion/aclImdb.mindrecord"
- NLP_FILE_POS = "../data/mindrecord/testAclImdbData/pos"
- NLP_FILE_VOCAB = "../data/mindrecord/testAclImdbData/vocab.txt"
-
-
@pytest.fixture
def add_and_remove_cv_file():
    """Write the sharded CV mindrecord files for a test, then remove them.

    Yields a sentinel string once the files are committed; the shard files and
    their ``.db`` index files are removed both on success and on failure.
    """
    paths = ["{}{}".format(CV_FILE_NAME, str(x).rjust(1, '0'))
             for x in range(FILES_NUM)]

    def remove_files():
        # Guard each removal: cleanup after a partial failure must not raise
        # FileNotFoundError and mask the original error (the old code removed
        # unconditionally in the except branch).
        for x in paths:
            if os.path.exists(x):
                os.remove(x)
            if os.path.exists("{}.db".format(x)):
                os.remove("{}.db".format(x))

    try:
        remove_files()  # drop stale files left by a previous aborted run
        writer = FileWriter(CV_FILE_NAME, FILES_NUM)
        data = get_data(CV_DIR_NAME)
        cv_schema_json = {"id": {"type": "int32"},
                          "file_name": {"type": "string"},
                          "label": {"type": "int32"},
                          "data": {"type": "bytes"}}
        writer.add_schema(cv_schema_json, "img_schema")
        writer.add_index(["file_name", "label"])
        writer.write_raw_data(data)
        writer.commit()
        yield "yield_cv_data"
    finally:
        remove_files()
-
-
@pytest.fixture
def add_and_remove_nlp_file():
    """Write the sharded NLP mindrecord files for a test, then remove them.

    Yields a sentinel string once the files are committed; the shard files and
    their ``.db`` index files are removed both on success and on failure.
    """
    paths = ["{}{}".format(NLP_FILE_NAME, str(x).rjust(1, '0'))
             for x in range(FILES_NUM)]

    def remove_files():
        # Guard each removal: cleanup after a partial failure must not raise
        # FileNotFoundError and mask the original error (the old code removed
        # unconditionally in the except branch).
        for x in paths:
            if os.path.exists(x):
                os.remove(x)
            if os.path.exists("{}.db".format(x)):
                os.remove("{}.db".format(x))

    try:
        remove_files()  # drop stale files left by a previous aborted run
        writer = FileWriter(NLP_FILE_NAME, FILES_NUM)
        # Materialize the generator once so it can be written in one call.
        data = list(get_nlp_data(NLP_FILE_POS, NLP_FILE_VOCAB, 10))
        nlp_schema_json = {"id": {"type": "string"}, "label": {"type": "int32"},
                           "rating": {"type": "float32"},
                           "input_ids": {"type": "int64",
                                         "shape": [-1]},
                           "input_mask": {"type": "int64",
                                          "shape": [1, -1]},
                           "segment_ids": {"type": "int64",
                                           "shape": [2, -1]}
                           }
        writer.set_header_size(1 << 14)
        writer.set_page_size(1 << 15)
        writer.add_schema(nlp_schema_json, "nlp_schema")
        writer.add_index(["id", "rating"])
        writer.write_raw_data(data)
        writer.commit()
        yield "yield_nlp_data"
    finally:
        remove_files()
-
-
@pytest.fixture
def add_and_remove_nlp_compress_file():
    """Write 16 rows of boundary-value array data to sharded mindrecord files,
    then remove them.

    Yields a sentinel string once the files are committed; the shard files and
    their ``.db`` index files are removed both on success and on failure.
    """
    paths = ["{}{}".format(NLP_FILE_NAME, str(x).rjust(1, '0'))
             for x in range(FILES_NUM)]

    def remove_files():
        # Guard each removal: cleanup after a partial failure must not raise
        # FileNotFoundError and mask the original error (the old code removed
        # unconditionally in the except branch).
        for x in paths:
            if os.path.exists(x):
                os.remove(x)
            if os.path.exists("{}.db".format(x)):
                os.remove("{}.db".format(x))

    try:
        remove_files()  # drop stale files left by a previous aborted run
        writer = FileWriter(NLP_FILE_NAME, FILES_NUM)
        data = []
        for row_id in range(16):
            # Values straddle int8/int16/int32/int64 boundaries to exercise
            # the writer's integer compression paths.
            data.append({
                "label": row_id,
                "array_a": np.reshape(np.array([0, 1, -1, 127, -128, 128, -129,
                                                255, 256, -32768, 32767, -32769, 32768, -2147483648,
                                                2147483647], dtype=np.int32), [-1]),
                "array_b": np.reshape(np.array([0, 1, -1, 127, -128, 128, -129, 255,
                                                256, -32768, 32767, -32769, 32768,
                                                -2147483648, 2147483647, -2147483649, 2147483649,
                                                -922337036854775808, 9223372036854775807]), [1, -1]),
                "array_c": str.encode("nlp data"),
                "array_d": np.reshape(np.array([[-10, -127], [10, 127]]), [2, -1])
            })
        nlp_schema_json = {"label": {"type": "int32"},
                           "array_a": {"type": "int32",
                                       "shape": [-1]},
                           "array_b": {"type": "int64",
                                       "shape": [1, -1]},
                           "array_c": {"type": "bytes"},
                           "array_d": {"type": "int64",
                                       "shape": [2, -1]}
                           }
        writer.set_header_size(1 << 14)
        writer.set_page_size(1 << 15)
        writer.add_schema(nlp_schema_json, "nlp_schema")
        writer.write_raw_data(data)
        writer.commit()
        yield "yield_nlp_data"
    finally:
        remove_files()
-
-
def test_nlp_compress_data(add_and_remove_nlp_compress_file):
    """tutorial for nlp minderdataset."""
    # Rebuild the 16 rows the fixture wrote so each column can be compared
    # against what MindDataset reads back.  The arrays are identical for every
    # row, so they are built once and shared.
    array_a = np.reshape(np.array([0, 1, -1, 127, -128, 128, -129,
                                   255, 256, -32768, 32767, -32769, 32768, -2147483648,
                                   2147483647], dtype=np.int32), [-1])
    array_b = np.reshape(np.array([0, 1, -1, 127, -128, 128, -129, 255,
                                   256, -32768, 32767, -32769, 32768,
                                   -2147483648, 2147483647, -2147483649, 2147483649,
                                   -922337036854775808, 9223372036854775807]), [1, -1])
    array_d = np.reshape(np.array([[-10, -127], [10, 127]]), [2, -1])
    data = [{"label": row_id,
             "array_a": array_a,
             "array_b": array_b,
             "array_c": str.encode("nlp data"),
             "array_d": array_d}
            for row_id in range(16)]

    num_readers = 1
    data_set = ds.MindDataset(
        NLP_FILE_NAME + "0", None, num_readers, shuffle=False)
    assert data_set.get_dataset_size() == 16

    num_iter = 0
    reader = data_set.create_dict_iterator(num_epochs=1, output_numpy=True)
    for expected, item in zip(data, reader):
        assert (item["array_a"] == expected["array_a"]).all()
        assert (item["array_b"] == expected["array_b"]).all()
        assert item["array_c"].tobytes() == expected["array_c"]
        assert (item["array_d"] == expected["array_d"]).all()
        assert item["label"] == expected["label"]
        num_iter += 1
    assert num_iter == 16
-
-
def test_nlp_compress_data_old_version(add_and_remove_nlp_compress_file):
    """tutorial for nlp minderdataset."""
    num_readers = 1
    data_set = ds.MindDataset(
        NLP_FILE_NAME + "0", None, num_readers, shuffle=False)
    old_data_set = ds.MindDataset(
        OLD_NLP_FILE_NAME + "0", None, num_readers, shuffle=False)
    assert old_data_set.get_dataset_size() == 16

    # Rows written by the current writer must read back identically to the
    # pre-recorded old-format file.
    old_reader = old_data_set.create_dict_iterator(num_epochs=1, output_numpy=True)
    new_reader = data_set.create_dict_iterator(num_epochs=1, output_numpy=True)
    num_iter = 0
    for old_item, new_item in zip(old_reader, new_reader):
        for column in ("array_a", "array_b", "array_c", "array_d"):
            assert (new_item[column] == old_item[column]).all()
        assert new_item["label"] == old_item["label"]
        num_iter += 1
    assert num_iter == 16
-
-
def test_cv_minddataset_writer_tutorial():
    """tutorial for cv dataset writer.

    Writes the ImageNet sample data to sharded mindrecord files and always
    removes the shard files and their ``.db`` index files afterwards.
    """
    paths = ["{}{}".format(CV_FILE_NAME, str(x).rjust(1, '0'))
             for x in range(FILES_NUM)]

    def remove_files():
        # Guard each removal: cleanup after a partial failure must not raise
        # FileNotFoundError and mask the original error (the old code removed
        # unconditionally in the except branch).
        for x in paths:
            if os.path.exists(x):
                os.remove(x)
            if os.path.exists("{}.db".format(x)):
                os.remove("{}.db".format(x))

    try:
        remove_files()  # drop stale files left by a previous aborted run
        writer = FileWriter(CV_FILE_NAME, FILES_NUM)
        data = get_data(CV_DIR_NAME)
        cv_schema_json = {"file_name": {"type": "string"}, "label": {"type": "int32"},
                          "data": {"type": "bytes"}}
        writer.add_schema(cv_schema_json, "img_schema")
        writer.add_index(["file_name", "label"])
        writer.write_raw_data(data)
        writer.commit()
    finally:
        remove_files()
-
-
def test_cv_minddataset_partition_tutorial(add_and_remove_cv_file):
    """tutorial for cv minddataset."""
    columns_list = ["data", "file_name", "label"]
    num_readers = 4

    def partitions(num_shards):
        # Iterate every shard; the count returned is that of the last shard.
        for shard_id in range(num_shards):
            data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers,
                                      num_shards=num_shards, shard_id=shard_id)
            num_iter = 0
            for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
                logger.info("-------------- partition : {} ------------------------".format(shard_id))
                logger.info("-------------- item[file_name]: {}-----------------------".format(item["file_name"]))
                logger.info("-------------- item[label]: {} -----------------------".format(item["label"]))
                num_iter += 1
        return num_iter

    assert partitions(4) == 3
    assert partitions(5) == 2
    assert partitions(9) == 2
-
-
def test_cv_minddataset_partition_num_samples_0(add_and_remove_cv_file):
    """tutorial for cv minddataset."""
    columns_list = ["data", "file_name", "label"]
    num_readers = 4

    def partitions(num_shards):
        # num_samples=1 caps every shard at one row regardless of shard count.
        for shard_id in range(num_shards):
            data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers,
                                      num_shards=num_shards,
                                      shard_id=shard_id, num_samples=1)

            assert data_set.get_dataset_size() == 1
            num_iter = 0
            for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
                logger.info("-------------- partition : {} ------------------------".format(shard_id))
                logger.info("-------------- item[file_name]: {}-----------------------".format(item["file_name"]))
                logger.info("-------------- item[label]: {} -----------------------".format(item["label"]))
                num_iter += 1
        return num_iter

    assert partitions(4) == 1
    assert partitions(5) == 1
    assert partitions(9) == 1
-
-
def test_cv_minddataset_partition_num_samples_1(add_and_remove_cv_file):
    """tutorial for cv minddataset."""
    columns_list = ["data", "file_name", "label"]
    num_readers = 4

    def partitions(num_shards):
        # num_samples=2 caps every shard at two rows regardless of shard count.
        for shard_id in range(num_shards):
            data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers,
                                      num_shards=num_shards,
                                      shard_id=shard_id, num_samples=2)

            assert data_set.get_dataset_size() == 2
            num_iter = 0
            for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
                logger.info("-------------- partition : {} ------------------------".format(shard_id))
                logger.info("-------------- item[file_name]: {}-----------------------".format(item["file_name"]))
                logger.info("-------------- item[label]: {} -----------------------".format(item["label"]))
                num_iter += 1
        return num_iter

    assert partitions(4) == 2
    assert partitions(5) == 2
    assert partitions(9) == 2
-
-
def test_cv_minddataset_partition_num_samples_2(add_and_remove_cv_file):
    """tutorial for cv minddataset."""
    columns_list = ["data", "file_name", "label"]
    num_readers = 4

    def partitions(num_shards, expect):
        # num_samples=3 caps each shard at three rows; a shard that naturally
        # holds fewer rows yields its natural size instead.
        for shard_id in range(num_shards):
            data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers,
                                      num_shards=num_shards,
                                      shard_id=shard_id, num_samples=3)

            assert data_set.get_dataset_size() == expect
            num_iter = 0
            for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
                logger.info("-------------- partition : {} ------------------------".format(shard_id))
                logger.info("-------------- item[file_name]: {}-----------------------".format(item["file_name"]))
                logger.info("-------------- item[label]: {} -----------------------".format(item["label"]))
                num_iter += 1
        return num_iter

    assert partitions(4, 3) == 3
    assert partitions(5, 2) == 2
    assert partitions(9, 2) == 2
-
def test_cv_minddataset_partition_num_samples_3(add_and_remove_cv_file):
    """tutorial for cv minddataset."""
    columns_list = ["data", "file_name", "label"]
    num_readers = 4

    # Single shard with num_samples=5: exactly five rows come back.
    data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers, num_shards=1, shard_id=0, num_samples=5)

    assert data_set.get_dataset_size() == 5
    row_count = 0
    for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
        logger.info("-------------- item[file_name]: {}-----------------------".format(item["file_name"]))
        logger.info("-------------- item[label]: {} -----------------------".format(item["label"]))
        row_count += 1

    assert row_count == 5
-
def test_cv_minddataset_partition_tutorial_check_shuffle_result(add_and_remove_cv_file):
    """tutorial for cv minddataset.

    For each shard, repeat(3) yields three epochs of 4 rows each; the three
    epochs' file-name orderings must all differ (reshuffled per epoch).
    """
    columns_list = ["data", "file_name", "label"]
    num_readers = 4
    num_shards = 3
    epoch1 = []
    epoch2 = []
    epoch3 = []

    for partition_id in range(num_shards):
        data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers,
                                  num_shards=num_shards, shard_id=partition_id)

        # Three epochs concatenated into one iteration pass.
        data_set = data_set.repeat(3)

        num_iter = 0
        for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
            logger.info("-------------- partition : {} ------------------------".format(partition_id))
            logger.info("-------------- item[file_name]: {}-----------------------".format(item["file_name"]))
            logger.info("-------------- item[label]: {} -----------------------".format(item["label"]))
            num_iter += 1
            # 4 rows per shard per epoch: rows 1-4 are epoch 1, 5-8 epoch 2,
            # 9-12 epoch 3.
            if num_iter <= 4:
                epoch1.append(item["file_name"])  # save epoch 1 list
            elif num_iter <= 8:
                epoch2.append(item["file_name"])  # save epoch 2 list
            else:
                epoch3.append(item["file_name"])  # save epoch 3 list
        assert num_iter == 12
        assert len(epoch1) == 4
        assert len(epoch2) == 4
        assert len(epoch3) == 4
        # Each epoch's ordering must differ from the other two.
        assert epoch1 not in (epoch2, epoch3)
        assert epoch2 not in (epoch1, epoch3)
        assert epoch3 not in (epoch1, epoch2)
        # Reset accumulators before checking the next shard.
        epoch1 = []
        epoch2 = []
        epoch3 = []
-
-
def test_cv_minddataset_partition_tutorial_check_whole_reshuffle_result_per_epoch(add_and_remove_cv_file):
    """tutorial for cv minddataset.

    Like the shuffle check above, but additionally verifies that each epoch of
    a shard contains the same *set* of file names (compare after sorting) even
    though the *order* differs between epochs.
    """
    columns_list = ["data", "file_name", "label"]
    num_readers = 4
    num_shards = 3
    # epoch_result[shard][epoch] holds the 4 file names seen in that epoch.
    epoch_result = [[["", "", "", ""], ["", "", "", ""], ["", "", "", ""]],  # save partition 0 result
                    [["", "", "", ""], ["", "", "", ""], ["", "", "", ""]],  # save partition 1 result
                    [["", "", "", ""], ["", "", "", ""], ["", "", "", ""]]]  # save partition 2 result

    for partition_id in range(num_shards):
        data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers,
                                  num_shards=num_shards, shard_id=partition_id)

        data_set = data_set.repeat(3)

        num_iter = 0
        for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
            logger.info("-------------- partition : {} ------------------------".format(partition_id))
            logger.info("-------------- item[file_name]: {}-----------------------".format(item["file_name"]))
            logger.info("-------------- item[label]: {} -----------------------".format(item["label"]))
            # total 3 partition, 4 result per epoch, total 12 result
            epoch_result[partition_id][int(num_iter / 4)][num_iter % 4] = item["file_name"]  # save epoch result
            num_iter += 1
        assert num_iter == 12
        # Orderings must differ between the three epochs...
        assert epoch_result[partition_id][0] not in (epoch_result[partition_id][1], epoch_result[partition_id][2])
        assert epoch_result[partition_id][1] not in (epoch_result[partition_id][0], epoch_result[partition_id][2])
        assert epoch_result[partition_id][2] not in (epoch_result[partition_id][1], epoch_result[partition_id][0])
        epoch_result[partition_id][0].sort()
        epoch_result[partition_id][1].sort()
        epoch_result[partition_id][2].sort()
        # NOTE(review): after sorting, these assert the sorted lists are
        # pairwise *unequal* — TODO confirm whether `!=` was intended here,
        # since "same rows, different order" would sort to equal lists.
        assert epoch_result[partition_id][0] != epoch_result[partition_id][1]
        assert epoch_result[partition_id][1] != epoch_result[partition_id][2]
        assert epoch_result[partition_id][2] != epoch_result[partition_id][0]
-
-
def test_cv_minddataset_check_shuffle_result(add_and_remove_cv_file):
    """tutorial for cv minddataset.

    Checks that the global shuffle order is driven by the config seed:
    two datasets built under the same seed replay the same order; changing
    the seed produces a different order.
    """
    columns_list = ["data", "file_name", "label"]
    num_readers = 4

    # Fix the seed so the shuffle order is reproducible below.
    ds.config.set_seed(54321)
    epoch1 = []
    epoch2 = []
    epoch3 = []

    data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers)
    data_set = data_set.repeat(3)

    num_iter = 0
    for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
        logger.info("-------------- item[file_name]: {}-----------------------".format(item["file_name"]))
        logger.info("-------------- item[label]: {} -----------------------".format(item["label"]))
        num_iter += 1
        # 10 rows per epoch: rows 1-10 epoch 1, 11-20 epoch 2, 21-30 epoch 3.
        if num_iter <= 10:
            epoch1.append(item["file_name"])  # save epoch 1 list
        elif num_iter <= 20:
            epoch2.append(item["file_name"])  # save epoch 2 list
        else:
            epoch3.append(item["file_name"])  # save epoch 3 list
    assert num_iter == 30
    assert len(epoch1) == 10
    assert len(epoch2) == 10
    assert len(epoch3) == 10
    # Each epoch must be reshuffled relative to the others.
    assert epoch1 not in (epoch2, epoch3)
    assert epoch2 not in (epoch1, epoch3)
    assert epoch3 not in (epoch1, epoch2)

    # Second dataset under the SAME seed: must replay the identical order.
    epoch1_new_dataset = []
    epoch2_new_dataset = []
    epoch3_new_dataset = []

    data_set2 = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers)
    data_set2 = data_set2.repeat(3)

    num_iter = 0
    for item in data_set2.create_dict_iterator(num_epochs=1, output_numpy=True):
        logger.info("-------------- item[file_name]: {}-----------------------".format(item["file_name"]))
        logger.info("-------------- item[label]: {} -----------------------".format(item["label"]))
        num_iter += 1
        if num_iter <= 10:
            epoch1_new_dataset.append(item["file_name"])  # save epoch 1 list
        elif num_iter <= 20:
            epoch2_new_dataset.append(item["file_name"])  # save epoch 2 list
        else:
            epoch3_new_dataset.append(item["file_name"])  # save epoch 3 list
    assert num_iter == 30
    assert len(epoch1_new_dataset) == 10
    assert len(epoch2_new_dataset) == 10
    assert len(epoch3_new_dataset) == 10
    assert epoch1_new_dataset not in (epoch2_new_dataset, epoch3_new_dataset)
    assert epoch2_new_dataset not in (epoch1_new_dataset, epoch3_new_dataset)
    assert epoch3_new_dataset not in (epoch1_new_dataset, epoch2_new_dataset)

    # Same seed => same per-epoch order as the first dataset.
    assert epoch1 == epoch1_new_dataset
    assert epoch2 == epoch2_new_dataset
    assert epoch3 == epoch3_new_dataset

    # Third dataset under a DIFFERENT seed: order must change.
    ds.config.set_seed(12345)
    epoch1_new_dataset2 = []
    epoch2_new_dataset2 = []
    epoch3_new_dataset2 = []

    data_set3 = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers)
    data_set3 = data_set3.repeat(3)

    num_iter = 0
    for item in data_set3.create_dict_iterator(num_epochs=1, output_numpy=True):
        logger.info("-------------- item[file_name]: {}-----------------------".format(item["file_name"]))
        logger.info("-------------- item[label]: {} -----------------------".format(item["label"]))
        num_iter += 1
        if num_iter <= 10:
            epoch1_new_dataset2.append(item["file_name"])  # save epoch 1 list
        elif num_iter <= 20:
            epoch2_new_dataset2.append(item["file_name"])  # save epoch 2 list
        else:
            epoch3_new_dataset2.append(item["file_name"])  # save epoch 3 list
    assert num_iter == 30
    assert len(epoch1_new_dataset2) == 10
    assert len(epoch2_new_dataset2) == 10
    assert len(epoch3_new_dataset2) == 10
    assert epoch1_new_dataset2 not in (epoch2_new_dataset2, epoch3_new_dataset2)
    assert epoch2_new_dataset2 not in (epoch1_new_dataset2, epoch3_new_dataset2)
    assert epoch3_new_dataset2 not in (epoch1_new_dataset2, epoch2_new_dataset2)

    # Different seed => different per-epoch order.
    assert epoch1 != epoch1_new_dataset2
    assert epoch2 != epoch2_new_dataset2
    assert epoch3 != epoch3_new_dataset2
-
-
def test_cv_minddataset_dataset_size(add_and_remove_cv_file):
    """tutorial for cv minddataset."""
    columns_list = ["data", "file_name", "label"]
    num_readers = 4

    data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers)
    assert data_set.get_dataset_size() == 10

    # repeat(2) doubles the rows seen in one iteration pass.
    repeat_num = 2
    data_set = data_set.repeat(repeat_num)
    row_count = 0
    for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
        logger.info(
            "-------------- get dataset size {} -----------------".format(row_count))
        logger.info(
            "-------------- item[label]: {} ---------------------".format(item["label"]))
        logger.info(
            "-------------- item[data]: {} ----------------------".format(item["data"]))
        row_count += 1
    assert row_count == 20

    # 10 rows over 4 shards: shard 3 reports a size of 3.
    data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers,
                              num_shards=4, shard_id=3)
    assert data_set.get_dataset_size() == 3
-
-
def test_cv_minddataset_repeat_reshuffle(add_and_remove_cv_file):
    """Verify that repeat() reshuffles rows between the two epochs.

    10 rows batched by 2 give 5 batches per epoch; repeat(2) yields 10 batches
    total, and the label order of epoch 1 should differ from epoch 2.
    """
    columns_list = ["data", "label"]
    num_readers = 4
    data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers)
    decode_op = vision.Decode()
    data_set = data_set.map(
        input_columns=["data"], operations=decode_op, num_parallel_workers=2)
    resize_op = vision.Resize((32, 32), interpolation=Inter.LINEAR)
    data_set = data_set.map(operations=resize_op, input_columns="data",
                            num_parallel_workers=2)
    data_set = data_set.batch(2)
    data_set = data_set.repeat(2)
    num_iter = 0
    labels = []
    for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
        logger.info(
            "-------------- get dataset size {} -----------------".format(num_iter))
        logger.info(
            "-------------- item[label]: {} ---------------------".format(item["label"]))
        logger.info(
            "-------------- item[data]: {} ----------------------".format(item["data"]))
        num_iter += 1
        labels.append(item["label"])
    assert num_iter == 10
    logger.info("repeat shuffle: {}".format(labels))
    assert len(labels) == 10
    # Bug fix: the original asserted `labels[0:5] == labels[0:5]` (a tautology)
    # and `labels[0:5] != labels[5:5]` ([5:5] is an empty slice, so also always
    # true) — neither checked reshuffling.  Compare epoch 1's 5 batches against
    # epoch 2's; convert the ndarray batches with .tolist() so the list
    # comparison is unambiguous.
    epoch1_labels = [batch.tolist() for batch in labels[0:5]]
    epoch2_labels = [batch.tolist() for batch in labels[5:10]]
    assert epoch1_labels != epoch2_labels
-
-
def test_cv_minddataset_batch_size_larger_than_records(add_and_remove_cv_file):
    """tutorial for cv minddataset."""
    columns_list = ["data", "label"]
    num_readers = 4
    data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers)

    # Decode then resize so rows share a uniform shape for batching.
    data_set = data_set.map(input_columns=["data"], operations=vision.Decode(),
                            num_parallel_workers=2)
    data_set = data_set.map(operations=vision.Resize((32, 32), interpolation=Inter.LINEAR),
                            input_columns="data", num_parallel_workers=2)

    # Batch size exceeds the 10 available records; drop_remainder discards the
    # partial batch, so iteration yields nothing.
    data_set = data_set.batch(32, drop_remainder=True)

    num_iter = 0
    for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
        logger.info(
            "-------------- get dataset size {} -----------------".format(num_iter))
        logger.info(
            "-------------- item[label]: {} ---------------------".format(item["label"]))
        logger.info(
            "-------------- item[data]: {} ----------------------".format(item["data"]))
        num_iter += 1
    assert num_iter == 0
-
-
- def test_cv_minddataset_issue_888(add_and_remove_cv_file):
- """issue 888 test."""
- columns_list = ["data", "label"]
- num_readers = 2
- data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers, shuffle=False, num_shards=5, shard_id=1)
- data_set = data_set.shuffle(2)
- data_set = data_set.repeat(9)
- num_iter = 0
- for _ in data_set.create_dict_iterator(num_epochs=1):
- num_iter += 1
- assert num_iter == 18
-
-
- def test_cv_minddataset_reader_file_list(add_and_remove_cv_file):
- """tutorial for cv minderdataset."""
- columns_list = ["data", "file_name", "label"]
- num_readers = 4
- data_set = ds.MindDataset([CV_FILE_NAME + str(x)
- for x in range(FILES_NUM)], columns_list, num_readers)
- assert data_set.get_dataset_size() == 10
- num_iter = 0
- for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
- logger.info(
- "-------------- cv reader basic: {} ------------------------".format(num_iter))
- logger.info(
- "-------------- len(item[data]): {} ------------------------".format(len(item["data"])))
- logger.info(
- "-------------- item[data]: {} -----------------------------".format(item["data"]))
- logger.info(
- "-------------- item[file_name]: {} ------------------------".format(item["file_name"]))
- logger.info(
- "-------------- item[label]: {} ----------------------------".format(item["label"]))
- num_iter += 1
- assert num_iter == 10
-
-
- def test_cv_minddataset_reader_one_partition(add_and_remove_cv_file):
- """tutorial for cv minderdataset."""
- columns_list = ["data", "file_name", "label"]
- num_readers = 4
- data_set = ds.MindDataset([CV_FILE_NAME + "0"], columns_list, num_readers)
- assert data_set.get_dataset_size() < 10
- num_iter = 0
- for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
- logger.info(
- "-------------- cv reader basic: {} ------------------------".format(num_iter))
- logger.info(
- "-------------- len(item[data]): {} ------------------------".format(len(item["data"])))
- logger.info(
- "-------------- item[data]: {} -----------------------------".format(item["data"]))
- logger.info(
- "-------------- item[file_name]: {} ------------------------".format(item["file_name"]))
- logger.info(
- "-------------- item[label]: {} ----------------------------".format(item["label"]))
- num_iter += 1
- assert num_iter < 10
-
-
- def test_cv_minddataset_reader_two_dataset(add_and_remove_cv_file):
- """tutorial for cv minderdataset."""
- try:
- if os.path.exists(CV1_FILE_NAME):
- os.remove(CV1_FILE_NAME)
- if os.path.exists("{}.db".format(CV1_FILE_NAME)):
- os.remove("{}.db".format(CV1_FILE_NAME))
- if os.path.exists(CV2_FILE_NAME):
- os.remove(CV2_FILE_NAME)
- if os.path.exists("{}.db".format(CV2_FILE_NAME)):
- os.remove("{}.db".format(CV2_FILE_NAME))
- writer = FileWriter(CV1_FILE_NAME, 1)
- data = get_data(CV_DIR_NAME)
- cv_schema_json = {"id": {"type": "int32"},
- "file_name": {"type": "string"},
- "label": {"type": "int32"},
- "data": {"type": "bytes"}}
- writer.add_schema(cv_schema_json, "CV1_schema")
- writer.add_index(["file_name", "label"])
- writer.write_raw_data(data)
- writer.commit()
-
- writer = FileWriter(CV2_FILE_NAME, 1)
- data = get_data(CV_DIR_NAME)
- cv_schema_json = {"id": {"type": "int32"},
- "file_name": {"type": "string"},
- "label": {"type": "int32"},
- "data": {"type": "bytes"}}
- writer.add_schema(cv_schema_json, "CV2_schema")
- writer.add_index(["file_name", "label"])
- writer.write_raw_data(data)
- writer.commit()
- columns_list = ["data", "file_name", "label"]
- num_readers = 4
- data_set = ds.MindDataset([CV_FILE_NAME + str(x) for x in range(FILES_NUM)] + [CV1_FILE_NAME, CV2_FILE_NAME],
- columns_list, num_readers)
- assert data_set.get_dataset_size() == 30
- num_iter = 0
- for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
- logger.info(
- "-------------- cv reader basic: {} ------------------------".format(num_iter))
- logger.info(
- "-------------- len(item[data]): {} ------------------------".format(len(item["data"])))
- logger.info(
- "-------------- item[data]: {} -----------------------------".format(item["data"]))
- logger.info(
- "-------------- item[file_name]: {} ------------------------".format(item["file_name"]))
- logger.info(
- "-------------- item[label]: {} ----------------------------".format(item["label"]))
- num_iter += 1
- assert num_iter == 30
- except Exception as error:
- if os.path.exists(CV1_FILE_NAME):
- os.remove(CV1_FILE_NAME)
- if os.path.exists("{}.db".format(CV1_FILE_NAME)):
- os.remove("{}.db".format(CV1_FILE_NAME))
- if os.path.exists(CV2_FILE_NAME):
- os.remove(CV2_FILE_NAME)
- if os.path.exists("{}.db".format(CV2_FILE_NAME)):
- os.remove("{}.db".format(CV2_FILE_NAME))
- raise error
- else:
- if os.path.exists(CV1_FILE_NAME):
- os.remove(CV1_FILE_NAME)
- if os.path.exists("{}.db".format(CV1_FILE_NAME)):
- os.remove("{}.db".format(CV1_FILE_NAME))
- if os.path.exists(CV2_FILE_NAME):
- os.remove(CV2_FILE_NAME)
- if os.path.exists("{}.db".format(CV2_FILE_NAME)):
- os.remove("{}.db".format(CV2_FILE_NAME))
-
-
- def test_cv_minddataset_reader_two_dataset_partition(add_and_remove_cv_file):
- paths = ["{}{}".format(CV1_FILE_NAME, str(x).rjust(1, '0'))
- for x in range(FILES_NUM)]
- try:
- for x in paths:
- if os.path.exists("{}".format(x)):
- os.remove("{}".format(x))
- if os.path.exists("{}.db".format(x)):
- os.remove("{}.db".format(x))
- writer = FileWriter(CV1_FILE_NAME, FILES_NUM)
- data = get_data(CV_DIR_NAME)
- cv_schema_json = {"id": {"type": "int32"},
- "file_name": {"type": "string"},
- "label": {"type": "int32"},
- "data": {"type": "bytes"}}
- writer.add_schema(cv_schema_json, "CV1_schema")
- writer.add_index(["file_name", "label"])
- writer.write_raw_data(data)
- writer.commit()
-
- columns_list = ["data", "file_name", "label"]
- num_readers = 4
- data_set = ds.MindDataset([CV_FILE_NAME + str(x) for x in range(2)] +
- [CV1_FILE_NAME + str(x) for x in range(2, 4)],
- columns_list, num_readers)
- assert data_set.get_dataset_size() < 20
- num_iter = 0
- for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
- logger.info(
- "-------------- cv reader basic: {} ------------------------".format(num_iter))
- logger.info(
- "-------------- len(item[data]): {} ------------------------".format(len(item["data"])))
- logger.info(
- "-------------- item[data]: {} -----------------------------".format(item["data"]))
- logger.info(
- "-------------- item[file_name]: {} ------------------------".format(item["file_name"]))
- logger.info(
- "-------------- item[label]: {} ----------------------------".format(item["label"]))
- num_iter += 1
- assert num_iter < 20
- except Exception as error:
- for x in paths:
- os.remove("{}".format(x))
- os.remove("{}.db".format(x))
- raise error
- else:
- for x in paths:
- os.remove("{}".format(x))
- os.remove("{}.db".format(x))
-
-
- def test_cv_minddataset_reader_basic_tutorial(add_and_remove_cv_file):
- """tutorial for cv minderdataset."""
- columns_list = ["data", "file_name", "label"]
- num_readers = 4
- data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers)
- assert data_set.get_dataset_size() == 10
- num_iter = 0
- for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
- logger.info(
- "-------------- cv reader basic: {} ------------------------".format(num_iter))
- logger.info(
- "-------------- len(item[data]): {} ------------------------".format(len(item["data"])))
- logger.info(
- "-------------- item[data]: {} -----------------------------".format(item["data"]))
- logger.info(
- "-------------- item[file_name]: {} ------------------------".format(item["file_name"]))
- logger.info(
- "-------------- item[label]: {} ----------------------------".format(item["label"]))
- num_iter += 1
- assert num_iter == 10
-
-
- def test_nlp_minddataset_reader_basic_tutorial(add_and_remove_nlp_file):
- """tutorial for nlp minderdataset."""
- num_readers = 4
- data_set = ds.MindDataset(NLP_FILE_NAME + "0", None, num_readers)
- assert data_set.get_dataset_size() == 10
- num_iter = 0
- for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
- logger.info(
- "-------------- cv reader basic: {} ------------------------".format(num_iter))
- logger.info(
- "-------------- num_iter: {} ------------------------".format(num_iter))
- logger.info(
- "-------------- item[id]: {} ------------------------".format(item["id"]))
- logger.info(
- "-------------- item[rating]: {} --------------------".format(item["rating"]))
- logger.info("-------------- item[input_ids]: {}, shape: {} -----------------".format(
- item["input_ids"], item["input_ids"].shape))
- logger.info("-------------- item[input_mask]: {}, shape: {} -----------------".format(
- item["input_mask"], item["input_mask"].shape))
- logger.info("-------------- item[segment_ids]: {}, shape: {} -----------------".format(
- item["segment_ids"], item["segment_ids"].shape))
- assert item["input_ids"].shape == (50,)
- assert item["input_mask"].shape == (1, 50)
- assert item["segment_ids"].shape == (2, 25)
- num_iter += 1
- assert num_iter == 10
-
-
- def test_cv_minddataset_reader_basic_tutorial_5_epoch(add_and_remove_cv_file):
- """tutorial for cv minderdataset."""
- columns_list = ["data", "file_name", "label"]
- num_readers = 4
- data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers)
- assert data_set.get_dataset_size() == 10
- for _ in range(5):
- num_iter = 0
- for data in data_set.create_tuple_iterator(output_numpy=True):
- logger.info("data is {}".format(data))
- num_iter += 1
- assert num_iter == 10
-
- data_set.reset()
-
-
- def test_cv_minddataset_reader_basic_tutorial_5_epoch_with_batch(add_and_remove_cv_file):
- """tutorial for cv minderdataset."""
- columns_list = ["data", "label"]
- num_readers = 4
- data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers)
-
- resize_height = 32
- resize_width = 32
-
- # define map operations
- decode_op = vision.Decode()
- resize_op = vision.Resize((resize_height, resize_width))
-
- data_set = data_set.map(input_columns=["data"], operations=decode_op, num_parallel_workers=4)
- data_set = data_set.map(input_columns=["data"], operations=resize_op, num_parallel_workers=4)
-
- data_set = data_set.batch(2)
- assert data_set.get_dataset_size() == 5
- for _ in range(5):
- num_iter = 0
- for data in data_set.create_tuple_iterator(output_numpy=True):
- logger.info("data is {}".format(data))
- num_iter += 1
- assert num_iter == 5
-
- data_set.reset()
-
-
- def test_cv_minddataset_reader_no_columns(add_and_remove_cv_file):
- """tutorial for cv minderdataset."""
- data_set = ds.MindDataset(CV_FILE_NAME + "0")
- assert data_set.get_dataset_size() == 10
- num_iter = 0
- for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
- logger.info(
- "-------------- cv reader basic: {} ------------------------".format(num_iter))
- logger.info(
- "-------------- len(item[data]): {} ------------------------".format(len(item["data"])))
- logger.info(
- "-------------- item[data]: {} -----------------------------".format(item["data"]))
- logger.info(
- "-------------- item[file_name]: {} ------------------------".format(item["file_name"]))
- logger.info(
- "-------------- item[label]: {} ----------------------------".format(item["label"]))
- num_iter += 1
- assert num_iter == 10
-
-
- def test_cv_minddataset_reader_repeat_tutorial(add_and_remove_cv_file):
- """tutorial for cv minderdataset."""
- columns_list = ["data", "file_name", "label"]
- num_readers = 4
- data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers)
- repeat_num = 2
- data_set = data_set.repeat(repeat_num)
- num_iter = 0
- for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
- logger.info(
- "-------------- repeat two test {} ------------------------".format(num_iter))
- logger.info(
- "-------------- len(item[data]): {} -----------------------".format(len(item["data"])))
- logger.info(
- "-------------- item[data]: {} ----------------------------".format(item["data"]))
- logger.info(
- "-------------- item[file_name]: {} -----------------------".format(item["file_name"]))
- logger.info(
- "-------------- item[label]: {} ---------------------------".format(item["label"]))
- num_iter += 1
- assert num_iter == 20
-
-
- def get_data(dir_name):
- """
- usage: get data from imagenet dataset
- params:
- dir_name: directory containing folder images and annotation information
-
- """
- if not os.path.isdir(dir_name):
- raise IOError("Directory {} not exists".format(dir_name))
- img_dir = os.path.join(dir_name, "images")
- ann_file = os.path.join(dir_name, "annotation.txt")
- with open(ann_file, "r") as file_reader:
- lines = file_reader.readlines()
-
- data_list = []
- for i, line in enumerate(lines):
- try:
- filename, label = line.split(",")
- label = label.strip("\n")
- with open(os.path.join(img_dir, filename), "rb") as file_reader:
- img = file_reader.read()
- data_json = {"id": i,
- "file_name": filename,
- "data": img,
- "label": int(label)}
- data_list.append(data_json)
- except FileNotFoundError:
- continue
- return data_list
-
-
- def get_multi_bytes_data(file_name, bytes_num=3):
- """
- Return raw data of multi-bytes dataset.
-
- Args:
- file_name (str): String of multi-bytes dataset's path.
- bytes_num (int): Number of bytes fields.
-
- Returns:
- List
- """
- if not os.path.exists(file_name):
- raise IOError("map file {} not exists".format(file_name))
- dir_name = os.path.dirname(file_name)
- with open(file_name, "r") as file_reader:
- lines = file_reader.readlines()
- data_list = []
- row_num = 0
- for line in lines:
- try:
- img10_path = line.strip('\n').split(" ")
- img5 = []
- for path in img10_path[:bytes_num]:
- with open(os.path.join(dir_name, path), "rb") as file_reader:
- img5 += [file_reader.read()]
- data_json = {"image_{}".format(i): img5[i]
- for i in range(len(img5))}
- data_json.update({"id": row_num})
- row_num += 1
- data_list.append(data_json)
- except FileNotFoundError:
- continue
- return data_list
-
-
- def get_mkv_data(dir_name):
- """
- Return raw data of Vehicle_and_Person dataset.
-
- Args:
- dir_name (str): String of Vehicle_and_Person dataset's path.
-
- Returns:
- List
- """
- if not os.path.isdir(dir_name):
- raise IOError("Directory {} not exists".format(dir_name))
- img_dir = os.path.join(dir_name, "Image")
- label_dir = os.path.join(dir_name, "prelabel")
-
- data_list = []
- file_list = os.listdir(label_dir)
-
- index = 1
- for item in file_list:
- if os.path.splitext(item)[1] == '.json':
- file_path = os.path.join(label_dir, item)
-
- image_name = ''.join([os.path.splitext(item)[0], ".jpg"])
- image_path = os.path.join(img_dir, image_name)
-
- with open(file_path, "r") as load_f:
- load_dict = json.load(load_f)
-
- if os.path.exists(image_path):
- with open(image_path, "rb") as file_reader:
- img = file_reader.read()
- data_json = {"file_name": image_name,
- "prelabel": str(load_dict),
- "data": img,
- "id": index}
- data_list.append(data_json)
- index += 1
- logger.info('{} images are missing'.format(
- len(file_list) - len(data_list)))
- return data_list
-
-
- def get_nlp_data(dir_name, vocab_file, num):
- """
- Return raw data of aclImdb dataset.
-
- Args:
- dir_name (str): String of aclImdb dataset's path.
- vocab_file (str): String of dictionary's path.
- num (int): Number of sample.
-
- Returns:
- List
- """
- if not os.path.isdir(dir_name):
- raise IOError("Directory {} not exists".format(dir_name))
- for root, _, files in os.walk(dir_name):
- for index, file_name_extension in enumerate(files):
- if index < num:
- file_path = os.path.join(root, file_name_extension)
- file_name, _ = file_name_extension.split('.', 1)
- id_, rating = file_name.split('_', 1)
- with open(file_path, 'r') as f:
- raw_content = f.read()
-
- dictionary = load_vocab(vocab_file)
- vectors = [dictionary.get('[CLS]')]
- vectors += [dictionary.get(i) if i in dictionary
- else dictionary.get('[UNK]')
- for i in re.findall(r"[\w']+|[{}]"
- .format(string.punctuation),
- raw_content)]
- vectors += [dictionary.get('[SEP]')]
- input_, mask, segment = inputs(vectors)
- input_ids = np.reshape(np.array(input_), [-1])
- input_mask = np.reshape(np.array(mask), [1, -1])
- segment_ids = np.reshape(np.array(segment), [2, -1])
- data = {
- "label": 1,
- "id": id_,
- "rating": float(rating),
- "input_ids": input_ids,
- "input_mask": input_mask,
- "segment_ids": segment_ids
- }
- yield data
-
-
- def convert_to_uni(text):
- if isinstance(text, str):
- return text
- if isinstance(text, bytes):
- return text.decode('utf-8', 'ignore')
- raise Exception("The type %s does not convert!" % type(text))
-
-
- def load_vocab(vocab_file):
- """load vocabulary to translate statement."""
- vocab = collections.OrderedDict()
- vocab.setdefault('blank', 2)
- index = 0
- with open(vocab_file) as reader:
- while True:
- tmp = reader.readline()
- if not tmp:
- break
- token = convert_to_uni(tmp)
- token = token.strip()
- vocab[token] = index
- index += 1
- return vocab
-
-
- def inputs(vectors, maxlen=50):
- length = len(vectors)
- if length > maxlen:
- return vectors[0:maxlen], [1] * maxlen, [0] * maxlen
- input_ = vectors + [0] * (maxlen - length)
- mask = [1] * length + [0] * (maxlen - length)
- segment = [0] * maxlen
- return input_, mask, segment
-
-
- def test_write_with_multi_bytes_and_array_and_read_by_MindDataset():
- mindrecord_file_name = "test.mindrecord"
- try:
- if os.path.exists("{}".format(mindrecord_file_name)):
- os.remove("{}".format(mindrecord_file_name))
- if os.path.exists("{}.db".format(mindrecord_file_name)):
- os.remove("{}.db".format(mindrecord_file_name))
- data = [{"file_name": "001.jpg", "label": 4,
- "image1": bytes("image1 bytes abc", encoding='UTF-8'),
- "image2": bytes("image1 bytes def", encoding='UTF-8'),
- "source_sos_ids": np.array([1, 2, 3, 4, 5], dtype=np.int64),
- "source_sos_mask": np.array([6, 7, 8, 9, 10, 11, 12], dtype=np.int64),
- "image3": bytes("image1 bytes ghi", encoding='UTF-8'),
- "image4": bytes("image1 bytes jkl", encoding='UTF-8'),
- "image5": bytes("image1 bytes mno", encoding='UTF-8'),
- "target_sos_ids": np.array([28, 29, 30, 31, 32], dtype=np.int64),
- "target_sos_mask": np.array([33, 34, 35, 36, 37, 38], dtype=np.int64),
- "target_eos_ids": np.array([39, 40, 41, 42, 43, 44, 45, 46, 47], dtype=np.int64),
- "target_eos_mask": np.array([48, 49, 50, 51], dtype=np.int64)},
- {"file_name": "002.jpg", "label": 5,
- "image1": bytes("image2 bytes abc", encoding='UTF-8'),
- "image2": bytes("image2 bytes def", encoding='UTF-8'),
- "image3": bytes("image2 bytes ghi", encoding='UTF-8'),
- "image4": bytes("image2 bytes jkl", encoding='UTF-8'),
- "image5": bytes("image2 bytes mno", encoding='UTF-8'),
- "source_sos_ids": np.array([11, 2, 3, 4, 5], dtype=np.int64),
- "source_sos_mask": np.array([16, 7, 8, 9, 10, 11, 12], dtype=np.int64),
- "target_sos_ids": np.array([128, 29, 30, 31, 32], dtype=np.int64),
- "target_sos_mask": np.array([133, 34, 35, 36, 37, 38], dtype=np.int64),
- "target_eos_ids": np.array([139, 40, 41, 42, 43, 44, 45, 46, 47], dtype=np.int64),
- "target_eos_mask": np.array([148, 49, 50, 51], dtype=np.int64)},
- {"file_name": "003.jpg", "label": 6,
- "source_sos_ids": np.array([21, 2, 3, 4, 5], dtype=np.int64),
- "source_sos_mask": np.array([26, 7, 8, 9, 10, 11, 12], dtype=np.int64),
- "target_sos_ids": np.array([228, 29, 30, 31, 32], dtype=np.int64),
- "target_sos_mask": np.array([233, 34, 35, 36, 37, 38], dtype=np.int64),
- "target_eos_ids": np.array([239, 40, 41, 42, 43, 44, 45, 46, 47], dtype=np.int64),
- "image1": bytes("image3 bytes abc", encoding='UTF-8'),
- "image2": bytes("image3 bytes def", encoding='UTF-8'),
- "image3": bytes("image3 bytes ghi", encoding='UTF-8'),
- "image4": bytes("image3 bytes jkl", encoding='UTF-8'),
- "image5": bytes("image3 bytes mno", encoding='UTF-8'),
- "target_eos_mask": np.array([248, 49, 50, 51], dtype=np.int64)},
- {"file_name": "004.jpg", "label": 7,
- "source_sos_ids": np.array([31, 2, 3, 4, 5], dtype=np.int64),
- "source_sos_mask": np.array([36, 7, 8, 9, 10, 11, 12], dtype=np.int64),
- "image1": bytes("image4 bytes abc", encoding='UTF-8'),
- "image2": bytes("image4 bytes def", encoding='UTF-8'),
- "image3": bytes("image4 bytes ghi", encoding='UTF-8'),
- "image4": bytes("image4 bytes jkl", encoding='UTF-8'),
- "image5": bytes("image4 bytes mno", encoding='UTF-8'),
- "target_sos_ids": np.array([328, 29, 30, 31, 32], dtype=np.int64),
- "target_sos_mask": np.array([333, 34, 35, 36, 37, 38], dtype=np.int64),
- "target_eos_ids": np.array([339, 40, 41, 42, 43, 44, 45, 46, 47], dtype=np.int64),
- "target_eos_mask": np.array([348, 49, 50, 51], dtype=np.int64)},
- {"file_name": "005.jpg", "label": 8,
- "source_sos_ids": np.array([41, 2, 3, 4, 5], dtype=np.int64),
- "source_sos_mask": np.array([46, 7, 8, 9, 10, 11, 12], dtype=np.int64),
- "target_sos_ids": np.array([428, 29, 30, 31, 32], dtype=np.int64),
- "target_sos_mask": np.array([433, 34, 35, 36, 37, 38], dtype=np.int64),
- "image1": bytes("image5 bytes abc", encoding='UTF-8'),
- "image2": bytes("image5 bytes def", encoding='UTF-8'),
- "image3": bytes("image5 bytes ghi", encoding='UTF-8'),
- "image4": bytes("image5 bytes jkl", encoding='UTF-8'),
- "image5": bytes("image5 bytes mno", encoding='UTF-8'),
- "target_eos_ids": np.array([439, 40, 41, 42, 43, 44, 45, 46, 47], dtype=np.int64),
- "target_eos_mask": np.array([448, 49, 50, 51], dtype=np.int64)},
- {"file_name": "006.jpg", "label": 9,
- "source_sos_ids": np.array([51, 2, 3, 4, 5], dtype=np.int64),
- "source_sos_mask": np.array([56, 7, 8, 9, 10, 11, 12], dtype=np.int64),
- "target_sos_ids": np.array([528, 29, 30, 31, 32], dtype=np.int64),
- "image1": bytes("image6 bytes abc", encoding='UTF-8'),
- "image2": bytes("image6 bytes def", encoding='UTF-8'),
- "image3": bytes("image6 bytes ghi", encoding='UTF-8'),
- "image4": bytes("image6 bytes jkl", encoding='UTF-8'),
- "image5": bytes("image6 bytes mno", encoding='UTF-8'),
- "target_sos_mask": np.array([533, 34, 35, 36, 37, 38], dtype=np.int64),
- "target_eos_ids": np.array([539, 40, 41, 42, 43, 44, 45, 46, 47], dtype=np.int64),
- "target_eos_mask": np.array([548, 49, 50, 51], dtype=np.int64)}
- ]
-
- writer = FileWriter(mindrecord_file_name)
- schema = {"file_name": {"type": "string"},
- "image1": {"type": "bytes"},
- "image2": {"type": "bytes"},
- "source_sos_ids": {"type": "int64", "shape": [-1]},
- "source_sos_mask": {"type": "int64", "shape": [-1]},
- "image3": {"type": "bytes"},
- "image4": {"type": "bytes"},
- "image5": {"type": "bytes"},
- "target_sos_ids": {"type": "int64", "shape": [-1]},
- "target_sos_mask": {"type": "int64", "shape": [-1]},
- "target_eos_ids": {"type": "int64", "shape": [-1]},
- "target_eos_mask": {"type": "int64", "shape": [-1]},
- "label": {"type": "int32"}}
- writer.add_schema(schema, "data is so cool")
- writer.write_raw_data(data)
- writer.commit()
-
- # change data value to list
- data_value_to_list = []
- for item in data:
- new_data = {}
- new_data['file_name'] = np.asarray(item["file_name"], dtype='S')
- new_data['label'] = np.asarray(list([item["label"]]), dtype=np.int32)
- new_data['image1'] = np.asarray(list(item["image1"]), dtype=np.uint8)
- new_data['image2'] = np.asarray(list(item["image2"]), dtype=np.uint8)
- new_data['image3'] = np.asarray(list(item["image3"]), dtype=np.uint8)
- new_data['image4'] = np.asarray(list(item["image4"]), dtype=np.uint8)
- new_data['image5'] = np.asarray(list(item["image5"]), dtype=np.uint8)
- new_data['source_sos_ids'] = item["source_sos_ids"]
- new_data['source_sos_mask'] = item["source_sos_mask"]
- new_data['target_sos_ids'] = item["target_sos_ids"]
- new_data['target_sos_mask'] = item["target_sos_mask"]
- new_data['target_eos_ids'] = item["target_eos_ids"]
- new_data['target_eos_mask'] = item["target_eos_mask"]
- data_value_to_list.append(new_data)
-
- num_readers = 2
- data_set = ds.MindDataset(dataset_file=mindrecord_file_name,
- num_parallel_workers=num_readers,
- shuffle=False)
- assert data_set.get_dataset_size() == 6
- num_iter = 0
- for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
- assert len(item) == 13
- for field in item:
- if isinstance(item[field], np.ndarray):
- assert (item[field] ==
- data_value_to_list[num_iter][field]).all()
- else:
- assert item[field] == data_value_to_list[num_iter][field]
- num_iter += 1
- assert num_iter == 6
-
- num_readers = 2
- data_set = ds.MindDataset(dataset_file=mindrecord_file_name,
- columns_list=["source_sos_ids",
- "source_sos_mask", "target_sos_ids"],
- num_parallel_workers=num_readers,
- shuffle=False)
- assert data_set.get_dataset_size() == 6
- num_iter = 0
- for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
- assert len(item) == 3
- for field in item:
- if isinstance(item[field], np.ndarray):
- assert (item[field] == data[num_iter][field]).all()
- else:
- assert item[field] == data[num_iter][field]
- num_iter += 1
- assert num_iter == 6
-
- num_readers = 1
- data_set = ds.MindDataset(dataset_file=mindrecord_file_name,
- columns_list=["image2", "source_sos_mask", "image3", "target_sos_ids"],
- num_parallel_workers=num_readers,
- shuffle=False)
- assert data_set.get_dataset_size() == 6
- num_iter = 0
- for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
- assert len(item) == 4
- for field in item:
- if isinstance(item[field], np.ndarray):
- assert (item[field] ==
- data_value_to_list[num_iter][field]).all()
- else:
- assert item[field] == data_value_to_list[num_iter][field]
- num_iter += 1
- assert num_iter == 6
-
- num_readers = 3
- data_set = ds.MindDataset(dataset_file=mindrecord_file_name,
- columns_list=["target_sos_ids",
- "image4", "source_sos_ids"],
- num_parallel_workers=num_readers,
- shuffle=False)
- assert data_set.get_dataset_size() == 6
- num_iter = 0
- for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
- assert len(item) == 3
- for field in item:
- if isinstance(item[field], np.ndarray):
- assert (item[field] ==
- data_value_to_list[num_iter][field]).all()
- else:
- assert item[field] == data_value_to_list[num_iter][field]
- num_iter += 1
- assert num_iter == 6
-
- num_readers = 3
- data_set = ds.MindDataset(dataset_file=mindrecord_file_name,
- columns_list=["target_sos_ids", "image5",
- "image4", "image3", "source_sos_ids"],
- num_parallel_workers=num_readers,
- shuffle=False)
- assert data_set.get_dataset_size() == 6
- num_iter = 0
- for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
- assert len(item) == 5
- for field in item:
- if isinstance(item[field], np.ndarray):
- assert (item[field] ==
- data_value_to_list[num_iter][field]).all()
- else:
- assert item[field] == data_value_to_list[num_iter][field]
- num_iter += 1
- assert num_iter == 6
-
- num_readers = 1
- data_set = ds.MindDataset(dataset_file=mindrecord_file_name,
- columns_list=["target_eos_mask", "image5",
- "image2", "source_sos_mask", "label"],
- num_parallel_workers=num_readers,
- shuffle=False)
- assert data_set.get_dataset_size() == 6
- num_iter = 0
- for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
- assert len(item) == 5
- for field in item:
- if isinstance(item[field], np.ndarray):
- assert (item[field] ==
- data_value_to_list[num_iter][field]).all()
- else:
- assert item[field] == data_value_to_list[num_iter][field]
- num_iter += 1
- assert num_iter == 6
-
- num_readers = 2
- data_set = ds.MindDataset(dataset_file=mindrecord_file_name,
- columns_list=["label", "target_eos_mask", "image1", "target_eos_ids",
- "source_sos_mask", "image2", "image4", "image3",
- "source_sos_ids", "image5", "file_name"],
- num_parallel_workers=num_readers,
- shuffle=False)
- assert data_set.get_dataset_size() == 6
- num_iter = 0
- for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
- assert len(item) == 11
- for field in item:
- if isinstance(item[field], np.ndarray):
- assert (item[field] ==
- data_value_to_list[num_iter][field]).all()
- else:
- assert item[field] == data_value_to_list[num_iter][field]
- num_iter += 1
- assert num_iter == 6
- except Exception as error:
- os.remove("{}".format(mindrecord_file_name))
- os.remove("{}.db".format(mindrecord_file_name))
- raise error
- else:
- os.remove("{}".format(mindrecord_file_name))
- os.remove("{}.db".format(mindrecord_file_name))
-
-
- def test_write_with_multi_bytes_and_MindDataset():
- mindrecord_file_name = "test.mindrecord"
- try:
- data = [{"file_name": "001.jpg", "label": 43,
- "image1": bytes("image1 bytes abc", encoding='UTF-8'),
- "image2": bytes("image1 bytes def", encoding='UTF-8'),
- "image3": bytes("image1 bytes ghi", encoding='UTF-8'),
- "image4": bytes("image1 bytes jkl", encoding='UTF-8'),
- "image5": bytes("image1 bytes mno", encoding='UTF-8')},
- {"file_name": "002.jpg", "label": 91,
- "image1": bytes("image2 bytes abc", encoding='UTF-8'),
- "image2": bytes("image2 bytes def", encoding='UTF-8'),
- "image3": bytes("image2 bytes ghi", encoding='UTF-8'),
- "image4": bytes("image2 bytes jkl", encoding='UTF-8'),
- "image5": bytes("image2 bytes mno", encoding='UTF-8')},
- {"file_name": "003.jpg", "label": 61,
- "image1": bytes("image3 bytes abc", encoding='UTF-8'),
- "image2": bytes("image3 bytes def", encoding='UTF-8'),
- "image3": bytes("image3 bytes ghi", encoding='UTF-8'),
- "image4": bytes("image3 bytes jkl", encoding='UTF-8'),
- "image5": bytes("image3 bytes mno", encoding='UTF-8')},
- {"file_name": "004.jpg", "label": 29,
- "image1": bytes("image4 bytes abc", encoding='UTF-8'),
- "image2": bytes("image4 bytes def", encoding='UTF-8'),
- "image3": bytes("image4 bytes ghi", encoding='UTF-8'),
- "image4": bytes("image4 bytes jkl", encoding='UTF-8'),
- "image5": bytes("image4 bytes mno", encoding='UTF-8')},
- {"file_name": "005.jpg", "label": 78,
- "image1": bytes("image5 bytes abc", encoding='UTF-8'),
- "image2": bytes("image5 bytes def", encoding='UTF-8'),
- "image3": bytes("image5 bytes ghi", encoding='UTF-8'),
- "image4": bytes("image5 bytes jkl", encoding='UTF-8'),
- "image5": bytes("image5 bytes mno", encoding='UTF-8')},
- {"file_name": "006.jpg", "label": 37,
- "image1": bytes("image6 bytes abc", encoding='UTF-8'),
- "image2": bytes("image6 bytes def", encoding='UTF-8'),
- "image3": bytes("image6 bytes ghi", encoding='UTF-8'),
- "image4": bytes("image6 bytes jkl", encoding='UTF-8'),
- "image5": bytes("image6 bytes mno", encoding='UTF-8')}
- ]
- writer = FileWriter(mindrecord_file_name)
- schema = {"file_name": {"type": "string"},
- "image1": {"type": "bytes"},
- "image2": {"type": "bytes"},
- "image3": {"type": "bytes"},
- "label": {"type": "int32"},
- "image4": {"type": "bytes"},
- "image5": {"type": "bytes"}}
- writer.add_schema(schema, "data is so cool")
- writer.write_raw_data(data)
- writer.commit()
-
- # change data value to list
- data_value_to_list = []
- for item in data:
- new_data = {}
- new_data['file_name'] = np.asarray(item["file_name"], dtype='S')
- new_data['label'] = np.asarray(list([item["label"]]), dtype=np.int32)
- new_data['image1'] = np.asarray(list(item["image1"]), dtype=np.uint8)
- new_data['image2'] = np.asarray(list(item["image2"]), dtype=np.uint8)
- new_data['image3'] = np.asarray(list(item["image3"]), dtype=np.uint8)
- new_data['image4'] = np.asarray(list(item["image4"]), dtype=np.uint8)
- new_data['image5'] = np.asarray(list(item["image5"]), dtype=np.uint8)
- data_value_to_list.append(new_data)
-
- num_readers = 2
- data_set = ds.MindDataset(dataset_file=mindrecord_file_name,
- num_parallel_workers=num_readers,
- shuffle=False)
- assert data_set.get_dataset_size() == 6
- num_iter = 0
- for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
- assert len(item) == 7
- for field in item:
- if isinstance(item[field], np.ndarray):
- assert (item[field] ==
- data_value_to_list[num_iter][field]).all()
- else:
- assert item[field] == data_value_to_list[num_iter][field]
- num_iter += 1
- assert num_iter == 6
-
- num_readers = 2
- data_set = ds.MindDataset(dataset_file=mindrecord_file_name,
- columns_list=["image1", "image2", "image5"],
- num_parallel_workers=num_readers,
- shuffle=False)
- assert data_set.get_dataset_size() == 6
- num_iter = 0
- for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
- assert len(item) == 3
- for field in item:
- if isinstance(item[field], np.ndarray):
- assert (item[field] ==
- data_value_to_list[num_iter][field]).all()
- else:
- assert item[field] == data_value_to_list[num_iter][field]
- num_iter += 1
- assert num_iter == 6
-
- num_readers = 2
- data_set = ds.MindDataset(dataset_file=mindrecord_file_name,
- columns_list=["image2", "image4"],
- num_parallel_workers=num_readers,
- shuffle=False)
- assert data_set.get_dataset_size() == 6
- num_iter = 0
- for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
- assert len(item) == 2
- for field in item:
- if isinstance(item[field], np.ndarray):
- assert (item[field] ==
- data_value_to_list[num_iter][field]).all()
- else:
- assert item[field] == data_value_to_list[num_iter][field]
- num_iter += 1
- assert num_iter == 6
-
- num_readers = 2
- data_set = ds.MindDataset(dataset_file=mindrecord_file_name,
- columns_list=["image5", "image2"],
- num_parallel_workers=num_readers,
- shuffle=False)
- assert data_set.get_dataset_size() == 6
- num_iter = 0
- for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
- assert len(item) == 2
- for field in item:
- if isinstance(item[field], np.ndarray):
- assert (item[field] ==
- data_value_to_list[num_iter][field]).all()
- else:
- assert item[field] == data_value_to_list[num_iter][field]
- num_iter += 1
- assert num_iter == 6
-
- num_readers = 2
- data_set = ds.MindDataset(dataset_file=mindrecord_file_name,
- columns_list=["image5", "image2", "label"],
- num_parallel_workers=num_readers,
- shuffle=False)
- assert data_set.get_dataset_size() == 6
- num_iter = 0
- for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
- assert len(item) == 3
- for field in item:
- if isinstance(item[field], np.ndarray):
- assert (item[field] ==
- data_value_to_list[num_iter][field]).all()
- else:
- assert item[field] == data_value_to_list[num_iter][field]
- num_iter += 1
- assert num_iter == 6
-
- num_readers = 2
- data_set = ds.MindDataset(dataset_file=mindrecord_file_name,
- columns_list=["image4", "image5",
- "image2", "image3", "file_name"],
- num_parallel_workers=num_readers,
- shuffle=False)
- assert data_set.get_dataset_size() == 6
- num_iter = 0
- for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
- assert len(item) == 5
- for field in item:
- if isinstance(item[field], np.ndarray):
- assert (item[field] ==
- data_value_to_list[num_iter][field]).all()
- else:
- assert item[field] == data_value_to_list[num_iter][field]
- num_iter += 1
- assert num_iter == 6
- except Exception as error:
- os.remove("{}".format(mindrecord_file_name))
- os.remove("{}.db".format(mindrecord_file_name))
- raise error
- else:
- os.remove("{}".format(mindrecord_file_name))
- os.remove("{}.db".format(mindrecord_file_name))
-
-
def test_write_with_multi_array_and_MindDataset():
    """Write samples made of multiple variable-length int64 arrays to a
    mindrecord file, then read them back through MindDataset with several
    column projections and verify every value round-trips unchanged.
    """
    mindrecord_file_name = "test.mindrecord"
    try:
        # Each field has a fixed base array; sample i adds an offset to the
        # first element only (10*i for the source_sos_* fields, 100*i for the
        # others).  This reproduces the original hand-written fixture values.
        field_specs = [("source_sos_ids", [1, 2, 3, 4, 5], 10),
                       ("source_sos_mask", [6, 7, 8, 9, 10, 11, 12], 10),
                       ("source_eos_ids", [13, 14, 15, 16, 17, 18], 100),
                       ("source_eos_mask", [19, 20, 21, 22, 23, 24, 25, 26, 27], 100),
                       ("target_sos_ids", [28, 29, 30, 31, 32], 100),
                       ("target_sos_mask", [33, 34, 35, 36, 37, 38], 100),
                       ("target_eos_ids", [39, 40, 41, 42, 43, 44, 45, 46, 47], 100),
                       ("target_eos_mask", [48, 49, 50, 51], 100)]
        data = []
        for i in range(6):
            sample = {}
            for name, base, step in field_specs:
                array = np.array(base, dtype=np.int64)
                array[0] += step * i
                sample[name] = array
            data.append(sample)

        writer = FileWriter(mindrecord_file_name)
        # All fields are 1-D int64 arrays of unknown length.
        schema = {name: {"type": "int64", "shape": [-1]}
                  for name, _, _ in field_specs}
        writer.add_schema(schema, "data is so cool")
        writer.write_raw_data(data)
        writer.commit()

        def check_read(columns_list, expected_columns, num_readers=2):
            """Read the file back (optionally projecting columns) and check
            every row against the data that was written."""
            data_set = ds.MindDataset(dataset_file=mindrecord_file_name,
                                      columns_list=columns_list,
                                      num_parallel_workers=num_readers,
                                      shuffle=False)
            assert data_set.get_dataset_size() == 6
            num_iter = 0
            for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
                assert len(item) == expected_columns
                for field in item:
                    if isinstance(item[field], np.ndarray):
                        assert (item[field] == data[num_iter][field]).all()
                    else:
                        assert item[field] == data[num_iter][field]
                num_iter += 1
            assert num_iter == 6

        # Full read plus assorted column projections (including reordered
        # column lists and a single-reader pass).
        check_read(None, 8)
        check_read(["source_eos_ids", "source_eos_mask",
                    "target_sos_ids", "target_sos_mask",
                    "target_eos_ids", "target_eos_mask"], 6)
        check_read(["source_sos_ids", "target_sos_ids", "target_eos_mask"], 3)
        check_read(["target_eos_mask", "source_eos_mask", "source_sos_mask"], 3)
        check_read(["target_eos_ids"], 1)
        check_read(["target_eos_mask", "target_eos_ids",
                    "target_sos_mask", "target_sos_ids",
                    "source_eos_mask", "source_eos_ids",
                    "source_sos_mask", "source_sos_ids"], 8, num_readers=1)
    finally:
        # Remove the mindrecord file and its index on success or failure.
        os.remove("{}".format(mindrecord_file_name))
        os.remove("{}.db".format(mindrecord_file_name))
-
-
def test_numpy_generic():
    """Write numpy scalar types (int32/int64/float32/float64) through
    FileWriter and verify MindDataset returns the values that were written.

    Bug fix: the original assertions compared each item against itself
    (e.g. ``assert item['label1'] == item['label1']``), which is always
    true; they now compare against the written rows.
    """
    paths = ["{}{}".format(CV_FILE_NAME, str(x).rjust(1, '0'))
             for x in range(FILES_NUM)]
    try:
        # Clear out any leftovers from a previous run.
        for x in paths:
            if os.path.exists("{}".format(x)):
                os.remove("{}".format(x))
            if os.path.exists("{}.db".format(x)):
                os.remove("{}.db".format(x))
        writer = FileWriter(CV_FILE_NAME, FILES_NUM)
        cv_schema_json = {"label1": {"type": "int32"}, "label2": {"type": "int64"},
                          "label3": {"type": "float32"}, "label4": {"type": "float64"}}
        data = []
        for idx in range(10):
            row = {}
            row['label1'] = np.int32(idx)
            row['label2'] = np.int64(idx * 10)
            row['label3'] = np.float32(idx + 0.12345)
            row['label4'] = np.float64(idx + 0.12345789)
            data.append(row)
        writer.add_schema(cv_schema_json, "img_schema")
        writer.write_raw_data(data)
        writer.commit()

        num_readers = 4
        data_set = ds.MindDataset(CV_FILE_NAME + "0", None, num_readers, shuffle=False)
        assert data_set.get_dataset_size() == 10
        read_rows = list(data_set.create_dict_iterator(num_epochs=1, output_numpy=True))
        assert len(read_rows) == 10
        # Sort by label1 so the comparison does not depend on shard
        # interleaving order, then check every field against the written row.
        read_rows.sort(key=lambda r: int(r['label1']))
        for idx, item in enumerate(read_rows):
            assert item['label1'] == data[idx]['label1']
            assert item['label2'] == data[idx]['label2']
            assert item['label3'] == data[idx]['label3']
            assert item['label4'] == data[idx]['label4']
    finally:
        # Remove all shard files and their indexes on success or failure.
        for x in paths:
            if os.path.exists("{}".format(x)):
                os.remove("{}".format(x))
            if os.path.exists("{}.db".format(x)):
                os.remove("{}.db".format(x))
-
-
def test_write_with_float32_float64_float32_array_float64_array_and_MindDataset():
    """Write scalar and array float32/float64/int32/int64 fields to a
    mindrecord file and read them back through MindDataset with several
    column projections, comparing every value against what was written.

    float32 fields are compared after an explicit float32 cast of the
    expected value; the scalar float64 column is compared with
    math.isclose (rel_tol 1e-14), as in the original checks.
    """
    mindrecord_file_name = "test.mindrecord"
    try:
        data = [{"float32_array": np.array([1.2, 2.78, 3.1234, 4.9871, 5.12341], dtype=np.float32),
                 "float64_array": np.array([48.1234556789, 49.3251241431, 50.13514312414, 51.8971298471,
                                            123414314.2141243, 87.1212122], dtype=np.float64),
                 "float32": 3456.12345,
                 "float64": 1987654321.123456785,
                 "int32_array": np.array([1, 2, 3, 4, 5], dtype=np.int32),
                 "int64_array": np.array([48, 49, 50, 51, 123414314, 87], dtype=np.int64),
                 "int32": 3456,
                 "int64": 947654321123},
                {"float32_array": np.array([1.2, 2.78, 4.1234, 4.9871, 5.12341], dtype=np.float32),
                 "float64_array": np.array([48.1234556789, 49.3251241431, 60.13514312414, 51.8971298471,
                                            123414314.2141243, 87.1212122], dtype=np.float64),
                 "float32": 3456.12445,
                 "float64": 1987654321.123456786,
                 "int32_array": np.array([11, 21, 31, 41, 51], dtype=np.int32),
                 "int64_array": np.array([481, 491, 501, 511, 1234143141, 871], dtype=np.int64),
                 "int32": 3466,
                 "int64": 957654321123},
                {"float32_array": np.array([1.2, 2.78, 5.1234, 4.9871, 5.12341], dtype=np.float32),
                 "float64_array": np.array([48.1234556789, 49.3251241431, 70.13514312414, 51.8971298471,
                                            123414314.2141243, 87.1212122], dtype=np.float64),
                 "float32": 3456.12545,
                 "float64": 1987654321.123456787,
                 "int32_array": np.array([12, 22, 32, 42, 52], dtype=np.int32),
                 "int64_array": np.array([482, 492, 502, 512, 1234143142, 872], dtype=np.int64),
                 "int32": 3476,
                 "int64": 967654321123},
                {"float32_array": np.array([1.2, 2.78, 6.1234, 4.9871, 5.12341], dtype=np.float32),
                 "float64_array": np.array([48.1234556789, 49.3251241431, 80.13514312414, 51.8971298471,
                                            123414314.2141243, 87.1212122], dtype=np.float64),
                 "float32": 3456.12645,
                 "float64": 1987654321.123456788,
                 "int32_array": np.array([13, 23, 33, 43, 53], dtype=np.int32),
                 "int64_array": np.array([483, 493, 503, 513, 1234143143, 873], dtype=np.int64),
                 "int32": 3486,
                 "int64": 977654321123},
                {"float32_array": np.array([1.2, 2.78, 7.1234, 4.9871, 5.12341], dtype=np.float32),
                 "float64_array": np.array([48.1234556789, 49.3251241431, 90.13514312414, 51.8971298471,
                                            123414314.2141243, 87.1212122], dtype=np.float64),
                 "float32": 3456.12745,
                 "float64": 1987654321.123456789,
                 "int32_array": np.array([14, 24, 34, 44, 54], dtype=np.int32),
                 "int64_array": np.array([484, 494, 504, 514, 1234143144, 874], dtype=np.int64),
                 "int32": 3496,
                 "int64": 987654321123},
                ]
        writer = FileWriter(mindrecord_file_name)
        schema = {"float32_array": {"type": "float32", "shape": [-1]},
                  "float64_array": {"type": "float64", "shape": [-1]},
                  "float32": {"type": "float32"},
                  "float64": {"type": "float64"},
                  "int32_array": {"type": "int32", "shape": [-1]},
                  "int64_array": {"type": "int64", "shape": [-1]},
                  "int32": {"type": "int32"},
                  "int64": {"type": "int64"}}
        writer.add_schema(schema, "data is so cool")
        writer.write_raw_data(data)
        writer.commit()

        def check_read(columns_list, expected_columns, float64_isclose=False):
            """Read back (optionally projecting columns) and verify each row.

            float64_isclose: compare float64 ndarray fields with
            math.isclose(rel_tol=1e-14) instead of exact equality (used for
            the scalar float64 column).
            """
            data_set = ds.MindDataset(dataset_file=mindrecord_file_name,
                                      columns_list=columns_list,
                                      num_parallel_workers=2,
                                      shuffle=False)
            assert data_set.get_dataset_size() == 5
            num_iter = 0
            for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
                assert len(item) == expected_columns
                for field in item:
                    expected = data[num_iter][field]
                    if isinstance(item[field], np.ndarray):
                        if item[field].dtype == np.float32:
                            assert (item[field] ==
                                    np.array(expected, np.float32)).all()
                        elif float64_isclose and item[field].dtype == np.float64:
                            assert math.isclose(item[field],
                                                np.array(expected, np.float64),
                                                rel_tol=1e-14)
                        else:
                            assert (item[field] == expected).all()
                    else:
                        assert item[field] == expected
                num_iter += 1
            assert num_iter == 5

        check_read(None, 8)
        check_read(["float32", "int32"], 2)
        check_read(["float64", "int64"], 2, float64_isclose=True)
    finally:
        # Remove the mindrecord file and its index on success or failure.
        os.remove("{}".format(mindrecord_file_name))
        os.remove("{}.db".format(mindrecord_file_name))
-
# Multi-file fixture layout: each mindrecord file name mapped to the number
# of samples it holds (used by the create_multi_mindrecord_files fixture).
FILES = ["0.mindrecord", "1.mindrecord", "2.mindrecord", "3.mindrecord"]
ITEMS = [10, 14, 8, 20]
FILES_ITEMS = {FILES[0]: ITEMS[0], FILES[1]: ITEMS[1], FILES[2]: ITEMS[2], FILES[3]: ITEMS[3]}
-
@pytest.fixture
def create_multi_mindrecord_files():
    """files: {0.mindrecord : 10, 1.mindrecord : 14, 2.mindrecord : 8, 3.mindrecord : 20}

    Creates the four mindrecord files described by FILES_ITEMS, each holding
    consecutive int32 ``id`` values (0..9, 10..23, 24..31, 32..51), yields to
    the test, then removes the files.

    Bug fix: the exception-path cleanup referenced a misspelled name
    (``FILES_ITMES``), which raised NameError instead of cleaning up; the
    cleanup now runs in a single ``finally`` block.
    """
    try:
        index = 0
        for filename, value in FILES_ITEMS.items():
            # Remove leftovers from a previous run.
            if os.path.exists(filename):
                os.remove("{}".format(filename))
                os.remove("{}.db".format(filename))

            data_list = [{'id': i + index} for i in range(value)]
            index += value

            writer = FileWriter(filename)
            writer.add_schema({"id": {"type": "int32"}}, "data is so cool")
            writer.write_raw_data(data_list)
            writer.commit()
        yield "yield_create_multi_mindrecord_files"
    finally:
        # Clean up on both the success and failure paths.
        for filename in FILES_ITEMS:
            if os.path.exists(filename):
                os.remove("{}".format(filename))
                os.remove("{}.db".format(filename))
-
def test_shuffle_with_global_infile_files(create_multi_mindrecord_files):
    """Exercise every MindDataset shuffle mode over four mindrecord files.

    Expectations mirror the original checks: the default / shuffle=True /
    Shuffle.GLOBAL passes must change the per-file chunks, shuffle=False must
    preserve them, Shuffle.INFILE must only permute rows inside each file,
    and Shuffle.FILES must only permute whole files.
    """
    # Expected content per file: ids are consecutive across the four files.
    datas_all = []
    index = 0
    for filename in FILES_ITEMS:
        value = FILES_ITEMS[filename]
        datas_all.append([{'id': np.array(i + index, dtype=np.int32)}
                          for i in range(value)])
        index += value

    def read_grouped(**shuffle_kwargs):
        """Read all 52 rows and regroup them into four chunks whose sizes
        match the per-file sample counts (10, 14, 8, 20)."""
        data_set = ds.MindDataset(dataset_file=FILES,
                                  num_parallel_workers=2,
                                  **shuffle_kwargs)
        assert data_set.get_dataset_size() == 52
        chunks = []
        chunk = []
        num_iter = 0
        for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
            assert len(item) == 1
            chunk.append(item)
            # Chunk boundaries are the last index of each file's rows.
            if num_iter in (9, 23, 31, 51):
                chunks.append(chunk)
                chunk = []
            num_iter += 1
        assert data_set.get_dataset_size() == 52
        return chunks

    def compare_chunks(chunks, same_order):
        """Compare each read chunk against the expected per-file content."""
        assert len(datas_all) == len(chunks)
        for i, expected in enumerate(datas_all):
            assert len(expected) == len(chunks[i])
            if same_order:
                assert expected == chunks[i]
            else:
                assert expected != chunks[i]

    # no shuffle parameter — rows are expected to be reordered by default
    compare_chunks(read_grouped(), same_order=False)
    # shuffle=False keeps the written order
    compare_chunks(read_grouped(shuffle=False), same_order=True)
    # shuffle=True reorders rows
    compare_chunks(read_grouped(shuffle=True), same_order=False)
    # shuffle=Shuffle.GLOBAL reorders rows
    compare_chunks(read_grouped(shuffle=ds.Shuffle.GLOBAL), same_order=False)

    # shuffle=Shuffle.INFILE: rows move only within their own file, so
    # sorting each chunk by id must recover the original per-file content.
    chunks = read_grouped(shuffle=ds.Shuffle.INFILE)
    assert len(datas_all) == len(chunks)
    for i, expected in enumerate(datas_all):
        assert len(expected) == len(chunks[i])
        assert expected != chunks[i]
        sorted_ids = sorted(int(item[key]) for item in chunks[i] for key in item)
        assert expected == [{'id': np.array(v, dtype=np.int32)}
                            for v in sorted_ids]

    # shuffle=Shuffle.FILES: whole files are permuted, rows inside each
    # file keep their order.
    data_set = ds.MindDataset(dataset_file=FILES,
                              num_parallel_workers=2,
                              shuffle=ds.Shuffle.FILES)
    assert data_set.get_dataset_size() == 52
    data_list = []
    for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
        assert len(item) == 1
        data_list.append(item)
    assert data_set.get_dataset_size() == 52

    origin_index = list(range(len(ITEMS)))
    current_index = []
    datas_index = 0
    for _ in range(len(ITEMS)):
        # Identify which file this run of rows came from by its first id.
        first_id = data_list[datas_index]['id']
        if first_id < 10:
            shard = 0
        elif first_id < 24:
            shard = 1
        elif first_id < 32:
            shard = 2
        elif first_id < 52:
            shard = 3
        else:
            raise ValueError("Index out of range")
        shard_size = ITEMS[shard]

        tmp_datas = data_list[datas_index:datas_index + shard_size]
        current_index.append(shard)
        assert len(datas_all[shard]) == len(tmp_datas)
        assert datas_all[shard] == tmp_datas

        datas_index += shard_size
    assert origin_index != current_index
-
def test_distributed_shuffle_with_global_infile_files(create_multi_mindrecord_files):
    """Same shuffle-mode checks as test_shuffle_with_global_infile_files,
    but reading through four distributed shards (num_shards=4 with each
    shard_id), so each shard yields 13 of the 52 rows.
    """
    # Expected per-file content plus a flat list of all 52 samples.
    datas_all = []
    datas_all_samples = []
    index = 0
    for filename in FILES_ITEMS:
        value = FILES_ITEMS[filename]
        data_list = []
        for i in range(value):
            data = {'id': np.array(i + index, dtype=np.int32)}
            data_list.append(data)
            datas_all_samples.append(data)
        index += value
        datas_all.append(data_list)

    def read_shard(shard_id, **shuffle_kwargs):
        """Read one of four distributed shards (13 rows) and return its rows."""
        data_set = ds.MindDataset(dataset_file=FILES,
                                  num_parallel_workers=2,
                                  num_shards=4,
                                  shard_id=shard_id,
                                  **shuffle_kwargs)
        assert data_set.get_dataset_size() == 13
        rows = []
        for item in data_set.create_dict_iterator(num_epochs=1, output_numpy=True):
            assert len(item) == 1
            rows.append(item)
        assert len(rows) == 13
        return rows

    # no shuffle parameter — rows are expected to be reordered by default
    assert read_shard(3) != datas_all_samples[3*13:]
    # shuffle=False: the shard matches the written order exactly
    assert read_shard(2, shuffle=False) == datas_all_samples[2*13:3*13]
    # shuffle=True reorders rows
    assert read_shard(1, shuffle=True) != datas_all_samples[1*13:2*13]
    # shuffle=Shuffle.GLOBAL reorders rows
    assert read_shard(0, shuffle=ds.Shuffle.GLOBAL) != datas_all_samples[0:1*13]

    # shuffle=Shuffle.INFILE: concatenating all four shards regroups into
    # per-file chunks; each chunk is a permutation of its file's content.
    output_datas = []
    for shard_id in range(4):
        output_datas += read_shard(shard_id, shuffle=ds.Shuffle.INFILE)
    assert len(output_datas) == 52

    chunks = []
    chunk = []
    for num_iter, item in enumerate(output_datas):
        assert len(item) == 1
        chunk.append(item)
        # Chunk boundaries are the last index of each file's rows.
        if num_iter in (9, 23, 31, 51):
            chunks.append(chunk)
            chunk = []

    assert len(datas_all) == len(chunks)
    for i, expected in enumerate(datas_all):
        assert len(expected) == len(chunks[i])
        assert expected != chunks[i]
        # Sorting each chunk by id must recover the original file content.
        sorted_ids = sorted(int(item[key]) for item in chunks[i] for key in item)
        assert expected == [{'id': np.array(v, dtype=np.int32)}
                            for v in sorted_ids]

    # shuffle=Shuffle.FILES: whole files are permuted, rows inside each
    # file keep their order.
    data_list = []
    for shard_id in range(4):
        data_list += read_shard(shard_id, shuffle=ds.Shuffle.FILES)
    assert len(data_list) == 52

    origin_index = list(range(len(ITEMS)))
    current_index = []
    datas_index = 0
    for _ in range(len(ITEMS)):
        # Identify which file this run of rows came from by its first id.
        first_id = data_list[datas_index]['id']
        if first_id < 10:
            shard = 0
        elif first_id < 24:
            shard = 1
        elif first_id < 32:
            shard = 2
        elif first_id < 52:
            shard = 3
        else:
            raise ValueError("Index out of range")
        shard_size = ITEMS[shard]

        tmp_datas = data_list[datas_index:datas_index + shard_size]
        current_index.append(shard)
        assert len(datas_all[shard]) == len(tmp_datas)
        assert datas_all[shard] == tmp_datas

        datas_index += shard_size
    assert origin_index != current_index
-
def test_distributed_shuffle_with_multi_epochs(create_multi_mindrecord_files):
    """Read 4 shards of the multi-file MindRecord set for 3 epochs under every
    shuffle mode and verify the per-epoch ordering guarantees:

    - no shuffle parameter / shuffle=True / Shuffle.GLOBAL / Shuffle.INFILE:
      every epoch differs from the sequential order AND from the previous epoch;
    - shuffle=False: every epoch replays the samples in their original order;
    - Shuffle.FILES: the concatenation of all shards differs between epochs.
    """
    # Expected samples in sequential (unshuffled) order: consecutive int32 ids
    # laid out file by file according to FILES_ITEMS (13 samples per shard).
    datas_all_samples = []
    index = 0
    for value in FILES_ITEMS.values():
        for i in range(value):
            datas_all_samples.append({'id': np.array(i + index, dtype=np.int32)})
        index += value

    epoch_size = 3
    num_readers = 2

    def _assert_reshuffled_each_epoch(**shuffle_kwargs):
        # Shared check for every shuffle mode that must reshuffle per epoch.
        for shard_id in range(4):
            data_set = ds.MindDataset(dataset_file=FILES,
                                      num_parallel_workers=num_readers,
                                      num_shards=4,
                                      shard_id=shard_id,
                                      **shuffle_kwargs)
            assert data_set.get_dataset_size() == 13
            prev_epoch = []
            dataset_iter = data_set.create_dict_iterator(num_epochs=epoch_size, output_numpy=True)
            for _ in range(epoch_size):  # 3 epochs
                new_datas = []
                for item in dataset_iter:
                    assert len(item) == 1
                    new_datas.append(item)
                assert len(new_datas) == 13
                # Shuffled: not the sequential slice belonging to this shard...
                assert new_datas != datas_all_samples[shard_id*13:(shard_id+1)*13]
                # ...and reshuffled relative to the previous epoch.
                assert prev_epoch != new_datas
                prev_epoch = new_datas

    # no shuffle parameter (the default must still reshuffle per epoch)
    _assert_reshuffled_each_epoch()

    # shuffle=False: each epoch must reproduce the original order exactly
    for shard_id in range(4):
        data_set = ds.MindDataset(dataset_file=FILES,
                                  num_parallel_workers=num_readers,
                                  shuffle=False,
                                  num_shards=4,
                                  shard_id=shard_id)
        assert data_set.get_dataset_size() == 13
        dataset_iter = data_set.create_dict_iterator(num_epochs=epoch_size, output_numpy=True)
        for _ in range(epoch_size):  # 3 epochs
            new_datas = []
            for item in dataset_iter:
                assert len(item) == 1
                new_datas.append(item)
            assert len(new_datas) == 13
            assert new_datas == datas_all_samples[shard_id*13:(shard_id+1)*13]

    # shuffle=True / Shuffle.GLOBAL / Shuffle.INFILE: same per-epoch guarantees
    _assert_reshuffled_each_epoch(shuffle=True)
    _assert_reshuffled_each_epoch(shuffle=ds.Shuffle.GLOBAL)
    _assert_reshuffled_each_epoch(shuffle=ds.Shuffle.INFILE)

    # shuffle=Shuffle.FILES: collect all shards per epoch, then compare epochs
    epoch_datas = ([], [], [])
    for shard_id in range(4):
        data_set = ds.MindDataset(dataset_file=FILES,
                                  num_parallel_workers=num_readers,
                                  shuffle=ds.Shuffle.FILES,
                                  num_shards=4,
                                  shard_id=shard_id)
        assert data_set.get_dataset_size() == 13
        dataset_iter = data_set.create_dict_iterator(num_epochs=epoch_size, output_numpy=True)
        for epoch in range(epoch_size):  # 3 epochs
            num_iter = 0
            for item in dataset_iter:
                assert len(item) == 1
                epoch_datas[epoch].append(item)
                num_iter += 1
            assert num_iter == 13
    datas_epoch1, datas_epoch2, datas_epoch3 = epoch_datas
    # The sample order must differ between all three epochs.
    assert datas_epoch1 not in (datas_epoch2, datas_epoch3)
    assert datas_epoch2 not in (datas_epoch1, datas_epoch3)
    assert datas_epoch3 not in (datas_epoch2, datas_epoch1)
-
def test_field_is_null_numpy():
    """Write 16 records whose "array_d" field is an empty int64 array and
    verify that MindDataset reports the expected dataset size, output shapes
    and output types for every column.
    """
    paths = ["{}{}".format(NLP_FILE_NAME, str(x).rjust(1, '0'))
             for x in range(FILES_NUM)]

    def _remove_mindrecord_files():
        # Best-effort cleanup of the data files and their ".db" index files.
        for path in paths:
            if os.path.exists(path):
                os.remove(path)
            if os.path.exists("{}.db".format(path)):
                os.remove("{}.db".format(path))

    _remove_mindrecord_files()
    try:
        writer = FileWriter(NLP_FILE_NAME, FILES_NUM)
        data = []
        # field array_d is null (an empty int64 array) in every record
        for row_id in range(16):
            data.append({
                "label": row_id,
                "array_a": np.reshape(np.array([0, 1, -1, 127, -128, 128, -129,
                                                255, 256, -32768, 32767, -32769, 32768, -2147483648,
                                                2147483647], dtype=np.int32), [-1]),
                "array_b": np.reshape(np.array([0, 1, -1, 127, -128, 128, -129, 255,
                                                256, -32768, 32767, -32769, 32768,
                                                -2147483648, 2147483647, -2147483649, 2147483649,
                                                -922337036854775808, 9223372036854775807]), [1, -1]),
                "array_d": np.array([], dtype=np.int64)
            })
        nlp_schema_json = {"label": {"type": "int32"},
                           "array_a": {"type": "int32",
                                       "shape": [-1]},
                           "array_b": {"type": "int64",
                                       "shape": [1, -1]},
                           "array_d": {"type": "int64",
                                       "shape": [-1]}
                           }
        writer.set_header_size(1 << 14)
        writer.set_page_size(1 << 15)
        writer.add_schema(nlp_schema_json, "nlp_schema")
        writer.write_raw_data(data)
        writer.commit()

        data_set = ds.MindDataset(dataset_file=NLP_FILE_NAME + "0",
                                  columns_list=["label", "array_a", "array_b", "array_d"],
                                  num_parallel_workers=2,
                                  shuffle=False)
        assert data_set.get_dataset_size() == 16
        # The empty "array_d" column is reported with an empty ([]) shape.
        assert data_set.output_shapes() == [[], [15], [1, 19], []]
        assert data_set.output_types()[0] == np.int32
        assert data_set.output_types()[1] == np.int32
        assert data_set.output_types()[2] == np.int64
        assert data_set.output_types()[3] == np.int64
    finally:
        # Clean up even when an assertion fails (the previous version leaked
        # the generated files on failure, and crashed if a file was missing).
        _remove_mindrecord_files()
-
def test_for_loop_dataset_iterator(add_and_remove_nlp_compress_file):
    """Verify create_dict_iterator behaviour when iterators are recreated in a
    loop, when one multi-epoch iterator is reused across epochs, and when
    several iterators over the same dataset are advanced interleaved — each
    iterator must keep its own independent position.
    """
    # Expected contents of the dataset written by the fixture, in order.
    data = []
    for row_id in range(16):
        data.append({
            "label": row_id,
            "array_a": np.reshape(np.array([0, 1, -1, 127, -128, 128, -129,
                                            255, 256, -32768, 32767, -32769, 32768, -2147483648,
                                            2147483647], dtype=np.int32), [-1]),
            "array_b": np.reshape(np.array([0, 1, -1, 127, -128, 128, -129, 255,
                                            256, -32768, 32767, -32769, 32768,
                                            -2147483648, 2147483647, -2147483649, 2147483649,
                                            -922337036854775808, 9223372036854775807]), [1, -1]),
            "array_c": str.encode("nlp data"),
            "array_d": np.reshape(np.array([[-10, -127], [10, 127]]), [2, -1])
        })
    num_readers = 1
    data_set = ds.MindDataset(
        NLP_FILE_NAME + "0", None, num_readers, shuffle=False)
    assert data_set.get_dataset_size() == 16

    # create_dict_iterator in for loop: a fresh single-epoch iterator each
    # pass must replay the full dataset from the start.
    for _ in range(10):
        num_iter = 0
        for x, item in zip(data, data_set.create_dict_iterator(num_epochs=1, output_numpy=True)):
            assert (item["array_a"] == x["array_a"]).all()
            assert (item["array_b"] == x["array_b"]).all()
            assert item["array_c"].tobytes() == x["array_c"]
            assert (item["array_d"] == x["array_d"]).all()
            assert item["label"] == x["label"]
            num_iter += 1
        assert num_iter == 16

    # create_dict_iterator beyond for loop: one 10-epoch iterator reused
    # across passes must yield 16 samples per pass, 10 passes total.
    dataset_iter = data_set.create_dict_iterator(num_epochs=10, output_numpy=True)
    new_data = data * 10
    for _ in range(10):
        num_iter = 0
        for x, item in zip(new_data, dataset_iter):
            assert (item["array_a"] == x["array_a"]).all()
            assert (item["array_b"] == x["array_b"]).all()
            assert item["array_c"].tobytes() == x["array_c"]
            assert (item["array_d"] == x["array_d"]).all()
            assert item["label"] == x["label"]
            num_iter += 1
        assert num_iter == 16

    # create multiple iterators by user: advance them interleaved; each must
    # keep its own position independently of the others.
    dataset_iter2 = data_set.create_dict_iterator(num_epochs=1, output_numpy=True)
    assert (next(dataset_iter2)["array_a"] == data[0]["array_a"]).all()
    assert (next(dataset_iter2)["array_a"] == data[1]["array_a"]).all()

    dataset_iter3 = data_set.create_dict_iterator(num_epochs=1, output_numpy=True)
    assert (next(dataset_iter3)["array_a"] == data[0]["array_a"]).all()
    assert (next(dataset_iter3)["array_a"] == data[1]["array_a"]).all()
    assert (next(dataset_iter3)["array_a"] == data[2]["array_a"]).all()

    # dataset_iter2 resumes at position 2 despite dataset_iter3's progress.
    assert (next(dataset_iter2)["array_a"] == data[2]["array_a"]).all()
    assert (next(dataset_iter2)["array_a"] == data[3]["array_a"]).all()

    dataset_iter4 = data_set.create_dict_iterator(num_epochs=1, output_numpy=True)
    assert (next(dataset_iter4)["array_a"] == data[0]["array_a"]).all()
    assert (next(dataset_iter4)["array_a"] == data[1]["array_a"]).all()
    assert (next(dataset_iter4)["array_a"] == data[2]["array_a"]).all()

    # dataset_iter3 likewise resumes at position 3.
    assert (next(dataset_iter3)["array_a"] == data[3]["array_a"]).all()
    assert (next(dataset_iter3)["array_a"] == data[4]["array_a"]).all()
    assert (next(dataset_iter3)["array_a"] == data[5]["array_a"]).all()
-
if __name__ == '__main__':
    # Allow running this file directly (outside pytest): the fixture functions
    # defined earlier in the file are passed in explicitly as plain arguments.
    test_nlp_compress_data(add_and_remove_nlp_compress_file)
    test_nlp_compress_data_old_version(add_and_remove_nlp_compress_file)
    test_cv_minddataset_writer_tutorial()
    test_cv_minddataset_partition_tutorial(add_and_remove_cv_file)
    test_cv_minddataset_partition_num_samples_0(add_and_remove_cv_file)
    test_cv_minddataset_partition_num_samples_1(add_and_remove_cv_file)
    test_cv_minddataset_partition_num_samples_2(add_and_remove_cv_file)
    test_cv_minddataset_partition_tutorial_check_shuffle_result(add_and_remove_cv_file)
    test_cv_minddataset_partition_tutorial_check_whole_reshuffle_result_per_epoch(add_and_remove_cv_file)
    test_cv_minddataset_check_shuffle_result(add_and_remove_cv_file)
    test_cv_minddataset_dataset_size(add_and_remove_cv_file)
    test_cv_minddataset_repeat_reshuffle(add_and_remove_cv_file)
    test_cv_minddataset_batch_size_larger_than_records(add_and_remove_cv_file)
    test_cv_minddataset_issue_888(add_and_remove_cv_file)
    test_cv_minddataset_blockreader_tutorial(add_and_remove_cv_file)
    test_cv_minddataset_blockreader_some_field_not_in_index_tutorial(add_and_remove_cv_file)
    test_cv_minddataset_reader_file_list(add_and_remove_cv_file)
    test_cv_minddataset_reader_one_partition(add_and_remove_cv_file)
    test_cv_minddataset_reader_two_dataset(add_and_remove_cv_file)
    test_cv_minddataset_reader_two_dataset_partition(add_and_remove_cv_file)
    test_cv_minddataset_reader_basic_tutorial(add_and_remove_cv_file)
    test_nlp_minddataset_reader_basic_tutorial(add_and_remove_cv_file)
    test_cv_minddataset_reader_basic_tutorial_5_epoch(add_and_remove_cv_file)
    test_cv_minddataset_reader_basic_tutorial_5_epoch_with_batch(add_and_remove_cv_file)
    test_cv_minddataset_reader_no_columns(add_and_remove_cv_file)
    test_cv_minddataset_reader_repeat_tutorial(add_and_remove_cv_file)
    test_write_with_multi_bytes_and_array_and_read_by_MindDataset()
    test_write_with_multi_bytes_and_MindDataset()
    test_write_with_multi_array_and_MindDataset()
    test_numpy_generic()
    test_write_with_float32_float64_float32_array_float64_array_and_MindDataset()
    test_shuffle_with_global_infile_files(create_multi_mindrecord_files)
    test_distributed_shuffle_with_global_infile_files(create_multi_mindrecord_files)
    test_distributed_shuffle_with_multi_epochs(create_multi_mindrecord_files)
    test_field_is_null_numpy()
    test_for_loop_dataset_iterator(add_and_remove_nlp_compress_file)
|