
datasets.cc

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "minddata/dataset/include/datasets.h"
#include <algorithm>
#include <fstream>
#include <unordered_set>
#include <utility>
#include "minddata/dataset/include/samplers.h"
#include "minddata/dataset/include/transforms.h"
// Source dataset headers (in alphabetical order)
#include "minddata/dataset/engine/dataset_iterator.h"
#include "minddata/dataset/engine/datasetops/source/album_op.h"
#include "minddata/dataset/engine/datasetops/source/celeba_op.h"
#include "minddata/dataset/engine/datasetops/source/cifar_op.h"
#include "minddata/dataset/engine/datasetops/source/clue_op.h"
#include "minddata/dataset/engine/datasetops/source/coco_op.h"
#include "minddata/dataset/engine/datasetops/source/csv_op.h"
#include "minddata/dataset/engine/datasetops/source/image_folder_op.h"
#ifndef ENABLE_ANDROID
#include "minddata/dataset/engine/datasetops/source/manifest_op.h"
#include "minddata/dataset/engine/datasetops/source/mindrecord_op.h"
#include "minddata/dataset/engine/ir/cache/dataset_cache_impl.h"
#endif
#include "minddata/dataset/engine/datasetops/source/mnist_op.h"
#include "minddata/dataset/engine/datasetops/source/random_data_op.h"
#include "minddata/dataset/engine/datasetops/source/text_file_op.h"
#ifndef ENABLE_ANDROID
#include "minddata/dataset/engine/datasetops/source/tf_reader_op.h"
#include "minddata/dataset/engine/datasetops/source/voc_op.h"
#endif
// Dataset operator headers (in alphabetical order)
#include "minddata/dataset/engine/datasetops/map_op/map_op.h"
#include "minddata/dataset/engine/datasetops/skip_op.h"
#include "minddata/dataset/engine/datasetops/zip_op.h"
// Sampler headers (in alphabetical order)
#include "minddata/dataset/engine/datasetops/source/sampler/random_sampler.h"
#include "minddata/dataset/engine/datasetops/source/sampler/sampler.h"
#include "minddata/dataset/engine/datasetops/source/sampler/sequential_sampler.h"
// IR non-leaf nodes
#include "minddata/dataset/engine/ir/datasetops/batch_node.h"
#include "minddata/dataset/engine/ir/datasetops/concat_node.h"
#include "minddata/dataset/engine/ir/datasetops/map_node.h"
#include "minddata/dataset/engine/ir/datasetops/project_node.h"
#include "minddata/dataset/engine/ir/datasetops/rename_node.h"
#include "minddata/dataset/engine/ir/datasetops/repeat_node.h"
#include "minddata/dataset/engine/ir/datasetops/shuffle_node.h"
#include "minddata/dataset/engine/ir/datasetops/skip_node.h"
#include "minddata/dataset/engine/ir/datasetops/take_node.h"
#include "minddata/dataset/engine/ir/datasetops/transfer_node.h"
#include "minddata/dataset/engine/ir/datasetops/zip_node.h"
#ifndef ENABLE_ANDROID
#include "minddata/dataset/engine/ir/datasetops/bucket_batch_by_length_node.h"
#include "minddata/dataset/engine/ir/datasetops/build_vocab_node.h"
#endif
#include "minddata/dataset/core/config_manager.h"
#include "minddata/dataset/util/path.h"
#include "minddata/dataset/util/random.h"
#include "minddata/dataset/util/services.h"
// IR leaf nodes
#include "minddata/dataset/engine/ir/datasetops/source/album_node.h"
#include "minddata/dataset/engine/ir/datasetops/source/celeba_node.h"
#include "minddata/dataset/engine/ir/datasetops/source/cifar100_node.h"
#include "minddata/dataset/engine/ir/datasetops/source/cifar10_node.h"
#include "minddata/dataset/engine/ir/datasetops/source/clue_node.h"
#include "minddata/dataset/engine/ir/datasetops/source/coco_node.h"
#include "minddata/dataset/engine/ir/datasetops/source/csv_node.h"
#include "minddata/dataset/engine/ir/datasetops/source/image_folder_node.h"
#include "minddata/dataset/engine/ir/datasetops/source/mnist_node.h"
#include "minddata/dataset/engine/ir/datasetops/source/text_file_node.h"
// IR leaf nodes disabled for android
#ifndef ENABLE_ANDROID
#include "minddata/dataset/engine/ir/datasetops/source/manifest_node.h"
#include "minddata/dataset/engine/ir/datasetops/source/minddata_node.h"
#include "minddata/dataset/engine/ir/datasetops/source/tf_record_node.h"
#include "minddata/dataset/engine/ir/datasetops/source/voc_node.h"
#endif

namespace mindspore {
namespace dataset {
namespace api {

// Function to create the iterator, which will build and launch the execution tree.
std::shared_ptr<Iterator> Dataset::CreateIterator(std::vector<std::string> columns) {
  std::shared_ptr<Iterator> iter;
  try {
    auto ds = shared_from_this();

    // The specified columns will be selected from the dataset and passed down the pipeline
    // in the order specified, other columns will be discarded.
    if (!columns.empty()) {
      ds = ds->Project(columns);
    }

    iter = std::make_shared<Iterator>();
    Status rc = iter->BuildAndLaunchTree(ds);
    if (rc.IsError()) {
      MS_LOG(ERROR) << "CreateIterator failed." << rc;
      return nullptr;
    }

    return iter;
  } catch (const std::exception &err) {
    MS_LOG(ERROR) << "CreateIterator: Iterator exception caught: " << err.what();
    return nullptr;
  }

  return iter;
}

// Function to return a transferred Node that transfers data through a device.
bool Dataset::DeviceQueue(bool send_epoch_end) {
  Status rc;

  // Build and launch tree
  std::unique_ptr<RuntimeContext> runtime_context = std::make_unique<RuntimeContext>();
  rc = runtime_context->Init();
  if (rc.IsError()) {
    MS_LOG(ERROR) << "Failed to init runtime context. Error status: " << rc;
    return false;
  }

  // Get a uuid for queue name
  std::string queue_name = Services::GetUniqueID();

  // TODO(CRC):
  // Get device type from ms context
  std::string device_type = "CPU";

  // Get device ID from children
  int32_t device_id = 0;
  rc = TransferNode::get_distribution(shared_from_this(), &device_id);
  if (rc.IsError()) {
    MS_LOG(ERROR) << "Failed to get shard id. Error status: " << rc;
    return false;
  }

  // Add TransferNode IR on top of dataset d
  auto ds = std::make_shared<TransferNode>(shared_from_this(), queue_name, device_id, device_type, send_epoch_end);

  // Get ToDevice consumer
  auto consumer = std::make_unique<ToDevice>(device_type, send_epoch_end, -1);
  ToDevice *consumer_ = consumer.get();
  rc = consumer->Init(ds);
  if (rc.IsError()) {
    MS_LOG(ERROR) << "ToDevice: Failed to init. Error status: " << rc;
    return false;
  }
  runtime_context->AssignConsumer(std::move(consumer));

  // Send data to device
  rc = consumer_->Send();
  if (rc.IsError()) {
    MS_LOG(ERROR) << "ToDevice: Failed to send data to device. Error status: " << rc;
    return false;
  }

  return true;
}

#ifndef ENABLE_ANDROID
// Function to create the saver, which will build and launch the execution tree and save data
bool Dataset::Save(std::string dataset_path, int32_t num_files, std::string dataset_type) {
  Status rc;

  // Build and launch tree
  auto ds = shared_from_this();
  std::unique_ptr<RuntimeContext> runtime_context = std::make_unique<RuntimeContext>();
  rc = runtime_context->Init();
  if (rc.IsError()) {
    MS_LOG(ERROR) << "CreateSaver failed." << rc;
    return false;
  }

  // Get SaveToDisk consumer
  auto consumer = std::make_unique<SaveToDisk>(dataset_path, num_files, dataset_type);
  rc = consumer->ValidateParams();
  if (rc.IsError()) {
    MS_LOG(ERROR) << "CreateSaver failed." << rc;
    return false;
  }
  SaveToDisk *consumer_ = consumer.get();
  rc = consumer->Init(ds);
  if (rc.IsError()) {
    MS_LOG(ERROR) << "CreateSaver failed." << rc;
    return false;
  }
  runtime_context->AssignConsumer(std::move(consumer));

  // Save data into file
  rc = consumer_->Save();
  if (rc.IsError()) {
    MS_LOG(ERROR) << "Saver: Failed to save data into file. Error status: " << rc;
    return false;
  }

  // Shut down the data pipeline
  rc = runtime_context->Terminate();
  if (rc.IsError()) {
    MS_LOG(ERROR) << "Saver: Failed to shut down pipeline. Error status: " << rc;
    return false;
  }

  return true;
}
#endif

// Constructor
Dataset::Dataset() {
  // Fetch some default value from config manager
  std::shared_ptr<ConfigManager> cfg = GlobalContext::config_manager();
  num_workers_ = cfg->num_parallel_workers();
  rows_per_buffer_ = cfg->rows_per_buffer();
  connector_que_size_ = cfg->op_connector_size();
  worker_connector_size_ = cfg->worker_connector_size();
  tree_getters_ = std::make_shared<TreeGetters>();
}

int64_t Dataset::GetDatasetSize() {
  int64_t dataset_size;
  auto ds = shared_from_this();
  Status rc;
  std::unique_ptr<RuntimeContext> runtime_context = std::make_unique<RuntimeContext>();
  rc = runtime_context->Init();
  if (rc.IsError()) {
    MS_LOG(ERROR) << "GetDatasetSize: Initializing RuntimeContext failed.";
    return -1;
  }
  if (!tree_getters_->isInitialized()) {
    rc = tree_getters_->Init(ds);
    if (rc.IsError()) {
      MS_LOG(ERROR) << "GetDatasetSize: Initializing TreeGetters failed.";
      return -1;
    }
  }
  rc = tree_getters_->GetDatasetSize(&dataset_size);
  return rc.IsError() ? -1 : dataset_size;
}

std::vector<DataType> Dataset::GetOutputTypes() {
  std::vector<DataType> types;
  Status s;
  if (!tree_getters_->isInitialized()) {
    s = tree_getters_->Init(shared_from_this());
    if (s.IsError()) {
      MS_LOG(ERROR) << "GetOutputTypes: Initializing TreeGetters failed.";
      return types;
    }
  }
  tree_getters_->GetOutputTypes(&types);
  return types;
}

std::vector<TensorShape> Dataset::GetOutputShapes() {
  std::vector<TensorShape> shapes;
  Status s;
  if (!tree_getters_->isInitialized()) {
    s = tree_getters_->Init(shared_from_this());
    if (s.IsError()) {
      MS_LOG(ERROR) << "GetOutputShapes: Initializing TreeGetters failed.";
      return shapes;
    }
  }
  tree_getters_->GetOutputShapes(&shapes);
  return shapes;
}

// Constructor to initialize the cache
Dataset::Dataset(const std::shared_ptr<DatasetCache> &dataset_cache) : Dataset() { cache_ = dataset_cache; }

/// \brief Function to create a SchemaObj
/// \param[in] schema_file Path of schema file
/// \return Shared pointer to the current schema
std::shared_ptr<SchemaObj> Schema(const std::string &schema_file) {
  auto schema = std::make_shared<SchemaObj>(schema_file);
  return schema->init() ? schema : nullptr;
}

// FUNCTIONS TO CREATE DATASETS FOR LEAF-NODE DATASETS
// (In alphabetical order)

// Function to create an AlbumNode.
std::shared_ptr<AlbumNode> Album(const std::string &dataset_dir, const std::string &data_schema,
                                 const std::vector<std::string> &column_names, bool decode,
                                 const std::shared_ptr<SamplerObj> &sampler) {
  auto ds = std::make_shared<AlbumNode>(dataset_dir, data_schema, column_names, decode, sampler);
  return ds->ValidateParams() ? ds : nullptr;
}

// Function to create a CelebANode.
std::shared_ptr<CelebANode> CelebA(const std::string &dataset_dir, const std::string &usage,
                                   const std::shared_ptr<SamplerObj> &sampler, bool decode,
                                   const std::set<std::string> &extensions,
                                   const std::shared_ptr<DatasetCache> &cache) {
  auto ds = std::make_shared<CelebANode>(dataset_dir, usage, sampler, decode, extensions, cache);
  // Call derived class validation method.
  return ds->ValidateParams() ? ds : nullptr;
}

// Function to create a Cifar10Node.
std::shared_ptr<Cifar10Node> Cifar10(const std::string &dataset_dir, const std::string &usage,
                                     const std::shared_ptr<SamplerObj> &sampler,
                                     const std::shared_ptr<DatasetCache> &cache) {
  auto ds = std::make_shared<Cifar10Node>(dataset_dir, usage, sampler, cache);
  // Call derived class validation method.
  return ds->ValidateParams() ? ds : nullptr;
}

// Function to create a Cifar100Node.
std::shared_ptr<Cifar100Node> Cifar100(const std::string &dataset_dir, const std::string &usage,
                                       const std::shared_ptr<SamplerObj> &sampler,
                                       const std::shared_ptr<DatasetCache> &cache) {
  auto ds = std::make_shared<Cifar100Node>(dataset_dir, usage, sampler, cache);
  // Call derived class validation method.
  return ds->ValidateParams() ? ds : nullptr;
}

// Function to create a CLUENode.
std::shared_ptr<CLUENode> CLUE(const std::vector<std::string> &clue_files, const std::string &task,
                               const std::string &usage, int64_t num_samples, ShuffleMode shuffle, int32_t num_shards,
                               int32_t shard_id, const std::shared_ptr<DatasetCache> &cache) {
  auto ds = std::make_shared<CLUENode>(clue_files, task, usage, num_samples, shuffle, num_shards, shard_id, cache);
  // Call derived class validation method.
  return ds->ValidateParams() ? ds : nullptr;
}

// Function to create a CocoNode.
std::shared_ptr<CocoNode> Coco(const std::string &dataset_dir, const std::string &annotation_file,
                               const std::string &task, const bool &decode, const std::shared_ptr<SamplerObj> &sampler,
                               const std::shared_ptr<DatasetCache> &cache) {
  auto ds = std::make_shared<CocoNode>(dataset_dir, annotation_file, task, decode, sampler, cache);
  // Call derived class validation method.
  return ds->ValidateParams() ? ds : nullptr;
}

// Function to create a CSVNode.
std::shared_ptr<CSVNode> CSV(const std::vector<std::string> &dataset_files, char field_delim,
                             const std::vector<std::shared_ptr<CsvBase>> &column_defaults,
                             const std::vector<std::string> &column_names, int64_t num_samples, ShuffleMode shuffle,
                             int32_t num_shards, int32_t shard_id, const std::shared_ptr<DatasetCache> &cache) {
  auto ds = std::make_shared<CSVNode>(dataset_files, field_delim, column_defaults, column_names, num_samples, shuffle,
                                      num_shards, shard_id, cache);
  // Call derived class validation method.
  return ds->ValidateParams() ? ds : nullptr;
}

// Function to create an ImageFolderNode.
std::shared_ptr<ImageFolderNode> ImageFolder(const std::string &dataset_dir, bool decode,
                                             const std::shared_ptr<SamplerObj> &sampler,
                                             const std::set<std::string> &extensions,
                                             const std::map<std::string, int32_t> &class_indexing,
                                             const std::shared_ptr<DatasetCache> &cache) {
  // This arg exists in ImageFolderOp, but is not externalized (in the Python API). The default value is false.
  bool recursive = false;

  // Create logical representation of ImageFolderNode.
  auto ds =
    std::make_shared<ImageFolderNode>(dataset_dir, decode, sampler, recursive, extensions, class_indexing, cache);
  // Call derived class validation method.
  return ds->ValidateParams() ? ds : nullptr;
}

#ifndef ENABLE_ANDROID
// Function to create a ManifestNode.
std::shared_ptr<ManifestNode> Manifest(const std::string &dataset_file, const std::string &usage,
                                       const std::shared_ptr<SamplerObj> &sampler,
                                       const std::map<std::string, int32_t> &class_indexing, bool decode,
                                       const std::shared_ptr<DatasetCache> &cache) {
  auto ds = std::make_shared<ManifestNode>(dataset_file, usage, sampler, class_indexing, decode, cache);
  // Call derived class validation method.
  return ds->ValidateParams() ? ds : nullptr;
}

// Function to create a MindDataNode.
std::shared_ptr<MindDataNode> MindData(const std::string &dataset_file, const std::vector<std::string> &columns_list,
                                       const std::shared_ptr<SamplerObj> &sampler, nlohmann::json padded_sample,
                                       int64_t num_padded) {
  auto ds = std::make_shared<MindDataNode>(dataset_file, columns_list, sampler, padded_sample, num_padded);
  // Call derived class validation method.
  return ds->ValidateParams() ? ds : nullptr;
}

// Function to create a MindDataNode.
std::shared_ptr<MindDataNode> MindData(const std::vector<std::string> &dataset_files,
                                       const std::vector<std::string> &columns_list,
                                       const std::shared_ptr<SamplerObj> &sampler, nlohmann::json padded_sample,
                                       int64_t num_padded) {
  auto ds = std::make_shared<MindDataNode>(dataset_files, columns_list, sampler, padded_sample, num_padded);
  // Call derived class validation method.
  return ds->ValidateParams() ? ds : nullptr;
}
#endif

// Function to create a MnistNode.
std::shared_ptr<MnistNode> Mnist(const std::string &dataset_dir, const std::string &usage,
                                 const std::shared_ptr<SamplerObj> &sampler,
                                 const std::shared_ptr<DatasetCache> &cache) {
  auto ds = std::make_shared<MnistNode>(dataset_dir, usage, sampler, cache);
  // Call derived class validation method.
  return ds->ValidateParams() ? ds : nullptr;
}

// Function to overload "+" operator to concat two datasets
std::shared_ptr<ConcatNode> operator+(const std::shared_ptr<Dataset> &datasets1,
                                      const std::shared_ptr<Dataset> &datasets2) {
  std::shared_ptr<ConcatNode> ds = std::make_shared<ConcatNode>(std::vector({datasets2, datasets1}));

  // Call derived class validation method.
  return ds->ValidateParams() ? ds : nullptr;
}

// Function to create a TextFileNode.
std::shared_ptr<TextFileNode> TextFile(const std::vector<std::string> &dataset_files, int64_t num_samples,
                                       ShuffleMode shuffle, int32_t num_shards, int32_t shard_id,
                                       const std::shared_ptr<DatasetCache> &cache) {
  auto ds = std::make_shared<TextFileNode>(dataset_files, num_samples, shuffle, num_shards, shard_id, cache);
  // Call derived class validation method.
  return ds->ValidateParams() ? ds : nullptr;
}

#ifndef ENABLE_ANDROID
// Function to create a VOCNode.
std::shared_ptr<VOCNode> VOC(const std::string &dataset_dir, const std::string &task, const std::string &usage,
                             const std::map<std::string, int32_t> &class_indexing, bool decode,
                             const std::shared_ptr<SamplerObj> &sampler, const std::shared_ptr<DatasetCache> &cache) {
  auto ds = std::make_shared<VOCNode>(dataset_dir, task, usage, class_indexing, decode, sampler, cache);
  // Call derived class validation method.
  return ds->ValidateParams() ? ds : nullptr;
}
#endif

// Function to create a ZipNode.
std::shared_ptr<ZipNode> Zip(const std::vector<std::shared_ptr<Dataset>> &datasets) {
  auto ds = std::make_shared<ZipNode>(datasets);
  // Call derived class validation method.
  return ds->ValidateParams() ? ds : nullptr;
}

// FUNCTIONS TO CREATE DATASETS FOR DATASET OPS
// (In alphabetical order)

// Function to create a Batch dataset
std::shared_ptr<BatchNode> Dataset::Batch(int32_t batch_size, bool drop_remainder) {
  // Default values
  std::vector<std::string> cols_to_map = {};
  std::map<std::string, std::pair<TensorShape, std::shared_ptr<Tensor>>> pad_map;
  bool pad = false;
  auto ds = std::make_shared<BatchNode>(shared_from_this(), batch_size, drop_remainder, pad, cols_to_map, pad_map);

  if (!ds->ValidateParams()) {
    return nullptr;
  }

  return ds;
}

#ifndef ENABLE_ANDROID
// Function to create a BucketBatchByLength dataset
std::shared_ptr<BucketBatchByLengthNode> Dataset::BucketBatchByLength(
  const std::vector<std::string> &column_names, const std::vector<int32_t> &bucket_boundaries,
  const std::vector<int32_t> &bucket_batch_sizes, std::function<TensorRow(TensorRow)> element_length_function,
  const std::map<std::string, std::pair<TensorShape, std::shared_ptr<Tensor>>> &pad_info, bool pad_to_bucket_boundary,
  bool drop_remainder) {
  auto ds = std::make_shared<BucketBatchByLengthNode>(shared_from_this(), column_names, bucket_boundaries,
                                                      bucket_batch_sizes, element_length_function, pad_info,
                                                      pad_to_bucket_boundary, drop_remainder);

  if (!ds->ValidateParams()) {
    return nullptr;
  }

  return ds;
}

// Function to create a Vocab from dataset
std::shared_ptr<Vocab> Dataset::BuildVocab(const std::vector<std::string> &columns,
                                           const std::pair<int64_t, int64_t> &freq_range, int64_t top_k,
                                           const std::vector<std::string> &special_tokens, bool special_first) {
  auto vocab = std::make_shared<Vocab>();
  auto ds = std::make_shared<BuildVocabNode>(shared_from_this(), vocab, columns, freq_range, top_k, special_tokens,
                                             special_first);

  if (!ds->ValidateParams()) {
    return nullptr;
  }

  // Run the tree here to start building the vocab
  std::shared_ptr<Iterator> iter = ds->CreateIterator();
  if (iter == nullptr) {
    MS_LOG(ERROR) << "Fail to run iterator in BuildVocab.";
    return nullptr;
  }

  // Finish building vocab by triggering GetNextRow
  std::unordered_map<std::string, std::shared_ptr<Tensor>> row;
  if (!iter->GetNextRow(&row)) {
    return nullptr;
  }

  return vocab;
}
#endif

// Function to create a Concat dataset
std::shared_ptr<ConcatNode> Dataset::Concat(const std::vector<std::shared_ptr<Dataset>> &datasets) {
  auto ds = std::make_shared<ConcatNode>(datasets);
  ds->children.push_back(shared_from_this());

  return ds->ValidateParams() ? ds : nullptr;
}

// Function to create a Map dataset.
std::shared_ptr<MapNode> Dataset::Map(std::vector<std::shared_ptr<TensorOperation>> operations,
                                      std::vector<std::string> input_columns, std::vector<std::string> output_columns,
                                      const std::vector<std::string> &project_columns,
                                      const std::shared_ptr<DatasetCache> &cache) {
  auto ds =
    std::make_shared<MapNode>(shared_from_this(), operations, input_columns, output_columns, project_columns, cache);

  if (!ds->ValidateParams()) {
    return nullptr;
  }

  return ds;
}

// Function to create a ProjectNode.
std::shared_ptr<ProjectNode> Dataset::Project(const std::vector<std::string> &columns) {
  auto ds = std::make_shared<ProjectNode>(shared_from_this(), columns);
  // Call derived class validation method.
  if (!ds->ValidateParams()) {
    return nullptr;
  }

  return ds;
}

// Function to create a RenameNode.
std::shared_ptr<RenameNode> Dataset::Rename(const std::vector<std::string> &input_columns,
                                            const std::vector<std::string> &output_columns) {
  auto ds = std::make_shared<RenameNode>(shared_from_this(), input_columns, output_columns);
  // Call derived class validation method.
  if (!ds->ValidateParams()) {
    return nullptr;
  }

  return ds;
}

// Function to create Repeat dataset.
std::shared_ptr<Dataset> Dataset::Repeat(int32_t count) {
  // Workaround for repeat == 1, do not inject repeat.
  if (count == 1) {
    return shared_from_this();
  }

  auto ds = std::make_shared<RepeatNode>(shared_from_this(), count);

  if (!ds->ValidateParams()) {
    return nullptr;
  }

  return ds;
}

// Function to create a ShuffleOp
std::shared_ptr<ShuffleNode> Dataset::Shuffle(int32_t buffer_size) {
  // Pass in reshuffle_each_epoch with true
  auto ds = std::make_shared<ShuffleNode>(shared_from_this(), buffer_size, true);

  if (!ds->ValidateParams()) {
    return nullptr;
  }

  return ds;
}

// Function to create a SkipNode.
std::shared_ptr<SkipNode> Dataset::Skip(int32_t count) {
  auto ds = std::make_shared<SkipNode>(shared_from_this(), count);
  // Call derived class validation method.
  if (!ds->ValidateParams()) {
    return nullptr;
  }

  return ds;
}

// Function to create a TakeNode.
std::shared_ptr<Dataset> Dataset::Take(int32_t count) {
  // If count is greater than the number of elements in the dataset or equal to -1,
  // all the elements in the dataset will be taken
  if (count == -1) {
    return shared_from_this();
  }

  auto ds = std::make_shared<TakeNode>(shared_from_this(), count);
  // Call derived class validation method.
  if (!ds->ValidateParams()) {
    return nullptr;
  }

  return ds;
}

// Function to create a Zip dataset
std::shared_ptr<ZipNode> Dataset::Zip(const std::vector<std::shared_ptr<Dataset>> &datasets) {
  // Default values
  auto ds = std::make_shared<ZipNode>(datasets);
  ds->children.push_back(shared_from_this());

  return ds->ValidateParams() ? ds : nullptr;
}

Status Dataset::AddCacheOp(std::vector<std::shared_ptr<DatasetOp>> *node_ops) {
  if (cache_ != nullptr) {
    std::shared_ptr<DatasetOp> cache_op;
    RETURN_IF_NOT_OK(cache_->CreateCacheOp(num_workers_, &cache_op));
    node_ops->push_back(cache_op);
  }
  return Status::OK();
}

int64_t Dataset::GetBatchSize() {
  int64_t batch_size;
  auto ds = shared_from_this();
  Status rc;
  std::unique_ptr<RuntimeContext> runtime_context = std::make_unique<RuntimeContext>();
  rc = runtime_context->Init();
  if (rc.IsError()) {
    MS_LOG(ERROR) << "GetBatchSize: Initializing RuntimeContext failed.";
    return -1;
  }
  rc = tree_getters_->Init(ds);
  if (rc.IsError()) {
    MS_LOG(ERROR) << "GetBatchSize: Initializing TreeGetters failed.";
    return -1;
  }
  rc = tree_getters_->GetBatchSize(&batch_size);
  return rc.IsError() ? -1 : batch_size;
}

int64_t Dataset::GetRepeatCount() {
  int64_t repeat_count;
  auto ds = shared_from_this();
  Status rc;
  std::unique_ptr<RuntimeContext> runtime_context = std::make_unique<RuntimeContext>();
  rc = runtime_context->Init();
  if (rc.IsError()) {
    MS_LOG(ERROR) << "GetRepeatCount: Initializing RuntimeContext failed.";
    return -1;
  }
  rc = tree_getters_->Init(ds);
  if (rc.IsError()) {
    MS_LOG(ERROR) << "GetRepeatCount: Initializing TreeGetters failed.";
    return -1;
  }
  rc = tree_getters_->GetRepeatCount(&repeat_count);
  return rc.IsError() ? 0 : repeat_count;
}

SchemaObj::SchemaObj(const std::string &schema_file) : schema_file_(schema_file), num_rows_(0), dataset_type_("") {}

// SchemaObj init function
bool SchemaObj::init() {
  if (schema_file_ != "") {
    Path schema_file(schema_file_);
    if (!schema_file.Exists()) {
      MS_LOG(ERROR) << "The file " << schema_file << " does not exist or permission denied!";
      return false;
    }

    nlohmann::json js;
    try {
      std::ifstream in(schema_file_);
      in >> js;
      if (js.find("columns") == js.end()) {
        MS_LOG(ERROR) << "\"columns\" node is required in the schema json file.";
        return false;
      }
    } catch (const std::exception &err) {
      MS_LOG(ERROR) << "Schema file failed to load";
      return false;
    }
    return from_json(js);
  }
  return true;
}

// Function to add a column to schema with a mstype de_type
bool SchemaObj::add_column(std::string name, TypeId de_type, std::vector<int32_t> shape) {
  nlohmann::json new_column;
  new_column["name"] = name;
  // if de_type is mstype
  DataType data_type = dataset::MSTypeToDEType(de_type);
  new_column["type"] = data_type.ToString();
  if (shape.size() > 0) {
    new_column["shape"] = shape;
    new_column["rank"] = shape.size();
  } else {
    new_column["rank"] = 1;
  }
  columns_.push_back(new_column);
  return true;
}

// Function to add a column to schema with a string de_type
bool SchemaObj::add_column(std::string name, std::string de_type, std::vector<int32_t> shape) {
  nlohmann::json new_column;
  new_column["name"] = name;
  DataType data_type(de_type);
  new_column["type"] = data_type.ToString();
  if (shape.size() > 0) {
    new_column["shape"] = shape;
    new_column["rank"] = shape.size();
  } else {
    new_column["rank"] = 1;
  }
  columns_.push_back(new_column);
  return true;
}

std::string SchemaObj::to_json() {
  nlohmann::json json_file;
  json_file["columns"] = columns_;
  if (dataset_type_ != "") {
    json_file["datasetType"] = dataset_type_;
  }

  if (num_rows_ > 0) {
    json_file["numRows"] = num_rows_;
  }

  return json_file.dump(2);
}

bool SchemaObj::parse_column(nlohmann::json columns) {
  std::string name, de_type;
  std::vector<int32_t> shape;

  columns_.clear();
  if (columns.type() == nlohmann::json::value_t::array) {
    // reference to python list
    for (auto column : columns) {
      auto key_name = column.find("name");
      if (key_name == column.end()) {
        MS_LOG(ERROR) << "Column's name is missing";
        return false;
      }
      name = *key_name;

      auto key_type = column.find("type");
      if (key_type == column.end()) {
        MS_LOG(ERROR) << "Column's type is missing";
        return false;
      }
      de_type = *key_type;

      shape.clear();
      auto key_shape = column.find("shape");
      if (key_shape != column.end()) {
        shape.insert(shape.end(), (*key_shape).begin(), (*key_shape).end());
      }
      if (!add_column(name, de_type, shape)) {
        return false;
      }
    }
  } else if (columns.type() == nlohmann::json::value_t::object) {
    for (const auto &it_child : columns.items()) {
      name = it_child.key();
      auto key_type = it_child.value().find("type");
      if (key_type == it_child.value().end()) {
        MS_LOG(ERROR) << "Column's type is missing";
        return false;
      }
      de_type = *key_type;

      shape.clear();
      auto key_shape = it_child.value().find("shape");
      if (key_shape != it_child.value().end()) {
        shape.insert(shape.end(), (*key_shape).begin(), (*key_shape).end());
      }

      if (!add_column(name, de_type, shape)) {
        return false;
      }
    }
  } else {
    MS_LOG(ERROR) << "columns must be dict or list, columns contain name, type, shape(optional).";
    return false;
  }
  return true;
}

bool SchemaObj::from_json(nlohmann::json json_obj) {
  for (const auto &it_child : json_obj.items()) {
    if (it_child.key() == "datasetType") {
      dataset_type_ = it_child.value();
    } else if (it_child.key() == "numRows") {
      num_rows_ = it_child.value();
    } else if (it_child.key() == "columns") {
      if (!parse_column(it_child.value())) {
        MS_LOG(ERROR) << "parse columns failed";
        return false;
      }
    } else {
      MS_LOG(ERROR) << "Unknown field " << it_child.key();
      return false;
    }
  }
  if (columns_.empty()) {
    MS_LOG(ERROR) << "Columns are missing.";
    return false;
  }
  if (num_rows_ <= 0) {
    MS_LOG(ERROR) << "numRows must be greater than 0";
    return false;
  }

  return true;
}

// OTHER FUNCTIONS

// Helper function to compute a default shuffle size
Status ComputeShuffleSize(int64_t num_files, int64_t num_devices, int64_t num_rows, int64_t total_rows,
                          int64_t *shuffle_size) {
  const int64_t average_files_multiplier = 4;
  const int64_t shuffle_max = 10000;
  int64_t avg_rows_per_file = 0;

  // Adjust the num rows per shard if sharding was given
  if (num_devices > 0) {
    if (num_rows % num_devices == 0) {
      num_rows = num_rows / num_devices;
    } else {
      num_rows = (num_rows / num_devices) + 1;
    }
  }

  // Cap based on total rows directive. Some ops do not have this and give value of 0.
  if (total_rows > 0) {
    num_rows = std::min(num_rows, total_rows);
  }

  // get the average per file
  CHECK_FAIL_RETURN_UNEXPECTED(num_files != 0, "The size of dataset_files must greater than 0.");
  avg_rows_per_file = num_rows / num_files;

  *shuffle_size = std::max(avg_rows_per_file * average_files_multiplier, shuffle_max);
  return Status::OK();
}

// Helper function to inject a shuffle operator over top of current operator being built
Status AddShuffleOp(int64_t num_files, int64_t num_devices, int64_t num_rows, int64_t total_rows,
                    int32_t connector_que_size, int32_t rows_per_buffer, std::shared_ptr<DatasetOp> *shuffle_op) {
  std::shared_ptr<ShuffleOp> new_shuffle_op = nullptr;
  int64_t shuffle_size = 0;
  RETURN_EMPTY_IF_ERROR(ComputeShuffleSize(num_files, num_devices, num_rows, total_rows, &shuffle_size));
  MS_LOG(INFO) << "Dataset::AddShuffleOp - num_rows: " << num_rows << ", shuffle_size: " << shuffle_size;
  // Add the shuffle op
  *shuffle_op = std::make_shared<ShuffleOp>(shuffle_size, GetSeed(), connector_que_size, true, rows_per_buffer);
  return Status::OK();
}

// Helper function to validate dataset directory parameter
Status ValidateDatasetDirParam(const std::string &dataset_name, std::string dataset_dir) {
  if (dataset_dir.empty()) {
    std::string err_msg = dataset_name + ": dataset_dir is not specified.";
    MS_LOG(ERROR) << err_msg;
    RETURN_STATUS_SYNTAX_ERROR(err_msg);
  }

  Path dir(dataset_dir);
  if (!dir.IsDirectory()) {
    std::string err_msg = dataset_name + ": dataset_dir: [" + dataset_dir + "] is an invalid directory path.";
    MS_LOG(ERROR) << err_msg;
    RETURN_STATUS_SYNTAX_ERROR(err_msg);
  }

  if (access(dataset_dir.c_str(), R_OK) == -1) {
    std::string err_msg = dataset_name + ": No access to specified dataset path: " + dataset_dir;
    MS_LOG(ERROR) << err_msg;
    RETURN_STATUS_SYNTAX_ERROR(err_msg);
  }

  return Status::OK();
}

// Helper function to validate dataset files parameter
Status ValidateDatasetFilesParam(const std::string &dataset_name, const std::vector<std::string> &dataset_files) {
  if (dataset_files.empty()) {
    std::string err_msg = dataset_name + ": dataset_files is not specified.";
    MS_LOG(ERROR) << err_msg;
    RETURN_STATUS_SYNTAX_ERROR(err_msg);
  }

  for (auto f : dataset_files) {
    Path dataset_file(f);
    if (!dataset_file.Exists()) {
      std::string err_msg = dataset_name + ": dataset file: [" + f + "] is invalid or does not exist.";
      MS_LOG(ERROR) << err_msg;
      RETURN_STATUS_SYNTAX_ERROR(err_msg);
    }
    if (access(dataset_file.toString().c_str(), R_OK) == -1) {
      std::string err_msg = dataset_name + ": No access to specified dataset file: " + f;
      MS_LOG(ERROR) << err_msg;
      RETURN_STATUS_SYNTAX_ERROR(err_msg);
    }
  }

  return Status::OK();
}

// Helper function to validate dataset num_shards and shard_id parameters
Status ValidateDatasetShardParams(const std::string &dataset_name, int32_t num_shards, int32_t shard_id) {
  if (num_shards <= 0) {
    std::string err_msg = dataset_name + ": Invalid num_shards: " + std::to_string(num_shards);
    MS_LOG(ERROR) << err_msg;
    RETURN_STATUS_SYNTAX_ERROR(err_msg);
  }

  if (shard_id < 0 || shard_id >= num_shards) {
    std::string err_msg = dataset_name + ": Invalid input, shard_id: " + std::to_string(shard_id) +
                          ", num_shards: " + std::to_string(num_shards);
    MS_LOG(ERROR) << err_msg;
    RETURN_STATUS_SYNTAX_ERROR(err_msg);
  }

  return Status::OK();
}

// Helper function to validate dataset sampler parameter
Status ValidateDatasetSampler(const std::string &dataset_name, const std::shared_ptr<SamplerObj> &sampler) {
  if (sampler == nullptr) {
    std::string err_msg = dataset_name + ": Sampler is not constructed correctly, sampler: nullptr";
    MS_LOG(ERROR) << err_msg;
    RETURN_STATUS_SYNTAX_ERROR(err_msg);
  }
  return Status::OK();
}

Status ValidateStringValue(const std::string &dataset_name, const std::string &str,
                           const std::unordered_set<std::string> &valid_strings) {
  if (valid_strings.find(str) == valid_strings.end()) {
    std::string mode;
    mode = std::accumulate(valid_strings.begin(), valid_strings.end(), mode,
                           [](std::string a, std::string b) { return std::move(a) + " " + std::move(b); });
    std::string err_msg = dataset_name + ": " + str + " does not match any mode in [" + mode + " ]";
    MS_LOG(ERROR) << err_msg;
    RETURN_STATUS_SYNTAX_ERROR(err_msg);
  }
  return Status::OK();
}

// Helper function to validate dataset input/output column parameter
Status ValidateDatasetColumnParam(const std::string &dataset_name, const std::string &column_param,
                                  const std::vector<std::string> &columns) {
  if (columns.empty()) {
    std::string err_msg = dataset_name + ":" + column_param + " should not be empty string";
    MS_LOG(ERROR) << err_msg;
    RETURN_STATUS_SYNTAX_ERROR(err_msg);
  }
  for (uint32_t i = 0; i < columns.size(); ++i) {
    if (columns[i].empty()) {
      std::string err_msg = dataset_name + ":" + column_param + "[" + std::to_string(i) + "] must not be empty";
      MS_LOG(ERROR) << err_msg;
      RETURN_STATUS_SYNTAX_ERROR(err_msg);
    }
  }
  std::set<std::string> columns_set(columns.begin(), columns.end());
  if (columns_set.size() != columns.size()) {
    std::string err_msg = dataset_name + ":" + column_param + ": Every column name should not be same with others";
    MS_LOG(ERROR) << err_msg;
    RETURN_STATUS_SYNTAX_ERROR(err_msg);
  }
  return Status::OK();
}

#ifndef ENABLE_ANDROID
std::shared_ptr<DatasetCache> CreateDatasetCache(session_id_type id, uint64_t mem_sz, bool spill,
                                                 std::optional<std::string> hostname, std::optional<int32_t> port,
                                                 std::optional<int32_t> num_connections,
                                                 std::optional<int32_t> prefetch_sz) {
  auto cache = std::make_shared<DatasetCacheImpl>(id, mem_sz, spill, hostname, port, num_connections, prefetch_sz);
  return cache->ValidateParams() ? cache : nullptr;
}
#endif

}  // namespace api
}  // namespace dataset
}  // namespace mindspore
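
For orientation, here is a minimal usage sketch of the C++ API defined above: a leaf factory (Mnist), two pipeline ops (Shuffle, Batch), and CreateIterator(), which builds and launches the execution tree. It is illustrative only and not part of datasets.cc; it assumes the MindSpore 1.x MindData headers are on the include path, that /path/to/mnist is a placeholder MNIST directory, and that the RandomSampler() factory and the "train" usage string are provided as declared in the included samplers.h and the MnistNode header.

// Minimal usage sketch (illustrative; paths and defaults are assumptions).
#include <memory>
#include <string>
#include <unordered_map>
#include "minddata/dataset/include/datasets.h"
#include "minddata/dataset/include/samplers.h"

using namespace mindspore::dataset::api;

int main() {
  // Each factory validates its parameters and returns nullptr on error (see ValidateParams() calls above).
  std::shared_ptr<Dataset> ds = Mnist("/path/to/mnist", "train", RandomSampler(), nullptr);  // assumed local path
  if (ds == nullptr) return 1;
  ds = ds->Shuffle(1000);
  if (ds == nullptr) return 1;
  ds = ds->Batch(32, /*drop_remainder=*/true);
  if (ds == nullptr) return 1;

  // CreateIterator() builds and launches the execution tree (see Dataset::CreateIterator above).
  std::shared_ptr<Iterator> iter = ds->CreateIterator();
  if (iter == nullptr) return 1;

  // Rows map column names (e.g. "image", "label") to tensors; an empty row marks the end of the data.
  std::unordered_map<std::string, std::shared_ptr<mindspore::dataset::Tensor>> row;
  iter->GetNextRow(&row);
  while (!row.empty()) {
    iter->GetNextRow(&row);
  }
  return 0;
}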