
cache_op_test.cc

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <string>
#include "minddata/dataset/core/client.h"
#include "minddata/dataset/engine/cache/cache_client.h"
#include "minddata/dataset/engine/execution_tree.h"
#include "minddata/dataset/engine/datasetops/cache_op.h"
#include "minddata/dataset/engine/datasetops/cache_lookup_op.h"
#include "minddata/dataset/engine/datasetops/cache_merge_op.h"
#include "minddata/dataset/engine/datasetops/source/image_folder_op.h"
#include "common/common.h"
#include "gtest/gtest.h"
#include "utils/log_adapter.h"
#include "minddata/dataset/util/storage_container.h"  // lint !e322
#include "minddata/dataset/engine/datasetops/source/random_data_op.h"
#include "minddata/dataset/engine/data_schema.h"

using namespace mindspore::dataset;
using mindspore::LogStream;
using mindspore::dataset::CacheClient;
using mindspore::dataset::TaskGroup;
using mindspore::ExceptionType::NoExceptionType;
using mindspore::MsLogLevel::INFO;

class MindDataTestCacheOp : public UT::DatasetOpTesting {
 public:
  void SetUp() override {
    DatasetOpTesting::SetUp();
    GlobalInit();
  }
};
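
// Direct exercise of the CacheClient API (no execution tree): create a cache, cache a schema,
// write one row, end the build phase, then read the row and schema back and verify they match
// before purging and destroying the cache.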
TEST_F(MindDataTestCacheOp, TestCacheServer) {
  Status rc;
  CacheClient myClient(1, 0, true);  // use arbitrary session of 1, size of 0, spilling is true
  // cksum value of 1 for CreateCache here...normally you do not directly create a cache and the cksum arg is
  // generated.
  rc = myClient.CreateCache(1, true);
  EXPECT_TRUE(rc.IsOk());
  std::cout << myClient << std::endl;

  // Create a schema using the C api's
  int32_t rank = 0;  // not used
  std::unique_ptr<DataSchema> testSchema = std::make_unique<DataSchema>();

  // 2 columns. First column is an "image" 640,480,3
  TensorShape c1Shape({640, 480, 3});
  ColDescriptor c1("image", DataType(DataType::DE_INT8), TensorImpl::kFlexible,
                   rank,  // not used
                   &c1Shape);

  // Column 2 will just be a scalar label number
  TensorShape c2Shape({});  // empty shape is a 1-value scalar Tensor
  ColDescriptor c2("label", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, rank, &c2Shape);
  testSchema->AddColumn(c1);
  testSchema->AddColumn(c2);
  std::unordered_map<std::string, int32_t> map;
  rc = testSchema->GetColumnNameMap(&map);
  EXPECT_TRUE(rc.IsOk());

  // Test the CacheSchema api
  rc = myClient.CacheSchema(map);
  EXPECT_TRUE(rc.IsOk());

  // Create a tensor, take a snapshot and restore it back, and compare.
  std::shared_ptr<Tensor> t;
  Tensor::CreateEmpty(TensorShape({2, 3}), DataType(DataType::DE_UINT64), &t);
  t->SetItemAt<uint64_t>({0, 0}, 1);
  t->SetItemAt<uint64_t>({0, 1}, 2);
  t->SetItemAt<uint64_t>({0, 2}, 3);
  t->SetItemAt<uint64_t>({1, 0}, 4);
  t->SetItemAt<uint64_t>({1, 1}, 5);
  t->SetItemAt<uint64_t>({1, 2}, 6);
  std::cout << *t << std::endl;
  TensorTable tbl;
  TensorRow row;
  row.push_back(t);
  int64_t row_id;
  rc = myClient.WriteRow(row, &row_id);
  EXPECT_TRUE(rc.IsOk());

  // Switch off build phase.
  rc = myClient.BuildPhaseDone();
  EXPECT_TRUE(rc.IsOk());

  // Now restore from cache.
  row.clear();
  rc = myClient.GetRows({row_id}, &tbl);
  row = tbl.front();
  EXPECT_TRUE(rc.IsOk());
  auto r = row.front();
  std::cout << *r << std::endl;

  // Compare
  bool cmp = (*t == *r);
  EXPECT_TRUE(cmp);

  // Get back the schema and verify
  std::unordered_map<std::string, int32_t> map_out;
  rc = myClient.FetchSchema(&map_out);
  EXPECT_TRUE(rc.IsOk());
  cmp = (map_out == map);
  EXPECT_TRUE(cmp);

  // Test Purge and Destroy
  rc = myClient.PurgeCache();
  EXPECT_TRUE(rc.IsOk());
  rc = myClient.DestroyCache();
  EXPECT_TRUE(rc.IsOk());
}
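
// Concurrency exercise of the CacheClient: 10 async tasks each write the same row 500 times
// (5000 rows total), then every row is fetched back by row id and compared against the original tensor.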
TEST_F(MindDataTestCacheOp, TestConcurrencyRequest) {
  // Clear the rc of the master thread if any
  (void)TaskManager::GetMasterThreadRc();
  TaskGroup vg;
  Status rc;
  CacheClient myClient(1, 1, true);  // use arbitrary session of 1, size 1, spilling is true
  // cksum value of 1 for CreateCache here...normally you do not directly create a cache and the cksum arg is
  // generated.
  rc = myClient.CreateCache(1, true);
  EXPECT_TRUE(rc.IsOk());
  std::cout << myClient << std::endl;
  std::shared_ptr<Tensor> t;
  Tensor::CreateEmpty(TensorShape({2, 3}), DataType(DataType::DE_UINT64), &t);
  t->SetItemAt<uint64_t>({0, 0}, 1);
  t->SetItemAt<uint64_t>({0, 1}, 2);
  t->SetItemAt<uint64_t>({0, 2}, 3);
  t->SetItemAt<uint64_t>({1, 0}, 4);
  t->SetItemAt<uint64_t>({1, 1}, 5);
  t->SetItemAt<uint64_t>({1, 2}, 6);
  TensorTable tbl;
  TensorRow row;
  row.push_back(t);

  // Cache tensor row t 5000 times using 10 threads.
  for (auto k = 0; k < 10; ++k) {
    Status vg_rc = vg.CreateAsyncTask("Test agent", [&myClient, &row]() -> Status {
      TaskManager::FindMe()->Post();
      for (auto i = 0; i < 500; i++) {
        RETURN_IF_NOT_OK(myClient.WriteRow(row));
      }
      return Status::OK();
    });
    EXPECT_TRUE(vg_rc.IsOk());
  }
  ASSERT_TRUE(vg.join_all().IsOk());
  ASSERT_TRUE(vg.GetTaskErrorIfAny().IsOk());
  rc = myClient.BuildPhaseDone();
  ASSERT_TRUE(rc.IsOk());

  // Get statistics from the server.
  CacheClient::ServiceStat stat{};
  rc = myClient.GetStat(&stat);
  ASSERT_TRUE(rc.IsOk());
  std::cout << stat.min_row_id << ":" << stat.max_row_id << ":" << stat.num_mem_cached << ":" << stat.num_disk_cached
            << "\n";

  // Expect there are 5000 rows there.
  EXPECT_EQ(5000, stat.max_row_id - stat.min_row_id + 1);

  // Get them all back using row id and compare with tensor t.
  for (auto i = stat.min_row_id; i <= stat.max_row_id; ++i) {
    tbl.clear();
    row.clear();
    rc = myClient.GetRows({i}, &tbl);
    EXPECT_TRUE(rc.IsOk());
    row = tbl.front();
    auto r = row.front();
    bool cmp = (*t == *r);
    EXPECT_TRUE(cmp);
  }
  rc = myClient.DestroyCache();
  EXPECT_TRUE(rc.IsOk());
}
// Simple test with a repeated cache op over random data producer
//
//    RepeatOp
//        |
//     CacheOp
//        |
//  RandomDataOp
//
TEST_F(MindDataTestCacheOp, TestRandomDataCache1) {
  Status rc;
  int32_t rank = 0;  // not used
  MS_LOG(INFO) << "UT test TestRandomDataCache1";

  // Start with an empty execution tree
  auto myTree = std::make_shared<ExecutionTree>();

  // Create a schema using the C api's
  std::unique_ptr<DataSchema> testSchema = std::make_unique<DataSchema>();

  // 2 columns. First column is an "image" 640,480,3
  TensorShape c1Shape({640, 480, 3});
  ColDescriptor c1("image", DataType(DataType::DE_INT8), TensorImpl::kFlexible,
                   rank,  // not used
                   &c1Shape);

  // Column 2 will just be a scalar label number
  TensorShape c2Shape({});  // empty shape is a 1-value scalar Tensor
  ColDescriptor c2("label", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, rank, &c2Shape);
  testSchema->AddColumn(c1);
  testSchema->AddColumn(c2);

  // RandomDataOp
  std::shared_ptr<RandomDataOp> myRandomDataOp;
  rc = RandomDataOp::Builder()
         .SetRowsPerBuffer(4)
         .SetNumWorkers(4)
         .SetDataSchema(std::move(testSchema))
         .SetTotalRows(50)  // 50 samples for now
         .Build(&myRandomDataOp);
  EXPECT_TRUE(rc.IsOk());
  rc = myTree->AssociateNode(myRandomDataOp);
  EXPECT_TRUE(rc.IsOk());

  // CacheOp
  // size of 0, spilling is true
  std::shared_ptr<CacheClient> myClient = std::make_shared<CacheClient>(1, 0, true);
  std::shared_ptr<CacheOp> myCacheOp;
  int64_t num_samples = 0;
  int64_t start_index = 0;
  auto seq_sampler = std::make_shared<SequentialSampler>(num_samples, start_index);
  rc = CacheOp::Builder()
         .SetNumWorkers(5)
         .SetClient(myClient)
         .SetRowsPerBuffer(4)
         .SetSampler(std::move(seq_sampler))
         .Build(&myCacheOp);
  EXPECT_TRUE(rc.IsOk());
  rc = myTree->AssociateNode(myCacheOp);
  EXPECT_TRUE(rc.IsOk());

  // RepeatOp
  uint32_t numRepeats = 4;
  std::shared_ptr<RepeatOp> myRepeatOp;
  rc = RepeatOp::Builder(numRepeats).Build(&myRepeatOp);
  EXPECT_TRUE(rc.IsOk());
  rc = myTree->AssociateNode(myRepeatOp);
  EXPECT_TRUE(rc.IsOk());

  // Assign tree relations and root
  rc = myRepeatOp->AddChild(myCacheOp);
  EXPECT_TRUE(rc.IsOk());
  rc = myCacheOp->AddChild(myRandomDataOp);
  EXPECT_TRUE(rc.IsOk());
  rc = myTree->AssignRoot(myRepeatOp);
  EXPECT_TRUE(rc.IsOk());

  MS_LOG(INFO) << "Launching tree and begin iteration";
  rc = myTree->Prepare();
  EXPECT_TRUE(rc.IsOk());

  // quick check to see what tree looks like
  std::ostringstream ss;
  ss << *myTree;  // some funny const error if I try to write directly to ms log stream
  MS_LOG(INFO) << "Here's the tree:\n" << ss.str();
  std::cout << *myClient << std::endl;

  rc = myTree->Launch();
  EXPECT_TRUE(rc.IsOk());

  // Start the loop of reading tensors from our pipeline
  DatasetIterator dI(myTree);
  TensorRow tensorList;
  rc = dI.FetchNextTensorRow(&tensorList);
  EXPECT_TRUE(rc.IsOk());
  int rowCount = 0;
  while (!tensorList.empty()) {
    // Don't display these rows, just count them
    MS_LOG(INFO) << "Row fetched #: " << rowCount;
    rc = dI.FetchNextTensorRow(&tensorList);
    EXPECT_TRUE(rc.IsOk());
    rowCount++;
  }
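  // 50 rows from the RandomDataOp x 4 repeats -> 200 rows expected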
  ASSERT_EQ(rowCount, 200);
  rc = myClient->DestroyCache();
  EXPECT_TRUE(rc.IsOk());
}
//// Simple test with a repeated cache op over random data producer.
//// This one will exceed memory and require a spill.
////
////    RepeatOp
////        |
////     CacheOp
////        |
////  RandomDataOp
////
TEST_F(MindDataTestCacheOp, TestRandomDataCacheSpill) {
  Status rc;
  int32_t rank = 0;  // not used
  MS_LOG(INFO) << "UT test TestRandomDataCacheSpill";

  // Start with an empty execution tree
  auto myTree = std::make_shared<ExecutionTree>();

  // Create a schema using the C api's
  std::unique_ptr<DataSchema> testSchema = std::make_unique<DataSchema>();

  // 2 columns. First column is an "image" 640,480,3
  TensorShape c1Shape({640, 480, 3});
  ColDescriptor c1("image", DataType(DataType::DE_INT8), TensorImpl::kFlexible,
                   rank,  // not used
                   &c1Shape);

  // Column 2 will just be a scalar label number
  TensorShape c2Shape({});  // empty shape is a 1-value scalar Tensor
  ColDescriptor c2("label", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, rank, &c2Shape);
  testSchema->AddColumn(c1);
  testSchema->AddColumn(c2);

  // RandomDataOp
  std::shared_ptr<RandomDataOp> myRandomDataOp;
  rc = RandomDataOp::Builder()
         .SetRowsPerBuffer(2)
         .SetNumWorkers(4)
         .SetDataSchema(std::move(testSchema))
         .SetTotalRows(10)
         .Build(&myRandomDataOp);
  EXPECT_TRUE(rc.IsOk());
  rc = myTree->AssociateNode(myRandomDataOp);
  EXPECT_TRUE(rc.IsOk());

  // CacheOp
  int64_t num_samples = 0;
  int64_t start_index = 0;
  auto seq_sampler = std::make_shared<SequentialSampler>(num_samples, start_index);
  std::shared_ptr<CacheClient> myClient = std::make_shared<CacheClient>(1, 4, true);
  std::shared_ptr<CacheOp> myCacheOp;
  rc = CacheOp::Builder()
         .SetNumWorkers(4)
         .SetClient(myClient)
         .SetRowsPerBuffer(3)
         .SetSampler(std::move(seq_sampler))
         .Build(&myCacheOp);
  EXPECT_TRUE(rc.IsOk());
  rc = myTree->AssociateNode(myCacheOp);
  EXPECT_TRUE(rc.IsOk());

  // RepeatOp
  uint32_t numRepeats = 4;
  std::shared_ptr<RepeatOp> myRepeatOp;
  rc = RepeatOp::Builder(numRepeats).Build(&myRepeatOp);
  EXPECT_TRUE(rc.IsOk());
  rc = myTree->AssociateNode(myRepeatOp);
  EXPECT_TRUE(rc.IsOk());

  // Assign tree relations and root
  rc = myRepeatOp->AddChild(myCacheOp);
  EXPECT_TRUE(rc.IsOk());
  rc = myCacheOp->AddChild(myRandomDataOp);
  EXPECT_TRUE(rc.IsOk());
  rc = myTree->AssignRoot(myRepeatOp);
  EXPECT_TRUE(rc.IsOk());

  MS_LOG(INFO) << "Launching tree and begin iteration";
  rc = myTree->Prepare();
  EXPECT_TRUE(rc.IsOk());
  std::cout << *myClient << std::endl;
  rc = myTree->Launch();
  EXPECT_TRUE(rc.IsOk());

  // Start the loop of reading tensors from our pipeline
  DatasetIterator dI(myTree);
  TensorRow tensorList;
  rc = dI.FetchNextTensorRow(&tensorList);
  EXPECT_TRUE(rc.IsOk());
  int rowCount = 0;
  while (!tensorList.empty()) {
    // Don't display these rows, just count them
    MS_LOG(INFO) << "Row fetched #: " << rowCount;
    rc = dI.FetchNextTensorRow(&tensorList);
    EXPECT_TRUE(rc.IsOk());
    rowCount++;
  }
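  // 10 rows from the RandomDataOp x 4 repeats -> 40 rows expected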
  ASSERT_EQ(rowCount, 40);
  rc = myClient->DestroyCache();
  EXPECT_TRUE(rc.IsOk());
}
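
// Cache over a mappable leaf (ImageFolderOp). The CacheOp below only marks where the cache sits
// in the tree; the tree prepare pass rewrites it into the cache lookup op / cache merge op structure.
//
//    RepeatOp
//        |
//     CacheOp
//        |
//  ImageFolderOp
//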
TEST_F(MindDataTestCacheOp, TestImageFolderCacheMerge) {
  Status rc;
  int64_t num_samples = 0;
  int64_t start_index = 0;
  auto seq_sampler = std::make_shared<SequentialSampler>(num_samples, start_index);
  std::shared_ptr<CacheClient> myClient = std::make_shared<CacheClient>(1, 0, true);
  // In a mappable dataset, the cache uses a complex interaction of a cache lookup op and a cache merge op.
  // Rather than manually build this, the way to do it is to choose the position of the cache in the tree by
  // adding a CacheOp. Then, the tree prepare code will drive a transform that will remove the CacheOp and
  // replace it with the required tree structures for cache lookup op and cache merge op.
  std::shared_ptr<CacheOp> myCacheOp;
  rc = CacheOp::Builder().SetNumWorkers(4).SetClient(myClient).SetRowsPerBuffer(3).Build(&myCacheOp);

  std::shared_ptr<ImageFolderOp> so;
  ImageFolderOp::Builder builder;
  builder.SetSampler(std::move(seq_sampler))
    .SetOpConnectorSize(3)
    .SetNumWorkers(3)
    .SetRowsPerBuffer(2)
    .SetExtensions({".jpg", ".JPEG"})
    .SetRecursive(true)
    .SetImageFolderDir(datasets_root_path_ + "/testPK/data");
  rc = builder.Build(&so);
  EXPECT_TRUE(rc.IsOk());

  // RepeatOp
  uint32_t numRepeats = 4;
  std::shared_ptr<RepeatOp> myRepeatOp;
  rc = RepeatOp::Builder(numRepeats).Build(&myRepeatOp);
  EXPECT_TRUE(rc.IsOk());

  auto myTree = std::make_shared<ExecutionTree>();
  rc = myTree->AssociateNode(so);
  EXPECT_TRUE(rc.IsOk());
  rc = myTree->AssociateNode(myCacheOp);
  EXPECT_TRUE(rc.IsOk());
  rc = myTree->AssociateNode(myRepeatOp);
  EXPECT_TRUE(rc.IsOk());
  rc = myTree->AssignRoot(myRepeatOp);
  EXPECT_TRUE(rc.IsOk());
  rc = myRepeatOp->AddChild(myCacheOp);
  EXPECT_TRUE(rc.IsOk());
  rc = myCacheOp->AddChild(so);
  EXPECT_TRUE(rc.IsOk());

  rc = myTree->Prepare();
  EXPECT_TRUE(rc.IsOk());
  rc = myTree->Launch();
  EXPECT_TRUE(rc.IsOk());

  // Start the loop of reading tensors from our pipeline
  DatasetIterator dI(myTree);
  TensorRow tensorList;
  rc = dI.FetchNextTensorRow(&tensorList);
  EXPECT_TRUE(rc.IsOk());
  int rowCount = 0;
  while (!tensorList.empty()) {
    rc = dI.FetchNextTensorRow(&tensorList);
    EXPECT_TRUE(rc.IsOk());
    if (rc.IsError()) {
      std::cout << rc << std::endl;
      break;
    }
    rowCount++;
  }
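  // 4 passes over the image folder; the expected 176 rows correspond to 44 images x 4 repeats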
  ASSERT_EQ(rowCount, 176);
  std::cout << "Row count : " << rowCount << std::endl;
  rc = myClient->DestroyCache();
  EXPECT_TRUE(rc.IsOk());
}
//// Simple test with a repeated cache op over random data producer.
//// The difference in this one is that you do not add the sampler to the cache op directly.
//// Instead, the sampler is added as part of the leaf op construction. Then, the prepare
//// phase will pull this up from the leaf and into the cache.
//// It removes the sampler from the leaf op, where it doesn't make sense anyway, since the
//// RandomDataOp doesn't support sampling without a cache.
////
////    RepeatOp
////        |
////     CacheOp
////        |
////  RandomDataOp
////
TEST_F(MindDataTestCacheOp, TestCacheInheritSampler) {
  Status rc;
  int32_t rank = 0;  // not used
  MS_LOG(INFO) << "UT test TestCacheInheritSampler";

  int64_t num_samples = 0;
  int64_t start_index = 0;
  auto seq_sampler = std::make_shared<SequentialSampler>(num_samples, start_index);

  // Start with an empty execution tree
  auto myTree = std::make_shared<ExecutionTree>();

  // Create a schema using the C api's
  std::unique_ptr<DataSchema> testSchema = std::make_unique<DataSchema>();

  // 2 columns. First column is an "image" 640,480,3
  TensorShape c1Shape({640, 480, 3});
  ColDescriptor c1("image", DataType(DataType::DE_INT8), TensorImpl::kFlexible,
                   rank,  // not used
                   &c1Shape);

  // Column 2 will just be a scalar label number
  TensorShape c2Shape({});  // empty shape is a 1-value scalar Tensor
  ColDescriptor c2("label", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, rank, &c2Shape);
  testSchema->AddColumn(c1);
  testSchema->AddColumn(c2);

  // RandomDataOp
  std::shared_ptr<RandomDataOp> myRandomDataOp;
  rc = RandomDataOp::Builder()
         .SetRowsPerBuffer(2)
         .SetNumWorkers(4)
         .SetDataSchema(std::move(testSchema))
         .SetTotalRows(10)
         .SetSampler(std::move(seq_sampler))
         .Build(&myRandomDataOp);
  EXPECT_TRUE(rc.IsOk());
  rc = myTree->AssociateNode(myRandomDataOp);
  EXPECT_TRUE(rc.IsOk());

  // CacheOp
  std::shared_ptr<CacheClient> myClient = std::make_shared<CacheClient>(1, 4, true);
  std::shared_ptr<CacheOp> myCacheOp;
  rc = CacheOp::Builder().SetNumWorkers(4).SetClient(myClient).SetRowsPerBuffer(3).Build(&myCacheOp);
  EXPECT_TRUE(rc.IsOk());
  rc = myTree->AssociateNode(myCacheOp);
  EXPECT_TRUE(rc.IsOk());

  // RepeatOp
  uint32_t numRepeats = 4;
  std::shared_ptr<RepeatOp> myRepeatOp;
  rc = RepeatOp::Builder(numRepeats).Build(&myRepeatOp);
  EXPECT_TRUE(rc.IsOk());
  rc = myTree->AssociateNode(myRepeatOp);
  EXPECT_TRUE(rc.IsOk());

  // Assign tree relations and root
  rc = myRepeatOp->AddChild(myCacheOp);
  EXPECT_TRUE(rc.IsOk());
  rc = myCacheOp->AddChild(myRandomDataOp);
  EXPECT_TRUE(rc.IsOk());
  rc = myTree->AssignRoot(myRepeatOp);
  EXPECT_TRUE(rc.IsOk());

  MS_LOG(INFO) << "Launching tree and begin iteration";
  rc = myTree->Prepare();
  EXPECT_TRUE(rc.IsOk());
  std::cout << *myClient << std::endl;
  rc = myTree->Launch();
  EXPECT_TRUE(rc.IsOk());

  // Start the loop of reading tensors from our pipeline
  DatasetIterator dI(myTree);
  TensorRow tensorList;
  rc = dI.FetchNextTensorRow(&tensorList);
  EXPECT_TRUE(rc.IsOk());
  int rowCount = 0;
  while (!tensorList.empty()) {
    // Don't display these rows, just count them
    MS_LOG(INFO) << "Row fetched #: " << rowCount;
    rc = dI.FetchNextTensorRow(&tensorList);
    EXPECT_TRUE(rc.IsOk());
    rowCount++;
  }
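  // 10 rows from the RandomDataOp x 4 repeats -> 40 rows expected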
  ASSERT_EQ(rowCount, 40);
  rc = myClient->DestroyCache();
  EXPECT_TRUE(rc.IsOk());
}