
ir_callback_test.cc

/**
 * Copyright 2020-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <memory>
#include <list>

#include "common/common.h"
#include "minddata/dataset/callback/ds_callback.h"
#include "minddata/dataset/core/client.h"
#include "minddata/dataset/engine/datasetops/epoch_ctrl_op.h"
#include "minddata/dataset/engine/datasetops/source/random_data_op.h"
#include "minddata/dataset/engine/perf/monitor.h"
#include "minddata/dataset/engine/perf/profiling.h"
#include "minddata/dataset/engine/tree_adapter.h"
#include "minddata/dataset/include/dataset/datasets.h"
#include "minddata/dataset/include/dataset/transforms.h"
#include "minddata/dataset/kernels/data/no_op.h"
#include "utils/log_adapter.h"

using namespace mindspore::dataset;
using mindspore::LogStream;
using mindspore::MsLogLevel::INFO;

namespace mindspore {
namespace dataset {
namespace test {
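// Records, for each callback hook the pipeline invokes, the hook's short name
// plus the step and epoch number it reported, so tests can assert the exact
// firing order. The public flags toggle each hook's Is*Needed() answer.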
class TestCallback : public DSCallback {
 public:
  explicit TestCallback(int32_t step_size)
      : DSCallback(step_size),
        begin_(true),
        epoch_begin_(true),
        step_begin_(true),
        end_(false),
        epoch_end_(true),
        step_end_(true) {
    all_names_.reserve(32);
    all_step_nums_.reserve(32);
    all_ep_nums_.reserve(32);
  }

  Status DSBegin(const CallbackParam &cb_param) override {
    all_names_.push_back("BGN");
    all_step_nums_.push_back(cb_param.cur_step_num_);
    all_ep_nums_.push_back(cb_param.cur_epoch_num_);
    return Status::OK();
  }
  Status DSEpochBegin(const CallbackParam &cb_param) override {
    all_names_.push_back("EPBGN");
    all_step_nums_.push_back(cb_param.cur_step_num_);
    all_ep_nums_.push_back(cb_param.cur_epoch_num_);
    return Status::OK();
  }
  Status DSNStepBegin(const CallbackParam &cb_param) override {
    all_names_.push_back("SPBGN");
    all_step_nums_.push_back(cb_param.cur_step_num_);
    all_ep_nums_.push_back(cb_param.cur_epoch_num_);
    return Status::OK();
  }
  Status DSEnd(const CallbackParam &cb_param) override {
    all_names_.push_back("END");
    all_step_nums_.push_back(cb_param.cur_step_num_);
    all_ep_nums_.push_back(cb_param.cur_epoch_num_);
    return Status::OK();
  }
  Status DSEpochEnd(const CallbackParam &cb_param) override {
    all_names_.push_back("EPEND");
    all_step_nums_.push_back(cb_param.cur_step_num_);
    all_ep_nums_.push_back(cb_param.cur_epoch_num_);
    return Status::OK();
  }
  Status DSNStepEnd(const CallbackParam &cb_param) override {
    all_names_.push_back("SPEND");
    all_step_nums_.push_back(cb_param.cur_step_num_);
    all_ep_nums_.push_back(cb_param.cur_epoch_num_);
    return Status::OK();
  }

  bool IsBeginNeeded() override { return begin_; }
  bool IsEpochBeginNeeded() override { return epoch_begin_; }
  bool IsNStepBeginNeeded() override { return step_begin_; }
  bool IsEndNeeded() override { return end_; }
  bool IsEpochEndNeeded() override { return epoch_end_; }
  bool IsNStepEndNeeded() override { return step_end_; }

  std::vector<std::string> all_names(size_t len) {
    return std::vector<std::string>(all_names_.begin(), all_names_.begin() + len);
  }
  std::vector<int64_t> all_step_nums(size_t len) {
    return std::vector<int64_t>(all_step_nums_.begin(), all_step_nums_.begin() + len);
  }
  std::vector<int64_t> all_ep_nums(size_t len) {
    return std::vector<int64_t>(all_ep_nums_.begin(), all_ep_nums_.begin() + len);
  }

  // flags for turning each callback hook on and off
  bool begin_, epoch_begin_, step_begin_, end_, epoch_end_, step_end_;
  // names of the callback hooks in recording order: BGN, EPBGN, SPBGN, END, EPEND, SPEND
  std::vector<std::string> all_names_;
  std::vector<int64_t> all_step_nums_, all_ep_nums_;
};
}  // namespace test
}  // namespace dataset
}  // namespace mindspore
class MindDataTestCallback : public UT::DatasetOpTesting {
 public:
  void SetUp() override {
    DatasetOpTesting::SetUp();
    GlobalInit();
  }
};
TEST_F(MindDataTestCallback, TestBasicCallback) {
  MS_LOG(INFO) << "Doing: MindDataTestCallback-TestBasicCallback";
  // config callback
  Status rc;
  std::shared_ptr<test::TestCallback> tst_cb = std::make_shared<test::TestCallback>(64);
  std::shared_ptr<DSCallback> cb1 = tst_cb;
  // config leaf_op, use random data to avoid I/O
  std::unique_ptr<DataSchema> schema = std::make_unique<DataSchema>();
  TensorShape shape({});  // an empty shape makes a single-value scalar Tensor
  ColDescriptor col("label", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &shape);
  ASSERT_OK(schema->AddColumn(col));
  std::shared_ptr<ConfigManager> config_manager = GlobalContext::config_manager();
  int32_t op_connector_size = config_manager->op_connector_size();
  int32_t num_workers = config_manager->num_parallel_workers();
  std::shared_ptr<RandomDataOp> leaf =
      std::make_shared<RandomDataOp>(num_workers, op_connector_size, 44, std::move(schema));
  // config mapOp
  std::vector<std::string> input_columns = {"label"};
  std::vector<std::string> output_columns = {};
  std::vector<std::shared_ptr<TensorOp>> op_list;
  std::shared_ptr<TensorOp> my_no_op = std::make_shared<NoOp>();
  op_list.push_back(my_no_op);
  std::shared_ptr<MapOp> map_op =
      std::make_shared<MapOp>(input_columns, output_columns, std::move(op_list), num_workers, op_connector_size);
  std::vector<std::shared_ptr<DSCallback>> cbs = {};
  cbs.push_back(cb1);
  map_op->AddCallbacks(std::move(cbs));
  // config RepeatOp
  std::shared_ptr<RepeatOp> repeat_op = std::make_shared<RepeatOp>(2);
  // build then launch the tree
  leaf->SetTotalRepeats(2);
  leaf->SetNumRepeatsPerEpoch(2);
  map_op->SetTotalRepeats(2);
  map_op->SetNumRepeatsPerEpoch(2);
  std::shared_ptr<ExecutionTree> tree = Build({leaf, map_op, repeat_op});
  rc = tree->Prepare();
  EXPECT_TRUE(rc.IsOk());
  rc = tree->Launch();
  EXPECT_TRUE(rc.IsOk());
  // Start the loop of reading tensors from our pipeline
  DatasetIterator di(tree);
  TensorMap tensor_map;
  rc = di.GetNextAsMap(&tensor_map);
  EXPECT_TRUE(rc.IsOk());
  while (!tensor_map.empty()) {
    rc = di.GetNextAsMap(&tensor_map);
    EXPECT_TRUE(rc.IsOk());
  }
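  // Expected trace: the leaf produces 44 rows repeated twice, i.e. 88 steps in a
  // single epoch. With step_size = 64, the per-step callbacks fire on step 1 and
  // step 65 (1 + 64), and the epoch-end callback reports the final step, 88.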
  std::vector<std::string> callback_names = {"BGN", "EPBGN", "SPBGN", "SPEND", "SPBGN", "SPEND", "EPEND"};
  std::vector<int64_t> all_steps = {0, 0, 1, 1, 65, 65, 88};
  std::vector<int64_t> all_epochs = {0, 1, 1, 1, 1, 1, 1};
  // compare only the first len records so an unexpected epoch_end or extra epoch_begin shows up as a mismatch
  size_t len = 7;
  EXPECT_EQ(tst_cb->all_names(len), callback_names);
  EXPECT_EQ(tst_cb->all_step_nums(len), all_steps);
  EXPECT_EQ(tst_cb->all_ep_nums(len), all_epochs);
}
TEST_F(MindDataTestCallback, TestMultiEpochCallback) {
  MS_LOG(INFO) << "Doing: MindDataTestCallback-TestMultiEpochCallback";
  // config callback
  Status rc;
  std::shared_ptr<test::TestCallback> tst_cb = std::make_shared<test::TestCallback>(4);
  std::shared_ptr<DSCallback> cb1 = tst_cb;
  // config leaf_op, use random data to avoid I/O
  std::shared_ptr<ConfigManager> config_manager = GlobalContext::config_manager();
  int32_t op_connector_size = config_manager->op_connector_size();
  int32_t num_workers = config_manager->num_parallel_workers();
  std::unique_ptr<DataSchema> schema = std::make_unique<DataSchema>();
  TensorShape shape({});  // an empty shape makes a single-value scalar Tensor
  ColDescriptor col("label", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &shape);
  ASSERT_OK(schema->AddColumn(col));
  std::shared_ptr<RandomDataOp> leaf = std::make_shared<RandomDataOp>(4, op_connector_size, 4, std::move(schema));
  // config mapOp
  std::vector<std::string> input_columns = {"label"};
  std::vector<std::string> output_columns = {};
  std::vector<std::shared_ptr<TensorOp>> op_list;
  std::shared_ptr<TensorOp> my_no_op = std::make_shared<NoOp>();
  op_list.push_back(my_no_op);
  std::shared_ptr<MapOp> map_op =
      std::make_shared<MapOp>(input_columns, output_columns, std::move(op_list), num_workers, op_connector_size);
  std::vector<std::shared_ptr<DSCallback>> cbs = {};
  cbs.push_back(cb1);
  map_op->AddCallbacks(std::move(cbs));
  // config RepeatOp
  std::shared_ptr<RepeatOp> repeat_op = std::make_shared<RepeatOp>(2);
  // config EpochCtrlOp
  std::shared_ptr<EpochCtrlOp> epoch_ctrl_op = std::make_shared<EpochCtrlOp>(-1);
  // build then launch the tree
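  // Note: this tree sits under EpochCtrlOp(-1), i.e. the epoch count is driven by
  // the iterator rather than fixed up front; the negative total-repeat values
  // appear to mark that open-ended case, while repeats-per-epoch stays 2.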
  leaf->SetTotalRepeats(-2);
  leaf->SetNumRepeatsPerEpoch(2);
  map_op->SetTotalRepeats(-2);
  map_op->SetNumRepeatsPerEpoch(2);
  std::shared_ptr<ExecutionTree> tree = Build({leaf, map_op, repeat_op, epoch_ctrl_op});
  rc = tree->Prepare();
  EXPECT_TRUE(rc.IsOk());
  rc = tree->Launch();
  EXPECT_TRUE(rc.IsOk());
  // Start the loop of reading tensors from our pipeline
  DatasetIterator di(tree);
  TensorMap tensor_map;
  size_t num_epochs = 2;
  for (size_t ep_num = 0; ep_num < num_epochs; ++ep_num) {
    ASSERT_OK(di.GetNextAsMap(&tensor_map));
    while (!tensor_map.empty()) {
      rc = di.GetNextAsMap(&tensor_map);
      EXPECT_TRUE(rc.IsOk());
    }
  }
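  // Expected trace: 4 rows x 2 repeats = 8 steps per epoch over 2 epochs. With
  // step_size = 4, step callbacks fire on steps 1 and 5 of the first epoch and on
  // steps 9 and 13 of the second; the epoch ends land on cumulative steps 8 and 16.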
  std::vector<std::string> callback_names = {"BGN", "EPBGN", "SPBGN", "SPEND", "SPBGN", "SPEND", "EPEND",
                                             "EPBGN", "SPBGN", "SPEND", "SPBGN", "SPEND", "EPEND"};
  std::vector<int64_t> all_steps = {0, 0, 1, 1, 5, 5, 8, 8, 9, 9, 13, 13, 16};
  std::vector<int64_t> all_epochs = {0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2};
  size_t len = 13;
  EXPECT_EQ(tst_cb->all_names(len), callback_names);
  EXPECT_EQ(tst_cb->all_ep_nums(len), all_epochs);
  EXPECT_EQ(tst_cb->all_step_nums(len), all_steps);
}
TEST_F(MindDataTestCallback, TestSelectedCallback) {
  MS_LOG(INFO) << "Doing: MindDataTestCallback-TestSelectedCallback";
  // config callback
  Status rc;
  std::shared_ptr<test::TestCallback> tst_cb = std::make_shared<test::TestCallback>(4);
  std::shared_ptr<DSCallback> cb1 = tst_cb;
  // turn off the epoch callbacks
  tst_cb->epoch_begin_ = false;
  tst_cb->epoch_end_ = false;
  // config leaf_op, use random data to avoid I/O
  std::shared_ptr<ConfigManager> config_manager = GlobalContext::config_manager();
  int32_t op_connector_size = config_manager->op_connector_size();
  int32_t num_workers = config_manager->num_parallel_workers();
  std::unique_ptr<DataSchema> schema = std::make_unique<DataSchema>();
  TensorShape shape({});  // an empty shape makes a single-value scalar Tensor
  ColDescriptor col("label", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &shape);
  ASSERT_OK(schema->AddColumn(col));
  std::shared_ptr<RandomDataOp> leaf = std::make_shared<RandomDataOp>(4, op_connector_size, 4, std::move(schema));
  // config mapOp
  std::vector<std::string> input_columns = {"label"};
  std::vector<std::string> output_columns = {};
  std::vector<std::shared_ptr<TensorOp>> op_list;
  std::shared_ptr<TensorOp> my_no_op = std::make_shared<NoOp>();
  op_list.push_back(my_no_op);
  std::shared_ptr<MapOp> map_op =
      std::make_shared<MapOp>(input_columns, output_columns, std::move(op_list), num_workers, op_connector_size);
  map_op->AddCallbacks({cb1});
  // config RepeatOp
  std::shared_ptr<RepeatOp> repeat_op = std::make_shared<RepeatOp>(2);
  // config EpochCtrlOp
  std::shared_ptr<EpochCtrlOp> epoch_ctrl_op = std::make_shared<EpochCtrlOp>(-1);
  // build then launch the tree
  leaf->SetTotalRepeats(-2);
  leaf->SetNumRepeatsPerEpoch(2);
  map_op->SetTotalRepeats(-2);
  map_op->SetNumRepeatsPerEpoch(2);
  std::shared_ptr<ExecutionTree> tree = Build({leaf, map_op, repeat_op, epoch_ctrl_op});
  rc = tree->Prepare();
  EXPECT_TRUE(rc.IsOk());
  rc = tree->Launch();
  EXPECT_TRUE(rc.IsOk());
  // Start the loop of reading tensors from our pipeline
  DatasetIterator di(tree);
  TensorMap tensor_map;
  size_t num_epochs = 2;
  for (size_t ep_num = 0; ep_num < num_epochs; ++ep_num) {
    ASSERT_OK(di.GetNextAsMap(&tensor_map));
    while (!tensor_map.empty()) {
      rc = di.GetNextAsMap(&tensor_map);
      EXPECT_TRUE(rc.IsOk());
    }
  }
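  // Expected trace: with epoch_begin_ and epoch_end_ turned off, only BGN and the
  // per-step callbacks are recorded; steps 1 and 5 fall in epoch 1, and steps 9
  // and 13 in epoch 2 (the step count keeps running across epochs).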
  std::vector<std::string> callback_names = {"BGN", "SPBGN", "SPEND", "SPBGN", "SPEND",
                                             "SPBGN", "SPEND", "SPBGN", "SPEND"};
  std::vector<int64_t> all_steps = {0, 1, 1, 5, 5, 9, 9, 13, 13};
  std::vector<int64_t> all_epochs = {0, 1, 1, 1, 1, 2, 2, 2, 2};
  size_t len = 9;
  EXPECT_EQ(tst_cb->all_names(len), callback_names);
  EXPECT_EQ(tst_cb->all_ep_nums(len), all_epochs);
  EXPECT_EQ(tst_cb->all_step_nums(len), all_steps);
}
TEST_F(MindDataTestCallback, TestCAPICallback) {
  MS_LOG(INFO) << "Doing: MindDataTestCallback-TestCAPICallback";
  // config callback
  std::shared_ptr<test::TestCallback> tst_cb = std::make_shared<test::TestCallback>(64);
  std::shared_ptr<DSCallback> cb1 = tst_cb;
  // Create a RandomDataset; use random data to avoid I/O
  std::shared_ptr<SchemaObj> schema = Schema();
  ASSERT_OK(schema->add_column("label", mindspore::DataType::kNumberTypeUInt32, {}));
  std::shared_ptr<Dataset> ds = RandomData(44, schema);
  ASSERT_NE(ds, nullptr);
  ds = ds->Map({std::make_shared<transforms::TypeCast>(mindspore::DataType::kNumberTypeUInt64)}, {"label"}, {}, {},
               nullptr, {cb1});
  ASSERT_NE(ds, nullptr);
  ds = ds->Repeat(2);
  ASSERT_NE(ds, nullptr);
  auto tree_adapter = std::make_shared<TreeAdapter>();
  // Create a ProfilingManager
  auto profiling_manager = std::make_shared<ProfilingManager>(nullptr);
  tree_adapter->SetProfilingManagerPtr(profiling_manager);
  // Disable the IR optimization pass
  tree_adapter->SetOptimize(false);
  // use the tree_adapter to set num_epochs = 1
  ASSERT_OK(tree_adapter->Compile(ds->IRNode(), 1));
  TensorRow row;
  ASSERT_OK(tree_adapter->GetNext(&row));
  while (!row.empty()) {
    ASSERT_OK(tree_adapter->GetNext(&row));
  }
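  // Expected trace: same shape as TestBasicCallback (44 rows x 2 repeats = 88
  // steps, step_size = 64), so step callbacks fire on steps 1 and 65 and the
  // epoch ends on step 88.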
  std::vector<std::string> callback_names = {"BGN", "EPBGN", "SPBGN", "SPEND", "SPBGN", "SPEND", "EPEND"};
  std::vector<int64_t> all_steps = {0, 0, 1, 1, 65, 65, 88};
  std::vector<int64_t> all_epochs = {0, 1, 1, 1, 1, 1, 1};
  // compare only the first len records so an unexpected epoch_end or extra epoch_begin shows up as a mismatch
  size_t len = 7;
  EXPECT_EQ(tst_cb->all_names(len), callback_names);
  EXPECT_EQ(tst_cb->all_step_nums(len), all_steps);
  EXPECT_EQ(tst_cb->all_ep_nums(len), all_epochs);
}