You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

optimization_pass_test.cc 5.1 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include <memory>
  17. #include <string>
  18. #include "common/common.h"
  19. #include "gtest/gtest.h"
  20. #include "minddata/dataset/core/client.h"
  21. #include "minddata/dataset/engine/ir/datasetops/dataset_node.h"
  22. #include "minddata/dataset/engine/ir/datasetops/map_node.h"
  23. #include "minddata/dataset/engine/opt/optional/tensor_op_fusion_pass.h"
  24. #include "minddata/dataset/engine/opt/post/auto_worker_pass.h"
  25. #include "minddata/dataset/include/transforms.h"
  26. #include "minddata/dataset/include/vision.h"
  27. #include "minddata/dataset/include/vision_lite.h"
  28. using namespace mindspore::dataset;
  29. using mindspore::LogStream;
  30. using mindspore::MsLogLevel::INFO;
// Test fixture for the dataset optimization-pass tests below; inherits shared
// dataset test utilities (e.g. datasets_root_path_) from UT::DatasetOpTesting.
class MindDataTestOptimizationPass : public UT::DatasetOpTesting {};
  32. TEST_F(MindDataTestOptimizationPass, MindDataTestAutoWorkerPass) {
  33. MS_LOG(INFO) << "Doing MindDataTestOptimizationPass-MindDataTestAutoWorkerPass.";
  34. std::shared_ptr<SchemaObj> schema = std::make_shared<SchemaObj>();
  35. ASSERT_TRUE(schema->add_column("label", "uint32", {}));
  36. std::shared_ptr<Dataset> map_leaf = ImageFolder("dir")->SetNumWorkers(0);
  37. std::shared_ptr<Dataset> nonmap_leaf = RandomData(44, schema)->SetNumWorkers(0);
  38. std::shared_ptr<Dataset> batch = Zip({map_leaf, nonmap_leaf})->Batch(1)->SetNumWorkers(0);
  39. std::shared_ptr<Dataset> map = batch->Map({})->SetNumWorkers(0);
  40. // {ImageFolder, RandomData} -> zip -> batch
  41. EXPECT_EQ(map_leaf->IRNode()->num_workers(), 0);
  42. EXPECT_EQ(nonmap_leaf->IRNode()->num_workers(), 0);
  43. EXPECT_EQ(batch->IRNode()->num_workers(), 0);
  44. EXPECT_EQ(map->IRNode()->num_workers(), 0);
  45. std::unique_ptr<IRPass> pass = std::make_unique<AutoWorkerPass>();
  46. bool m = false;
  47. ASSERT_OK(pass->Run(map->IRNode(), &m));
  48. // checking that after this pass, num_workers are set correctly (aka a positive number)
  49. // It is hard to test a exact value because num_threads are different for different machine
  50. // however, this will for sure succeed bc regardless of the total threads on cpu, this would always be >= 1
  51. EXPECT_NE(map_leaf->IRNode()->num_workers(), 0);
  52. EXPECT_NE(nonmap_leaf->IRNode()->num_workers(), 0);
  53. EXPECT_NE(batch->IRNode()->num_workers(), 0);
  54. EXPECT_NE(map->IRNode()->num_workers(), 0);
  55. MS_LOG(DEBUG) << map_leaf->IRNode()->Name() << ": num_worker=" << map_leaf->IRNode()->num_workers();
  56. MS_LOG(DEBUG) << nonmap_leaf->IRNode()->Name() << ": num_worker=" << nonmap_leaf->IRNode()->num_workers();
  57. MS_LOG(DEBUG) << batch->IRNode()->Name() << ": num_worker=" << batch->IRNode()->num_workers();
  58. MS_LOG(DEBUG) << map->IRNode()->Name() << ": num_worker=" << map->IRNode()->num_workers();
  59. }
  60. TEST_F(MindDataTestOptimizationPass, MindDataTestTensorFusionPass) {
  61. MS_LOG(INFO) << "Doing MindDataTestOptimizationPass-MindDataTestTensorFusionPass.";
  62. std::string folder_path = datasets_root_path_ + "/testPK/data/";
  63. std::shared_ptr<Dataset> root =
  64. ImageFolder(folder_path, false)->Map({vision::Decode(), vision::RandomResizedCrop({100})}, {"image"});
  65. TensorOpFusionPass fusion_pass;
  66. bool modified = false;
  67. std::shared_ptr<MapNode> map_node = std::dynamic_pointer_cast<MapNode>(root->IRNode());
  68. // no deepcopy is performed because this doesn't go through tree_adapter
  69. fusion_pass.Run(root->IRNode(), &modified);
  70. EXPECT_EQ(modified, true);
  71. ASSERT_NE(map_node, nullptr);
  72. auto fused_ops = map_node->operations();
  73. ASSERT_EQ(fused_ops.size(), 1);
  74. ASSERT_EQ(fused_ops[0]->Name(), vision::kRandomCropDecodeResizeOperation);
  75. }
  76. TEST_F(MindDataTestOptimizationPass, MindDataTestTensorFusionPassPreBuiltTensorOperation) {
  77. MS_LOG(INFO) << "Doing MindDataTestOptimizationPass-MindDataTestTensorFusionPassPreBuiltTensorOperation.";
  78. std::string folder_path = datasets_root_path_ + "/testPK/data/";
  79. // make prebuilt tensor operation
  80. auto decode = std::make_shared<transforms::PreBuiltOperation>(vision::Decode()->Build());
  81. auto resize = std::make_shared<transforms::PreBuiltOperation>(vision::RandomResizedCrop({100})->Build());
  82. std::shared_ptr<Dataset> root = ImageFolder(folder_path, false)->Map({decode, resize}, {"image"});
  83. TensorOpFusionPass fusion_pass;
  84. bool modified = false;
  85. std::shared_ptr<MapNode> map_node = std::dynamic_pointer_cast<MapNode>(root->IRNode());
  86. // no deepcopy is performed because this doesn't go through tree_adapter
  87. fusion_pass.Run(root->IRNode(), &modified);
  88. EXPECT_EQ(modified, true);
  89. ASSERT_NE(map_node, nullptr);
  90. auto fused_ops = map_node->operations();
  91. ASSERT_EQ(fused_ops.size(), 1);
  92. ASSERT_EQ(fused_ops[0]->Name(), kRandomCropDecodeResizeOp);
  93. }