Browse Source

!28784 [GraphKernel] add lite pass manager.

Merge pull request !28784 from chenlei_autodiff/lite_callback
feature/build-system-rewrite
i-robot Gitee 4 years ago
parent
commit
68ec2be828
No known key found for this signature in database GPG Key ID: 173E9B9CA92EEF8F
16 changed files with 273 additions and 21 deletions
  1. +3
    -3
      mindspore/ccsrc/backend/optimizer/graph_kernel/adapter/graph_kernel_optimization.cc
  2. +4
    -4
      mindspore/ccsrc/backend/optimizer/graph_kernel/adapter/graph_kernel_optimization.h
  3. +5
    -3
      mindspore/ccsrc/backend/optimizer/graph_kernel/adapter/graph_kernel_pass_manager.cc
  4. +4
    -4
      mindspore/ccsrc/backend/optimizer/graph_kernel/adapter/graph_kernel_pass_manager.h
  5. +82
    -0
      mindspore/ccsrc/backend/optimizer/graph_kernel/lite_adapter/graph_kernel_optimization.cc
  6. +39
    -0
      mindspore/ccsrc/backend/optimizer/graph_kernel/lite_adapter/graph_kernel_optimization.h
  7. +75
    -0
      mindspore/ccsrc/backend/optimizer/graph_kernel/lite_adapter/graph_kernel_pass_manager.cc
  8. +50
    -0
      mindspore/ccsrc/backend/optimizer/graph_kernel/lite_adapter/graph_kernel_pass_manager.h
  9. +1
    -1
      mindspore/ccsrc/backend/session/ascend_session.cc
  10. +1
    -1
      mindspore/ccsrc/backend/session/cpu_session.cc
  11. +1
    -1
      mindspore/ccsrc/backend/session/gpu_session.cc
  12. +1
    -1
      mindspore/ccsrc/runtime/hardware/ascend/ascend_device_context.cc
  13. +1
    -1
      mindspore/ccsrc/runtime/hardware/ascend/ascend_graph_optimization.cc
  14. +1
    -1
      mindspore/ccsrc/runtime/hardware/cpu/cpu_device_context.cc
  15. +1
    -1
      mindspore/ccsrc/runtime/hardware/gpu/optimizer.h
  16. +4
    -0
      tests/ut/cpp/CMakeLists.txt

mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_optimization.cc → mindspore/ccsrc/backend/optimizer/graph_kernel/adapter/graph_kernel_optimization.cc View File

@@ -1,5 +1,5 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
* Copyright 2021-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -13,7 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/optimizer/graph_kernel/graph_kernel_optimization.h"
#include "backend/optimizer/graph_kernel/adapter/graph_kernel_optimization.h"

#include <vector>
#include <string>
@@ -46,7 +46,7 @@
#include "backend/optimizer/graph_kernel/tsa_atomic_add_to_first_tensor.h"
#include "backend/optimizer/graph_kernel/uss_atomic_add.h"
#include "backend/optimizer/pass/getitem_tuple.h"
#include "backend/optimizer/graph_kernel/graph_kernel_pass_manager.h"
#include "backend/optimizer/graph_kernel/adapter/graph_kernel_pass_manager.h"
#include "backend/optimizer/graph_kernel/transform_op_optimizer.h"
#include "backend/optimizer/graph_kernel/rewrite_output_shape.h"
#include "backend/optimizer/graph_kernel/graph_kernel_recompute.h"

mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_optimization.h → mindspore/ccsrc/backend/optimizer/graph_kernel/adapter/graph_kernel_optimization.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
* Copyright 2021-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -13,8 +13,8 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_GRAPH_KERNEL_OPTIMIZATION_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_GRAPH_KERNEL_OPTIMIZATION_H_
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_ADAPTER_GRAPH_KERNEL_OPTIMIZATION_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_ADAPTER_GRAPH_KERNEL_OPTIMIZATION_H_

#include "ir/anf.h"
#include "ir/func_graph.h"
@@ -51,4 +51,4 @@ class GraphKernelOptimizer {

void GraphKernelOptimize(const KernelGraphPtr &kernel_graph);
} // namespace mindspore::graphkernel
#endif // MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_GRAPH_KERNEL_OPTIMIZATION_H_
#endif // MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_ADAPTER_GRAPH_KERNEL_OPTIMIZATION_H_

mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_pass_manager.cc → mindspore/ccsrc/backend/optimizer/graph_kernel/adapter/graph_kernel_pass_manager.cc View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
* Copyright 2019-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -13,7 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/optimizer/graph_kernel/graph_kernel_pass_manager.h"
#include "backend/optimizer/graph_kernel/adapter/graph_kernel_pass_manager.h"
#include <iomanip>
@@ -58,7 +58,9 @@ bool GraphKernelPassManager::Run(const FuncGraphPtr &func_graph) const {
// dump ir to a graph_kernel subdir, and set a global id in front of the filename
std::ostringstream oss;
static int g_id = 0;
oss << "graph_kernel/" << std::setfill('0') << std::setw(4) << g_id++ << "_" << GetPassFullname(i, passes_[i]);
constexpr int id_length = 4;
oss << "graph_kernel/" << std::setfill('0') << std::setw(id_length) << g_id++ << "_"
<< GetPassFullname(i, passes_[i]);
DumpPassIR(func_graph, oss.str());
} else {
MS_LOG(INFO) << "pass " << GetPassFullname(i, passes_[i]) << " is disabled.";

mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_pass_manager.h → mindspore/ccsrc/backend/optimizer/graph_kernel/adapter/graph_kernel_pass_manager.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -13,8 +13,8 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_PASS_MANAGER_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_PASS_MANAGER_H_
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_ADAPTER_GRAPH_KERNEL_PASS_MANAGER_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_ADAPTER_GRAPH_KERNEL_PASS_MANAGER_H_
#include <utility>
#include <vector>
@@ -46,4 +46,4 @@ class GraphKernelPassManager : public PassManager {
const GraphKernelFlags &flags_;
};
} // namespace mindspore::graphkernel
#endif // MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_PASS_MANAGER_H_
#endif // MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_ADAPTER_GRAPH_KERNEL_PASS_MANAGER_H_

+ 82
- 0
mindspore/ccsrc/backend/optimizer/graph_kernel/lite_adapter/graph_kernel_optimization.cc View File

@@ -0,0 +1,82 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/optimizer/graph_kernel/lite_adapter/graph_kernel_optimization.h"

#include <vector>
#include <string>
#include <memory>

#include "ir/func_graph.h"
#include "utils/context/graph_kernel_flags.h"
#include "backend/optimizer/pass/getitem_tuple.h"
#include "backend/optimizer/graph_kernel/core/graph_kernel_cluster.h"
#include "backend/optimizer/graph_kernel/core/graph_kernel_expander.h"
#include "backend/optimizer/graph_kernel/core/eliminate_redundant_output.h"
#include "backend/optimizer/graph_kernel/core/shape_ops_splitter.h"
#include "backend/optimizer/graph_kernel/core/update_state_formatter.h"
#include "backend/optimizer/graph_kernel/lite_adapter/graph_kernel_pass_manager.h"

namespace mindspore::graphkernel {
using opt::GetitemTuple;
using opt::GraphOptimizer;

// Builds stage 0 ("cluster"): expand complex basic kernels, fuse kernels
// into composite kernels, and drop outputs that have no external users.
PassManagerPtr GraphKernelOptimizer::Cluster() const {
  auto pass_manager = std::make_shared<GraphKernelPassManager>(0, "cluster");

  // Expand complex basic kernels to composite kernels
  pass_manager->AddPass(std::make_shared<GraphKernelExpander>(), OptLevel_1);
  // Cluster basic kernels and composite kernels
  pass_manager->AddPass(std::make_shared<GraphKernelCluster>(), OptLevel_1);
  // Eliminate the outputs without external user
  pass_manager->AddPass(std::make_shared<EliminateRedundantOutput>(), OptLevel_1);

  return pass_manager;
}

// Builds stage 1 ("split"): shape-op splitting plus the cleanup passes
// that remove the redundancy this duplication exposes.
PassManagerPtr GraphKernelOptimizer::Split() const {
  auto pass_manager = std::make_shared<GraphKernelPassManager>(1, "split");

  // Duplicate certain nodes so that each copy has only one user, which
  // avoids unnecessary input-output edges and gives better performance.
  // (preprocess for ShapeOpsSplitter)
  pass_manager->AddPass(std::make_shared<ExtendOutputForUpdateState>(), OptLevel_1);
  const std::vector<PrimitivePtr> ops_to_duplicate = {prim::kPrimReshape};
  pass_manager->AddPass(std::make_shared<ShapeOpsSplitter>(ops_to_duplicate), OptLevel_1);

  // Simplify/Splitter expose many redundant getitem/maketuple nodes;
  // GetitemTuple deletes them.
  pass_manager->AddPass(std::make_shared<GetitemTuple>(), OptLevel_1);

  // Eliminate the redundant nodes copied above but not handled by GraphKernelSplitter
  pass_manager->AddPass(std::make_shared<MergeOutputForUpdateState>(), OptLevel_1);
  pass_manager->AddPass(std::make_shared<EliminateRedundantOutput>(), OptLevel_1);

  return pass_manager;
}

// Runs the whole graph-kernel pipeline (Cluster then Split) on the given
// graph, attaching a FuncGraphManager first if the graph has none.
void GraphKernelOptimizer::Run(const FuncGraphPtr &kernel_graph) {
  if (kernel_graph->manager() == nullptr) {
    kernel_graph->set_manager(Manage(kernel_graph, true));
  }

  auto optimizer = std::make_shared<GraphOptimizer>("graph_kernel_optimizer");
  optimizer->AddPassManager(Cluster());
  optimizer->AddPassManager(Split());
  (void)optimizer->Optimize(kernel_graph);
}

void GraphKernelOptimize(const FuncGraphPtr &kernel_graph) { GraphKernelOptimizer().Run(kernel_graph); }
} // namespace mindspore::graphkernel

+ 39
- 0
mindspore/ccsrc/backend/optimizer/graph_kernel/lite_adapter/graph_kernel_optimization.h View File

@@ -0,0 +1,39 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_LITE_ADAPTER_GRAPH_KERNEL_OPTIMIZATION_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_LITE_ADAPTER_GRAPH_KERNEL_OPTIMIZATION_H_

#include "ir/anf.h"
#include "ir/func_graph.h"
#include "backend/optimizer/common/optimizer.h"
#include "backend/optimizer/common/pass_manager.h"

namespace mindspore::graphkernel {
using opt::PassManagerPtr;
// Driver for the lite-adapter GraphKernel optimization pipeline: builds the
// per-stage pass managers and applies them to a function graph.
class GraphKernelOptimizer {
public:
// Runs every stage (Cluster, then Split) on kernel_graph.
void Run(const FuncGraphPtr &kernel_graph);

private:
// Builds stage 0 ("cluster"): expansion, clustering, redundant-output removal.
PassManagerPtr Cluster() const;
// Builds stage 1 ("split"): shape-op splitting and cleanup passes.
PassManagerPtr Split() const;
};

void GraphKernelOptimize(const FuncGraphPtr &kernel_graph);
} // namespace mindspore::graphkernel
#endif // MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_LITE_ADAPTER_GRAPH_KERNEL_OPTIMIZATION_H_

+ 75
- 0
mindspore/ccsrc/backend/optimizer/graph_kernel/lite_adapter/graph_kernel_pass_manager.cc View File

@@ -0,0 +1,75 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/optimizer/graph_kernel/lite_adapter/graph_kernel_pass_manager.h"

#include <algorithm>
#include <iomanip>

#include "ir/anf.h"
#include "ir/func_graph.h"
#include "ir/manager.h"
#include "debug/anf_ir_dump.h"
namespace mindspore::graphkernel {
// Registers a pass and decides whether it is enabled, combining three inputs:
// the caller-supplied device support flag, the global opt level, and the
// user's enable/disable pass lists from GraphKernelFlags.
void GraphKernelPassManager::AddPass(const opt::PassPtr &pass, unsigned int pass_level, bool supported_device) {
MS_EXCEPTION_IF_NULL(pass);
// The pass id is its position in the registration order.
auto pass_id = passes_.size();
auto pass_name = pass->name();
// Returns true if this pass is named in the given flag list.
auto pass_in_list = [this, pass_id, &pass_name](const std::vector<std::string> &pass_list) {
// the config format can be "stage_id.pass_id" or "stage_name.pass_name"
return std::find(pass_list.begin(), pass_list.end(),
std::to_string(this->stage_) + "." + std::to_string(pass_id)) != pass_list.end() ||
std::find(pass_list.begin(), pass_list.end(), this->name_ + "." + pass_name) != pass_list.end();
};
// Default decision: enabled only when the device supports the pass and the
// global opt level reaches the pass's required level.
bool enable = supported_device && flags_.opt_level >= pass_level;
if (enable) {
// if it meets the condition to enable, check whether it's in the disabled list.
enable = !pass_in_list(flags_.disable_pass);
} else {
// if it doesn't meet the condition to enable, check whether it's in the enabled list.
enable = pass_in_list(flags_.enable_pass);
}
// passes_ and enabled_ are parallel vectors indexed by pass_id.
passes_.push_back(pass);
enabled_.push_back(enable);
}
// Executes one pass on func_graph and reports whether it changed the graph.
// pass_id is unused here; the parameter exists to satisfy the base-class
// override signature.
bool GraphKernelPassManager::RunPass(const FuncGraphPtr &func_graph, size_t pass_id, const opt::PassPtr &pass) const {
  return pass->Run(func_graph);
}
// Builds the canonical pass label: "stage<stage>_<pm-name>_<pass-id>_<pass-name>".
std::string GraphKernelPassManager::GetPassFullname(size_t pass_id, const opt::PassPtr &pass) const {
  std::string full_name = "stage";
  full_name += std::to_string(stage_);
  full_name += "_";
  full_name += name();
  full_name += "_";
  full_name += std::to_string(pass_id);
  full_name += "_";
  full_name += pass->name();
  return full_name;
}
// Runs every enabled pass in registration order and dumps the IR after each
// one; returns true if any pass changed the graph.
bool GraphKernelPassManager::Run(const FuncGraphPtr &func_graph) const {
bool changed = false;
for (size_t i = 0; i < passes_.size(); i++) {
if (enabled_[i]) {
// Run the pass first so 'changed' is updated even when already true.
changed = RunPass(func_graph, i, passes_[i]) || changed;
// dump ir to a graph_kernel subdir, and set a global id in front of the filename
std::ostringstream oss;
// NOTE(review): g_id is shared across all pass managers and is not
// synchronized — assumes single-threaded optimization; confirm.
static int g_id = 0;
constexpr int id_length = 4;
oss << "graph_kernel/" << std::setfill('0') << std::setw(id_length) << g_id++ << "_"
<< GetPassFullname(i, passes_[i]);
DumpPassIR(func_graph, oss.str());
} else {
MS_LOG(INFO) << "pass " << GetPassFullname(i, passes_[i]) << " is disabled.";
}
}
return changed;
}
} // namespace mindspore::graphkernel

+ 50
- 0
mindspore/ccsrc/backend/optimizer/graph_kernel/lite_adapter/graph_kernel_pass_manager.h View File

@@ -0,0 +1,50 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_LITE_ADAPTER_GRAPH_KERNEL_PASS_MANAGER_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_LITE_ADAPTER_GRAPH_KERNEL_PASS_MANAGER_H_
#include <utility>
#include <vector>
#include <string>
#include <memory>
#include "utils/context/graph_kernel_flags.h"
#include "backend/optimizer/common/pass_manager.h"
namespace mindspore::graphkernel {
using opt::PassManager;
// Pass manager for the lite-adapter GraphKernel pipeline. Extends the common
// PassManager with per-pass enable/disable decisions driven by
// GraphKernelFlags (opt level plus enable/disable pass lists).
class GraphKernelPassManager : public PassManager {
public:
// stage: numeric id used in pass labels and flag matching ("stage_id.pass_id");
// name: stage name used the same way ("stage_name.pass_name").
GraphKernelPassManager(size_t stage, const std::string &name)
: PassManager(name, true), stage_(stage), flags_(GraphKernelFlags::GetInstance()) {}
~GraphKernelPassManager() = default;
// Add graph pass, the pass object will be freed when pass manager freed.
virtual void AddPass(const opt::PassPtr &pass, unsigned int pass_level, bool default_enable = true);
// Run passes on the func_graph
bool Run(const FuncGraphPtr &func_graph) const override;
protected:
// Runs a single pass; pass_id is its registration index.
bool RunPass(const FuncGraphPtr &func_graph, size_t pass_id, const opt::PassPtr &pass) const override;
// Builds the "stage<N>_<name>_<id>_<pass>" label used in logs and dump names.
std::string GetPassFullname(size_t pass_id, const opt::PassPtr &pass) const override;
// Stage index of this manager within the optimizer pipeline.
size_t stage_;
// Parallel to passes_: whether each registered pass is enabled.
std::vector<bool> enabled_;
// Global graph-kernel flags (opt level, enable/disable pass lists).
const GraphKernelFlags &flags_;
};
} // namespace mindspore::graphkernel
#endif // MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_LITE_ADAPTER_GRAPH_KERNEL_PASS_MANAGER_H_

+ 1
- 1
mindspore/ccsrc/backend/session/ascend_session.cc View File

@@ -50,7 +50,7 @@
#include "debug/debugger/debugger_utils.h"
#endif
#include "debug/anf_ir_utils.h"
#include "backend/optimizer/graph_kernel/graph_kernel_optimization.h"
#include "backend/optimizer/graph_kernel/adapter/graph_kernel_optimization.h"
#include "backend/session/ascend_auto_monad.h"
#include "debug/anf_ir_dump.h"
#include "debug/dump_proto.h"


+ 1
- 1
mindspore/ccsrc/backend/session/cpu_session.cc View File

@@ -31,7 +31,7 @@
#include "backend/optimizer/common/pass_manager.h"
#include "backend/optimizer/cpu/insert_cast_cpu.h"
#include "backend/optimizer/cpu/insert_format_transform_op.h"
#include "backend/optimizer/graph_kernel/graph_kernel_optimization.h"
#include "backend/optimizer/graph_kernel/adapter/graph_kernel_optimization.h"
#include "backend/optimizer/pass/replace_node_by_proxy.h"
#include "backend/optimizer/pass/erase_visit_attr.h"
#include "debug/anf_ir_dump.h"


+ 1
- 1
mindspore/ccsrc/backend/session/gpu_session.cc View File

@@ -52,7 +52,7 @@
#ifdef ENABLE_GPU_INFER
#include "backend/optimizer/trt_pass/graph_converter.h"
#endif
#include "backend/optimizer/graph_kernel/graph_kernel_optimization.h"
#include "backend/optimizer/graph_kernel/adapter/graph_kernel_optimization.h"
#include "backend/optimizer/pass/communication_op_fusion.h"
#include "backend/optimizer/gpu/concat_outputs_for_all_gather.h"
#include "backend/optimizer/pass/getitem_tuple.h"


+ 1
- 1
mindspore/ccsrc/runtime/hardware/ascend/ascend_device_context.cc View File

@@ -21,7 +21,7 @@
#include "acl/acl_rt.h"
#include "runtime/dev.h"
#include "backend/optimizer/ascend/ascend_backend_optimization.h"
#include "backend/optimizer/graph_kernel/graph_kernel_optimization.h"
#include "backend/optimizer/graph_kernel/adapter/graph_kernel_optimization.h"
#include "utils/context/graph_kernel_flags.h"
#include "utils/utils.h"
#include "runtime/device/ascend/kernel_select_ascend.h"


+ 1
- 1
mindspore/ccsrc/runtime/hardware/ascend/ascend_graph_optimization.cc View File

@@ -19,7 +19,7 @@
#include <string>
#include "backend/optimizer/common/common_backend_optimization.h"
#include "backend/optimizer/ascend/ascend_backend_optimization.h"
#include "backend/optimizer/graph_kernel/graph_kernel_optimization.h"
#include "backend/optimizer/graph_kernel/adapter/graph_kernel_optimization.h"
#include "backend/session/ascend_auto_monad.h"
#include "utils/context/graph_kernel_flags.h"
#include "runtime/device/ascend/kernel_select_ascend.h"


+ 1
- 1
mindspore/ccsrc/runtime/hardware/cpu/cpu_device_context.cc View File

@@ -31,7 +31,7 @@
#include "backend/optimizer/cpu/insert_format_transform_op.h"
#include "backend/optimizer/pass/replace_node_by_proxy.h"
#include "backend/optimizer/pass/erase_visit_attr.h"
#include "backend/optimizer/graph_kernel/graph_kernel_optimization.h"
#include "backend/optimizer/graph_kernel/adapter/graph_kernel_optimization.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "profiler/device/cpu/cpu_profiling.h"
#if ((defined ENABLE_CPU) && (!defined _WIN32))


+ 1
- 1
mindspore/ccsrc/runtime/hardware/gpu/optimizer.h View File

@@ -46,7 +46,7 @@
#include "backend/optimizer/gpu/relu_v2_pass.h"
#include "backend/optimizer/gpu/add_relu_v2_fusion.h"
#include "backend/optimizer/gpu/add_relu_grad_v2_fusion.h"
#include "backend/optimizer/graph_kernel/graph_kernel_optimization.h"
#include "backend/optimizer/graph_kernel/adapter/graph_kernel_optimization.h"
#include "backend/optimizer/pass/communication_op_fusion.h"
#include "backend/optimizer/gpu/concat_outputs_for_all_gather.h"
#include "backend/optimizer/pass/getitem_tuple.h"


+ 4
- 0
tests/ut/cpp/CMakeLists.txt View File

@@ -218,6 +218,10 @@ list(REMOVE_ITEM MINDSPORE_SRC_LIST "../../../mindspore/ccsrc/backend/optimizer/
list(REMOVE_ITEM MINDSPORE_SRC_LIST "../../../mindspore/ccsrc/backend/optimizer/graph_kernel/lite_adapter/akg_build.cc")
list(REMOVE_ITEM MINDSPORE_SRC_LIST
"../../../mindspore/ccsrc/backend/optimizer/graph_kernel/lite_adapter/callback_impl.cc")
list(REMOVE_ITEM MINDSPORE_SRC_LIST
"../../../mindspore/ccsrc/backend/optimizer/graph_kernel/lite_adapter/graph_kernel_optimization.cc")
list(REMOVE_ITEM MINDSPORE_SRC_LIST
"../../../mindspore/ccsrc/backend/optimizer/graph_kernel/lite_adapter/graph_kernel_pass_manager.cc")
list(REMOVE_ITEM MINDSPORE_SRC_LIST "../../../mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_compile.cc")
list(REMOVE_ITEM MINDSPORE_SRC_LIST "../../../mindspore/ccsrc/backend/kernel_compiler/akg/cpu/akg_cpu_kernel_mod.cc")
list(REMOVE_ITEM MINDSPORE_SRC_LIST "../../../mindspore/ccsrc/backend/kernel_compiler/akg/cpu/akg_cpu_kernel_build.cc")


Loading…
Cancel
Save