Browse Source

!24229 add dynamic shape attr pass

Merge pull request !24229 from hwjiaorui/dynamic-shape-pass
tags/v1.6.0
i-robot Gitee 4 years ago
parent
commit
18960015b0
14 changed files with 93 additions and 62 deletions
  1. +12
    -0
      mindspore/ccsrc/backend/optimizer/common/common_backend_optimization.cc
  2. +1
    -0
      mindspore/ccsrc/backend/optimizer/common/common_backend_optimization.h
  3. +40
    -0
      mindspore/ccsrc/backend/optimizer/pass/add_dynamic_shape_attr.cc
  4. +34
    -0
      mindspore/ccsrc/backend/optimizer/pass/add_dynamic_shape_attr.h
  5. +0
    -2
      mindspore/ccsrc/backend/session/ascend_session.cc
  6. +2
    -2
      mindspore/ccsrc/backend/session/cpu_session.cc
  7. +1
    -3
      mindspore/ccsrc/backend/session/gpu_session.cc
  8. +1
    -0
      mindspore/ccsrc/backend/session/kernel_graph.h
  9. +0
    -24
      mindspore/ccsrc/backend/session/session_basic.cc
  10. +0
    -2
      mindspore/ccsrc/backend/session/session_basic.h
  11. +1
    -11
      mindspore/ccsrc/runtime/hardware/cpu/cpu_device_context.cc
  12. +0
    -3
      mindspore/ccsrc/runtime/hardware/cpu/cpu_device_context.h
  13. +1
    -12
      mindspore/ccsrc/runtime/hardware/gpu/gpu_device_context.cc
  14. +0
    -3
      mindspore/ccsrc/runtime/hardware/gpu/gpu_device_context.h

+ 12
- 0
mindspore/ccsrc/backend/optimizer/common/common_backend_optimization.cc View File

@@ -28,6 +28,7 @@
#include "backend/optimizer/pass/optimize_updatestate.h"
#include "backend/optimizer/pass/conv_transpose_to_conv_bp.h"
#include "backend/optimizer/pass/reduce_sum_optimizer.h"
#include "backend/optimizer/pass/add_dynamic_shape_attr.h"
#include "utils/ms_context.h"
#include "debug/anf_ir_dump.h"

@@ -47,6 +48,7 @@ void BackendCommonOptimization(const std::shared_ptr<session::KernelGraph> &kern
#endif
auto optimizer = std::make_shared<GraphOptimizer>();
auto common_pm = std::make_shared<PassManager>("common_pm");
common_pm->AddPass(std::make_shared<AddDynamicShapeAttr>());
common_pm->AddPass(std::make_shared<ReduceSumOptimizer>());
common_pm->AddPass(std::make_shared<ConvertConstInputToAttr>());
common_pm->AddPass(std::make_shared<ConvertAttrToUnifyMindIR>());
@@ -114,5 +116,15 @@ void CommonUnifyMindIROptimization(const std::shared_ptr<session::KernelGraph> &
}
#endif
}

// Run the AddDynamicShapeAttr pass standalone on a kernel graph: tags every
// node reported dynamic-shape with kAttrIsDynamicShape (and flags the graph
// itself as dynamic via the pass), then rebuilds the cached execution order.
//
// @param kernel_graph graph to process; must not be null.
void AddDynamicShapeAttrPass(const std::shared_ptr<session::KernelGraph> &kernel_graph) {
  MS_EXCEPTION_IF_NULL(kernel_graph);  // guard before the dereference below, matching codebase convention
  auto opt = std::make_shared<GraphOptimizer>();
  auto pm = std::make_shared<PassManager>("add_dynamic_shape_attr");
  pm->AddPass(std::make_shared<AddDynamicShapeAttr>());
  opt->AddPassManager(pm);
  (void)opt->Optimize(kernel_graph);
  // The pass may have touched node attrs; refresh the execution order.
  kernel_graph->SetExecOrderByDefault();
}

} // namespace opt
} // namespace mindspore

+ 1
- 0
mindspore/ccsrc/backend/optimizer/common/common_backend_optimization.h View File

@@ -22,6 +22,7 @@ namespace opt {
void BackendCommonOptimization(const std::shared_ptr<session::KernelGraph> &kernel_graph);
void CommonFinalOptimization(const std::shared_ptr<session::KernelGraph> &kernel_graph);
void CommonUnifyMindIROptimization(const std::shared_ptr<session::KernelGraph> &kernel_graph);
void AddDynamicShapeAttrPass(const std::shared_ptr<session::KernelGraph> &kernel_graph);
} // namespace opt
} // namespace mindspore



+ 40
- 0
mindspore/ccsrc/backend/optimizer/pass/add_dynamic_shape_attr.cc View File

@@ -0,0 +1,40 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "backend/optimizer/pass/add_dynamic_shape_attr.h"
#include "ir/anf.h"
#include "utils/convert_utils.h"
#include "backend/optimizer/common/optimizer.h"
#include "backend/session/anf_runtime_algorithm.h"

namespace mindspore {
namespace opt {

// For every matched node: if it is dynamic-shape, stamp kAttrIsDynamicShape on
// it and mark the owning KernelGraph dynamic. The node itself is returned
// unmodified; all effects are attribute side effects.
const AnfNodePtr AddDynamicShapeAttr::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node,
                                              const EquivPtr &) const {
  MS_EXCEPTION_IF_NULL(func_graph);
  MS_EXCEPTION_IF_NULL(node);
  // Static-shape nodes need no attribute; bail out early.
  if (!AnfAlgo::IsNodeDynamicShape(node)) {
    return node;
  }
  AnfAlgo::SetNodeAttr(kAttrIsDynamicShape, MakeValue(true), node);
  MS_LOG(INFO) << "Set Dynamic Shape Attr to Node:" << node->fullname_with_scope();
  auto owning_graph = func_graph->cast<KernelGraphPtr>();
  MS_EXCEPTION_IF_NULL(owning_graph);
  owning_graph->SetGraphDynamicAttr(true);
  return node;
}
} // namespace opt
} // namespace mindspore

+ 34
- 0
mindspore/ccsrc/backend/optimizer/pass/add_dynamic_shape_attr.h View File

@@ -0,0 +1,34 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_ADD_DYNAMIC_SHAPE_ATTR_H
#define MINDSPORE_ADD_DYNAMIC_SHAPE_ATTR_H
#include <string>
#include "ir/anf.h"
#include "utils/convert_utils.h"
#include "backend/optimizer/common/optimizer.h"
namespace mindspore {
namespace opt {
// Backend optimizer pass: marks each dynamic-shape node with the
// kAttrIsDynamicShape attribute and flags the enclosing KernelGraph as
// dynamic when such a node is found (see add_dynamic_shape_attr.cc).
class AddDynamicShapeAttr : public PatternProcessPass {
public:
// multigraph=true presumably lets the pass run over sub-graphs as well —
// confirm against the PatternProcessPass base class.
explicit AddDynamicShapeAttr(bool multigraph = true) : PatternProcessPass("add_dynamic_shape_attr", multigraph) {}
~AddDynamicShapeAttr() override = default;
// Returns the input node unchanged; attribute setting is a side effect.
const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override;
};
} // namespace opt
} // namespace mindspore

#endif // MINDSPORE_ADD_DYNAMIC_SHAPE_ATTR_H

+ 0
- 2
mindspore/ccsrc/backend/session/ascend_session.cc View File

@@ -541,8 +541,6 @@ GraphId AscendSession::CompileGraphImpl(NotNull<FuncGraphPtr> func_graph) {
graph->set_root_graph_id(root_graph->graph_id());
}
UnifyMindIR(root_graph);
// Update Graph Dynamic Shape Attr
UpdateAllGraphDynamicShapeAttr(all_graphs);
opt::BackendCommonOptimization(root_graph);
// empty graph dont entry to backend
if (root_graph->execution_order().empty()) {


+ 2
- 2
mindspore/ccsrc/backend/session/cpu_session.cc View File

@@ -32,6 +32,7 @@
#include "backend/optimizer/pass/replace_node_by_proxy.h"
#include "backend/optimizer/pass/erase_visit_attr.h"
#include "debug/anf_ir_dump.h"
#include "backend/optimizer/common/common_backend_optimization.h"
#include "debug/dump_proto.h"
#ifndef ENABLE_SECURITY
#include "debug/data_dump/dump_json_parser.h"
@@ -115,8 +116,7 @@ GraphId CPUSession::CompileGraphImpl(const AnfNodePtrList &lst, const AnfNodePtr
auto graph_id = graph_sum_;
auto graph = ConstructKernelGraph(lst, outputs);
MS_EXCEPTION_IF_NULL(graph);
UpdateGraphDynamicShapeAttr(NOT_NULL(graph));
graph->UpdateGraphDynamicAttr();
opt::AddDynamicShapeAttrPass(graph);
MS_LOG(INFO) << "Set kernel info";
SetKernelInfo(graph.get());
MS_LOG(INFO) << "Set kernel info end";


+ 1
- 3
mindspore/ccsrc/backend/session/gpu_session.cc View File

@@ -439,9 +439,7 @@ GraphId GPUSession::CompileGraphImpl(KernelGraphPtr graph) {
DumpIRProto(graph, "before_removeNop_" + std::to_string(graph->graph_id()));
}
#endif
// Update Graph Dynamic Shape Attr.
UpdateGraphDynamicShapeAttr(NOT_NULL(graph));
graph->UpdateGraphDynamicAttr();
opt::AddDynamicShapeAttrPass(graph);
const bool pynative_mode = context_ptr->get_param<int>(MS_CTX_EXECUTION_MODE) == kPynativeMode;
// Hide NopOp from execution graph in graph mode
if (!pynative_mode) {


+ 1
- 0
mindspore/ccsrc/backend/session/kernel_graph.h View File

@@ -273,6 +273,7 @@ class KernelGraph : public FuncGraph {
}
void RemoveNodeFromGraph(const AnfNodePtr &node);
void UpdateGraphDynamicAttr();
void SetGraphDynamicAttr(bool is_dynamic_shape) { is_dynamic_shape_ = is_dynamic_shape; }
bool is_dynamic_shape() const { return is_dynamic_shape_; }
void SetOptimizerFlag();
void SetInputNodes();


+ 0
- 24
mindspore/ccsrc/backend/session/session_basic.cc View File

@@ -1166,8 +1166,6 @@ KernelGraphPtr SessionBasic::ConstructKernelGraph(const AnfNodePtrList &lst, con
#endif

UnifyMindIR(graph);
// Update Graph Dynamic Shape Attr
UpdateGraphDynamicShapeAttr(NOT_NULL(graph));
UpdateGraphAquireGilAttr(NOT_NULL(graph));
if (common_opt) {
opt::BackendCommonOptimization(graph);
@@ -2330,28 +2328,6 @@ void SessionBasic::EraseValueNodeTensor(const std::vector<int64_t> &tensors_mask
*input_tensors = new_input_tensors;
}

// Tags dynamic-shape attrs on every graph, then rejects the combination of
// dynamic shape and multiple graphs (multiple graphs imply control flow,
// which dynamic shape does not support).
void SessionBasic::UpdateAllGraphDynamicShapeAttr(const std::vector<KernelGraphPtr> &all_graphs) {
bool is_dynamic = false;
for (const auto &graph : all_graphs) {
UpdateGraphDynamicShapeAttr(NOT_NULL(graph));
// Sticky OR: dynamic if any graph is dynamic.
is_dynamic = graph->is_dynamic_shape() || is_dynamic;
}
if (is_dynamic && all_graphs.size() > 1) {
MS_LOG(EXCEPTION)
<< "Dynamic shape is not supported with control flow(loop control statements and condition control statements).";
}
}

// Walks the graph's execution order, stamps kAttrIsDynamicShape on each node
// reported dynamic-shape, then refreshes the graph-level dynamic flag.
void SessionBasic::UpdateGraphDynamicShapeAttr(const NotNull<KernelGraphPtr> &root_graph) {
for (const auto &cnode : root_graph->execution_order()) {
if (AnfAlgo::IsNodeDynamicShape(cnode)) {
AnfAlgo::SetNodeAttr(kAttrIsDynamicShape, MakeValue(true), cnode);
MS_LOG(INFO) << "Set Dynamic Shape Attr to Node:" << cnode->fullname_with_scope();
}
}
// Recompute the graph's own is_dynamic_shape() from the node attrs just set.
root_graph->UpdateGraphDynamicAttr();
}

bool SessionBasic::IsGetNextGraph(const std::shared_ptr<KernelGraph> &kernel_graph, std::string *channel_name) {
MS_EXCEPTION_IF_NULL(kernel_graph);
for (const auto &kernel_node : kernel_graph->execution_order()) {


+ 0
- 2
mindspore/ccsrc/backend/session/session_basic.h View File

@@ -298,8 +298,6 @@ class SessionBasic : public std::enable_shared_from_this<SessionBasic> {
void AddParameterToGraphInputs(const std::vector<AnfNodePtr> &parameters, KernelGraph *graph);
void InitInternalOutputParameter(const AnfNodePtr &out_node, const AnfNodePtr &parameter);
AnfNodePtr FindPullNode(const AnfNodePtr &push_node, const std::vector<AnfNodePtr> &node_list);
void UpdateGraphDynamicShapeAttr(const NotNull<KernelGraphPtr> &root_graph);
void UpdateAllGraphDynamicShapeAttr(const std::vector<KernelGraphPtr> &all_graphs);
virtual std::shared_ptr<device::Bucket> CreateBucket(uint32_t bucket_id, uint32_t bucket_size) { return nullptr; }
void InitAllBucket(const KernelGraphPtr &graph, const device::DeviceContext *device_context = nullptr);
void AddGradAddrToBucket(const GraphId &graph_id, const std::vector<tensor::TensorPtr> &grad_tensor);


+ 1
- 11
mindspore/ccsrc/runtime/hardware/cpu/cpu_device_context.cc View File

@@ -98,7 +98,7 @@ DeviceAddressPtr CPUDeviceContext::CreateDeviceAddress(void *const device_ptr, s

void CPUDeviceContext::OptimizeGraph(const KernelGraphPtr &graph) const {
// Update Graph Dynamic Shape Attr.
UpdateGraphDynamicShapeAttr(NOT_NULL(graph));
opt::AddDynamicShapeAttrPass(graph);

SetOperatorInfo(graph->execution_order());
OptimizeGraphImpl(graph);
@@ -123,16 +123,6 @@ void CPUDeviceContext::OptimizeGraphImpl(const KernelGraphPtr &graph) const {
graph->SetExecOrderByDefault();
}

// CPU-backend copy of the per-node dynamic-shape tagging: marks each
// dynamic-shape node in execution order, then refreshes the graph-level flag.
void CPUDeviceContext::UpdateGraphDynamicShapeAttr(const NotNull<KernelGraphPtr> &graph) const {
for (const auto &cnode : graph->execution_order()) {
if (AnfAlgo::IsNodeDynamicShape(cnode)) {
AnfAlgo::SetNodeAttr(kAttrIsDynamicShape, MakeValue(true), cnode);
MS_LOG(INFO) << "Set Dynamic Shape Attr to Node:" << cnode->fullname_with_scope();
}
}
// Recompute the graph's dynamic flag from the attrs just written.
graph->UpdateGraphDynamicAttr();
}

namespace {
void SetControlOpInfo(const CNodePtr &kernel_node) {
MS_EXCEPTION_IF_NULL(kernel_node);


+ 0
- 3
mindspore/ccsrc/runtime/hardware/cpu/cpu_device_context.h View File

@@ -60,9 +60,6 @@ class CPUDeviceContext : public DeviceContext {
private:
DISABLE_COPY_AND_ASSIGN(CPUDeviceContext);

// Update Graph Dynamic Shape Attr.
void UpdateGraphDynamicShapeAttr(const NotNull<KernelGraphPtr> &graph) const;

void OptimizeGraphImpl(const KernelGraphPtr &graph) const;
#ifndef ENABLE_SECURITY
// Launch a kernel and record the elapsed time end to end.


+ 1
- 12
mindspore/ccsrc/runtime/hardware/gpu/gpu_device_context.cc View File

@@ -231,7 +231,7 @@ void GPUDeviceContext::OptimizeGraphWithoutDeviceInfo(const KernelGraphPtr &grap
FuseOperators(graph);

// Update Graph Dynamic Shape Attr.
UpdateGraphDynamicShapeAttr(NOT_NULL(graph));
opt::AddDynamicShapeAttrPass(graph);
}

void GPUDeviceContext::OptimizeGraphWithDeviceInfo(const KernelGraphPtr &graph) const {
@@ -292,17 +292,6 @@ void GPUDeviceContext::FuseOperators(const KernelGraphPtr &graph) const {
graph->SetExecOrderByDefault();
}

// GPU-backend copy of the per-node dynamic-shape tagging: marks each
// dynamic-shape node in execution order, then refreshes the graph-level flag.
void GPUDeviceContext::UpdateGraphDynamicShapeAttr(const NotNull<KernelGraphPtr> &graph) const {
for (const auto &cnode : graph->execution_order()) {
MS_EXCEPTION_IF_NULL(cnode);
if (AnfAlgo::IsNodeDynamicShape(cnode)) {
AnfAlgo::SetNodeAttr(kAttrIsDynamicShape, MakeValue(true), cnode);
MS_LOG(INFO) << "Set Dynamic Shape Attr to Node:" << cnode->fullname_with_scope();
}
}
// Recompute the graph's dynamic flag from the attrs just written.
graph->UpdateGraphDynamicAttr();
}

namespace {
void RunOpOptimize(const KernelGraphPtr &kernel_graph) {
MS_EXCEPTION_IF_NULL(kernel_graph);


+ 0
- 3
mindspore/ccsrc/runtime/hardware/gpu/gpu_device_context.h View File

@@ -83,9 +83,6 @@ class GPUDeviceContext : public DeviceContext {
// Operator fusion optimization.
void FuseOperators(const KernelGraphPtr &graph) const;

// Update Graph Dynamic Shape Attr.
void UpdateGraphDynamicShapeAttr(const NotNull<KernelGraphPtr> &graph) const;

bool BindDeviceToCurrentThread() const;
#ifndef ENABLE_SECURITY
// Launch a kernel and record the elapsed time end to end.


Loading…
Cancel
Save