From c88528990bc52b530cb24db7a48ba71a413a6388 Mon Sep 17 00:00:00 2001
From: tanghuikang
Date: Wed, 21 Apr 2021 15:44:53 +0800
Subject: [PATCH] PyNative support dynamic shape on cpu

---
 .../ccsrc/backend/session/cpu_session.cc      | 27 ++++++++++++++-----
 mindspore/ccsrc/backend/session/cpu_session.h |  3 ++-
 2 files changed, 23 insertions(+), 7 deletions(-)

diff --git a/mindspore/ccsrc/backend/session/cpu_session.cc b/mindspore/ccsrc/backend/session/cpu_session.cc
index b0c6d52ea3..0315667367 100644
--- a/mindspore/ccsrc/backend/session/cpu_session.cc
+++ b/mindspore/ccsrc/backend/session/cpu_session.cc
@@ -191,16 +191,28 @@ void CPUSession::BuildOpImpl(const OpRunInfo &op_run_info, const GraphInfo &grap
   run_op_graphs_[graph_info] = kernel_graph;
 }
 
-void CPUSession::SetOutputFlags(const VectorRef &base_ref, std::vector<tensor::TensorPtr> *outputs_tensors) {
+void CPUSession::SetOutputFlags(const VectorRef &base_ref) {
   for (size_t i = 0; i < base_ref.size(); ++i) {
     if (utils::isa<VectorRef>(base_ref[i])) {
       auto ref_iter = utils::cast<VectorRef>(base_ref[i]);
-      SetOutputFlags(ref_iter, outputs_tensors);
+      SetOutputFlags(ref_iter);
     } else if (utils::isa<tensor::TensorPtr>(base_ref[i])) {
       auto tensor_ptr = utils::cast<std::shared_ptr<tensor::Tensor>>(base_ref[i]);
       tensor_ptr->SetNeedWait(false);
       tensor_ptr->data_sync(false);
-      outputs_tensors->push_back(tensor_ptr);
+    }
+  }
+}
+
+void CPUSession::UpdateDynamicOutputShape(const std::map<tensor::TensorPtr, session::KernelWithIndex> &tensor_to_node) {
+  for (const auto &tensor_node : tensor_to_node) {
+    if (AnfAlgo::IsDynamicShape(tensor_node.second.first)) {
+      const auto &kernel = tensor_node.second.first;
+      const auto &output_index = tensor_node.second.second;
+      const auto &shape = AnfAlgo::GetOutputInferShape(kernel, output_index);
+      std::vector<int64_t> refresh_shape;
+      (void)std::copy(shape.begin(), shape.end(), std::back_inserter(refresh_shape));
+      tensor_node.first->set_shape(refresh_shape);
     }
   }
 }
@@ -236,9 +248,12 @@ void CPUSession::RunOpImpl(const GraphInfo &graph_info, OpRunInfo *op_run_info,
   if (!ret) {
     MS_LOG(EXCEPTION) << "Run Op failed";
   }
-
-  std::vector<tensor::TensorPtr> output_tensors;
-  SetOutputFlags(*outputs, &output_tensors);
+  UpdateDynamicOutputShape(tensor_to_node);
+  // update output abstract of dynamic op to op_run_info
+  if (op_run_info->is_dynamic_shape) {
+    UpdateOutputAbstract(kernel_graph, op_run_info);
+  }
+  SetOutputFlags(*outputs);
   runtime_.RunOpClearMemory(kernel_graph.get());
   MS_LOG(INFO) << "Run Op end";
 }
diff --git a/mindspore/ccsrc/backend/session/cpu_session.h b/mindspore/ccsrc/backend/session/cpu_session.h
index 53e99e0a5e..e573db2519 100644
--- a/mindspore/ccsrc/backend/session/cpu_session.h
+++ b/mindspore/ccsrc/backend/session/cpu_session.h
@@ -55,7 +55,8 @@ class CPUSession : public SessionBasic {
   void Reorder(std::vector<CNodePtr> *node_list);
   void SetKernelInfo(const KernelGraph *kernel_graph);
   void BuildKernel(const KernelGraph *kernel_graph);
-  void SetOutputFlags(const VectorRef &base_ref, std::vector<tensor::TensorPtr> *outputs_tensors);
+  void SetOutputFlags(const VectorRef &base_ref);
+  void UpdateDynamicOutputShape(const std::map<tensor::TensorPtr, session::KernelWithIndex> &tensor_to_node);
   device::cpu::CPUKernelRuntime runtime_;
 };
 MS_REG_SESSION(kCPUDevice, CPUSession);
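
Note (illustrative, not part of the patch): the standalone sketch below mimics the shape-refresh loop that UpdateDynamicOutputShape introduces, using simplified stand-in types so it compiles on its own. The Tensor, Kernel, and inferred_output_shapes names are hypothetical placeholders for the MindSpore classes (tensor::Tensor, the kernel node, and AnfAlgo::GetOutputInferShape); only the pattern of copying a freshly inferred size_t shape into the tensor's int64_t shape is taken from the patch.

// illustrative_dynamic_shape_refresh.cc -- standalone sketch, not MindSpore code.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <iterator>
#include <map>
#include <memory>
#include <utility>
#include <vector>

// Hypothetical stand-in for tensor::Tensor: only the shape is modeled.
struct Tensor {
  std::vector<int64_t> shape;
  void set_shape(const std::vector<int64_t> &s) { shape = s; }
};

// Hypothetical stand-in for a kernel node whose output shape is only known
// after execution (the dynamic-shape case the patch handles).
struct Kernel {
  bool is_dynamic_shape = false;
  std::vector<std::vector<size_t>> inferred_output_shapes;  // one entry per output index
};

using TensorPtr = std::shared_ptr<Tensor>;
using KernelWithIndex = std::pair<std::shared_ptr<Kernel>, size_t>;

// Mirror of the refresh loop: for every output tensor produced by a
// dynamic-shape kernel, copy the inferred size_t shape into the int64_t
// shape vector the tensor stores.
void UpdateDynamicOutputShape(const std::map<TensorPtr, KernelWithIndex> &tensor_to_node) {
  for (const auto &tensor_node : tensor_to_node) {
    const auto &kernel = tensor_node.second.first;
    const auto &output_index = tensor_node.second.second;
    if (!kernel->is_dynamic_shape) {
      continue;
    }
    const auto &shape = kernel->inferred_output_shapes[output_index];
    std::vector<int64_t> refresh_shape;
    (void)std::copy(shape.begin(), shape.end(), std::back_inserter(refresh_shape));
    tensor_node.first->set_shape(refresh_shape);
  }
}

int main() {
  auto kernel = std::make_shared<Kernel>();
  kernel->is_dynamic_shape = true;
  kernel->inferred_output_shapes = {{2, 3}};  // shape discovered only at run time

  auto out = std::make_shared<Tensor>();
  std::map<TensorPtr, KernelWithIndex> tensor_to_node{{out, {kernel, 0}}};

  UpdateDynamicOutputShape(tensor_to_node);
  std::cout << "refreshed rank: " << out->shape.size() << std::endl;  // prints 2
  return 0;
}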