From ddae425e0ca05e34f3f41ffc181994749ed29250 Mon Sep 17 00:00:00 2001
From: John Tzanakakis
Date: Thu, 22 Apr 2021 16:43:44 -0400
Subject: [PATCH] use host instead of device shape for debugger

---
 mindspore/ccsrc/debug/debugger/debugger.cc         | 10 ++--------
 mindspore/ccsrc/debug/debugger/debugger.h          |  1 +
 .../ccsrc/runtime/device/gpu/gpu_kernel_runtime.cc | 10 ++--------
 3 files changed, 5 insertions(+), 16 deletions(-)

diff --git a/mindspore/ccsrc/debug/debugger/debugger.cc b/mindspore/ccsrc/debug/debugger/debugger.cc
index 622b674133..dac8872f60 100644
--- a/mindspore/ccsrc/debug/debugger/debugger.cc
+++ b/mindspore/ccsrc/debug/debugger/debugger.cc
@@ -1145,10 +1145,7 @@ void Debugger::LoadSingleAnfnode(const AnfNodePtr &anf_node, const size_t output
   }
   auto format = kOpFormat_DEFAULT;
   string tensor_name = node_name + ':' + "0";
-  ShapeVector int_shapes;
-  auto shape = AnfAlgo::GetOutputDeviceShape(anf_node, output_index);
-  (void)std::transform(shape.begin(), shape.end(), std::back_inserter(int_shapes),
-                       [](size_t inner_item) { return SizeToInt(inner_item); });
+  ShapeVector int_shapes = trans::GetRuntimePaddingShape(anf_node, output_index);
   bool keep_prev;
   if (anf_node->isa<Parameter>()) {
     keep_prev = true;
@@ -1210,10 +1207,7 @@ void Debugger::LoadGraphOutputs() {
       }
       auto format = kOpFormat_DEFAULT;
       string tensor_name = kernel_name + ':' + std::to_string(j);
-      ShapeVector int_shapes;
-      auto shape = AnfAlgo::GetOutputDeviceShape(node, j);
-      (void)std::transform(shape.begin(), shape.end(), std::back_inserter(int_shapes),
-                           [](size_t inner_item) { return SizeToInt(inner_item); });
+      ShapeVector int_shapes = trans::GetRuntimePaddingShape(node, j);
       auto ret = addr->LoadMemToHost(tensor_name, exec_order, format, int_shapes, type, j, false);
       if (!ret) {
         MS_LOG(ERROR) << "LoadMemToHost:"
diff --git a/mindspore/ccsrc/debug/debugger/debugger.h b/mindspore/ccsrc/debug/debugger/debugger.h
index 3676ff4d78..db6eda97b2 100644
--- a/mindspore/ccsrc/debug/debugger/debugger.h
+++ b/mindspore/ccsrc/debug/debugger/debugger.h
@@ -25,6 +25,7 @@
 #include "backend/session/kernel_graph.h"
 #include "debug/debugger/grpc_client.h"
 #include "debug/debug_services.h"
+#include "common/trans.h"
 
 using debugger::Chunk;
 using debugger::DataType;
diff --git a/mindspore/ccsrc/runtime/device/gpu/gpu_kernel_runtime.cc b/mindspore/ccsrc/runtime/device/gpu/gpu_kernel_runtime.cc
index 9c2116df31..ae3156ddb4 100644
--- a/mindspore/ccsrc/runtime/device/gpu/gpu_kernel_runtime.cc
+++ b/mindspore/ccsrc/runtime/device/gpu/gpu_kernel_runtime.cc
@@ -157,10 +157,7 @@ void LoadKernelData(Debugger *debugger, const CNodePtr &kernel,
     auto format = kOpFormat_DEFAULT;
     auto gpu_addr = std::make_unique<GPUDeviceAddress>(addr->addr, addr->size, format, type);
     string input_tensor_name = input_kernel_name + ':' + "0";
-    ShapeVector int_shapes;
-    auto shape = AnfAlgo::GetOutputDeviceShape(input_kernel, PARAMETER_OUTPUT_INDEX);
-    (void)std::transform(shape.begin(), shape.end(), std::back_inserter(int_shapes),
-                         [](size_t inner_item) { return SizeToInt(inner_item); });
+    ShapeVector int_shapes = trans::GetRuntimePaddingShape(input_kernel, PARAMETER_OUTPUT_INDEX);
     auto ret = gpu_addr->LoadMemToHost(input_tensor_name, exec_order, format, int_shapes, type, 0, true);
     if (!ret) {
       MS_LOG(ERROR) << "LoadMemToHost:"
@@ -187,10 +184,7 @@ void LoadKernelData(Debugger *debugger, const CNodePtr &kernel,
     auto format = kOpFormat_DEFAULT;
     auto gpu_addr = std::make_unique<GPUDeviceAddress>(addr->addr, addr->size, format, type);
     string tensor_name = kernel_name + ':' + std::to_string(j);
-    ShapeVector int_shapes;
-    auto shape = AnfAlgo::GetOutputDeviceShape(kernel, j);
-    (void)std::transform(shape.begin(), shape.end(), std::back_inserter(int_shapes),
-                         [](size_t inner_item) { return SizeToInt(inner_item); });
+    ShapeVector int_shapes = trans::GetRuntimePaddingShape(kernel, j);
     auto ret = gpu_addr->LoadMemToHost(tensor_name, exec_order, format, int_shapes, type, j, false);
     if (!ret) {
       MS_LOG(ERROR) << "LoadMemToHost:"