Browse Source

fix kernel-by-kernel and MindRT parameter bug

r1.7
Parastoo Ashtari 4 years ago
parent
commit
c61e56cbe2
7 changed files with 11 additions and 9 deletions
  1. +2
    -1
      mindspore/ccsrc/debug/debugger/debugger.cc
  2. +2
    -1
      mindspore/ccsrc/debug/debugger/debugger_utils.cc
  3. +2
    -2
      mindspore/ccsrc/plugin/device/ascend/hal/device/ascend_device_address.cc
  4. +1
    -1
      mindspore/ccsrc/plugin/device/ascend/hal/device/ascend_device_address.h
  5. +2
    -2
      mindspore/ccsrc/plugin/device/gpu/hal/device/gpu_device_address.cc
  6. +1
    -1
      mindspore/ccsrc/plugin/device/gpu/hal/device/gpu_device_address.h
  7. +1
    -1
      mindspore/ccsrc/runtime/device/device_address.h

+ 2
- 1
mindspore/ccsrc/debug/debugger/debugger.cc View File

@@ -1574,7 +1574,8 @@ void Debugger::LoadSingleParameterMindRT(const AnfNodePtr &node) {
debug_services_->MoveTensorCurrentToPrev(tensor_name);
}
// Keep_prev is True for parameters.
bool ret = device_addr->LoadMemToHost(tensor_name, 0, format, int_shapes, type, 0, true, root_graph_id);
// force update for parameters.
bool ret = device_addr->LoadMemToHost(tensor_name, 0, format, int_shapes, type, 0, true, root_graph_id, true);

if (!ret) {
MS_LOG(ERROR) << "LoadMemToHost:"


+ 2
- 1
mindspore/ccsrc/debug/debugger/debugger_utils.cc View File

@@ -155,7 +155,8 @@ bool CheckReadData(const CNodePtr &cnode) {
if (dump_json_parser.NeedDump(kernel_name)) {
read_data = true;
}
} else if (debugger->debugger_enabled()) {
}
if (debugger->debugger_enabled()) {
read_data = debugger->ReadNodeDataRequired(cnode);
}
return read_data;


+ 2
- 2
mindspore/ccsrc/plugin/device/ascend/hal/device/ascend_device_address.cc View File

@@ -649,11 +649,11 @@ bool AscendDeviceAddress::DumpMemToFile(const std::string &filepath, const std::
*/
bool AscendDeviceAddress::LoadMemToHost(const std::string &tensor_name, int execution_order, const std::string &,
const ShapeVector &host_shape, TypeId host_type, size_t slot, bool keep_prev,
uint32_t root_graph_id) const {
uint32_t root_graph_id, bool force_update) const {
bool ret = false;
auto debugger = Debugger::GetInstance();
MS_EXCEPTION_IF_NULL(debugger);
if (debugger->TensorExistsInCurrent(tensor_name)) {
if (debugger->TensorExistsInCurrent(tensor_name) && !force_update) {
MS_LOG(INFO) << tensor_name << " already loaded for this step so not loading it again.";
return true;
}


+ 1
- 1
mindspore/ccsrc/plugin/device/ascend/hal/device/ascend_device_address.h View File

@@ -62,7 +62,7 @@ class AscendDeviceAddress : public DeviceAddress {
#ifdef ENABLE_DEBUGGER
bool LoadMemToHost(const std::string &tensor_name, int execution_order, const std::string &host_fmt,
const ShapeVector &host_shape, TypeId host_type, size_t slot, bool keep_prev,
uint32_t root_graph_id = 0) const override;
uint32_t root_graph_id = 0, bool force_update = 0) const override;
#endif

private:


+ 2
- 2
mindspore/ccsrc/plugin/device/gpu/hal/device/gpu_device_address.cc View File

@@ -185,14 +185,14 @@ GPUDeviceAddress::~GPUDeviceAddress() { ClearDeviceMemory(); }
#ifdef ENABLE_DEBUGGER
bool GPUDeviceAddress::LoadMemToHost(const std::string &tensor_name, int execution_order, const std::string &host_fmt,
const ShapeVector &host_shape, TypeId host_type, size_t slot, bool keep_prev,
uint32_t root_graph_id) const {
uint32_t root_graph_id, bool force_update) const {
bool ret = false;
if (size_ == 0) {
return true;
}

MS_EXCEPTION_IF_NULL(Debugger::GetInstance());
if (Debugger::GetInstance()->TensorExistsInCurrent(tensor_name)) {
if (Debugger::GetInstance()->TensorExistsInCurrent(tensor_name) && !force_update) {
MS_LOG(INFO) << tensor_name << " already loaded for this step so not loading it again.";
return true;
}


+ 1
- 1
mindspore/ccsrc/plugin/device/gpu/hal/device/gpu_device_address.h View File

@@ -56,7 +56,7 @@ class GPUDeviceAddress : public DeviceAddress {
#ifdef ENABLE_DEBUGGER
bool LoadMemToHost(const std::string &tensor_name, int execution_order, const std::string &host_fmt,
const ShapeVector &host_shape, TypeId host_type, size_t slot, bool keep_prev,
uint32_t root_graph_id = 0) const override;
uint32_t root_graph_id = 0, bool force_update = 0) const override;
#endif
private:


+ 1
- 1
mindspore/ccsrc/runtime/device/device_address.h View File

@@ -141,7 +141,7 @@ class DeviceAddress : public mindspore::DeviceSync {
#ifdef ENABLE_DEBUGGER
virtual bool LoadMemToHost(const std::string &tensor_name, int execution_order, const std::string &host_fmt,
const ShapeVector &host_shape, TypeId host_type, size_t slot, bool keep_prev,
uint32_t root_graph_id = 0) const {
uint32_t root_graph_id = 0, bool force_update = 0) const {
return true;
}
#endif


Loading…
Cancel
Save