
fix bug.

pull/1506/head
zhaozhixuan 4 years ago
parent
commit 3ab7d135d5
90 changed files with 2861 additions and 570 deletions
  1. ge/CMakeLists.txt (+2, -0)
  2. ge/common/debug/memory_dumper.cc (+1, -1)
  3. ge/common/dump/dump_op.cc (+5, -1)
  4. ge/common/dump/exception_dumper.cc (+241, -0)
  5. ge/common/dump/exception_dumper.h (+48, -0)
  6. ge/common/helper/model_helper.cc (+1, -0)
  7. ge/executor/CMakeLists.txt (+1, -0)
  8. ge/graph/build/memory/graph_mem_assigner.cc (+79, -13)
  9. ge/graph/build/memory/graph_mem_assigner.h (+4, -1)
  10. ge/graph/execute/graph_execute.cc (+77, -3)
  11. ge/graph/execute/graph_execute.h (+8, -2)
  12. ge/graph/load/graph_loader.cc (+0, -1)
  13. ge/graph/load/model_manager/data_dumper.cc (+0, -172)
  14. ge/graph/load/model_manager/data_dumper.h (+0, -8)
  15. ge/graph/load/model_manager/data_inputer.h (+2, -0)
  16. ge/graph/load/model_manager/davinci_model.cc (+49, -4)
  17. ge/graph/load/model_manager/davinci_model.h (+20, -2)
  18. ge/graph/load/model_manager/model_manager.cc (+35, -9)
  19. ge/graph/load/model_manager/model_manager.h (+4, -2)
  20. ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc (+1, -0)
  21. ge/graph/load/model_manager/task_info/kernel_task_info.cc (+1, -0)
  22. ge/graph/manager/graph_manager.cc (+350, -168)
  23. ge/graph/manager/graph_manager.h (+42, -0)
  24. ge/graph/manager/graph_manager_utils.cc (+9, -0)
  25. ge/graph/manager/graph_manager_utils.h (+16, -0)
  26. ge/graph/passes/dimension_adjust_pass.cc (+12, -0)
  27. ge/graph/passes/net_output_pass.cc (+9, -2)
  28. ge/graph/passes/pass_utils.cc (+3, -0)
  29. ge/graph/passes/same_transdata_breadth_fusion_pass.cc (+104, -0)
  30. ge/graph/passes/save_pass.cc (+5, -1)
  31. ge/graph/passes/set_input_output_offset_pass.cc (+25, -0)
  32. ge/graph/passes/snapshot_pass.cc (+2, -0)
  33. ge/graph/passes/stop_gradient_pass.cc (+3, -0)
  34. ge/graph/passes/subexpression_migration_pass.cc (+22, -0)
  35. ge/graph/passes/subgraph_const_migration_pass.cc (+17, -0)
  36. ge/graph/passes/subgraph_pass.cc (+21, -0)
  37. ge/graph/passes/switch_data_edges_bypass.cc (+20, -0)
  38. ge/graph/passes/switch_dead_branch_elimination.cc (+13, -0)
  39. ge/graph/passes/switch_logic_remove_pass.cc (+12, -0)
  40. ge/graph/passes/switch_to_stream_switch_pass.cc (+112, -16)
  41. ge/graph/passes/transop_breadth_fusion_pass.cc (+4, -1)
  42. ge/graph/passes/transop_depth_fusion_pass.cc (+6, -0)
  43. ge/graph/passes/transop_nearby_allreduce_fusion_pass.cc (+11, -0)
  44. ge/graph/passes/transop_symmetry_elimination_pass.cc (+24, -0)
  45. ge/graph/passes/transop_without_reshape_fusion_pass.cc (+107, -7)
  46. ge/graph/passes/transpose_transdata_pass.cc (+11, -0)
  47. ge/graph/passes/unused_args_clean_pass.cc (+6, -0)
  48. ge/graph/passes/unused_const_pass.cc (+2, -0)
  49. ge/graph/passes/var_is_initialized_op_pass.cc (+36, -0)
  50. ge/graph/passes/variable_op_pass.cc (+38, -0)
  51. ge/graph/passes/variable_ref_delete_op_pass.cc (+10, -0)
  52. ge/hybrid/executor/hybrid_execution_context.cc (+22, -0)
  53. ge/hybrid/executor/hybrid_execution_context.h (+4, -0)
  54. ge/hybrid/executor/hybrid_model_async_executor.cc (+6, -1)
  55. ge/hybrid/executor/hybrid_model_async_executor.h (+10, -0)
  56. ge/hybrid/executor/hybrid_model_executor.cc (+12, -1)
  57. ge/hybrid/executor/hybrid_model_pipeline_executor.cc (+8, -0)
  58. ge/hybrid/executor/worker/execution_engine.cc (+42, -0)
  59. ge/hybrid/hybrid_davinci_model.cc (+55, -3)
  60. ge/hybrid/hybrid_davinci_model.h (+10, -0)
  61. ge/hybrid/hybrid_davinci_model_stub.cc (+20, -0)
  62. ge/hybrid/model/hybrid_model_builder.cc (+11, -5)
  63. ge/hybrid/node_executor/aicore/aicore_node_executor.cc (+2, -0)
  64. ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc (+2, -0)
  65. ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc (+31, -15)
  66. ge/hybrid/node_executor/compiledsubgraph/known_node_executor.h (+1, -0)
  67. ge/hybrid/node_executor/controlop/control_op_executor.cc (+30, -33)
  68. ge/hybrid/node_executor/ge_local/ge_local_node_executor.cc (+33, -13)
  69. ge/hybrid/node_executor/hccl/hccl_node_executor.cc (+67, -25)
  70. ge/hybrid/node_executor/host_cpu/host_cpu_node_executor.cc (+13, -5)
  71. ge/hybrid/node_executor/host_cpu/kernel/assign_kernel.cc (+4, -2)
  72. ge/hybrid/node_executor/host_cpu/kernel/data_kernel.cc (+2, -1)
  73. ge/hybrid/node_executor/host_cpu/kernel/random_uniform_kernel.cc (+20, -10)
  74. ge/hybrid/node_executor/host_cpu/kernel/variable_kernel.cc (+3, -2)
  75. ge/hybrid/node_executor/host_cpu/kernel_factory.cc (+4, -1)
  76. ge/hybrid/node_executor/node_executor.cc (+17, -9)
  77. ge/hybrid/node_executor/partitioned_call/partitioned_call_node_executor.cc (+3, -4)
  78. ge/hybrid/node_executor/task_context.cc (+67, -21)
  79. ge/model/ge_model.cc (+10, -0)
  80. ge/model/ge_model.h (+6, -0)
  81. ge/model/ge_root_model.h (+27, -1)
  82. tests/ut/ge/CMakeLists.txt (+5, -0)
  83. tests/ut/ge/common/dump_exception_unittest.cc (+54, -0)
  84. tests/ut/ge/graph/build/mem_assigner_unittest.cc (+41, -1)
  85. tests/ut/ge/graph/execute/graph_execute_unittest.cc (+129, -0)
  86. tests/ut/ge/graph/load/davinci_model_unittest.cc (+12, -0)
  87. tests/ut/ge/graph/manager/graph_manager_unittest.cc (+375, -0)
  88. tests/ut/ge/graph/passes/dimension_adjust_pass_unittest.cc (+21, -1)
  89. tests/ut/ge/graph/passes/net_output_pass_unittest.cc (+17, -0)
  90. tests/ut/ge/hybrid/ge_hybrid_unittest.cc (+34, -2)

ge/CMakeLists.txt (+2, -0)

@@ -108,6 +108,7 @@ set(TRAIN_SRC_LIST
"common/helper/model_cache_helper.cc"
"common/profiling/profiling_manager.cc"
"common/dump/dump_manager.cc"
"common/dump/exception_dumper.cc"
"common/dump/dump_properties.cc"
"common/dump/opdebug_register.cc"
"common/dump/dump_op.cc"
@@ -437,6 +438,7 @@ set(INFER_SRC_LIST
"common/formats/formats.cc"
"common/profiling/profiling_manager.cc"
"common/dump/dump_properties.cc"
"common/dump/exception_dumper.cc"
"common/dump/dump_manager.cc"
"common/dump/dump_op.cc"
"common/dump/opdebug_register.cc"


ge/common/debug/memory_dumper.cc (+1, -1)

@@ -161,7 +161,7 @@ int MemoryDumper::OpenFile(const char *filename) {
// Using the O_EXCL, if the file already exists, return failed to avoid a privilege escalation vulnerability.
mmMode_t mode = M_IRUSR | M_IWUSR;

int32_t fd = mmOpen2(real_path.c_str(), M_RDWR | M_CREAT | O_TRUNC, mode);
int32_t fd = mmOpen2(real_path.c_str(), M_RDWR | M_CREAT | M_APPEND, mode);
if (fd == EN_ERROR || fd == EN_INVALID_PARAM) {
GELOGE(kInvalidFd, "[Open][File]Failed. errno = %d, error:%s, filename:%s.",
fd, strerror(errno), filename);


ge/common/dump/dump_op.cc (+5, -1)

@@ -204,6 +204,10 @@ Status DumpOp::ExecutorDumpOp(aicpu::dump::OpMappingInfo &op_mapping_info) {
}

Status DumpOp::SetDumpModelName(aicpu::dump::OpMappingInfo &op_mapping_info) {
if (dynamic_model_name_.empty() && dynamic_om_name_.empty()) {
GELOGI("Single op dump, no need set model name");
return SUCCESS;
}
std::set<std::string> model_list = dump_properties_.GetAllDumpModel();
bool not_find_by_omname = model_list.find(dynamic_om_name_) == model_list.end();
bool not_find_by_modelname = model_list.find(dynamic_model_name_) == model_list.end();
@@ -219,7 +223,7 @@ Status DumpOp::SetDumpModelName(aicpu::dump::OpMappingInfo &op_mapping_info) {
}
}
if (!dump_model_name.empty() && dump_properties_.IsDumpOpen()) {
GELOGD("Dump model name is %s", dump_model_name.c_str());
GELOGI("Dump model name is %s", dump_model_name.c_str());
op_mapping_info.set_model_name(dump_model_name);
}
return SUCCESS;


ge/common/dump/exception_dumper.cc (+241, -0)

@@ -0,0 +1,241 @@
/**
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "common/dump/exception_dumper.h"

#include "common/ge/datatype_util.h"
#include "common/debug/memory_dumper.h"
#include "framework/common/debug/log.h"
#include "graph/manager/util/debug.h"
#include "graph/utils/tensor_utils.h"
#include "graph/load/model_manager/model_utils.h"
#include "proto/dump_task.pb.h"

namespace {
static uint64_t GetNowTime() {
uint64_t ret = 0;
mmTimeval tv;
if (mmGetTimeOfDay(&tv, nullptr) == 0) {
ret = tv.tv_sec * 1000000ULL + tv.tv_usec;
}

return ret;
}

static void ReplaceStringElem(std::string &str) {
for_each(str.begin(), str.end(), [](char &ch) {
if ((ch == ' ') || (ch == '.') || (ch == '/') || (ch == '\\')) {
ch = '_';
}
});
}

static void SetDumpData(const ge::OpDescInfo &op_desc_info, toolkit::dumpdata::DumpData &dump_data) {
dump_data.set_version("2.0");
dump_data.set_dump_time(GetNowTime());
dump_data.set_op_name(op_desc_info.op_name);
for (size_t i = 0; i < op_desc_info.input_format.size(); ++i) {
toolkit::dumpdata::OpInput input;
input.set_data_type(toolkit::dumpdata::OutputDataType(
ge::DataTypeUtil::GetIrDataType(op_desc_info.input_data_type[i])));
input.set_format(toolkit::dumpdata::OutputFormat(op_desc_info.input_format[i]));
for (auto dim : op_desc_info.input_shape[i]) {
input.mutable_shape()->add_dim(dim);
}
input.set_size(op_desc_info.input_size[i]);
GELOGI("[Set][DumpData] The input size int exception is %ld", op_desc_info.input_size[i]);
dump_data.mutable_input()->Add(std::move(input));
}

for (size_t j = 0; j < op_desc_info.output_format.size(); ++j) {
toolkit::dumpdata::OpOutput output;
output.set_data_type(toolkit::dumpdata::OutputDataType(
ge::DataTypeUtil::GetIrDataType(op_desc_info.output_data_type[j])));
output.set_format(toolkit::dumpdata::OutputFormat(op_desc_info.output_format[j]));
for (auto dim : op_desc_info.output_shape[j]) {
output.mutable_shape()->add_dim(dim);
}
output.set_size(op_desc_info.output_size[j]);
GELOGI("[Set][DumpData] The output size int exception is %ld", op_desc_info.output_size[j]);
dump_data.mutable_output()->Add(std::move(output));
}
}
} // namespace

namespace ge {
ExceptionDumper::~ExceptionDumper() {}

void ExceptionDumper::SaveDumpOpInfo(const OpDescPtr &op, uint32_t task_id, uint32_t stream_id,
vector<void *> &input_addrs, vector<void *> &output_addrs) {
OpDescInfo op_desc_info;
SaveOpDescInfo(op, task_id, stream_id, op_desc_info);
op_desc_info.input_addrs = input_addrs;
op_desc_info.output_addrs = output_addrs;
op_desc_info_.emplace_back(std::move(op_desc_info));
}

void ExceptionDumper::SaveDumpOpInfo(const RuntimeParam &model_param, const OpDescPtr &op,
uint32_t task_id, uint32_t stream_id) {
OpDescInfo op_desc_info;
SaveOpDescInfo(op, task_id, stream_id, op_desc_info);
op_desc_info.input_addrs = ModelUtils::GetInputDataAddrs(model_param, op);
op_desc_info.output_addrs = ModelUtils::GetOutputDataAddrs(model_param, op);
op_desc_info_.emplace_back(std::move(op_desc_info));
}

void ExceptionDumper::SaveOpDescInfo(const OpDescPtr &op, uint32_t task_id, uint32_t stream_id,
OpDescInfo &op_desc_info) {
if (op == nullptr) {
GELOGW("[Save][OpExceptionInfo] op desc ptr is null.");
return;
}
GELOGD("[Save][OpExceptionInfo] Start to save dump op [%s] info of task_id: %u, stream_id: %u",
op->GetName().c_str(), task_id, stream_id);
op_desc_info.op_name = op->GetName();
op_desc_info.op_type = op->GetType();
op_desc_info.task_id = task_id;
op_desc_info.stream_id = stream_id;
for (size_t i = 0; i < op->GetAllInputsSize(); ++i) {
GeTensorDescPtr input_tensor_desc = op->MutableInputDesc(i);
if (input_tensor_desc == nullptr) {
continue;
}
op_desc_info.input_format.emplace_back(input_tensor_desc->GetFormat());
op_desc_info.input_shape.emplace_back(input_tensor_desc->GetShape().GetDims());
op_desc_info.input_data_type.emplace_back(input_tensor_desc->GetDataType());
int64_t input_size = 0;

if (TensorUtils::GetTensorSizeInBytes(*input_tensor_desc, input_size) != SUCCESS) {
GELOGW("[Save][OpExceptionInfo] Op [%s] get input size failed.", op->GetName().c_str());
return;
}
GELOGD("[Save][OpExceptionInfo] Save dump op info, the input size is %ld", input_size);
op_desc_info.input_size.emplace_back(input_size);
}
for (size_t j = 0; j < op->GetOutputsSize(); ++j) {
GeTensorDescPtr output_tensor_desc = op->MutableOutputDesc(j);
if (output_tensor_desc == nullptr) {
continue;
}
op_desc_info.output_format.emplace_back(output_tensor_desc->GetFormat());
op_desc_info.output_shape.emplace_back(output_tensor_desc->GetShape().GetDims());
op_desc_info.output_data_type.emplace_back(output_tensor_desc->GetDataType());
int64_t output_size = 0;
if (TensorUtils::GetTensorSizeInBytes(*output_tensor_desc, output_size) != SUCCESS) {
GELOGW("[Save][OpExceptionInfo] Op [%s] get output size failed.", op->GetName().c_str());
return;
}
GELOGD("[Save][OpExceptionInfo] Save dump op info, the output size is %ld.", output_size);
op_desc_info.output_size.emplace_back(output_size);
}
}

Status ExceptionDumper::DumpExceptionInfo(const std::vector<rtExceptionInfo> &exception_infos) const {
GELOGI("[Dump][Exception] Start to dump exception info");
for (const rtExceptionInfo &iter : exception_infos) {
OpDescInfo op_desc_info;
if (GetOpDescInfo(iter.streamid, iter.taskid, op_desc_info)) {
toolkit::dumpdata::DumpData dump_data;
SetDumpData(op_desc_info, dump_data);
uint64_t now_time = GetNowTime();
std::string op_name = op_desc_info.op_name;
std::string op_type = op_desc_info.op_type;
ReplaceStringElem(op_name);
ReplaceStringElem(op_type);
string dump_file_path =
"./" + op_type + "." + op_name + "." + std::to_string(op_desc_info.task_id) + "." + std::to_string(now_time);
GELOGI("[Dump][Exception] The exception dump file path is %s", dump_file_path.c_str());

uint64_t proto_size = dump_data.ByteSizeLong();
std::unique_ptr<char[]> proto_msg(new (std::nothrow) char[proto_size]);
bool ret = dump_data.SerializeToArray(proto_msg.get(), proto_size);
if (!ret || proto_size == 0) {
REPORT_INNER_ERROR("E19999", "Serialize proto to string fail");
GELOGE(PARAM_INVALID, "[Dump][Exception] Dump data proto serialize failed");
return PARAM_INVALID;
}

GE_CHK_STATUS_RET(MemoryDumper::DumpToFile(dump_file_path.c_str(), &proto_size, sizeof(uint64_t)),
"Failed to dump proto size");
GE_CHK_STATUS_RET(MemoryDumper::DumpToFile(dump_file_path.c_str(), proto_msg.get(), proto_size),
"Failed to dump proto msg");
if (DumpExceptionInput(op_desc_info, dump_file_path) != SUCCESS) {
GELOGE(PARAM_INVALID, "[Dump][Exception] Dump exception input failed");
return PARAM_INVALID;
}

if (DumpExceptionOutput(op_desc_info, dump_file_path) != SUCCESS) {
GELOGE(PARAM_INVALID, "[Dump][Exception] Dump exception output failed");
return PARAM_INVALID;
}
GELOGI("[Dump][Exception] Dump exception info SUCCESS");
} else {
GELOGE(PARAM_INVALID, "[Dump][Exception] Get op desc info failed,task id:%u,stream id:%u",
iter.taskid, iter.streamid);
return PARAM_INVALID;
}
}
return SUCCESS;
}

bool ExceptionDumper::GetOpDescInfo(uint32_t stream_id, uint32_t task_id, OpDescInfo &op_desc_info) const {
GELOGI("[Get][OpDescInfo] There are %zu op need to dump.", op_desc_info_.size());
for (size_t index = 0; index < op_desc_info_.size(); ++index) {
OpDescInfo dump_op_info = op_desc_info_.at(index);
if (dump_op_info.task_id == task_id && dump_op_info.stream_id == stream_id) {
GELOGI("[Get][OpDescInfo] Find exception op [%s] of task_id: %u, stream_id: %u.",
dump_op_info.op_name.c_str(), task_id, stream_id);
op_desc_info = dump_op_info;
return true;
}
}
return false;
}

Status ExceptionDumper::DumpExceptionInput(const OpDescInfo &op_desc_info, const string &dump_file) const {
GELOGI("[Dump][ExceptionInput] Start to dump exception input");
for (size_t i = 0; i < op_desc_info.input_addrs.size(); i++) {
if (Debug::DumpDevMem(dump_file.data(), op_desc_info.input_addrs.at(i), op_desc_info.input_size.at(i)) != SUCCESS) {
GELOGE(PARAM_INVALID, "[Dump][ExceptionInput] Dump the %zu input data of op [%s] failed",
i, op_desc_info.op_name.c_str());
return PARAM_INVALID;
}
}
return SUCCESS;
}

Status ExceptionDumper::DumpExceptionOutput(const OpDescInfo &op_desc_info, const string &dump_file) const {
GELOGI("[Dump][ExceptionOutput] Start to dump exception output");
for (size_t i = 0; i < op_desc_info.output_addrs.size(); i++) {
if (Debug::DumpDevMem(dump_file.data(), op_desc_info.output_addrs.at(i), op_desc_info.output_size.at(i)) !=
SUCCESS) {
GELOGE(PARAM_INVALID, "[Dump][ExceptionInput] Dump the %zu input data of op [%s] failed",
i, op_desc_info.op_name.c_str());
return PARAM_INVALID;
}
}
return SUCCESS;
}

OpDescInfo *ExceptionDumper::MutableOpDescInfo(uint32_t task_id, uint32_t stream_id) {
for (OpDescInfo &op_desc_info : op_desc_info_) {
if (op_desc_info.task_id == task_id && op_desc_info.stream_id == stream_id) {
return &op_desc_info;
}
}
return nullptr;
}
} // namespace ge

ge/common/dump/exception_dumper.h (+48, -0)

@@ -0,0 +1,48 @@
/**
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef GE_COMMON_DUMP_EXCEPTION_DUMPER_H_
#define GE_COMMON_DUMP_EXCEPTION_DUMPER_H_

#include <vector>

#include "graph/op_desc.h"
#include "framework/common/ge_types.h"
#include "graph/load/model_manager/task_info/task_info.h"

namespace ge {
class ExceptionDumper {
public:
ExceptionDumper() = default;
~ExceptionDumper();

void SaveDumpOpInfo(const OpDescPtr &op, uint32_t task_id, uint32_t stream_id,
std::vector<void *> &input_addrs, std::vector<void *> &output_addrs);
void SaveDumpOpInfo(const RuntimeParam &model_param, const OpDescPtr &op, uint32_t task_id, uint32_t stream_id);
Status DumpExceptionInfo(const std::vector<rtExceptionInfo> &exception_infos) const;
bool GetOpDescInfo(uint32_t stream_id, uint32_t task_id, OpDescInfo &op_desc_info) const;
OpDescInfo *MutableOpDescInfo(uint32_t task_id, uint32_t stream_id);

private:
void SaveOpDescInfo(const OpDescPtr &op, uint32_t task_id, uint32_t stream_id, OpDescInfo &op_desc_info);
Status DumpExceptionInput(const OpDescInfo &op_desc_info, const std::string &dump_file) const;
Status DumpExceptionOutput(const OpDescInfo &op_desc_info, const std::string &dump_file) const;

std::vector<OpDescInfo> op_desc_info_;
};
} // namespace ge

#endif // GE_COMMON_DUMP_EXCEPTION_DUMPER_H_
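
A minimal usage sketch of the ExceptionDumper interface declared above (not part of this change): SaveDumpOpInfo and DumpExceptionInfo come from the header, while the helper name DumpFailedTasks and its argument list are hypothetical; the real wiring lives in davinci_model.cc further down in this diff.

#include <cstdint>
#include <vector>

#include "common/dump/exception_dumper.h"

// Hypothetical wiring: record each op while tasks are distributed, then dump
// the ops matching the failed (stream_id, task_id) pairs reported by runtime.
ge::Status DumpFailedTasks(ge::ExceptionDumper &dumper,
                           const ge::RuntimeParam &model_param,
                           const ge::OpDescPtr &op_desc,
                           uint32_t task_id, uint32_t stream_id,
                           const std::vector<rtExceptionInfo> &exception_infos) {
  // Save shapes, sizes and device addresses of the op bound to this task.
  dumper.SaveDumpOpInfo(model_param, op_desc, task_id, stream_id);
  // Write <op_type>.<op_name>.<task_id>.<time> dump files for every match.
  return dumper.DumpExceptionInfo(exception_infos);
}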

ge/common/helper/model_helper.cc (+1, -0)

@@ -599,6 +599,7 @@ Status ModelHelper::GenerateGeRootModel(OmFileLoadHelper &om_load_helper) {
is_first_model = false;
root_model_->SetRootGraph(GraphUtils::GetComputeGraph(cur_model->GetGraph()));
root_model_->SetModelId(cur_model->GetModelId());
root_model_->SetModelName(cur_model->GetName());
model_ = cur_model;
continue;
}


ge/executor/CMakeLists.txt (+1, -0)

@@ -16,6 +16,7 @@ set(SRC_LIST
"../common/ge/plugin_manager.cc"
"../common/ge/op_tiling_manager.cc"
"../common/dump/dump_properties.cc"
"../common/dump/exception_dumper.cc"
"../common/dump/dump_manager.cc"
"../common/dump/dump_op.cc"
"../common/dump/opdebug_register.cc"


ge/graph/build/memory/graph_mem_assigner.cc (+79, -13)

@@ -560,7 +560,7 @@ Status GraphMemoryAssigner::AssignContinuousInputMemory(const ge::NodePtr &node,
bool is_allocated_first_input = is_continuous_input_allocated && (in_data_anchor->GetIdx() == 0);
if (is_allocated_first_input) {
std::map<int32_t, int32_t> out2ins;
GE_CHK_STATUS_RET(GetAllRef(node, out2ins), "[Get][AllRef]fail for node: %s", node->GetName().c_str());
GE_CHK_STATUS_RET(TryGetNodeRefIndexes(node, out2ins), "[Get][RefIndexes]fail for node: %s", node->GetName().c_str());
// output is beginning offset, set offset for input; only support this case now
if ((out2ins.size() == 1) && (out2ins.begin()->second == 0) && (reverse_refresh)) {
auto peer_output_offset = output_list.at(peer_out_data_anchor->GetIdx());
@@ -1250,10 +1250,46 @@ Status GraphMemoryAssigner::CheckOffset() {
return FAILED;
}
}
// check reuse input and output
GE_CHK_STATUS_RET(CheckRefNodeOffset(node), "[Check][Offset]fail for node: %s", node->GetName().c_str());
}

return SUCCESS;
}

ge::Status GraphMemoryAssigner::CheckRefNodeOffset(const NodePtr &node) {
std::map<int32_t, int32_t> out2ins;
GE_CHK_STATUS_RET(TryGetNodeRefIndexes(node, out2ins), "[Get][RefIndexes]fail for node: %s", node->GetName().c_str());
auto opdesc = node->GetOpDesc();
GE_CHECK_NOTNULL(opdesc);
auto output_list = opdesc->GetOutputOffset();
auto input_list = opdesc->GetInputOffset();
for (const auto &out2in : out2ins) {
auto out_i = out2in.first;
if (static_cast<size_t>(out_i) >= output_list.size()) {
std::string error = "Node" + FmtToStr(opdesc->GetName()) + "output offset size" +
FmtToStr(output_list.size()) + "should be bigger than ref out index" + FmtToStr(out_i);
GE_ERRORLOG_AND_ERRORMSG(ge::FAILED, error.c_str());
return ge::FAILED;
}
auto in_i = out2in.second;
if (static_cast<size_t>(in_i) >= input_list.size()) {
std::string error = "Node" + FmtToStr(opdesc->GetName()) + "input offset size" +
FmtToStr(input_list.size()) + "should be bigger than ref input index" + FmtToStr(in_i);
GE_ERRORLOG_AND_ERRORMSG(ge::FAILED, error.c_str());
return ge::FAILED;
}
if (output_list[out_i] != input_list[in_i]) {
std::string error = "Node" + FmtToStr(opdesc->GetName()) + "input offset " + FmtToStr(input_list[in_i]) +
"should equal to output offset" + FmtToStr(output_list[out_i]) + "with ref in" +
FmtToStr(in_i) + "to output" + FmtToStr(out_i);
GE_ERRORLOG_AND_ERRORMSG(ge::FAILED, error.c_str());
return ge::FAILED;
}
}
return ge::SUCCESS;
}

ge::Status GraphMemoryAssigner::SetInputOffset() {
if (memory_offset_.empty()) {
REPORT_INNER_ERROR("E19999", "InnerData memory_offset_ empty, not expected, graph_id:%u, graph_name:%s",
@@ -1330,6 +1366,8 @@ ge::Status GraphMemoryAssigner::UpdateOpInputOffset(const NodePtr &node, vector<
origin_input_list = tmp_op_desc->GetInputOffset();
int64_t valid_input_index = 0;
bool has_mem_type_attr = ge::AttrUtils::GetListInt(tmp_op_desc, ATTR_NAME_INPUT_MEM_TYPE_LIST, memory_type);
std::map<int32_t, int32_t> out2ins;
GE_CHK_STATUS_RET(TryGetNodeRefIndexes(node, out2ins), "[Get][RefIndexes]fail for node: %s", node->GetName().c_str());
for (const auto &anchor : node->GetAllInDataAnchors()) {
vector<int64_t> output_list;
auto peer_out_anchor = anchor->GetPeerOutAnchor();
@@ -1350,17 +1388,25 @@ ge::Status GraphMemoryAssigner::UpdateOpInputOffset(const NodePtr &node, vector<
auto ori_input_offset_list_size = origin_input_list.size();
auto mem_type_size = memory_type.size();
if ((input_size != mem_type_size) || (input_size != ori_input_offset_list_size)) {
std::string error = "fusion: node" + FmtToStr(tmp_op_desc->GetName()) +
std::string error = "Node" + FmtToStr(tmp_op_desc->GetName()) +
+ " input_size" + FmtToStr(input_size) + " diff from memory_type_size" +
FmtToStr(mem_type_size) + " from ori_input_offset_list_size" +
FmtToStr(ori_input_offset_list_size);
GE_ERRORLOG_AND_ERRORMSG(ge::FAILED, error.c_str());
return ge::FAILED;
}
// not hbm keep orignal inputoffest
// hbm inputoffset = original inputoffset + outputoffset
input_offset = (memory_type[valid_input_index] == RT_MEMORY_L1 ? origin_input_list[valid_input_index]
: origin_input_list[valid_input_index] + output_list.at(out_index));
GELOGD("Node[%s] input[%d] has origin offset[%ld]", tmp_op_desc->GetName().c_str(), anchor->GetIdx(),
origin_input_list[valid_input_index]);
// L1 keep original input_offset
if (memory_type[valid_input_index] == RT_MEMORY_L1) {
input_offset = origin_input_list[valid_input_index];
} else {
// hbm input_offset = original input_offset + output_offset
input_offset = origin_input_list[valid_input_index] + output_list.at(out_index);
// update ref output_offset when input change
GE_CHK_STATUS_RET(UpdateRefOpOutputOffset(node, out2ins, anchor->GetIdx(), input_offset),
"[Update][RefOffset]fail for node: %s", node->GetName().c_str());
}
}
const auto &in_node = GetKnownInputNode(peer_out_anchor->GetOwnerNode());
if (in_node->GetType() == CONSTANT) {
@@ -1368,12 +1414,8 @@ ge::Status GraphMemoryAssigner::UpdateOpInputOffset(const NodePtr &node, vector<
GE_CHK_STATUS(TensorUtils::GetDataOffset(tensor_desc, input_offset));
}

GELOGD("%s node[%s] input[%ld] is set from node[%s] out index[%lu] offset[%ld]",
has_mem_type_attr ? "Fusion" : "",
tmp_op_desc->GetName().c_str(),
valid_input_index,
peer_out_anchor->GetOwnerNode()->GetOpDesc()->GetName().c_str(),
out_index,
GELOGD("Node[%s] input[%d] is set from node[%s] out index[%lu] offset[%ld]", tmp_op_desc->GetName().c_str(),
anchor->GetIdx(), peer_out_anchor->GetOwnerNode()->GetOpDesc()->GetName().c_str(), out_index,
input_offset);
input_list.emplace_back(input_offset);
valid_input_index++;
@@ -1382,6 +1424,30 @@ ge::Status GraphMemoryAssigner::UpdateOpInputOffset(const NodePtr &node, vector<
return ge::SUCCESS;
}

ge::Status GraphMemoryAssigner::UpdateRefOpOutputOffset(const NodePtr &node, const std::map<int32_t, int32_t> &out2ins,
const int ref_in, const int64_t input_offset) const {
auto opdesc = node->GetOpDesc();
GE_CHECK_NOTNULL(opdesc);
for (const auto &out2in : out2ins) {
auto out_i = out2in.first;
auto in_i = out2in.second;
if (in_i == ref_in) {
auto origin_output_list = opdesc->GetOutputOffset();
if (static_cast<size_t>(out_i) >= origin_output_list.size()) {
std::string error = "Node" + FmtToStr(opdesc->GetName()) + "output offset size" +
FmtToStr(origin_output_list.size()) + "should be bigger than ref out index" + FmtToStr(out_i);
GE_ERRORLOG_AND_ERRORMSG(ge::FAILED, error.c_str());
return ge::FAILED;
}
origin_output_list[out_i] = input_offset;
opdesc->SetOutputOffset(origin_output_list);
GELOGI("Node[%s] output[%d] is updated from reuse input index[%d] to offset[%ld]", opdesc->GetName().c_str(),
out_i, ref_in, input_offset);
}
}
return ge::SUCCESS;
}

ge::Status GraphMemoryAssigner::UpdateOpInputOffset(const NodePtr &node) const {
GE_CHECK_NOTNULL(node->GetOpDesc());
vector<int64_t> input_list;
@@ -1626,7 +1692,7 @@ void GraphMemoryAssigner::PrintMemoryOffset() {
}
}

ge::Status GraphMemoryAssigner::GetAllRef(const NodePtr &node, map<int32_t, int32_t> &out2ins) {
ge::Status GraphMemoryAssigner::TryGetNodeRefIndexes(const NodePtr &node, map<int32_t, int32_t> &out2ins) const {
for (const auto &out_data_anchor : node->GetAllOutDataAnchors()) {
int32_t reuse_in_index = -1;
bool reuse_input_flag = GraphUtils::IsRefFromInput(out_data_anchor, reuse_in_index);
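
For clarity (not part of this change): the consistency rule that the new CheckRefNodeOffset enforces for reference (in-place) ops, restated on plain offset vectors. out2ins maps a ref output index to the input index whose memory it reuses, as returned by TryGetNodeRefIndexes; the function name below is hypothetical.

#include <cstdint>
#include <map>
#include <vector>

// Returns true when every ref output shares the offset of the input it reuses.
bool RefOffsetsConsistent(const std::map<int32_t, int32_t> &out2ins,
                          const std::vector<int64_t> &input_offsets,
                          const std::vector<int64_t> &output_offsets) {
  for (const auto &out2in : out2ins) {
    const auto out_i = static_cast<size_t>(out2in.first);
    const auto in_i = static_cast<size_t>(out2in.second);
    if (out_i >= output_offsets.size() || in_i >= input_offsets.size()) {
      return false;  // ref index out of range: rejected, as CheckRefNodeOffset does
    }
    if (output_offsets[out_i] != input_offsets[in_i]) {
      return false;  // a reused output must carry the same offset as its input
    }
  }
  return true;
}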


ge/graph/build/memory/graph_mem_assigner.h (+4, -1)

@@ -110,8 +110,11 @@ class GraphMemoryAssigner {
ge::Status SetInputOffset();

ge::Status UpdateOpInputOffset(const NodePtr &node) const;
ge::Status UpdateRefOpOutputOffset(const NodePtr &node, const std::map<int32_t, int32_t> &out2ins, const int ref_in,
const int64_t input_offset) const;

ge::Status CheckOffset();
ge::Status CheckRefNodeOffset(const NodePtr &node);

ge::Status AssignReferenceMemory();

@@ -125,7 +128,7 @@ class GraphMemoryAssigner {

ge::Status ReAssignAtomicMemory(bool is_loop_graph);

ge::Status GetAllRef(const NodePtr &node, std::map<int32_t, int32_t> &out2ins);
ge::Status TryGetNodeRefIndexes(const NodePtr &node, std::map<int32_t, int32_t> &out2ins) const;

bool AssignContinuousInputMemoryWithAtomicProcessDirectly(const NodePtr &input_continuous_node,
std::map<NodePtr, uint32_t> &node_2_continuous_type);


ge/graph/execute/graph_execute.cc (+77, -3)

@@ -20,9 +20,12 @@
#include <string>

#include "graph/load/model_manager/model_manager.h"
#include "graph/load/model_manager/davinci_model.h"
#include "omm/csa_interact.h"

namespace ge {
using Uint32Pair = pair<uint32_t, uint32_t>;
const uint32_t kInvalidModelId = UINT32_MAX;
GraphExecutor::GraphExecutor()
: init_flag_(false),
train_graph_flag_(false),
@@ -380,7 +383,8 @@ Status GraphExecutor::ExecuteGraph(GraphId graph_id, const GeRootModelPtr &ge_ro
}

Status GraphExecutor::ExecuteGraphAsync(GraphId graph_id, const GeRootModelPtr &ge_root_model,
const std::vector<InputTensorInfo> &input_tensor) {
const std::vector<InputTensorInfo> &input_tensor,
const RunAsyncCallback& callback) {
GELOGI("[GraphExecutor] Start to async execute graph, graph_id=%u", graph_id);
if (graph_id != last_graph_id_) {
auto ret = FreeExecuteMemory();
@@ -390,7 +394,7 @@ Status GraphExecutor::ExecuteGraphAsync(GraphId graph_id, const GeRootModelPtr &
}
last_graph_id_ = graph_id;
GE_CHECK_NOTNULL_EXEC(ge_root_model, return FAILED);
Status ret = AsyncExecuteModel(ge_root_model->GetModelId(), input_tensor);
Status ret = AsyncExecuteModel(ge_root_model, input_tensor, callback);
if (ret != SUCCESS) {
GELOGE(GE_GRAPH_SYNC_MODEL_FAILED, "[GraphExecutor] AsyncExecuteModel Error!");
return GE_GRAPH_SYNC_MODEL_FAILED;
@@ -463,11 +467,81 @@ Status GraphExecutor::ExecuteGraphWithStream(GraphId graph_id,
return SUCCESS;
}

Status GraphExecutor::AsyncExecuteModel(uint32_t model_id, const std::vector<InputTensorInfo> &inputs) {
bool CompareByLoad(const Uint32Pair &lhs, const Uint32Pair &rhs) {
return lhs.second < rhs.second;
}

uint32_t GraphExecutor::GetExecuteModelId(const GeRootModelPtr &ge_root_model) {
std::vector<uint32_t> model_ids = ge_root_model->GetAllModelId();
if (model_ids.empty()) {
return kInvalidModelId;
}
if (model_ids.size() == 1) {
return ge_root_model->GetModelId();
}
std::vector<Uint32Pair> model_id_to_loads;
auto model_manager = ModelManager::GetInstance();
GE_CHECK_NOTNULL(model_manager);
for (auto model_id : model_ids) {
auto davinci_model = model_manager->GetModel(model_id);
auto hybrid_model = model_manager->GetHybridModel(model_id);
if (hybrid_model == nullptr) {
GE_CHECK_NOTNULL(davinci_model);
}
uint32_t input_load = hybrid_model != nullptr ? hybrid_model->GetDataInputerSize() :
davinci_model->GetDataInputerSize();
uint32_t running_load = hybrid_model != nullptr ? static_cast<uint32_t>(hybrid_model->GetRunningFlag()) :
static_cast<uint32_t>(davinci_model->GetRunningFlag());
uint32_t load = input_load + running_load;
if (load == 0) {
return model_id;
}
model_id_to_loads.emplace_back(model_id, load);
}
sort(model_id_to_loads.begin(), model_id_to_loads.end(), CompareByLoad);
if (model_id_to_loads.empty()) {
return kInvalidModelId;
}
return model_id_to_loads.begin()->first;
}

Status GraphExecutor::SetCallback(uint32_t model_id, const GeRootModelPtr &ge_root_model,
const RunAsyncCallback &callback) {
auto model_manager = ge::ModelManager::GetInstance();
GE_CHECK_NOTNULL(model_manager);
if (model_manager->IsNeedHybridLoad(*ge_root_model)) {
auto model = model_manager->GetHybridModel(model_id);
GE_CHECK_NOTNULL(model);
if (model->SetRunAsyncListenerCallback(callback) != SUCCESS) {
GELOGE(FAILED, "SetRunAsyncListenerCallback failed.");
return FAILED;
}
} else {
auto model = model_manager->GetModel(model_id);
GE_CHECK_NOTNULL(model);
if (model->SetRunAsyncListenerCallback(callback) != SUCCESS) {
GELOGE(FAILED, "SetRunAsyncListenerCallback failed.");
return FAILED;
}
}
return SUCCESS;
}

Status GraphExecutor::AsyncExecuteModel(const GeRootModelPtr &ge_root_model, const std::vector<InputTensorInfo> &inputs,
const RunAsyncCallback &callback) {
uint32_t model_id = GetExecuteModelId(ge_root_model);
if (model_id == kInvalidModelId) {
GELOGE(INTERNAL_ERROR, "No valid model id.");
return INTERNAL_ERROR;
}
try {
auto model_manager = ge::ModelManager::GetInstance();
GE_CHECK_NOTNULL(model_manager);
GELOGI("RunAsync begin.model_id %u", model_id);
if (SetCallback(model_id, ge_root_model, callback) != SUCCESS) {
GELOGE(FAILED, "RunAsync: SetCallBack for model fail");
return FAILED;
}

Status ret = model_manager->DataInputTensor(model_id, inputs);
if (ret != SUCCESS) {
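
For clarity (not part of this change): the load metric GetExecuteModelId uses when a GeRootModel carries several loaded model ids. Each candidate's load is its queued-input count (GetDataInputerSize) plus one if it is currently running (GetRunningFlag); an idle candidate with an empty queue is chosen immediately, otherwise the least-loaded one wins. The struct and function names below are hypothetical.

#include <cstdint>
#include <vector>

struct CandidateLoad {
  uint32_t model_id;
  uint32_t queued_inputs;  // DataInputer queue size
  bool running;            // model currently executing a batch
};

uint32_t PickLeastLoadedModel(const std::vector<CandidateLoad> &candidates,
                              uint32_t invalid_model_id) {
  uint32_t best_id = invalid_model_id;
  uint32_t best_load = UINT32_MAX;
  for (const auto &c : candidates) {
    const uint32_t load = c.queued_inputs + (c.running ? 1U : 0U);
    if (load == 0) {
      return c.model_id;  // idle with an empty queue: dispatch here right away
    }
    if (load < best_load) {
      best_load = load;
      best_id = c.model_id;
    }
  }
  return best_id;
}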


ge/graph/execute/graph_execute.h (+8, -2)

@@ -50,7 +50,7 @@ class GraphExecutor {
std::vector<GeTensor> &output_tensor);

ge::Status ExecuteGraphAsync(GraphId graph_id, const GeRootModelPtr &ge_root_model,
const std::vector<InputTensorInfo> &input_tensor);
const std::vector<InputTensorInfo> &input_tensor, const RunAsyncCallback &callback);

Status ExecuteGraphWithStream(GraphId graph_id,
const GeRootModelPtr &ge_root_model,
@@ -122,6 +122,8 @@ class GraphExecutor {

static Status GetOpDescInfo(uint32_t device_id, uint32_t stream_id, uint32_t task_id, OpDescInfo &op_desc_info);

uint32_t GetExecuteModelId(const GeRootModelPtr &ge_root_model);

private:
Status PrepareInputData(const std::vector<GeTensor> &input_tensor, InputData &graph_input_data,
OutputData &graph_output_data, std::vector<InputOutputDescInfo> &output_desc);
@@ -132,7 +134,8 @@ class GraphExecutor {
Status SyncExecuteModel(uint32_t model_id, const std::vector<GeTensor> &input_tensor,
std::vector<GeTensor> &output_tensor);

Status AsyncExecuteModel(uint32_t model_id, const std::vector<InputTensorInfo> &input_tensor);
Status AsyncExecuteModel(const GeRootModelPtr &ge_root_model, const std::vector<InputTensorInfo> &input_tensor,
const RunAsyncCallback &callback);

void InitModelIdInfo(std::vector<uint32_t> &out_model_id_info, std::vector<SubGraphInfoPtr> &sub_graph_vec,
uint32_t output_size);
@@ -141,6 +144,9 @@ class GraphExecutor {

Status MallocInOutBuffer(const std::vector<uint64_t> &buffer_size, std::vector<void *> &data_addr);

static Status SetCallback(uint32_t model_id, const GeRootModelPtr &ge_root_model,
const RunAsyncCallback &callback);

bool init_flag_;

bool train_graph_flag_;


ge/graph/load/graph_loader.cc (+0, -1)

@@ -63,7 +63,6 @@ Status GraphLoader::LoadModelOnline(uint32_t &model_id, const std::shared_ptr<ge
GELOGE(GE_GRAPH_PARAM_NULLPTR, "[LoadGraph] GE load graph model_ptr is nullptr.");
return GE_GRAPH_PARAM_NULLPTR;
}
model_id = ge_root_model_ptr->GetModelId();

auto model_manager = ModelManager::GetInstance();
GE_CHECK_NOTNULL(model_manager);


ge/graph/load/model_manager/data_dumper.cc (+0, -172)

@@ -72,24 +72,6 @@ static bool ParseNameIndex(const std::string &node_name_index, std::string &node
static bool IsTensorDescWithSkipDumpAddrType(bool has_mem_type_attr, vector<int64_t> v_memory_type, size_t i) {
return has_mem_type_attr && (v_memory_type[i] == RT_MEMORY_L1);
}

static uint64_t GetNowTime() {
uint64_t ret = 0;
mmTimeval tv;
if (mmGetTimeOfDay(&tv, nullptr) == 0) {
ret = tv.tv_sec * 1000000ULL + tv.tv_usec;
}

return ret;
}

static void ReplaceStringElem(std::string &str) {
for_each(str.begin(), str.end(), [](char &ch) {
if ((ch == ' ') || (ch == '.') || (ch == '/') || (ch == '\\')) {
ch = '_';
}
});
}
} // namespace

static int32_t GetIrDataType(ge::DataType data_type) {
@@ -194,66 +176,6 @@ void DataDumper::SaveOpDebugId(uint32_t task_id, uint32_t stream_id, void *op_de
is_op_debug_ = is_op_debug;
}

void DataDumper::SaveDumpOpInfo(const RuntimeParam &model_param, const OpDescPtr &op, uint32_t task_id,
uint32_t stream_id) {
GELOGD("Start SaveDumpOpInfo of task_id: %u, stream_id: %u", task_id, stream_id);
OpDescInfo op_desc_info;
op_desc_info.op_name = op->GetName();
op_desc_info.op_type = op->GetType();
op_desc_info.task_id = task_id;
op_desc_info.stream_id = stream_id;
for (size_t i = 0; i < op->GetAllInputsSize(); ++i) {
GeTensorDescPtr input_tensor_desc = op->MutableInputDesc(i);
if (input_tensor_desc == nullptr) {
continue;
}
op_desc_info.input_format.emplace_back(input_tensor_desc->GetFormat());
op_desc_info.input_shape.emplace_back(input_tensor_desc->GetShape().GetDims());
op_desc_info.input_data_type.emplace_back(input_tensor_desc->GetDataType());
int64_t input_size = 0;

if (TensorUtils::GetTensorSizeInBytes(*input_tensor_desc, input_size) != SUCCESS) {
GELOGW("Get input size failed");
return;
}
GELOGD("Save dump op info, the input size is %ld", input_size);
op_desc_info.input_size.emplace_back(input_size);
}
for (size_t j = 0; j < op->GetOutputsSize(); ++j) {
GeTensorDescPtr output_tensor_desc = op->MutableOutputDesc(j);
if (output_tensor_desc == nullptr) {
continue;
}
op_desc_info.output_format.emplace_back(output_tensor_desc->GetFormat());
op_desc_info.output_shape.emplace_back(output_tensor_desc->GetShape().GetDims());
op_desc_info.output_data_type.emplace_back(output_tensor_desc->GetDataType());
int64_t output_size = 0;
if (TensorUtils::GetTensorSizeInBytes(*output_tensor_desc, output_size) != SUCCESS) {
GELOGW("Get input size failed");
return;
}
GELOGD("Save dump op info, the output size is %ld", output_size);
op_desc_info.output_size.emplace_back(output_size);
}
op_desc_info.input_addrs = ModelUtils::GetInputDataAddrs(model_param, op);
op_desc_info.output_addrs = ModelUtils::GetOutputDataAddrs(model_param, op);

op_desc_info_.emplace_back(op_desc_info);
}

bool DataDumper::GetOpDescInfo(uint32_t stream_id, uint32_t task_id, OpDescInfo &op_desc_info) const {
GELOGI("There are %zu op need to dump.", op_desc_info_.size());
for (size_t index = 0; index < op_desc_info_.size(); ++index) {
OpDescInfo dump_op_info = op_desc_info_.at(index);
if (dump_op_info.task_id == task_id && dump_op_info.stream_id == stream_id) {
GELOGI("find exception op of task_id: %u, stream_id: %u.", task_id, stream_id);
op_desc_info = dump_op_info;
return true;
}
}
return false;
}

void DataDumper::SaveDumpTask(uint32_t task_id, uint32_t stream_id, const std::shared_ptr<OpDesc> &op_desc,
uintptr_t args) {
if (op_desc == nullptr) {
@@ -904,98 +826,4 @@ void DataDumper::PrintCheckLog(string &dump_list_key) {
}
}
}

Status DataDumper::DumpExceptionInput(const OpDescInfo &op_desc_info, const string &dump_file) {
GELOGI("Start to dump exception input");
for (size_t i = 0; i < op_desc_info.input_addrs.size(); i++) {
if (Debug::DumpDevMem(dump_file.data(), op_desc_info.input_addrs.at(i), op_desc_info.input_size.at(i)) != SUCCESS) {
GELOGE(PARAM_INVALID, "Dump the %zu input data failed", i);
return PARAM_INVALID;
}
}
return SUCCESS;
}

Status DataDumper::DumpExceptionOutput(const OpDescInfo &op_desc_info, const string &dump_file) {
GELOGI("Start to dump exception output");
for (size_t i = 0; i < op_desc_info.output_addrs.size(); i++) {
if (Debug::DumpDevMem(dump_file.data(), op_desc_info.output_addrs.at(i), op_desc_info.output_size.at(i)) !=
SUCCESS) {
GELOGE(PARAM_INVALID, "Dump the %zu input data failed", i);
return PARAM_INVALID;
}
}
return SUCCESS;
}

Status DataDumper::DumpExceptionInfo(const std::vector<rtExceptionInfo> exception_infos) {
GELOGI("Start to dump exception info");
for (const rtExceptionInfo &iter : exception_infos) {
OpDescInfo op_desc_info;
if (GetOpDescInfo(iter.streamid, iter.taskid, op_desc_info)) {
toolkit::dumpdata::DumpData dump_data;
dump_data.set_version("2.0");
dump_data.set_dump_time(GetNowTime());
dump_data.set_op_name(op_desc_info.op_name);
for (size_t i = 0; i < op_desc_info.input_format.size(); ++i) {
toolkit::dumpdata::OpInput input;
input.set_data_type(toolkit::dumpdata::OutputDataType(GetIrDataType(op_desc_info.input_data_type[i])));
input.set_format(toolkit::dumpdata::OutputFormat(op_desc_info.input_format[i]));
for (auto dim : op_desc_info.input_shape[i]) {
input.mutable_shape()->add_dim(dim);
}
input.set_size(op_desc_info.input_size[i]);
GELOGI("The input size int exception is %ld", op_desc_info.input_size[i]);
dump_data.mutable_input()->Add(std::move(input));
}
for (size_t j = 0; j < op_desc_info.output_format.size(); ++j) {
toolkit::dumpdata::OpOutput output;
output.set_data_type(toolkit::dumpdata::OutputDataType(GetIrDataType(op_desc_info.output_data_type[j])));
output.set_format(toolkit::dumpdata::OutputFormat(op_desc_info.output_format[j]));
for (auto dim : op_desc_info.output_shape[j]) {
output.mutable_shape()->add_dim(dim);
}
output.set_size(op_desc_info.output_size[j]);
GELOGI("The output size int exception is %ld", op_desc_info.output_size[j]);
dump_data.mutable_output()->Add(std::move(output));
}
uint64_t now_time = GetNowTime();
std::string op_name = op_desc_info.op_name;
std::string op_type = op_desc_info.op_type;
ReplaceStringElem(op_name);
ReplaceStringElem(op_type);
string dump_file_path =
"./" + op_type + "." + op_name + "." + std::to_string(op_desc_info.task_id) + "." + std::to_string(now_time);
GELOGI("The exception dump file path is %s", dump_file_path.c_str());

uint64_t proto_size = dump_data.ByteSizeLong();
std::unique_ptr<char[]> proto_msg(new (std::nothrow) char[proto_size]);
bool ret = dump_data.SerializeToArray(proto_msg.get(), proto_size);
if (!ret || proto_size == 0) {
REPORT_INNER_ERROR("E19999", "Serialize proto to string fail");
GELOGE(PARAM_INVALID, "Dump data proto serialize failed");
return PARAM_INVALID;
}

GE_CHK_STATUS_RET(MemoryDumper::DumpToFile(dump_file_path.c_str(), &proto_size, sizeof(uint64_t)),
"Failed to dump proto size");
GE_CHK_STATUS_RET(MemoryDumper::DumpToFile(dump_file_path.c_str(), proto_msg.get(), proto_size),
"Failed to dump proto msg");
if (DumpExceptionInput(op_desc_info, dump_file_path) != SUCCESS) {
GELOGE(PARAM_INVALID, "Dump exception input failed");
return PARAM_INVALID;
}

if (DumpExceptionOutput(op_desc_info, dump_file_path) != SUCCESS) {
GELOGE(PARAM_INVALID, "Dump exception output failed");
return PARAM_INVALID;
}
GELOGI("Dump exception info SUCCESS");
} else {
GELOGE(PARAM_INVALID, "Get op desc info failed,task id:%u,stream id:%u", iter.taskid, iter.streamid);
return PARAM_INVALID;
}
}
return SUCCESS;
}
} // namespace ge

ge/graph/load/model_manager/data_dumper.h (+0, -8)

@@ -70,8 +70,6 @@ class DataDumper {

void SaveDumpInput(const std::shared_ptr<Node> &node);

void SaveDumpOpInfo(const RuntimeParam &model_param, const OpDescPtr &op, uint32_t task_id, uint32_t stream_id);

// args is device memory stored first output addr
void SaveDumpTask(uint32_t task_id, uint32_t stream_id, const std::shared_ptr<OpDesc> &op_desc, uintptr_t args);
void SaveEndGraphId(uint32_t task_id, uint32_t stream_id);
@@ -87,14 +85,8 @@ class DataDumper {

void SetDumpProperties(const DumpProperties &dump_properties) { dump_properties_ = dump_properties; }
const DumpProperties &GetDumpProperties() const { return dump_properties_; }
bool GetOpDescInfo(uint32_t stream_id, uint32_t task_id, OpDescInfo &op_desc_info) const;
const std::vector<OpDescInfo> &GetAllOpDescInfo() const { return op_desc_info_; }

// Dump exception info
Status DumpExceptionInput(const OpDescInfo &op_desc_info, const string &dump_file);
Status DumpExceptionOutput(const OpDescInfo &op_desc_info, const string &dump_file);
Status DumpExceptionInfo(const std::vector<rtExceptionInfo> exception_infos);

private:
void ReleaseDevMem(void **ptr) noexcept;



ge/graph/load/model_manager/data_inputer.h (+2, -0)

@@ -134,6 +134,8 @@ class DataInputer {
///
void Stop() { queue_.Stop(); }

uint32_t Size() { return queue_.Size(); }

private:
///
/// @ingroup domi_ome


ge/graph/load/model_manager/davinci_model.cc (+49, -4)

@@ -2656,9 +2656,9 @@ Status DavinciModel::ReturnResult(uint32_t data_id, const bool rslt_flg, const b
GE_CHECK_NOTNULL(model_manager);
auto exception_infos = model_manager->GetExceptionInfos();
if (exception_infos.size() > 0) {
GE_CHK_STATUS_RET(data_dumper_.DumpExceptionInfo(exception_infos), "Dump exception info failed");
GE_CHK_STATUS_RET(DumpExceptionInfo(exception_infos), "[Dump][Exception] Dump exception info failed.");
} else {
GELOGI("Exception info is null");
GELOGI("[Dump][Exception] Exception info is null.");
}
GE_CHK_STATUS(listener_->OnComputeDone(model_id_, data_id, INTERNAL_ERROR, outputs), "OnComputeDone failed.");
return INTERNAL_ERROR;
@@ -2737,6 +2737,8 @@ void *DavinciModel::Run(DavinciModel *model) {

ErrorManager::GetInstance().SetStage(ErrorMessage::kModelExecute, ErrorMessage::kModelExecute);
while (model->RunFlag()) {
// Model has not truly started running before data is received
model->SetRunningFlag(false);
bool rslt_flg = true;
if (model->GetDataInputer() == nullptr) {
GELOGW("Data inputer is nullptr.");
@@ -2746,6 +2748,8 @@ void *DavinciModel::Run(DavinciModel *model) {

std::shared_ptr<InputDataWrapper> data_wrapper;
Status ret = model->GetDataInputer()->Pop(data_wrapper);
// Model run actually starts after data is received.
model->SetRunningFlag(true);
if (data_wrapper == nullptr || ret != SUCCESS) {
GELOGI("data_wrapper is null!");
continue;
@@ -2832,7 +2836,9 @@ void *DavinciModel::Run(DavinciModel *model) {

model->iterator_count_++;
model->is_first_execute_ = false;
GELOGI("run iterator count is %lu", model->iterator_count_);
// model run finished
model->SetRunningFlag(false);
GELOGI("run iterator count is %lu, model_id:%u", model->iterator_count_, model->model_id_);
}

CsaInteract::GetInstance().WriteInternalErrorCode();
@@ -2890,7 +2896,7 @@ Status DavinciModel::ModelRunStart() {

error_context_ = ErrorManager::GetInstance().GetErrorContext();
CREATE_STD_THREAD(thread_id_, DavinciModel::Run, this);
GELOGI("model tread create success, model id:%u.", model_id_);
GELOGI("model thread create success, model id:%u.", model_id_);
return SUCCESS;
}

@@ -4340,4 +4346,43 @@ Status DavinciModel::InitL1DataDumperArgs() {
return SUCCESS;
}

Status DavinciModel::SetRunAsyncListenerCallback(const RunAsyncCallback &callback) {
auto listener = dynamic_cast<RunAsyncListener *>(listener_.get());
GE_CHECK_NOTNULL(listener);
listener->SetCallback(callback);
return SUCCESS;
}

void DavinciModel::UpdateOpIOAddrs(uint32_t task_id, uint32_t stream_id, const std::vector<void *> &io_addrs) {
if (fixed_mem_base_ == reinterpret_cast<uintptr_t>(mem_base_)) {
GELOGD("[Update][OpIOAddrs] No need to update op input output addr.");
return;
}

OpDescInfo *op_desc_info = exception_dumper_.MutableOpDescInfo(task_id, stream_id);
if (op_desc_info == nullptr) {
GELOGW("[Update][OpIOAddrs] Find op desc failed, task_id: %u, stream_id: %u.", task_id, stream_id);
return;
}
size_t input_size = op_desc_info->input_addrs.size();
size_t output_size = op_desc_info->output_addrs.size();
if (input_size + output_size != io_addrs.size()) {
GELOGW("[Update][OpIOAddrs] Op[%s] input size[%zu] and output size[%zu] is not equal to io addr size[%zu]",
op_desc_info->op_name.c_str(), input_size, output_size, io_addrs.size());
return;
}

vector<void *> input_addrs;
vector<void *> output_addrs;
for (size_t i = 0; i < io_addrs.size(); i++) {
if (i < input_size) {
input_addrs.emplace_back(GetRunAddress(io_addrs[i]));
} else {
output_addrs.emplace_back(GetRunAddress(io_addrs[i]));
}
}
op_desc_info->input_addrs = input_addrs;
op_desc_info->output_addrs = output_addrs;
GELOGD("[Update][OpIOAddrs] Op [%s] update input output addr success.", op_desc_info->op_name.c_str());
}
} // namespace ge

ge/graph/load/model_manager/davinci_model.h (+20, -2)

@@ -29,6 +29,7 @@
#include "common/helper/om_file_helper.h"
#include "common/opskernel/ge_task_info.h"
#include "common/properties_manager.h"
#include "common/dump/exception_dumper.h"
#include "common/dump/opdebug_register.h"
#include "common/types.h"
#include "framework/common/util.h"
@@ -221,6 +222,11 @@ class DavinciModel {
///
DataInputer *const GetDataInputer() const { return data_inputer_; }

uint32_t GetDataInputerSize() {
GE_CHECK_NOTNULL(data_inputer_);
return data_inputer_->Size();
}

// get Stream number
uint32_t StreamNum() const { return runtime_param_.stream_num; }

@@ -471,13 +477,17 @@ class DavinciModel {
Status ReportProfilingData();

void SaveDumpOpInfo(const RuntimeParam &model_param, const OpDescPtr &op, uint32_t task_id, uint32_t stream_id) {
data_dumper_.SaveDumpOpInfo(model_param, op, task_id, stream_id);
exception_dumper_.SaveDumpOpInfo(model_param, op, task_id, stream_id);
}

void SaveDumpTask(uint32_t task_id, uint32_t stream_id, const shared_ptr<OpDesc> &op_desc, uintptr_t args) {
data_dumper_.SaveDumpTask(task_id, stream_id, op_desc, args);
}

Status DumpExceptionInfo(const std::vector<rtExceptionInfo> &exception_infos) const {
return exception_dumper_.DumpExceptionInfo(exception_infos);
}

void SetKnownShapeGlobalStep(void *global_step) {
known_shape_global_step_ = global_step;
}
@@ -557,8 +567,13 @@ class DavinciModel {
const DumpProperties &GetDumpProperties() const { return data_dumper_.GetDumpProperties(); }

bool GetOpDescInfo(uint32_t stream_id, uint32_t task_id, OpDescInfo &op_desc_info) const {
return data_dumper_.GetOpDescInfo(stream_id, task_id, op_desc_info);
return exception_dumper_.GetOpDescInfo(stream_id, task_id, op_desc_info);
}
void UpdateOpIOAddrs(uint32_t task_id, uint32_t stream_id, const std::vector<void *> &io_addrs);

bool GetRunningFlag() const { return running_flg_; }
void SetRunningFlag(bool flag) { running_flg_ = flag; }
Status SetRunAsyncListenerCallback(const RunAsyncCallback &callback);

private:
// memory address of weights
@@ -924,6 +939,8 @@ class DavinciModel {
shared_ptr<ModelListener> listener_;

bool run_flg_;
// check whether model is running with data
bool running_flg_ = false;

mutex mux_run_flg_;

@@ -1001,6 +1018,7 @@ class DavinciModel {
int64_t maxDumpOpNum_;
// for data dump
DataDumper data_dumper_;
ExceptionDumper exception_dumper_;
OpdebugRegister opdebug_register_;
uint64_t iterator_count_;
bool is_l1_fusion_enable_;


ge/graph/load/model_manager/model_manager.cc (+35, -9)

@@ -280,6 +280,7 @@ ModelManager::~ModelManager() {
model_map_.clear();
model_aicpu_kernel_.clear();
cust_aicpu_so_.clear();
dump_exception_flag_ = false;

GE_IF_BOOL_EXEC(device_count > 0, GE_CHK_RT(rtDeviceReset(0)));
}
@@ -330,6 +331,7 @@ Status ModelManager::LoadModelOnline(uint32_t &model_id, const shared_ptr<ge::Ge
GE_CHK_BOOL_RET_STATUS(listener.get() != nullptr, PARAM_INVALID, "Param incorrect, listener is null");
if (model_id == INVALID_MODEL_ID) {
GenModelId(&model_id);
GELOGD("Generate new model_id:%u", model_id);
}
auto name_to_model = ge_root_model->GetSubgraphInstanceNameToModel();
string om_name;
@@ -363,7 +365,18 @@ Status ModelManager::LoadModelOnline(uint32_t &model_id, const shared_ptr<ge::Ge
GE_IF_BOOL_EXEC(SUCCESS != (ret = davinci_model->Assign(ge_model)), GELOGW("assign model to modeldef failed.");
break;);
GE_TIMESTAMP_END(Assign, "GraphLoader::ModelAssign");

/// In multi-threaded inference, using the same session_id among multiple threads may cause some threads to fail.
/// These session_ids come from the same model, so the values of session_id are the same.
/// Update the session_id during model load for inference to avoid reusing the same session_id.
if (!ge_root_model->GetTrainFlag()) {
uint64_t new_session_id;
ret = GenSessionId(new_session_id);
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(ret != SUCCESS, return ret, "Generate session_id for infer failed.");
ret = davinci_model->UpdateSessionId(new_session_id);
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(ret != SUCCESS, return ret, "Update session_id for infer failed.");
ge_model->InsertSessionMap(model_id, new_session_id);
GELOGD("Update new session id: %lu.", new_session_id);
}
GE_TIMESTAMP_START(Init);
GE_IF_BOOL_EXEC(SUCCESS != (ret = davinci_model->Init()), GELOGW("DavinciInit failed."); break;);
GE_TIMESTAMP_END(Init, "GraphLoader::ModelInit");
@@ -376,16 +389,16 @@ Status ModelManager::LoadModelOnline(uint32_t &model_id, const shared_ptr<ge::Ge
return ret;
}

void ModelManager::InsertModel(uint32_t id, std::shared_ptr<DavinciModel> &davinci_model) {
GE_CHK_BOOL_EXEC(davinci_model != nullptr, return, "davinci_model ptr is null, id: %u", id);
void ModelManager::InsertModel(uint32_t model_id, std::shared_ptr<DavinciModel> &davinci_model) {
GE_CHK_BOOL_EXEC(davinci_model != nullptr, return, "davinci_model ptr is null, id: %u", model_id);
std::lock_guard<std::recursive_mutex> lock(map_mutex_);
model_map_[id] = davinci_model;
model_map_[model_id] = davinci_model;
}

void ModelManager::InsertModel(uint32_t id, shared_ptr<hybrid::HybridDavinciModel> &hybrid_model) {
GE_CHK_BOOL_EXEC(hybrid_model != nullptr, return, "hybrid_model ptr is null, id: %u", id);
void ModelManager::InsertModel(uint32_t model_id, shared_ptr<hybrid::HybridDavinciModel> &hybrid_model) {
GE_CHK_BOOL_EXEC(hybrid_model != nullptr, return, "hybrid_model ptr is null, id: %u", model_id);
std::lock_guard<std::recursive_mutex> lock(map_mutex_);
hybrid_model_map_[id] = hybrid_model;
hybrid_model_map_[model_id] = hybrid_model;
}

Status ModelManager::DeleteModel(uint32_t id) {
@@ -1575,9 +1588,21 @@ Status ModelManager::GetOpDescInfo(uint32_t device_id, uint32_t stream_id, uint3
for (const auto &model : model_map_) {
auto davinci_model = model.second;
if (davinci_model->GetDeviceId() == device_id) {
GELOGI("Start to GetOpDescInfo of device_id: %u.", device_id);
GELOGI("[Get][OpDescInfo] Start to GetOpDescInfo of device_id: %u in davinci model.", device_id);
if (davinci_model->GetOpDescInfo(stream_id, task_id, op_desc_info)) {
GELOGI("Find specific node of stream_id: %u, task_id: %u.", stream_id, task_id);
GELOGI("[Get][OpDescInfo] Find specific node of stream_id: %u, task_id: %u in davinci model.",
stream_id, task_id);
return SUCCESS;
}
}
}
for (const auto &model : hybrid_model_map_) {
auto hybrid_model = model.second;
if (hybrid_model->GetDeviceId() == device_id) {
GELOGI("[Get][OpDescInfo] Start to GetOpDescInfo of device_id: %u in hybrid model.", device_id);
if (hybrid_model->GetOpDescInfo(stream_id, task_id, op_desc_info)) {
GELOGI("[Get][OpDescInfo] Find specific node of stream_id: %u, task_id: %u in hybrid model.",
stream_id, task_id);
return SUCCESS;
}
}
@@ -1590,6 +1615,7 @@ Status ModelManager::EnableExceptionDump(const std::map<string, string> &options
if (iter != options.end()) {
GELOGI("Find option enable_exeception_dump is %s", iter->second.c_str());
if (iter->second == "1") {
dump_exception_flag_ = true;
rtError_t rt_ret = rtSetTaskFailCallback(reinterpret_cast<rtTaskFailCallback>(ExceptionCallback));
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtSetTaskFailCallback fail, ret = 0x%X",


ge/graph/load/model_manager/model_manager.h (+4, -2)

@@ -313,6 +313,7 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelManager {
instance->AddExceptionInfo(*rt_exception_info);
}

bool IsDumpExceptionOpen() { return dump_exception_flag_; }
private:
///
/// @ingroup domi_ome
@@ -330,8 +331,8 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelManager {
/// @ingroup domi_ome
/// @brief insert new model into model manager set
///
void InsertModel(uint32_t id, std::shared_ptr<DavinciModel> &davinci_model);
void InsertModel(uint32_t id, std::shared_ptr<hybrid::HybridDavinciModel> &hybrid_model);
void InsertModel(uint32_t model_id, std::shared_ptr<DavinciModel> &davinci_model);
void InsertModel(uint32_t model_id, std::shared_ptr<hybrid::HybridDavinciModel> &hybrid_model);

///
/// @ingroup domi_ome
@@ -356,6 +357,7 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelManager {
std::map<uintptr_t, std::map<std::string, CustAICPUKernelPtr>> cust_aicpu_so_;

static DumpProperties dump_properties_;
bool dump_exception_flag_ = false;
};
} // namespace ge



ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc (+1, -0)

@@ -357,6 +357,7 @@ void KernelExTaskInfo::SetIoAddrs(const OpDescPtr &op_desc) {
Status KernelExTaskInfo::UpdateArgs() {
GELOGI("KernelExTaskInfo::UpdateArgs in.");
davinci_model_->SetTotalIOAddrs(io_addrs_);
davinci_model_->UpdateOpIOAddrs(task_id_, stream_id_, io_addrs_);
GELOGI("KernelExTaskInfo::UpdateArgs success.");
return SUCCESS;
}


ge/graph/load/model_manager/task_info/kernel_task_info.cc (+1, -0)

@@ -523,6 +523,7 @@ Status KernelTaskInfo::UpdateArgs() {
return CopyNoncontinuousArgs(io_addr_offset_);
}
davinci_model_->SetTotalIOAddrs(io_addrs_);
davinci_model_->UpdateOpIOAddrs(task_id_, stream_id_, io_addrs_);
} else if (kernel_type_ == ccKernelType::AI_CPU || kernel_type_ == ccKernelType::CUST_AI_CPU) {
return CopyNoncontinuousArgs(sizeof(aicpu::AicpuParamHead));
}


ge/graph/manager/graph_manager.cc (+350, -168)

@@ -121,6 +121,10 @@ const char *const kAIcoreEngine = "AIcoreEngine";
const int32_t kDynamicDimsTypeIsGetNext = 0;
const int32_t kDynamicDimsTypeIsData = 1;
const char *const kGetNextName = "IteratorV2";
const uint32_t kInitGraphCount = 1;
const uint32_t kNotAdded = 0;
const uint32_t kStartAdd = 1;
const uint32_t kDoneAdded = 2;

bool IsTailingOptimization() {
string is_tailing_optimization_option;
@@ -202,6 +206,8 @@ Status GraphManager::Initialize(const std::map<string, string> &options) {

graph_map_.clear();
cache_helper_map_.clear();
graph_id_to_add_graph_cond_.clear();
graph_count_.clear();
init_flag_ = true;

thread_run_flag_ = true;
@@ -211,6 +217,20 @@ Status GraphManager::Initialize(const std::map<string, string> &options) {
return SUCCESS;
}

Status GraphManager::UnloadModel(GeRootModelPtr ge_root_model, uint32_t graph_id) {
Status ret = SUCCESS;
for (size_t i = 0; i < ge_root_model->GetAllModelId().size(); ++i) {
uint32_t model_id = ge_root_model->GetAllModelId()[i];
GELOGI("Unload model %u.", model_id);
ret = GraphLoader::UnloadModel(model_id);
if (ret != SUCCESS) {
GELOGW("[GraphManager] unload model failed, modelId=%u, graphId=%u.", model_id, graph_id);
return ret;
}
}
return ret;
}

Status GraphManager::Finalize() {
if (!init_flag_) {
GELOGW("GraphManager has not been initialized.");
@@ -241,7 +261,6 @@ Status GraphManager::Finalize() {
unload_model_ret = GE_GRAPH_GRAPH_IS_RUNNING;
continue;
}

// unload model
auto ge_root_model = graph_node->GetGeRootModel();
if (ge_root_model != nullptr && ge_root_model->GetModelId() != INVALID_MODEL_ID && graph_node->GetLoadFlag()) {
@@ -251,15 +270,14 @@ Status GraphManager::Finalize() {
unload_model_ret = FAILED;
continue;
}
ret = GraphLoader::UnloadModel(ge_root_model->GetModelId());
ret = UnloadModel(ge_root_model, iter->first);
if (ret != SUCCESS) {
GELOGW("[GraphManager] unload model failed, modelId=%u, graphId=%u.", ge_root_model->GetModelId(), iter->first);
GELOGW("[GraphManager] unload model failed, graph_id=%u.", iter->first);
unload_model_ret = ret;
}
rt_ret = rtDeviceReset(GetContext().DeviceId());
if (rt_ret != RT_ERROR_NONE) {
GELOGW("[GraphManager] rtDeviceReset failed, modelId=%u, graphId=%u.", ge_root_model->GetModelId(),
iter->first);
GELOGW("[GraphManager] rtDeviceReset failed, graphId=%u.", iter->first);
unload_model_ret = FAILED;
continue;
}
@@ -274,6 +292,7 @@ Status GraphManager::Finalize() {
}
graph_map_.clear();
cache_helper_map_.clear();
graph_count_.clear();

// graph context
if (graph_context_ != nullptr) {
@@ -326,35 +345,59 @@ Status GraphManager::InitDynamicParams(ComputeGraphPtr &compute_graph) {
return SUCCESS;
}

Status GraphManager::AddGraph(const GraphId &graph_id, const Graph &graph,
const std::map<std::string, std::string> &options,
const OmgContext &omg_context) {
if (HasGraphNode(graph_id)) {
REPORT_INNER_ERROR("E19999", "graph_id:%u is exist, check invalid", graph_id);
GELOGE(GE_GRAPH_GRAPH_ALREADY_EXIST, "[GraphManager] graph exists, graph_id = %u.", graph_id);
return GE_GRAPH_GRAPH_ALREADY_EXIST;
void GraphManager::SetAddGraphCondition(GraphId graph_id, uint32_t cond) {
std::lock_guard<std::mutex> lock(add_graph_cond_mutex_);
graph_id_to_add_graph_cond_[graph_id] = cond;
GELOGD("Graph [id:%u] has been added.", graph_id);
}

uint32_t GraphManager::GetAddGraphCondition(GraphId graph_id) {
std::lock_guard<std::mutex> lock(add_graph_cond_mutex_);
auto it = graph_id_to_add_graph_cond_.find(graph_id);
if (it != graph_id_to_add_graph_cond_.end()) {
return it->second;
} else {
GELOGD("Graph [id:%u] has not been added.", graph_id);
return kNotAdded;
}
}

auto compute_graph = GraphUtils::GetComputeGraph(graph);
if (compute_graph != nullptr) {
compute_graph->SetGraphID(graph_id);
bool graph_has_been_added = false;
if (AttrUtils::GetBool(*compute_graph, ATTR_NAME_GRAPH_HAS_BEEN_ADDED, graph_has_been_added)
&& graph_has_been_added) {
REPORT_INNER_ERROR("E19999", "Get Attr:%s from graph:%u fail",
ATTR_NAME_GRAPH_HAS_BEEN_ADDED.c_str(), graph_id);
GELOGE(GE_GRAPH_GRAPH_ALREADY_EXIST,
"[GraphManager] same graph object can not be added again, graph_id = %u.", graph_id);
return GE_GRAPH_GRAPH_ALREADY_EXIST;
}
(void)AttrUtils::SetBool(*compute_graph, ATTR_NAME_GRAPH_HAS_BEEN_ADDED, true);
compute_graph_ = compute_graph;
void GraphManager::RemoveAddGraphCondition(GraphId graph_id) {
std::lock_guard<std::mutex> lock(add_graph_cond_mutex_);
auto it = graph_id_to_add_graph_cond_.find(graph_id);
if (it != graph_id_to_add_graph_cond_.end()) {
graph_id_to_add_graph_cond_.erase(it);
GELOGD("Successfully removed add_graph_cond of graph [id:%u].", graph_id);
} else {
REPORT_INNER_ERROR("E19999", "compute_graph from graph:%u is nullptr, check invalid",
graph_id);
GELOGE(FAILED, "compute graph is null");
return FAILED;
GELOGD("Graph [id:%u] has not been added. no need to remove.", graph_id);
}
}

Status GraphManager::CheckRepeatAdd(uint32_t graph_id, bool &is_added) {
uint32_t count = 0;
if (GetGraphCount(graph_id, count) != SUCCESS) {
GELOGE(INTERNAL_ERROR, "Get graph [id:%u] count failed, graph might have not been added.", graph_id);
return INTERNAL_ERROR;
}
// a previous thread owning the same graph_id is still in the middle of the AddGraph process
if (count > 1 && GetAddGraphCondition(graph_id) == kStartAdd) {
std::unique_lock<std::mutex> lock(add_graph_mutex_);
GELOGD("Waitting for build end of previous thread.");
while (GetAddGraphCondition(graph_id) != kDoneAdded) {
add_graph_cv_.wait(lock);
}
GraphNodePtr graph_node;
Status ret = GetGraphNode(graph_id, graph_node);
if (ret != SUCCESS) {
GELOGE(ret, "[AddGraph] GetGraphNode failed, graph_id = %u.", graph_id);
return ret;
}
is_added = true;
}
return SUCCESS;
}

void GraphManager::SetSessionGraphId(ComputeGraphPtr compute_graph, uint32_t graph_id) {
std::string session_graph_id;
if (!AttrUtils::GetStr(*compute_graph, ATTR_NAME_SESSION_GRAPH_ID, session_graph_id) || session_graph_id.empty()) {
session_graph_id = "-1_" + to_string(graph_id);
@@ -366,7 +409,24 @@ Status GraphManager::AddGraph(const GraphId &graph_id, const Graph &graph,
}
GELOGD("Get graph session_graph_id attr failed, set session id to default value: [0]");
}
}

Status GraphManager::NotifyWaittingGraph(uint32_t graph_id) {
uint32_t count = 0;
if (GetGraphCount(graph_id, count) != SUCCESS) {
GELOGE(INTERNAL_ERROR, "Get graph [id:%u] count failed, graph might have not been added.", graph_id);
return INTERNAL_ERROR;
}
GELOGD("Add graph finished, graph_id:%u", graph_id);
if (count > 1) {
GELOGD("Finish addgraph, graph_id:%u, graph_count:%u, start to notify.", graph_id, count);
add_graph_cv_.notify_all();
}
return SUCCESS;
}

Status GraphManager::CreateGraphNode(uint32_t graph_id, const Graph &graph,
const std::map<std::string, std::string> &options) {
GraphNodePtr graph_node = MakeShared<ge::GraphNode>(graph_id);
GE_IF_BOOL_EXEC(graph_node == nullptr,
REPORT_CALL_ERROR("E19999", "New GraphNode fail, graph_id:%u",
@@ -385,7 +445,62 @@ Status GraphManager::AddGraph(const GraphId &graph_id, const Graph &graph,
ParseOption(options, TUNING_PATH, options_.tuning_path);
graph_node->SetGraph(graph_ptr);
graph_node->SetOptions(options);
graph_node->IncreaseLoadCount();
AddGraphNode(graph_id, graph_node);
return SUCCESS;
}

Status GraphManager::SetStagesOptions(uint32_t graph_id, const GraphManagerOptions &options) {
CompilerStages &stages = GetCompilerStages(graph_id);
stages.preparer.SetOptions(options_);
Status status = stages.optimizer.SetOptions(options_);
if (status != SUCCESS) {
GELOGE(status, "Graph optimizer set options failed.");
return status;
}
stages.builder.SetOptions(options_);
return SUCCESS;
}

Status GraphManager::AddGraph(const GraphId &graph_id, const Graph &graph,
const std::map<std::string, std::string> &options,
const OmgContext &omg_context) {
IncreaseGraphCount(graph_id);
// validation for adding graphs with the same graph_id in the multi-thread scenario
// 1. a previous thread owning the same graph_id has finished the AddGraph process
if (GetAddGraphCondition(graph_id) == kDoneAdded) {
GraphNodePtr graph_node;
if (GetGraphNode(graph_id, graph_node) != SUCCESS) {
GELOGE(GE_GRAPH_GRAPH_NOT_EXIST, "Graph not exist while done adding previously, graph_id = %u.", graph_id);
return GE_GRAPH_GRAPH_NOT_EXIST;
}
graph_node->IncreaseLoadCount();
return SUCCESS;
}
// In the multi-thread scenario, a former thread owning the same graph_id may still be in the
// middle of the AddGraph process; the following threads have to wait until the former thread
// has finished adding the graph, avoiding repeatedly adding the same graph.
bool is_added = false;
if (CheckRepeatAdd(graph_id, is_added) != SUCCESS) {
GELOGE(INTERNAL_ERROR, "CheckRepeatAdd for graph[id:%u] failed.", graph_id);
return INTERNAL_ERROR;
}
// The former graph (from a different thread) with the same graph id has been successfully added.
if (is_added) {
return SUCCESS;
}
// Do add graph
SetAddGraphCondition(graph_id, kStartAdd);
auto compute_graph = GraphUtils::GetComputeGraph(graph);
GE_CHECK_NOTNULL(compute_graph);
compute_graph->SetGraphID(graph_id);

SetSessionGraphId(compute_graph, graph_id);

if (CreateGraphNode(graph_id, graph, options) != SUCCESS) {
GELOGE(FAILED, "Failed to create graph_node.");
return FAILED;
}

AddLocalOmgContext(graph_id, omg_context);
if (!options_.output_datatype.empty()) {
@@ -396,16 +511,18 @@ Status GraphManager::AddGraph(const GraphId &graph_id, const Graph &graph,
return GRAPH_PARAM_INVALID;
}

CompilerStages &stages = GetCompilerStages(graph_id);
stages.preparer.SetOptions(options_);
Status status = stages.optimizer.SetOptions(options_);
if (status != SUCCESS) {
GELOGE(status, "Graph optimizer set options failed.");
return status;
if (SetStagesOptions(graph_id, options_) != SUCCESS) {
GELOGE(INTERNAL_ERROR, "Set stage options failed.");
return INTERNAL_ERROR;
}
stages.builder.SetOptions(options_);

var_acc_ctrl_.AddGraph(graph_id, compute_graph);
SetAddGraphCondition(graph_id, kDoneAdded);
// There may be threads waiting for the add of the same graph to finish
if (NotifyWaittingGraph(graph_id) != SUCCESS) {
GELOGE(INTERNAL_ERROR, "NotifyWaittingGraph failed.");
return INTERNAL_ERROR;
}
return SUCCESS;
}
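The reworked AddGraph relies on a small per-graph state machine (kNotAdded, kStartAdd, kDoneAdded) plus a condition variable, so that only the first thread with a given graph_id builds the graph while later threads wait and then reuse it. A condensed, self-contained sketch of that guard, with the real graph bookkeeping and error handling omitted:

#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <iostream>
#include <map>
#include <mutex>
#include <thread>

constexpr uint32_t kNotAdded = 0;
constexpr uint32_t kStartAdd = 1;
constexpr uint32_t kDoneAdded = 2;

class AddGraphGuard {
 public:
  // Returns true if the caller should perform the actual add.
  bool TryStartAdd(uint32_t graph_id) {
    std::unique_lock<std::mutex> lock(mu_);
    auto &cond = cond_[graph_id];  // value-initialized to kNotAdded on first use
    if (cond == kNotAdded) {
      cond = kStartAdd;
      return true;
    }
    // Another thread is adding (or has added) the same graph_id: wait for it.
    cv_.wait(lock, [&] { return cond_[graph_id] == kDoneAdded; });
    return false;
  }

  void FinishAdd(uint32_t graph_id) {
    {
      std::lock_guard<std::mutex> lock(mu_);
      cond_[graph_id] = kDoneAdded;
    }
    cv_.notify_all();
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  std::map<uint32_t, uint32_t> cond_;
};

int main() {
  AddGraphGuard guard;
  auto worker = [&](int tid) {
    if (guard.TryStartAdd(42)) {
      std::this_thread::sleep_for(std::chrono::milliseconds(10));  // pretend to build
      std::cout << "thread " << tid << " added graph 42\n";
      guard.FinishAdd(42);
    } else {
      std::cout << "thread " << tid << " reused graph 42\n";
    }
  };
  std::thread t1(worker, 1), t2(worker, 2);
  t1.join();
  t2.join();
  return 0;
}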

@@ -864,7 +981,7 @@ Status GraphManager::PreRun(const GraphNodePtr &graph_node, const std::vector<Ge
}
}

ErrorManager::GetInstance().SetStage(ErrorMessage::kModelCompile, ErrorMessage::kPrepareOptimize);
ErrorManager::GetInstance().SetStage(ErrorMessage::kModelCompile, ErrorMessage::kSubGraphOptimize);
// set fuzz compile flag after origin graph optimize
GE_CHK_STATUS_RET(SetFuzzCompileFlag(compute_graph), "Set fuzz compile flag failed.");
ret = PreRunOptimizeSubGraph(graph_node, compute_graph, session_id);
@@ -962,6 +1079,7 @@ Status GraphManager::StartForRunGraph(const GraphNodePtr &graph_node, const std:
if (!graph_node->IsAsync()) {
ret = LoadGraph(ge_root_model, graph_node);
} else {
GE_CHECK_NOTNULL(ge_root_model);
ret = LoadGraphAsync(ge_root_model, graph_node);
}
if (ret != SUCCESS) {
@@ -976,6 +1094,7 @@ Status GraphManager::StartForRunGraph(const GraphNodePtr &graph_node, const std:
if (!graph_node->IsAsync()) {
ret = LoadGraph(ge_root_model_ptr, graph_node);
} else {
GE_CHECK_NOTNULL(ge_root_model);
ret = LoadGraphAsync(ge_root_model_ptr, graph_node);
}
if (ret != SUCCESS) {
@@ -988,6 +1107,7 @@ Status GraphManager::StartForRunGraph(const GraphNodePtr &graph_node, const std:
Status GraphManager::LoadGraph(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node) {
GELOGI("[LoadGraph] run_graph_flag[%d], graph_id[%u]", options_.run_graph_flag, graph_node->GetGraphId());
if (options_.run_graph_flag && ge_root_model != nullptr) {
ge_root_model->SetTrainFlag(GetTrainFlag());
// synchronization run graph with model
std::shared_ptr<GraphModelListener> model_listener = GetModelListener();
ModelIdInfo model_id_info;
@@ -1471,62 +1591,29 @@ bool GraphManager::CheckModelLoad(const GeRootModelPtr &ge_root_model, bool load
}

Status GraphManager::RemoveGraph(const GraphId &graph_id) {
auto it = to_be_deleted_graphs_.find(graph_id);
if (it != to_be_deleted_graphs_.end()) {
to_be_deleted_graphs_.erase(it);
}
GraphNodePtr graph_node = nullptr;
Status ret = GetGraphNode(graph_id, graph_node);
if (ret != SUCCESS) {
REPORT_INNER_ERROR("E19999", "Graph:%u not exist in graph_map, check invalid",
graph_id);
if (ret != SUCCESS || graph_node == nullptr) {
REPORT_INNER_ERROR("E19999", "Graph:%u not exist in graph_map, check invalid when GraphManager %s",
graph_id, __FUNCTION__);
GELOGE(GE_GRAPH_GRAPH_NOT_EXIST, "[GraphManager] Id %u does not exists.", graph_id);
return GE_GRAPH_GRAPH_NOT_EXIST;
}

if ((graph_node == nullptr) || (graph_node->GetRunFlag())) {
REPORT_INNER_ERROR("E19999", "Graph:%u is running, can't be remove, check invalid",
graph_id);
GELOGE(GE_GRAPH_GRAPH_IS_RUNNING, "[GraphManager] Id %u is running, can't be deleted.", graph_id);
return GE_GRAPH_GRAPH_IS_RUNNING;
if (graph_node->GetRunFlag()) {
// only put the graph into the to-be-deleted list in the exceptional scenario
to_be_deleted_graphs_.insert(graph_id);
GELOGI("[GraphManager] Trying to remove running graph[Id:%u], added into to_be_deleted_graphs_.", graph_id);
return SUCCESS;
}

std::lock_guard<std::mutex> lock(unload_model_mutex_);

Status middle_ret;
rtError_t rt_ret;
const std::vector<SubGraphInfoPtr> &all_sub_graph = graph_node->GetAllSubGraph();
for (size_t i = 0; i < all_sub_graph.size(); ++i) {
// must free buffer firstly
middle_ret = all_sub_graph[i]->FreeInOutBuffer();
if (middle_ret != SUCCESS) {
GELOGE(middle_ret, "[GraphManager] RemoveGraph free mem failed, graph_id=%u.", graph_id);
ret = middle_ret;
}
if (all_sub_graph[i]->GeModelIsValid() && all_sub_graph[i]->GetModelIdInfo().model_id != INVALID_MODEL_ID) {
// unload model
GELOGI("UnloadModel via new ome.");
rt_ret = rtSetDevice(GetContext().DeviceId());
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtSetDevice failed, device_id:%u, graph_id:%u",
GetContext().DeviceId(), graph_id);
GELOGE(RT_FAILED, "[GraphManager:] rtSetDevice failed, modelId=%u, graphId=%u.",
all_sub_graph[i]->GetModelIdInfo().model_id, graph_id);
ret = FAILED;
continue;
}
middle_ret = GraphLoader::UnloadModel(all_sub_graph[i]->GetModelIdInfo().model_id);
if (middle_ret != SUCCESS) {
GELOGE(middle_ret, "[GraphManager:] unload model failed, modelId=%u, graph_id=%u.",
all_sub_graph[i]->GetModelIdInfo().model_id, graph_id);
ret = middle_ret;
}
rt_ret = rtDeviceReset(GetContext().DeviceId());
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtDeviceReset fail, device_id:%u, graph_id:%u",
GetContext().DeviceId(), graph_id);
GELOGE(RT_FAILED, "[GraphManager:] unload model failed, modelId=%u, graphId=%u.",
all_sub_graph[i]->GetModelIdInfo().model_id, graph_id);
ret = FAILED;
}
}
}
var_acc_ctrl_.RemoveGraph(graph_id);
RemoveGraphNode(graph_id);

@@ -1534,7 +1621,6 @@ Status GraphManager::RemoveGraph(const GraphId &graph_id) {

auto ge_root_model = graph_node->GetGeRootModel();
if (CheckModelLoad(ge_root_model, graph_node->GetLoadFlag())) {
GELOGI("Unload model %u.", ge_root_model->GetModelId());
rt_ret = rtSetDevice(GetContext().DeviceId());
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtSetDevice failed, device_id:%u, graph_id:%u",
@@ -1543,23 +1629,27 @@ Status GraphManager::RemoveGraph(const GraphId &graph_id) {
graph_id);
return FAILED;
}
middle_ret = GraphLoader::UnloadModel(ge_root_model->GetModelId());
// the same graph may be added several times and a separate model is created each time;
// unload them one by one.
middle_ret = UnloadModel(ge_root_model, graph_id);
if (middle_ret != SUCCESS) {
GELOGE(middle_ret, "[GraphManager:] unload model failed, modelId=%u, graph_id=%u.", ge_root_model->GetModelId(),
graph_id);
REPORT_INNER_ERROR("E19999", "UnloadModel for graph:%u failed, check unload detail in GraphLoader %s",
graph_id, __FUNCTION__);
GELOGE(middle_ret, "[GraphManager:] unload model failed, graph_id=%u.", graph_id);
ret = middle_ret;
}
rt_ret = rtDeviceReset(GetContext().DeviceId());
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtDeviceReset failed, device_id:%u, graph_id:%u",
GetContext().DeviceId(), graph_id);
GELOGE(RT_FAILED, "[GraphManager:] rtDeviceReset failed, modelId=%u, graphId=%u.", ge_root_model->GetModelId(),
graph_id);
REPORT_CALL_ERROR("E19999", "Call rtDeviceReset failed, device_id:%u, graph_id:%u, when GraphManager %s",
GetContext().DeviceId(), graph_id, __FUNCTION__);
GELOGE(RT_FAILED, "[GraphManager:] rtDeviceReset failed, graphId=%u.", graph_id);
ret = FAILED;
}
}

RemoveCompilerStages(graph_id);
RemoveGraphCount(graph_id);
RemoveAddGraphCondition(graph_id);

GE_CHK_STATUS_RET(ret, "[GraphManager:] Remove graph failed, graph_id=%u.", graph_id);
GELOGI("[GraphManager] remove graph success, graph_id=%u.", graph_id);
@@ -2646,6 +2736,7 @@ void GraphManager::ChangeConstTypeWhenTraining(const ComputeGraphPtr &compute_gr
Status GraphManager::LoadGraphAsync(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node) {
GELOGI("[LoadGraphAsync] run_graph_flag[%d], graph_id[%u]", options_.run_graph_flag, graph_node->GetGraphId());
if (options_.run_graph_flag && ge_root_model != nullptr) {
ge_root_model->SetTrainFlag(GetTrainFlag());
// synchronization run graph with model
ModelIdInfo model_id_info;
bool is_unknown_shape = false;
@@ -2662,9 +2753,9 @@ Status GraphManager::LoadGraphAsync(const GeRootModelPtr &ge_root_model, const G
}
}
GE_TIMESTAMP_START(LoadGraph);
GE_CHECK_NOTNULL(graph_node->graph_run_async_listener_);
Status ret =
GraphLoader::LoadModelOnline(model_id_info.model_id, ge_root_model, graph_node->graph_run_async_listener_);
auto listener = MakeShared<RunAsyncListener>();
GE_CHECK_NOTNULL(listener);
Status ret = GraphLoader::LoadModelOnline(model_id_info.model_id, ge_root_model, listener);
GE_TIMESTAMP_EVENT_END(LoadGraph, "GraphManager::LoadGraphAsync");
if (ret != SUCCESS) {
GELOGE(ret, "[LoadGraphAsync] LoadGraphAsync Failed");
@@ -2678,6 +2769,52 @@ Status GraphManager::LoadGraphAsync(const GeRootModelPtr &ge_root_model, const G
return SUCCESS;
}

void GraphManager::ReleaseMemory(const GeModelPtr &ge_model, GraphNodePtr &graph_node,
const std::vector<uint32_t> &model_ids, uint32_t graph_id, uint64_t session_id) {
rtError_t rt_ret = rtSetDevice(GetContext().DeviceId());
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtSetDevice failed, device_id:%u, when GraphManager %s",
GetContext().DeviceId(), __FUNCTION__);
GELOGE(RT_FAILED, "[GraphManager:] rtSetDevice failed, graphId=%u.", graph_id);
return;
}
for (auto model_id : model_ids) {
uint64_t max_memory_size = 0;
Status result = GraphLoader::GetMaxUsedMemory(model_id, max_memory_size);
if (result != SUCCESS) {
continue;
}
GELOGI("CheckAndReleaseMemory try to UnloadGraph[%u], model[%u] which MaxUsedMemory[%lu].", graph_id, model_id,
max_memory_size);
if (model_ids.size() > 1) {
result = ge_model->GetSessionId(model_id, session_id);
if (result != SUCCESS) {
GELOGW("[GraphManager:] get session failed when dynamic memory, modelId=%u, graphId=%u.", model_id,
graph_id);
continue;
}
}
result = GraphLoader::DestroyAicpuKernel(session_id, model_id, 0);
if (result != SUCCESS) {
GELOGW("[GraphManager:] destroy aicpu kernel failed when dynamic memory, modelId=%u, graphId=%u.", model_id,
graph_id);
}
result = GraphLoader::UnloadModel(model_id);
if (result != SUCCESS) {
GELOGW("[GraphManager:] unload model failed, modelId=%u, graphId=%u.", model_id, graph_id);
}
GELOGI("CheckAndReleaseMemory UnloadGraph[%u], model[%u] success.", graph_id, model_id);
}
graph_node->SetLoadFlag(false);
rt_ret = rtDeviceReset(GetContext().DeviceId());
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtDeviceReset failed, device_id:%u, when GraphManager %s",
GetContext().DeviceId(), __FUNCTION__);
GELOGE(RT_FAILED, "[GraphManager:] rtDeviceReset failed, graphId=%u.", graph_id);
return;
}
}
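ReleaseMemory wraps per-model cleanup in a set-device / reset-device bracket and treats failures on individual models as best-effort (warn and continue). A schematic of that pattern with stub calls standing in for rtSetDevice, GraphLoader::DestroyAicpuKernel, GraphLoader::UnloadModel and rtDeviceReset:

#include <cstdint>
#include <iostream>
#include <vector>

// Stub runtime/loader calls; the real functions are not reproduced here.
int SetDevice(uint32_t) { return 0; }
int ResetDevice(uint32_t) { return 0; }
int DestroyKernel(uint32_t) { return 0; }
int UnloadModel(uint32_t) { return 0; }

// Best-effort release: a failure on one model is logged and skipped,
// but the device set/reset bracket is always balanced.
void ReleaseModels(uint32_t device_id, const std::vector<uint32_t> &model_ids) {
  if (SetDevice(device_id) != 0) {
    std::cerr << "SetDevice failed, device " << device_id << "\n";
    return;
  }
  for (uint32_t model_id : model_ids) {
    if (DestroyKernel(model_id) != 0) {
      std::cerr << "destroy aicpu kernel failed, model " << model_id << "\n";
    }
    if (UnloadModel(model_id) != 0) {
      std::cerr << "unload failed, model " << model_id << "\n";
      continue;
    }
    std::cout << "unloaded model " << model_id << "\n";
  }
  if (ResetDevice(device_id) != 0) {
    std::cerr << "ResetDevice failed, device " << device_id << "\n";
  }
}

int main() {
  ReleaseModels(0, {101, 102, 103});
  return 0;
}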

Status GraphManager::CheckAndReleaseMemory(const GeModelPtr &ge_model, const GraphNodePtr &graph_node) {
GELOGI("CheckAndReleaseMemory graph_id[%u]", graph_node->GetGraphId());
int64_t value = 0;
@@ -2723,6 +2860,7 @@ Status GraphManager::CheckAndReleaseMemory(const GeModelPtr &ge_model, const Gra
continue;
}
auto model_id = model->GetModelId();
auto model_ids = model->GetAllModelId();
// unload model not release
bool is_unknown_shape = false;
GE_CHK_STATUS_RET(model->CheckIsUnknownShape(is_unknown_shape));
@@ -2735,38 +2873,7 @@ Status GraphManager::CheckAndReleaseMemory(const GeModelPtr &ge_model, const Gra
GELOGI("CheckAndReleaseMemory graph[%u] has not been loaded.", graph_id);
continue;
}
uint64_t max_memory_size = 0;
result = GraphLoader::GetMaxUsedMemory(model_id, max_memory_size);
if (result != SUCCESS) {
continue;
}
GELOGI("CheckAndReleaseMemory try to UnloadGraph[%u], model[%u] which MaxUsedMemory[%lu].", graph_id, model_id,
max_memory_size);
rtError_t rt_ret = rtSetDevice(GetContext().DeviceId());
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtSetDevice failed, device_id:%u",
GetContext().DeviceId());
GELOGE(RT_FAILED, "[GraphManager:] rtSetDevice failed, modelId=%u, graphId=%u.", model_id, graph_id);
continue;
}
result = GraphLoader::DestroyAicpuKernel(session_id, model_id, 0);
if (result != SUCCESS) {
GELOGW("[GraphManager:] destroy aicpu kernel failed when dynamic memory, modelId=%u, graphId=%u.", model_id,
graph_id);
}
result = GraphLoader::UnloadModel(model_id);
if (result != SUCCESS) {
GELOGW("[GraphManager:] unload model failed, modelId=%u, graphId=%u.", model_id, graph_id);
}
rt_ret = rtDeviceReset(GetContext().DeviceId());
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtDeviceReset failed, device_id:%u",
GetContext().DeviceId());
GELOGE(RT_FAILED, "[GraphManager:] rtDeviceReset failed, modelId=%u, graphId=%u.", model_id, graph_id);
continue;
}
it.second->SetLoadFlag(false);
GELOGI("CheckAndReleaseMemory UnloadGraph[%u], model[%u] success and set LoadFlag to false.", graph_id, model_id);
ReleaseMemory(ge_model, it.second, model_ids, graph_id, session_id);
}

return SUCCESS;
@@ -2907,6 +3014,38 @@ void GraphManager::ConstructGeInput(const vector<InputTensorInfo> &inputs, vecto
}
}

Status GraphManager::CheckIncreBuildAndPreRun(GraphManager *graph_manager, const PreRunArgs &args,
GraphNodePtr &graph_node, GeRootModelPtr &ge_root_model) {
if (!graph_manager->IsGraphNeedBuild(graph_node)) {
ge_root_model = graph_node->GetGeRootModel();
return SUCCESS;
}
if (graph_node->GetBuildFlag()) {
ReturnError(graph_manager, args.callback, PARAM_INVALID,
"The graph " + std::to_string(graph_node->GetGraphId()) +
" need to re-build, you should remove it"
" from GE first, then AddGraph again and rebuild it.");
graph_node->Unlock();
return PARAM_INVALID;
}
// check need incre build.
GeModelPtr ge_model = nullptr;
if (graph_manager->IncreBuild(graph_node, ge_model) != SUCCESS) {
std::vector<GeTensor> ge_inputs;
ConstructGeInput(args.input_tensor, ge_inputs);
Status ret = graph_manager->PreRun(graph_node, ge_inputs, ge_root_model, args.session_id);
// release rts generate context
RtContextUtil::GetInstance().DestroyRtContexts(args.session_id, graph_node->GetGraphId());
if (ret != SUCCESS) {
ReturnError(graph_manager, args.callback, ret, "PreRun Failed.");
return ret;
}
}
graph_node->SetBuildFlag(true);
graph_manager->var_acc_ctrl_.SetGraphBuildEnd(graph_node->GetGraphId());
return SUCCESS;
}

void GraphManager::PreRunThread(GraphManager *graph_manager) {
if (prctl(PR_SET_NAME, ("GE_PreRun")) != 0) {
GELOGW("Set thread name failed.");
@@ -2919,7 +3058,7 @@ void GraphManager::PreRunThread(GraphManager *graph_manager) {
continue;
}

GELOGI("A new loop start.");
GELOGI("[PreRunThread] A new loop start, graph_id:%u.", args.graph_id);

ErrorManager::GetInstance().SetErrorContext(args.error_context);
ErrorManager::GetInstance().SetStage(ErrorMessage::kModelCompile, ErrorMessage::kOther);
@@ -2935,7 +3074,24 @@ void GraphManager::PreRunThread(GraphManager *graph_manager) {
"[RunGraph] graph not exist, graph_id=" + std::to_string(args.graph_id));
return;
}

// more than one graph owns the same graph_id
uint32_t count = 0;
if (graph_manager->GetGraphCount(args.graph_id, count) != SUCCESS) {
GELOGE(INTERNAL_ERROR, "Get graph [id:%u] count failed.", args.graph_id);
return;
}
// Avoid repeated prerun for graphs owning the same graph_id in online inference concurrency
if (count > 1 && graph_node->GetBuildFlag()) {
graph_node->Lock();
GELOGD("Avoid repeatively prerun, graph_id:%u.", args.graph_id);
// In online inference concurrency senario, graph_node is allowed to be locked for 'count' times
graph_node->SetSemSize(count);
graph_manager->run_args_q_.Push(RunArgs( { graph_node, args.graph_id, args.session_id, args.error_context,
args.input_tensor, graph_node->GetGeRootModel(), GetThreadLocalContext(), args.callback }));
GELOGI("[PreRunThread] Loop end. Start to run with cached build model.");
continue;
}
// Cannot be moved ahead of the repeated-prerun judgement
graph_node->Lock();

if (graph_node->GetRunFlag()) {
@@ -2967,46 +3123,24 @@ void GraphManager::PreRunThread(GraphManager *graph_manager) {
// it will not execute graph preprocess, optimize, partition, build if the graph has already been built successfully.
GELOGI("Start for run graph async.");
GeRootModelPtr ge_root_model = nullptr;
if (graph_manager->IsGraphNeedBuild(graph_node)) {
if (graph_node->GetBuildFlag()) {
ReturnError(graph_manager, args.callback, PARAM_INVALID,
"The graph " + std::to_string(graph_node->GetGraphId()) +
" need to re-build, you should remove it"
" from GE first, then AddGraph again and rebuild it.");
ret = CheckIncreBuildAndPreRun(graph_manager, args, graph_node, ge_root_model);
if (ret != SUCCESS) {
graph_node->SetRunFlag(false);
if (!ge::Analyzer::GetInstance()->IsEnableNetAnalyzeDebug()) {
ReturnError(graph_manager, args.callback, ret, "CheckIncreBuildAndPreRun Failed, thread exit..");
graph_node->Unlock();
return;
} else {
ReturnError(graph_manager, graph_node, args.callback, ret,
"CheckIncreBuildAndPreRun Failed, keep geop continue!");
graph_node->Unlock();
continue;
}

// check need incre build.
GeModelPtr ge_model = nullptr;
if (graph_manager->IncreBuild(graph_node, ge_model) != SUCCESS) {
std::vector<GeTensor> ge_inputs;
ConstructGeInput(args.input_tensor, ge_inputs);
ret = graph_manager->PreRun(graph_node, ge_inputs, ge_root_model, args.session_id);
// release rts generate context
RtContextUtil::GetInstance().DestroyRtContexts(args.session_id, graph_node->GetGraphId());
if (ret != SUCCESS) {
graph_node->SetRunFlag(false);
if (!ge::Analyzer::GetInstance()->IsEnableNetAnalyzeDebug()) {
ReturnError(graph_manager, args.callback, ret, "PreRun Failed, thread exit..");
graph_node->Unlock();
return;
} else {
ReturnError(graph_manager, graph_node, args.callback, ret, "PreRun Failed, keep geop continue!");
graph_node->Unlock();
continue;
}
}
}
graph_node->SetBuildFlag(true);
graph_manager->var_acc_ctrl_.SetGraphBuildEnd(graph_node->GetGraphId());
} else {
ge_root_model = graph_node->GetGeRootModel();
}

graph_manager->run_args_q_.Push(RunArgs( { graph_node, args.graph_id, args.session_id, args.error_context,
args.input_tensor, ge_root_model, GetThreadLocalContext(), args.callback }));
GELOGI("Loop end.");
GELOGI("[PreRunThread] Loop end.");
}
}

@@ -3109,16 +3243,13 @@ void GraphManager::RunThread(GraphManager *graph_manager) {
continue;
}

GELOGI("A new loop start.");
GELOGI("[RunThread] A new loop start, graph_id:%u.", args.graph_id);

ErrorManager::GetInstance().SetErrorContext(args.error_context);
GetContext().SetSessionId(args.session_id);
GetThreadLocalContext() = args.context;
graph_manager->UpdateLocalOmgContext(args.graph_id);

if (args.graph_node->graph_run_async_listener_ != nullptr) {
args.graph_node->graph_run_async_listener_->SetCallback(args.callback);
}
Status ret;
// parse inputs.dims to vector<vector<uint64_t>> dynamic_dims
ret = graph_manager->ParseInputsDims(args.input_tensor);
@@ -3128,8 +3259,10 @@ void GraphManager::RunThread(GraphManager *graph_manager) {
return;
}

args.graph_node->UpdateLoadFlag();
if (!args.graph_node->GetLoadFlag()) {
ErrorManager::GetInstance().SetStage(ErrorMessage::kModelLoad, ErrorMessage::kModelLoad);
args.ge_root_model->SetTrainFlag(graph_manager->GetTrainFlag());
ret = graph_manager->LoadGraphAsync(args.ge_root_model, args.graph_node);
if (ret != SUCCESS || args.ge_root_model == nullptr) {
StopQueue(graph_manager);
@@ -3137,6 +3270,10 @@ void GraphManager::RunThread(GraphManager *graph_manager) {
args.graph_node->Unlock();
return;
}
// control how many times the graph is loaded in the multi-thread scenario
args.graph_node->DecreaseLoadCount();
args.graph_node->IncreaseLoadRecord();

args.graph_node->SetLoadFlag(true);
GELOGI("LoadGraph[%u], model[%u] success and set LoadFlag to true.", args.graph_node->GetGraphId(),
args.ge_root_model->GetModelId());
@@ -3151,9 +3288,9 @@ void GraphManager::RunThread(GraphManager *graph_manager) {
graph_manager->graph_executor_.SetTrainFlag(graph_manager->options_.train_graph_flag);
}

args.graph_node->SetRunFlag(false);
ret = graph_manager->graph_executor_.ExecuteGraphAsync(args.graph_id, args.graph_node->GetGeRootModel(),
args.input_tensor);
args.input_tensor, args.callback);
args.graph_node->SetRunFlag(false);
if (ret != SUCCESS) {
ReturnError(graph_manager, args.callback, ret, "ExecuteGraphAsync failed, thread exit.");
args.graph_node->Unlock();
@@ -3604,4 +3741,49 @@ void GraphManager::RemoveCompilerStages(GraphId graph_id) {
std::lock_guard<std::mutex> lock(member_mutex_);
compiler_stages_.erase(graph_id);
}

void GraphManager::IncreaseGraphCount(GraphId graph_id) {
std::lock_guard<std::mutex> lock(graph_count_mutex_);
auto it = graph_count_.find(graph_id);
if (it == graph_count_.end()) {
graph_count_.insert({graph_id, kInitGraphCount});
GELOGD("After increaseGraphCount, graph count of id[%u] is %u.", graph_id, graph_count_[graph_id]);
} else {
++graph_count_[graph_id];
GELOGD("After increaseGraphCount, graph count of id[%u] is %u.", graph_id, graph_count_[graph_id]);
}
}

void GraphManager::RemoveGraphCount(GraphId graph_id) {
std::lock_guard<std::mutex> lock(graph_count_mutex_);
auto it = graph_count_.find(graph_id);
if (it == graph_count_.end()) {
GELOGW("Graph of id: %u has not been added, count cannot be decreased.", graph_id);
} else {
GELOGD("RemoveGraphCount success, graph count of id[%u] is %u.", graph_id, graph_count_[graph_id]);
graph_count_.erase(it);
}
}

void GraphManager::DecreaseGraphCount(GraphId graph_id) {
std::lock_guard<std::mutex> lock(graph_count_mutex_);
auto it = graph_count_.find(graph_id);
if (it == graph_count_.end()) {
GELOGW("Graph of id: %u has not been added, count cannot be decreased.", graph_id);
} else {
--it->second;
GELOGD("After DecreaseGraphCount, graph count of id[%u] is %u.", graph_id, graph_count_[graph_id]);
}
}

Status GraphManager::GetGraphCount(GraphId graph_id, uint32_t &count) {
std::lock_guard<std::mutex> lock(graph_count_mutex_);
auto it = graph_count_.find(graph_id);
if (it == graph_count_.end()) {
GELOGW("Graph [id:%u] has not been added.", graph_id);
return FAILED;
}
count = it->second;
return SUCCESS;
}
} // namespace ge
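The new Increase/Decrease/Remove/GetGraphCount helpers keep a mutex-protected reference count per graph_id, which the add and remove paths above consult to tell whether other users of the same graph remain. A standalone sketch of that counter registry:

#include <cstdint>
#include <iostream>
#include <map>
#include <mutex>

// Every AddGraph with the same graph_id bumps the count; RemoveGraph drops
// the whole entry once the graph is really gone.
class GraphCounter {
 public:
  void Increase(uint32_t graph_id) {
    std::lock_guard<std::mutex> lock(mu_);
    ++counts_[graph_id];  // first use starts from 0, so the initial count is 1
  }
  void Decrease(uint32_t graph_id) {
    std::lock_guard<std::mutex> lock(mu_);
    auto it = counts_.find(graph_id);
    if (it != counts_.end() && it->second > 0) --it->second;
  }
  bool Get(uint32_t graph_id, uint32_t &count) const {
    std::lock_guard<std::mutex> lock(mu_);
    auto it = counts_.find(graph_id);
    if (it == counts_.end()) return false;
    count = it->second;
    return true;
  }
  void Remove(uint32_t graph_id) {
    std::lock_guard<std::mutex> lock(mu_);
    counts_.erase(graph_id);
  }

 private:
  mutable std::mutex mu_;
  std::map<uint32_t, uint32_t> counts_;
};

int main() {
  GraphCounter counter;
  counter.Increase(42);
  counter.Increase(42);
  uint32_t n = 0;
  if (counter.Get(42, n)) std::cout << "graph 42 count: " << n << "\n";
  counter.Remove(42);
  return 0;
}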

+ 42
- 0
ge/graph/manager/graph_manager.h View File

@@ -196,6 +196,20 @@ class GraphManager {

Status SaveCheckPointResult(const Graph &graph, const std::vector<Tensor> &outputs, map<string, Tensor> &var_results);

void RemoveGraphCount(GraphId graph_id);

void IncreaseGraphCount(GraphId graph_id);

void DecreaseGraphCount(GraphId graph_id);

Status GetGraphCount(GraphId graph_id, uint32_t &count);

void SetAddGraphCondition(GraphId graph_id, uint32_t cond);

uint32_t GetAddGraphCondition(GraphId graph_id);

void RemoveAddGraphCondition(GraphId graph_id);

private:
struct CompilerStages {
GraphPrepare preparer;
@@ -393,6 +407,24 @@ class GraphManager {
CompilerStages &GetCompilerStages(GraphId graph_id);
void RemoveCompilerStages(GraphId graph_id);

static Status CheckIncreBuildAndPreRun(GraphManager *graph_manager, const PreRunArgs &args, GraphNodePtr &graph_node,
GeRootModelPtr &ge_root_model);

void ReleaseMemory(const GeModelPtr &ge_model, GraphNodePtr &graph_node, const std::vector<uint32_t> &model_ids,
uint32_t graph_id, uint64_t session_id);

Status CheckRepeatAdd(uint32_t graph_id, bool &is_added);

Status NotifyWaittingGraph(uint32_t graph_id);

Status CreateGraphNode(uint32_t graph_id, const Graph &graph, const std::map<std::string, std::string> &options);

Status SetStagesOptions(uint32_t graph_id, const GraphManagerOptions &options);

Status UnloadModel(GeRootModelPtr ge_root_model, uint32_t graph_id);

void SetSessionGraphId(ComputeGraphPtr compute_graph, uint32_t graph_id);

std::atomic_bool thread_run_flag_;
BlockingQueue<PreRunArgs> prerun_args_q_{};
BlockingQueue<RunArgs> run_args_q_{};
@@ -428,6 +460,16 @@ class GraphManager {

std::mutex member_mutex_;
std::mutex unload_model_mutex_;
// avoid repeatedly adding the same graph (owning the same graph id)
std::mutex add_graph_mutex_;
std::mutex add_graph_cond_mutex_;
std::condition_variable add_graph_cv_;

std::map<GraphId, uint32_t> graph_id_to_add_graph_cond_;
// used for the multi-thread online-infer scenario
std::set<GraphId> to_be_deleted_graphs_;
std::map<GraphId, uint32_t> graph_count_;
std::mutex graph_count_mutex_;
};
} // namespace ge



+ 9
- 0
ge/graph/manager/graph_manager_utils.cc View File

@@ -61,6 +61,15 @@ void GraphNode::Unlock() {
sem_.Pop(unused);
}

void GraphNode::IncreaseLoadCount() {
std::unique_lock<std::mutex> lock(load_count_mu_);
if (load_record_ == kMaxLoadNum) {
GELOGW("Reach the maximum of load_count:%u", kMaxLoadNum);
return;
}
++load_count_;
}

SubGraphInfo::SubGraphInfo() : subgraph_ptr_(nullptr), ge_model_ptr_(nullptr), malloc_flag_(false) {}

SubGraphInfo::~SubGraphInfo() {


+ 16
- 0
ge/graph/manager/graph_manager_utils.h View File

@@ -55,6 +55,7 @@ using ConstGraphPtr = std::shared_ptr<const ge::Graph>;
using GraphPtr = std::shared_ptr<ge::Graph>;

const uint64_t INVALID_SESSION_ID = 0xffffffffffffffffULL;
const uint32_t kMaxLoadNum = 8;

struct ModelIdInfo {
uint32_t model_id{INVALID_MODEL_ID};
@@ -162,6 +163,8 @@ class GraphNode {
bool GetBuildFlag() const { return build_flag_; }
void SetBuildFlag(bool buildFlag) { build_flag_ = buildFlag; }
bool GetLoadFlag() const { return load_flag_; }
// allow repeatedly loading a graph that owns the same graph id
void UpdateLoadFlag() { load_flag_ = load_count_ == 0 || load_record_ >= kMaxLoadNum; }
void SetLoadFlag(bool load_flag) { load_flag_ = load_flag; }
void SetGeModel(const GeModelPtr &ge_model) { ge_model_ = ge_model; }
void SetIsSpecificStream(bool specific_stream) { is_specific_stream_ = specific_stream; }
@@ -174,6 +177,13 @@ class GraphNode {
void Lock();
void Unlock();

void SetSemSize(uint32_t size) { sem_.SetMaxSize(size); }

uint32_t GetLoadCount() const { return load_count_; }
void IncreaseLoadCount();
void DecreaseLoadCount() { --load_count_; }
void IncreaseLoadRecord() { ++load_record_; }

// run graph asynchronous listener
std::shared_ptr<RunAsyncListener> graph_run_async_listener_;

@@ -186,12 +196,18 @@ class GraphNode {
GraphPtr graph_;
ComputeGraphPtr compute_graph_;
bool build_flag_;
// load_flag_ is true if more than 1 model has been loaded
bool load_flag_;
bool async_;
bool is_specific_stream_;
GeModelPtr ge_model_;
GeRootModelPtr ge_root_model_;
BlockingQueue<uint8_t> sem_;
// consistent with the graph_count of the same graph_id in graph_manager
uint32_t load_count_ = 0;
// total number of times a graph with the same graph_id has been loaded.
uint32_t load_record_ = 0;
std::mutex load_count_mu_;
};

using GraphNodePtr = std::shared_ptr<GraphNode>;
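GraphNode now tracks load_count_ (pending users of the loaded model), load_record_ (total loads so far) and caps repeated loads at kMaxLoadNum; UpdateLoadFlag() derives the load flag from these counters. A small sketch of that bookkeeping, reduced to the counters themselves and mirroring the locking of the original (only the increase path takes the mutex):

#include <cstdint>
#include <iostream>
#include <mutex>

constexpr uint32_t kMaxLoadNum = 8;

class LoadTracker {
 public:
  void IncreaseLoadCount() {
    std::lock_guard<std::mutex> lock(mu_);
    if (load_record_ == kMaxLoadNum) {
      std::cout << "reached the maximum load record: " << kMaxLoadNum << "\n";
      return;
    }
    ++load_count_;
  }
  void DecreaseLoadCount() { --load_count_; }
  void IncreaseLoadRecord() { ++load_record_; }
  // Mirrors UpdateLoadFlag(): the node counts as "loaded" once no pending
  // user remains or the load record has hit its cap.
  bool Loaded() const { return load_count_ == 0 || load_record_ >= kMaxLoadNum; }

 private:
  uint32_t load_count_ = 0;
  uint32_t load_record_ = 0;
  std::mutex mu_;
};

int main() {
  LoadTracker node;
  node.IncreaseLoadCount();   // a run request queued before loading
  node.DecreaseLoadCount();   // consumed when the model is actually loaded
  node.IncreaseLoadRecord();
  std::cout << std::boolalpha << node.Loaded() << "\n";
  return 0;
}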


+ 12
- 0
ge/graph/passes/dimension_adjust_pass.cc View File

@@ -78,7 +78,12 @@ Status DimensionAdjustPass::Run(ge::NodePtr &node) {
GELOGE(ret, "DimensionAdjustPass compute failed");
return ret;
}
// Need to handle axis_input of node like ExpandDims
if (node->GetAllInDataAnchors().size() > static_cast<size_t>(kRemoveInputIndex)) {
auto axis_node_out_anchor = node->GetInDataAnchor(kRemoveInputIndex)->GetPeerOutAnchor();
GE_CHECK_NOTNULL(axis_node_out_anchor);
auto axis_node = axis_node_out_anchor->GetOwnerNode();
// 1.Copy control dependency of axis node
ret = PassUtils::UnlinkNodeWithControlCopy(node, kRemoveInputIndex);
if (ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Unlink op:%s(%s) data input:%u with control edge copy failed",
@@ -86,6 +91,13 @@ Status DimensionAdjustPass::Run(ge::NodePtr &node) {
GELOGE(ret, "DimensionAdjustPass unlink node with control copy fail.");
return ret;
}
// 2.Remove const axis node without any output
if ((axis_node->GetType() == CONSTANT || axis_node->GetType() == CONSTANTOP) &&
axis_node->GetOutDataNodesSize() == 0) {
ret = IsolateAndDeleteNode(axis_node, {});
GE_CHK_GRAPH_STATUS_RET(ret, "Fail to remove node %s.", axis_node->GetName().c_str());
GELOGI("Remove useless axis input const %s", axis_node->GetName().c_str());
}
}

ret = DealWithInNodes(node);
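The pass addition above unlinks the axis input of nodes such as ExpandDims and then deletes the axis Const if it no longer feeds any data consumer. A toy sketch of that dead-const clean-up, with Node and Graph reduced to minimal stand-ins for the real GE types:

#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct Node {
  std::string name;
  std::string type;
  size_t out_data_nodes = 0;  // number of remaining data consumers
};

struct Graph {
  std::vector<std::shared_ptr<Node>> nodes;
  void RemoveNode(const std::shared_ptr<Node> &node) {
    for (auto it = nodes.begin(); it != nodes.end(); ++it) {
      if (*it == node) { nodes.erase(it); return; }
    }
  }
};

// After the axis input has been unlinked, a Const producer with no remaining
// data consumers is dead and can be removed from the graph.
void RemoveUselessConst(Graph &graph, const std::shared_ptr<Node> &axis_node) {
  if ((axis_node->type == "Const" || axis_node->type == "Constant") &&
      axis_node->out_data_nodes == 0) {
    std::cout << "remove useless axis input const " << axis_node->name << "\n";
    graph.RemoveNode(axis_node);
  }
}

int main() {
  Graph graph;
  auto axis = std::make_shared<Node>();
  axis->name = "expanddims_axis";
  axis->type = "Const";
  graph.nodes.push_back(axis);
  RemoveUselessConst(graph, axis);
  std::cout << "nodes left: " << graph.nodes.size() << "\n";
  return 0;
}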


+ 9
- 2
ge/graph/passes/net_output_pass.cc View File

@@ -514,7 +514,7 @@ Status NetOutputPass::Run(ge::ComputeGraphPtr graph) {
GELOGE(GE_GRAPH_PARAM_NULLPTR, "Compute graph is null.");
return GE_GRAPH_PARAM_NULLPTR;
}
GELOGI("NetOutputPass Run.graph is [%s]", graph->GetName().c_str());
GELOGI("[NETOUTPUT PASS] Run.graph is [%s]", graph->GetName().c_str());
NodePtr output_node = graph->FindFirstNodeMatchType(NETOUTPUT);
// save user targets node
SaveAndRemoveTargets(graph);
@@ -552,10 +552,17 @@ Status NetOutputPass::AddNetOutputNodeToGraph(const ge::ComputeGraphPtr &graph,
// If user does not set out nodes and targets and no retval node, also add netoutput node
if ((graph->GetGraphOutNodesInfo().empty()) && (graph->GetGraphTargetNodesInfo().empty()) &&
!is_include_special_node_) {
GELOGI("[NETOUTPUT PASS] output_nodes and target_nodes and special nodes is empty!Add netoutput!");
GELOGI("[NETOUTPUT PASS] Both output, target and special nodes are empty! add net output node");
output_node = graph->AddNode(net_output_desc);
GE_CHK_STATUS_RET(AddCtrlEdgesBetweenLeafAndNetOutput(graph, output_node),
"add ctrl edge between leaf and netoutput failed");
if (!ge::AttrUtils::SetInt(output_node->GetOpDesc(), ATTR_NAME_TRUE_BRANCH_STREAM, 0)) {
REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed", ATTR_NAME_TRUE_BRANCH_STREAM.c_str(),
output_node->GetName().c_str(), output_node->GetType().c_str());
GELOGE(INTERNAL_ERROR, "set ATTR_NAME_TRUE_BRANCH_STREAM failed");
return INTERNAL_ERROR;
}
GELOGI("[NETOUTPUT PASS] Add net output node succeed");
return SUCCESS;
}
GELOGI("[NETOUTPUT PASS] Output node size:%lu.", output_nodes_info.size());


+ 3
- 0
ge/graph/passes/pass_utils.cc View File

@@ -334,6 +334,9 @@ Status PassUtils::UnlinkNodeWithControlCopy(NodePtr &node, int index) {
auto father_node = out_data_anchor->GetOwnerNode();
// link father_node's in control nodes to node
if (GraphUtils::CopyInCtrlEdges(father_node, node) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Copy in control edge from node:%s(%s) to node:%s(%s) failed",
father_node->GetName().c_str(), father_node->GetType().c_str(),
node->GetName().c_str(), node->GetType().c_str());
return FAILED;
}
return SUCCESS;


+ 104
- 0
ge/graph/passes/same_transdata_breadth_fusion_pass.cc View File

@@ -71,6 +71,7 @@ OpDescPtr SameTransdataBreadthFusionPass::GetCastOp(const GeTensorDesc &in_desc,
auto cast_op = ge::OpDescUtils::GetOpDescFromOperator(node_op);
node_op.BreakConnect();
if (cast_op == nullptr) {
REPORT_INNER_ERROR("E19999", "Create Operator:%s(%s) failed", cast_op_name.str().c_str(), CAST);
GELOGE(INTERNAL_ERROR, "new fusion cast op failed!");
return nullptr;
}
@@ -96,6 +97,8 @@ OpDescPtr SameTransdataBreadthFusionPass::GetCastOp(const GeTensorDesc &in_desc,
}
}
if (!AttrUtils::SetInt(cast_op, CAST_ATTR_DST_TYPE, static_cast<int64_t>(out_desc.GetDataType()))) {
REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed", CAST_ATTR_DST_TYPE.c_str(),
cast_op->GetName().c_str(), cast_op->GetType().c_str());
GELOGE(INTERNAL_ERROR, "set dst_type attr failed");
return nullptr;
}
@@ -204,6 +207,12 @@ graphStatus SameTransdataBreadthFusionPass::ReLinkDataOutput2PreNode(const NodeP
GELOGI("remove edge.src:%s, dst:%s", out_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_in_anchor->GetOwnerNode()->GetName().c_str());
if (GraphUtils::RemoveEdge(out_anchor, transdata_peer_in_anchor) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Remove edge between op:%s(%s)(index:%d) and op:%s(%s)(index:%d) failed",
out_anchor->GetOwnerNode()->GetName().c_str(),
out_anchor->GetOwnerNode()->GetType().c_str(), out_anchor->GetIdx(),
transdata_peer_in_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_in_anchor->GetOwnerNode()->GetType().c_str(),
transdata_peer_in_anchor->GetIdx());
GELOGE(GRAPH_FAILED, "remove edge failed!src node:%s, dst node:%s", transdata_node->GetName().c_str(),
transdata_peer_in_anchor->GetOwnerNode()->GetName().c_str());
return GRAPH_FAILED;
@@ -211,6 +220,12 @@ graphStatus SameTransdataBreadthFusionPass::ReLinkDataOutput2PreNode(const NodeP
GELOGI("add edge.src:%s, dst:%s", pre_out_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_in_anchor->GetOwnerNode()->GetName().c_str());
if (GraphUtils::AddEdge(pre_out_anchor, transdata_peer_in_anchor) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add edge between op:%s(%s)(index:%d) and op:%s(%s)(index:%d) failed",
pre_out_anchor->GetOwnerNode()->GetName().c_str(),
pre_out_anchor->GetOwnerNode()->GetType().c_str(), pre_out_anchor->GetIdx(),
transdata_peer_in_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_in_anchor->GetOwnerNode()->GetType().c_str(),
transdata_peer_in_anchor->GetIdx());
GELOGE(GRAPH_FAILED, "add edge failed!src node:%s, dst node:%s",
pre_out_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_in_anchor->GetOwnerNode()->GetName().c_str());
@@ -231,6 +246,11 @@ graphStatus SameTransdataBreadthFusionPass::ReLinkOutDataPeerInControlNodes2PreN
GELOGD("remove edge.src:%s, dst:%s", out_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_in_control_anchor->GetOwnerNode()->GetName().c_str());
if (GraphUtils::RemoveEdge(out_anchor, transdata_peer_in_control_anchor) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Remove control edge between op:%s(%s) and op:%s(%s) failed",
out_anchor->GetOwnerNode()->GetName().c_str(),
out_anchor->GetOwnerNode()->GetType().c_str(),
transdata_peer_in_control_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_in_control_anchor->GetOwnerNode()->GetType().c_str());
GELOGE(GRAPH_FAILED, "remove edge failed!src node:%s, dst node:%s", transdata_node->GetName().c_str(),
transdata_peer_in_control_anchor->GetOwnerNode()->GetName().c_str());
return GRAPH_FAILED;
@@ -240,6 +260,11 @@ graphStatus SameTransdataBreadthFusionPass::ReLinkOutDataPeerInControlNodes2PreN
GELOGD("add edge.src:%s, dst:%s", pre_out_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_in_control_anchor->GetOwnerNode()->GetName().c_str());
if (GraphUtils::AddEdge(pre_out_anchor, transdata_peer_in_control_anchor) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add control edge between op:%s(%s) and op:%s(%s) failed",
pre_out_anchor->GetOwnerNode()->GetName().c_str(),
pre_out_anchor->GetOwnerNode()->GetType().c_str(),
transdata_peer_in_control_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_in_control_anchor->GetOwnerNode()->GetType().c_str());
GELOGE(GRAPH_FAILED, "add edge failed!src node:%s, dst node:%s",
pre_out_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_in_control_anchor->GetOwnerNode()->GetName().c_str());
@@ -249,6 +274,11 @@ graphStatus SameTransdataBreadthFusionPass::ReLinkOutDataPeerInControlNodes2PreN
GELOGD("add edge.src node:%s, dst node:%s", pre_out_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_in_control_anchor->GetOwnerNode()->GetName().c_str());
if (GraphUtils::AddEdge(transdata_peer_out_control_anchor, transdata_peer_in_control_anchor) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add control edge between op:%s(%s) and op:%s(%s) failed",
transdata_peer_out_control_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_out_control_anchor->GetOwnerNode()->GetType().c_str(),
transdata_peer_in_control_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_in_control_anchor->GetOwnerNode()->GetType().c_str());
GELOGE(GRAPH_FAILED, "add edge failed!src node:%s, dst node:%s",
pre_out_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_in_control_anchor->GetOwnerNode()->GetName().c_str());
@@ -290,6 +320,11 @@ graphStatus SameTransdataBreadthFusionPass::ReLinkOutControlPeerInControlAnchors
GELOGD("remove edge.src:%s, dst:%s", transdata_node_keep->GetName().c_str(),
transdata_peer_in_control_anchor->GetOwnerNode()->GetName().c_str());
if (GraphUtils::RemoveEdge(out_control_anchor, transdata_peer_in_control_anchor) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Remove control edge between op:%s(%s) and op:%s(%s) failed",
out_control_anchor->GetOwnerNode()->GetName().c_str(),
out_control_anchor->GetOwnerNode()->GetType().c_str(),
transdata_peer_in_control_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_in_control_anchor->GetOwnerNode()->GetType().c_str());
GELOGE(GRAPH_FAILED, "remove transdata control edge failed!");
return GRAPH_FAILED;
}
@@ -298,6 +333,11 @@ graphStatus SameTransdataBreadthFusionPass::ReLinkOutControlPeerInControlAnchors
GELOGD("add edge.src:%s, dst:%s", pre_out_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_in_control_anchor->GetOwnerNode()->GetName().c_str());
if (GraphUtils::AddEdge(pre_out_anchor, transdata_peer_in_control_anchor) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add control edge between op:%s(%s) and op:%s(%s) failed",
pre_out_anchor->GetOwnerNode()->GetName().c_str(),
pre_out_anchor->GetOwnerNode()->GetType().c_str(),
transdata_peer_in_control_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_in_control_anchor->GetOwnerNode()->GetType().c_str());
GELOGE(GRAPH_FAILED, "add control edge failed!");
return GRAPH_FAILED;
}
@@ -305,6 +345,11 @@ graphStatus SameTransdataBreadthFusionPass::ReLinkOutControlPeerInControlAnchors
GELOGD("add edge.src:%s, dst:%s", transdata_peer_out_control_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_in_control_anchor->GetOwnerNode()->GetName().c_str());
if (GraphUtils::AddEdge(transdata_peer_out_control_anchor, transdata_peer_in_control_anchor) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add control edge between op:%s(%s) and op:%s(%s) failed",
transdata_peer_out_control_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_out_control_anchor->GetOwnerNode()->GetType().c_str(),
transdata_peer_in_control_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_in_control_anchor->GetOwnerNode()->GetType().c_str());
GELOGE(GRAPH_FAILED, "add control edge failed!");
return GRAPH_FAILED;
}
@@ -329,6 +374,11 @@ graphStatus SameTransdataBreadthFusionPass::ReLinkOutControlPeerInDataAnchors(
GELOGD("remove edge.src:%s, dst:%s", transdata_node_keep->GetName().c_str(),
transdata_peer_in_data_anchor->GetOwnerNode()->GetName().c_str());
if (GraphUtils::RemoveEdge(out_control_anchor, transdata_peer_in_data_anchor) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Remove control edge between op:%s(%s) and op:%s(%s) failed",
out_control_anchor->GetOwnerNode()->GetName().c_str(),
out_control_anchor->GetOwnerNode()->GetType().c_str(),
transdata_peer_in_data_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_in_data_anchor->GetOwnerNode()->GetType().c_str());
GELOGE(GRAPH_FAILED, "remove transdata control edge failed!");
return GRAPH_FAILED;
}
@@ -337,6 +387,12 @@ graphStatus SameTransdataBreadthFusionPass::ReLinkOutControlPeerInDataAnchors(
GELOGD("add edge.src:%s, dst:%s", pre_out_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_in_data_anchor->GetOwnerNode()->GetName().c_str());
if (GraphUtils::AddEdge(pre_out_anchor, transdata_peer_in_data_anchor) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add edge between op:%s(%s)(index:%d) and op:%s(%s)(index:%d) failed",
pre_out_anchor->GetOwnerNode()->GetName().c_str(),
pre_out_anchor->GetOwnerNode()->GetType().c_str(), pre_out_anchor->GetIdx(),
transdata_peer_in_data_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_in_data_anchor->GetOwnerNode()->GetType().c_str(),
transdata_peer_in_data_anchor->GetIdx());
GELOGE(GRAPH_FAILED, "add control edge failed!");
return GRAPH_FAILED;
}
@@ -344,6 +400,11 @@ graphStatus SameTransdataBreadthFusionPass::ReLinkOutControlPeerInDataAnchors(
GELOGD("add edge.src:%s, dst:%s", transdata_peer_out_control_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_in_data_anchor->GetOwnerNode()->GetName().c_str());
if (GraphUtils::AddEdge(transdata_peer_out_control_anchor, transdata_peer_in_data_anchor) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add control edge between op:%s(%s) and op:%s(%s) failed",
transdata_peer_out_control_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_out_control_anchor->GetOwnerNode()->GetType().c_str(),
transdata_peer_in_data_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_in_data_anchor->GetOwnerNode()->GetType().c_str());
GELOGE(GRAPH_FAILED, "add control edge failed!");
return GRAPH_FAILED;
}
@@ -460,6 +521,12 @@ graphStatus SameTransdataBreadthFusionPass::RelinkRemainTransdata(const ComputeG

GELOGI("add edge.out node %s, in node %s", head_node->GetName().c_str(), transdata_node_keep->GetName().c_str());
if (GraphUtils::AddEdge(head_node_anchor, transdata_in_anchor) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add edge between op:%s(%s)(index:%d) and op:%s(%s)(index:%d) failed",
head_node_anchor->GetOwnerNode()->GetName().c_str(),
head_node_anchor->GetOwnerNode()->GetType().c_str(), head_node_anchor->GetIdx(),
transdata_in_anchor->GetOwnerNode()->GetName().c_str(),
transdata_in_anchor->GetOwnerNode()->GetType().c_str(),
transdata_in_anchor->GetIdx());
GELOGE(GRAPH_FAILED, "add edge failed!out node %s, in node %s", head_node->GetName().c_str(),
transdata_node_keep->GetName().c_str());
return GRAPH_FAILED;
@@ -545,6 +612,12 @@ graphStatus SameTransdataBreadthFusionPass::ReuseNodesBeforeTransdata(int anchor
GELOGI("add edge.src:%s, dst:%s", transdata_node_keep->GetName().c_str(),
head_node_peer_anchor->GetOwnerNode()->GetName().c_str());
if (GraphUtils::AddEdge(transdata_out_anchor, head_node_peer_anchor) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add edge between op:%s(%s)(index:%d) and op:%s(%s)(index:%d) failed",
transdata_out_anchor->GetOwnerNode()->GetName().c_str(),
transdata_out_anchor->GetOwnerNode()->GetType().c_str(), transdata_out_anchor->GetIdx(),
head_node_peer_anchor->GetOwnerNode()->GetName().c_str(),
head_node_peer_anchor->GetOwnerNode()->GetType().c_str(),
head_node_peer_anchor->GetIdx());
GELOGE(GRAPH_FAILED, "add edge.src:%s, dst:%s", transdata_node_keep->GetName().c_str(),
head_node_peer_anchor->GetOwnerNode()->GetName().c_str());
return GRAPH_FAILED;
@@ -562,6 +635,8 @@ graphStatus SameTransdataBreadthFusionPass::ReuseNodesBeforeTransdata(int anchor
auto input_desc = in_op_desc->GetInputDesc(in_data_anchor->GetIdx());
CopyTensorDesc(transdata_output_desc, input_desc);
if (in_op_desc->UpdateInputDesc(in_data_anchor->GetIdx(), input_desc) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Update input:%d desc in op:%s(%s) failed", in_data_anchor->GetIdx(),
in_op_desc->GetName().c_str(), in_op_desc->GetType().c_str());
GELOGE(FAILED, "UpdateInputDesc fail.");
return FAILED;
}
@@ -569,6 +644,8 @@ graphStatus SameTransdataBreadthFusionPass::ReuseNodesBeforeTransdata(int anchor
auto output_desc = in_op_desc->GetOutputDesc(output_idx);
CopyTensorDesc(transdata_output_desc, output_desc);
GE_IF_BOOL_EXEC(in_op_desc->UpdateOutputDesc(output_idx, output_desc) != GRAPH_SUCCESS,
REPORT_CALL_ERROR("E19999", "Update output:%d desc in op:%s(%s) failed", output_idx,
in_op_desc->GetName().c_str(), in_op_desc->GetType().c_str());
GELOGE(GRAPH_FAILED, "update input desc failed");
return GRAPH_FAILED);
// relink control edge
@@ -610,6 +687,13 @@ graphStatus SameTransdataBreadthFusionPass::LinkNewCastNode2RemainTransdata(
GELOGI("remove edge.src:%s, dst:%s", transdata_peer_out_anchor->GetOwnerNode()->GetName().c_str(),
transdata_remove_in_anchor->GetOwnerNode()->GetName().c_str());
if (GraphUtils::RemoveEdge(transdata_peer_out_anchor, transdata_remove_in_anchor) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Remove edge between op:%s(%s)(index:%d) and op:%s(%s)(index:%d) failed",
transdata_peer_out_anchor->GetOwnerNode()->GetName().c_str(),
transdata_peer_out_anchor->GetOwnerNode()->GetType().c_str(),
transdata_peer_out_anchor->GetIdx(),
transdata_remove_in_anchor->GetOwnerNode()->GetName().c_str(),
transdata_remove_in_anchor->GetOwnerNode()->GetType().c_str(),
transdata_remove_in_anchor->GetIdx());
return GRAPH_FAILED;
}

@@ -642,6 +726,9 @@ graphStatus SameTransdataBreadthFusionPass::LinkNewCastNode2RemainTransdata(
}

if (graph->RemoveNode(transdata_node_remove) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Remove node:%s(%s) from graph:%s failed",
transdata_node_remove->GetName().c_str(), transdata_node_remove->GetType().c_str(),
graph->GetName().c_str());
GELOGE(GRAPH_FAILED, "remove node %s failed!", transdata_node_remove->GetName().c_str());
return GRAPH_FAILED;
}
@@ -660,6 +747,10 @@ graphStatus SameTransdataBreadthFusionPass::RelinkInControlEdge(const NodePtr &n
GELOGD("remove edge.src:%s, dst:%s", peer_out_control_anchor->GetOwnerNode()->GetName().c_str(),
node_src->GetName().c_str());
if (GraphUtils::RemoveEdge(peer_out_control_anchor, node_src->GetInControlAnchor()) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Remove control edge between op:%s(%s) and op:%s(%s) failed",
peer_out_control_anchor->GetOwnerNode()->GetName().c_str(),
peer_out_control_anchor->GetOwnerNode()->GetType().c_str(),
node_src->GetName().c_str(), node_src->GetType().c_str());
GELOGE(GRAPH_FAILED, "remove edge faliled!src:%s, dst:%s",
peer_out_control_anchor->GetOwnerNode()->GetName().c_str(), node_src->GetName().c_str());
return GRAPH_FAILED;
@@ -667,6 +758,10 @@ graphStatus SameTransdataBreadthFusionPass::RelinkInControlEdge(const NodePtr &n
GELOGD("add edge.src:%s, dst:%s", peer_out_control_anchor->GetOwnerNode()->GetName().c_str(),
node_dst->GetName().c_str());
if (GraphUtils::AddEdge(peer_out_control_anchor, node_dst->GetInControlAnchor()) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add control edge between op:%s(%s) and op:%s(%s) failed",
peer_out_control_anchor->GetOwnerNode()->GetName().c_str(),
peer_out_control_anchor->GetOwnerNode()->GetType().c_str(),
node_dst->GetName().c_str(), node_dst->GetType().c_str());
GELOGE(GRAPH_FAILED, "add edge failed!src:%s, dst:%s", peer_out_control_anchor->GetOwnerNode()->GetName().c_str(),
node_dst->GetName().c_str());
return GRAPH_FAILED;
@@ -713,10 +808,16 @@ graphStatus SameTransdataBreadthFusionPass::AddCastNode(const ComputeGraphPtr &g

auto cast_node = graph->AddNode(cast_op_desc);
if (cast_node == nullptr) {
REPORT_CALL_ERROR("E19999", "Add node:%s(%s) to graph:%s failed",
cast_op_desc->GetName().c_str(), cast_op_desc->GetType().c_str(), graph->GetName().c_str());
return GRAPH_FAILED;
}
GELOGD("add edge.src:%s, dst:%s", pre_out_anchor->GetOwnerNode()->GetName().c_str(), cast_node->GetName().c_str());
if (GraphUtils::AddEdge(pre_out_anchor, cast_node->GetInDataAnchor(0)) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add edge between op:%s(%s)(index:%d) and op:%s(%s)(index:0) failed",
pre_out_anchor->GetOwnerNode()->GetName().c_str(),
pre_out_anchor->GetOwnerNode()->GetType().c_str(), pre_out_anchor->GetIdx(),
cast_node->GetName().c_str(), cast_node->GetType().c_str());
return GRAPH_FAILED;
}
if (i == 0) {
@@ -724,6 +825,8 @@ graphStatus SameTransdataBreadthFusionPass::AddCastNode(const ComputeGraphPtr &g
}

if (!AttrUtils::SetBool(cast_op_desc, ATTR_NEED_COMPILE, true)) {
REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed", ATTR_NEED_COMPILE.c_str(),
cast_op_desc->GetName().c_str(), cast_op_desc->GetType().c_str());
GELOGE(FAILED, "SetExtAttr fail.");
return FAILED;
}
@@ -738,6 +841,7 @@ graphStatus SameTransdataBreadthFusionPass::GetSubGraphsBetweenNormalAndTransdat
std::vector<std::pair<OutDataAnchorPtr, InDataAnchorPtr>> &nodes_list) {
graphStatus ret = GRAPH_SUCCESS;
if (out_anchor == nullptr) {
REPORT_INNER_ERROR("E19999", "Param out_anchor is nullptr, check invalid");
GELOGE(GRAPH_FAILED, "out data anchor is null!This should not happen!");
return GRAPH_FAILED;
}
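The hunks in this pass all apply the same error-handling shape at edge add/remove call sites: report the failed call for the error manager, then emit the module error log, then propagate GRAPH_FAILED. A standalone sketch of that pattern, with plain functions standing in for GE's REPORT_CALL_ERROR and GELOGE macros:

#include <cstdio>
#include <string>

constexpr int GRAPH_SUCCESS = 0;
constexpr int GRAPH_FAILED = -1;

// Stand-ins for the real logging/reporting macros.
void ReportCallError(const char *code, const std::string &msg) {
  std::fprintf(stderr, "[REPORT %s] %s\n", code, msg.c_str());
}
void LogError(int status, const std::string &msg) {
  std::fprintf(stderr, "[ERROR %d] %s\n", status, msg.c_str());
}

// Pretend edge operation that fails, to exercise the error path.
int AddEdge(const std::string &, const std::string &) { return GRAPH_FAILED; }

// Report first (for the error manager), then log, then propagate the failure.
int LinkNodes(const std::string &src, const std::string &dst) {
  if (AddEdge(src, dst) != GRAPH_SUCCESS) {
    ReportCallError("E19999", "Add edge between op:" + src + " and op:" + dst + " failed");
    LogError(GRAPH_FAILED, "add edge failed! src node:" + src + ", dst node:" + dst);
    return GRAPH_FAILED;
  }
  return GRAPH_SUCCESS;
}

int main() {
  LinkNodes("transdata_1", "cast_1");
  return 0;
}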


+ 5
- 1
ge/graph/passes/save_pass.cc View File

@@ -47,7 +47,9 @@ Status SavePass::Run(ge::ComputeGraphPtr graph) {
out_index.emplace_back(out_anchor->GetIdx());
ge::OpDescPtr op_desc = peer_node->GetOpDesc();
GE_IF_BOOL_EXEC(!ge::AttrUtils::SetStr(op_desc, kVarAttrVarIsSave, kVarIsSave),
GELOGE(INTERNAL_ERROR, "get kVarAttrVarIsSave failed"); return INTERNAL_ERROR);
REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed", kVarAttrVarIsSave,
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(INTERNAL_ERROR, "get kVarAttrVarIsSave failed"); return INTERNAL_ERROR);
}
}
}
@@ -65,6 +67,8 @@ Status SavePass::Run(ge::ComputeGraphPtr graph) {
for (auto &node_ptr : del_nodes) {
auto ret = graph->RemoveNode(node_ptr);
if (ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Remove node:%s(%s) from graph:%s failed",
node_ptr->GetName().c_str(), node_ptr->GetType().c_str(), graph->GetName().c_str());
GELOGE(ret, "GraphUtils::RemoveNodeWithoutRelink failed.");
return ret;
}


+ 25
- 0
ge/graph/passes/set_input_output_offset_pass.cc

@@ -54,6 +54,8 @@ Status SetInputOutputOffsetPass::SetInputOffsetForFusion(const std::vector<int64
std::vector<int64_t> input_offset_of_node;
input_offset_of_node = op_desc->GetInputOffset();
if (input_offset_of_node.size() < i) {
REPORT_INNER_ERROR("E19999", "Input offsets size:%zu of node:%s(%s) < index:%zu, check invalid",
input_offset_of_node.size(), op_desc->GetName().c_str(), op_desc->GetType().c_str(), i);
GELOGE(PARAM_INVALID, "not get input_offset of %zu", i);
return PARAM_INVALID;
}
@@ -77,10 +79,15 @@ Status SetInputOutputOffsetPass::SetInputOffsetForFusion(const std::vector<int64
int64_t relative_offset = input_offset - out_offset;
zero_copy_relative_offset.emplace_back(relative_offset);
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListInt(data_op_desc, ATTR_ZERO_COPY_BASIC_OFFSET, zero_copy_basic_offset),
REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed",
ATTR_ZERO_COPY_BASIC_OFFSET.c_str(),
data_op_desc->GetName().c_str(), data_op_desc->GetType().c_str());
GELOGE(FAILED, "SetListInt of zero_copy_basic_offset failed.");
return FAILED);
GE_CHK_BOOL_EXEC(
ge::AttrUtils::SetListInt(data_op_desc, ATTR_ZERO_COPY_RELATIVE_OFFSET, zero_copy_relative_offset),
REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed", ATTR_ZERO_COPY_RELATIVE_OFFSET.c_str(),
data_op_desc->GetName().c_str(), data_op_desc->GetType().c_str());
GELOGE(FAILED, "SetListInt of zero_copy_relative_offset failed.");
return FAILED);
}
@@ -115,10 +122,15 @@ Status SetInputOutputOffsetPass::SetInputOffsetForHcom(const ge::NodePtr &node,
zero_copy_basic_offset.emplace_back(output_offset);
zero_copy_relative_offset.emplace_back(relative_offset);
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListInt(in_op_desc, ATTR_ZERO_COPY_BASIC_OFFSET, zero_copy_basic_offset),
REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed",
ATTR_ZERO_COPY_BASIC_OFFSET.c_str(),
in_op_desc->GetName().c_str(), in_op_desc->GetType().c_str());
GELOGE(FAILED, "SetListInt of zero_copy_basic_offset failed.");
return FAILED);
GE_CHK_BOOL_EXEC(
ge::AttrUtils::SetListInt(in_op_desc, ATTR_ZERO_COPY_RELATIVE_OFFSET, zero_copy_relative_offset),
REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed", ATTR_ZERO_COPY_RELATIVE_OFFSET.c_str(),
in_op_desc->GetName().c_str(), in_op_desc->GetType().c_str());
GELOGE(FAILED, "SetListInt of zero_copy_relative_offset failed.");
return FAILED);
}
@@ -159,6 +171,9 @@ Status SetInputOutputOffsetPass::SetOutputOffsetForConcat(const NodePtr &node) {
output_offset_of_concat = op_desc->GetOutputOffset();
// phony_concat has one output
GE_IF_BOOL_EXEC(output_offset_of_concat.size() != 1,
REPORT_INNER_ERROR("E19999", "Output offsets size:%zu of node:%s(%s) not equal to 1, check invalid",
output_offset_of_concat.size(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(PARAM_INVALID, "%s should has one output.", node->GetName().c_str());
return PARAM_INVALID);
NodePtr net_output = node->GetOutDataNodes().at(0);
@@ -186,9 +201,14 @@ Status SetInputOutputOffsetPass::SetOutputOffsetForConcat(const NodePtr &node) {
zero_copy_relative_offset.emplace_back(relative_offset);
}
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListInt(out_op_desc, ATTR_ZERO_COPY_BASIC_OFFSET, zero_copy_basic_offset),
REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed", ATTR_ZERO_COPY_BASIC_OFFSET.c_str(),
out_op_desc->GetName().c_str(), out_op_desc->GetType().c_str());
GELOGE(FAILED, "SetListInt of zero_copy_basic_offset failed.");
return FAILED);
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListInt(out_op_desc, ATTR_ZERO_COPY_RELATIVE_OFFSET, zero_copy_relative_offset),
REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed",
ATTR_ZERO_COPY_RELATIVE_OFFSET.c_str(),
out_op_desc->GetName().c_str(), out_op_desc->GetType().c_str());
GELOGE(FAILED, "SetListInt of zero_copy_relative_offset failed.");
return FAILED);
return SUCCESS;
@@ -232,9 +252,14 @@ Status SetInputOutputOffsetPass::SetOutputOffsetForHcom(const NodePtr &node, con
}

GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListInt(out_op_desc, ATTR_ZERO_COPY_BASIC_OFFSET, zero_copy_basic_offset),
REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed", ATTR_ZERO_COPY_BASIC_OFFSET.c_str(),
out_op_desc->GetName().c_str(), out_op_desc->GetType().c_str());
GELOGE(FAILED, "SetListInt of zero_copy_basic_offset failed.");
return FAILED);
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListInt(out_op_desc, ATTR_ZERO_COPY_RELATIVE_OFFSET, zero_copy_relative_offset),
REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed",
ATTR_ZERO_COPY_RELATIVE_OFFSET.c_str(),
out_op_desc->GetName().c_str(), out_op_desc->GetType().c_str());
GELOGE(FAILED, "SetListInt of zero_copy_relative_offset failed.");
return FAILED);
return SUCCESS;
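The GE_CHK_BOOL_EXEC changes in this file work because the macro accepts a trailing statement list after the condition, so the new REPORT_CALL_ERROR is simply prepended to that list. Written out as plain control flow (a rough approximation; the real macro lives in GE's common debug headers), one of the checks above reads:

// Rough expansion of one GE_CHK_BOOL_EXEC usage above; not the literal macro output.
if (!ge::AttrUtils::SetListInt(out_op_desc, ATTR_ZERO_COPY_BASIC_OFFSET, zero_copy_basic_offset)) {
  REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed", ATTR_ZERO_COPY_BASIC_OFFSET.c_str(),
                    out_op_desc->GetName().c_str(), out_op_desc->GetType().c_str());
  GELOGE(FAILED, "SetListInt of zero_copy_basic_offset failed.");
  return FAILED;
}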


+ 2
- 0
ge/graph/passes/snapshot_pass.cc

@@ -29,6 +29,8 @@ Status SnapshotPass::Run(NodePtr &node) {
string type;
Status status_ret = GetOriginalType(node, type);
if (status_ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Get OriginalType of op:%s(%s) failed",
node->GetName().c_str(), node->GetType().c_str());
GELOGE(status_ret, "SnapshotPass get original type failed.");
return status_ret;
}


+ 3
- 0
ge/graph/passes/stop_gradient_pass.cc

@@ -20,12 +20,15 @@
namespace ge {
Status StopGradientPass::Run(NodePtr &node) {
if (node == nullptr) {
REPORT_INNER_ERROR("E19999", "Param node is nullptr, check invalid");
GELOGE(FAILED, "parameter is null.");
return FAILED;
}
string type;
Status status_ret = GetOriginalType(node, type);
if (status_ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Get OriginalType of op:%s(%s) failed",
node->GetName().c_str(), node->GetType().c_str());
GELOGE(status_ret, "StopGradientPass get original type failed.");
return status_ret;
}


+ 22
- 0
ge/graph/passes/subexpression_migration_pass.cc

@@ -144,6 +144,8 @@ Status SubexpressionMigrationPass::ClassifyDataNodes(const ComputeGraphPtr &grap
for (const auto &name : func_desc->GetSubgraphInstanceNames()) {
const auto &subgraph = graph->GetSubgraph(name);
if (subgraph == nullptr) {
REPORT_INNER_ERROR("E19999", "Get subgraph from graph:%s by name:%s failed",
graph->GetName().c_str(), name.c_str());
GELOGE(GE_GRAPH_EMPTY_SUBGRAPH, "Subgraph not found, name: %s", name.c_str());
return GE_GRAPH_EMPTY_SUBGRAPH;
}
@@ -156,6 +158,8 @@ Status SubexpressionMigrationPass::ClassifyDataNodes(const ComputeGraphPtr &grap

uint32_t parent_index = 0;
if (!AttrUtils::GetInt(data->GetOpDesc(), ATTR_NAME_PARENT_NODE_INDEX, parent_index)) {
REPORT_CALL_ERROR("E19999", "Get Attr:%s from op:%s(%s) failed", ATTR_NAME_PARENT_NODE_INDEX.c_str(),
data->GetName().c_str(), data->GetType().c_str());
GELOGE(FAILED, "Parent index not found, name: %s", data->GetName().c_str());
return FAILED;
}
@@ -229,6 +233,7 @@ bool SubexpressionMigrationPass::IsParallelNodeSame(const map<ComputeGraphPtr, m
const auto &data_nodes = it->second;
auto data_it = data_nodes.find(node_idx);
if (data_it == data_nodes.end()) {
REPORT_INNER_ERROR("E19999", "Find node in data_nodes by index:%u failed", node_idx);
GELOGE(FAILED, "Data: %s not fount, index: %u", base_node->GetName().c_str(), node_idx);
return false;
}
@@ -238,12 +243,15 @@ bool SubexpressionMigrationPass::IsParallelNodeSame(const map<ComputeGraphPtr, m
const auto &in_anchors = out_anchor->GetPeerInDataAnchors();
const auto &in_anchor = in_anchors.at(anchor_idx);
if (in_anchor == nullptr) {
REPORT_INNER_ERROR("E19999", "Index:%u anchor not exist in out:%u data anchor's peer of node:%s(%s)",
node_idx, kDataOutIndex, work_data->GetName().c_str(), work_data->GetType().c_str());
GELOGE(FAILED, "Data anchor size: %u, anchor size: %zu", anchor_idx, in_anchors.size());
return false;
}

const auto &work_node = in_anchor->GetOwnerNode();
if (work_node == nullptr) {
REPORT_INNER_ERROR("E19999", "Owner node of anchor is nullptr, check invalid");
GELOGE(FAILED, "Data: %s not found, index: %u", base_node->GetName().c_str(), node_idx);
return false;
}
@@ -338,17 +346,22 @@ Status SubexpressionMigrationPass::AppendParallelNode(map<ComputeGraphPtr, map<u
OpDescBuilder op_builder(data_name, DATA);
const OpDescPtr op_desc = op_builder.AddInput("x").AddOutput("y").Build();
if (op_desc == nullptr) {
REPORT_CALL_ERROR("E19999", "Build op:%s(%s) failed", data_name.c_str(), DATA);
GELOGE(OUT_OF_MEMORY, "Create multi-batch case desc failed");
return OUT_OF_MEMORY;
}

uint32_t data_index = item.second - kCaseInputBase;
if (!AttrUtils::SetInt(op_desc, ATTR_NAME_INDEX, data_index)) {
REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed", ATTR_NAME_INDEX.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "Parent index not found, name: %s", op_desc->GetName().c_str());
return FAILED;
}

if (!AttrUtils::SetInt(op_desc, ATTR_NAME_PARENT_NODE_INDEX, item.second)) {
REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed", ATTR_NAME_PARENT_NODE_INDEX.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "Parent index not found, name: %s", op_desc->GetName().c_str());
return FAILED;
}
@@ -392,12 +405,14 @@ Status SubexpressionMigrationPass::DetachParallelNode(const map<uint32_t, NodePt
for (uint32_t i = 0; i < detach->GetAllOutDataAnchorsSize(); ++i) {
auto it_idx = outputs.find(i);
if (it_idx == outputs.end()) {
REPORT_INNER_ERROR("E19999", "Node: %s parent index %u not found, check invalid", detach->GetName().c_str(), i);
GELOGE(FAILED, "Node: %s parent index %u not found", detach->GetName().c_str(), i);
return FAILED;
}

auto it_data = graph_datas.find(it_idx->second);
if (it_data == graph_datas.end()) {
REPORT_INNER_ERROR("E19999", "Node: %s parent index %u not found, check invalid", detach->GetName().c_str(), i);
GELOGE(FAILED, "Node: %s parent index %u not found", detach->GetName().c_str(), i);
return FAILED;
}
@@ -444,6 +459,7 @@ Status SubexpressionMigrationPass::AttachParallelNode(const ComputeGraphPtr &gra
for (uint32_t i = 0; i < attach->GetAllInDataAnchorsSize(); ++i) {
auto it_idx = inputs.find(i);
if (it_idx == inputs.end()) {
REPORT_INNER_ERROR("E19999", "Node: %s parent index %u not found, check invalid", attach->GetName().c_str(), i);
GELOGE(FAILED, "Node: %s parent index %u not found", attach->GetName().c_str(), i);
return FAILED;
}
@@ -505,6 +521,7 @@ Status SubexpressionMigrationPass::MoveNodeToParent(const ComputeGraphPtr &graph
uint32_t anchor_idx, const map<uint32_t, uint32_t> &inputs,
const map<uint32_t, uint32_t> &outputs) {
if (inputs.empty()) {
REPORT_INNER_ERROR("E19999", "Param inputs is empty, check invalid");
GELOGE(FAILED, "Graph: %s, inputs is empty", graph->GetName().c_str());
return FAILED;
}
@@ -516,6 +533,8 @@ Status SubexpressionMigrationPass::MoveNodeToParent(const ComputeGraphPtr &graph
const auto &subnodes = groups.second;
auto it = subnodes.find(base_index);
if (it == subnodes.end()) {
REPORT_INNER_ERROR("E19999", "Index:%u data node not found in graph:%s, check invalid",
base_index, subgraph->GetName().c_str());
GELOGE(FAILED, "Graph: %s, Data: %u node not found", subgraph->GetName().c_str(), base_index);
return FAILED;
}
@@ -525,12 +544,15 @@ Status SubexpressionMigrationPass::MoveNodeToParent(const ComputeGraphPtr &graph
const auto &in_anchors = out_anchor->GetPeerInDataAnchors();
const auto &in_anchor = in_anchors.at(anchor_idx);
if (in_anchor == nullptr) {
REPORT_INNER_ERROR("E19999", "Index:%u anchor not exist in out:%u data anchor's peer of node:%s(%s)",
anchor_idx, kDataOutIndex, base_data->GetName().c_str(), base_data->GetType().c_str());
GELOGE(FAILED, "Data anchor index: %u, anchor size: %zu", anchor_idx, in_anchors.size());
return FAILED;
}

move_node = in_anchor->GetOwnerNode();
if (move_node == nullptr) {
REPORT_INNER_ERROR("E19999", "Owner node of anchor is nullptr, check invalid");
GELOGE(FAILED, "Data: %s not found, index: %u", base_data->GetName().c_str(), base_index);
return FAILED;
}
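AppendParallelNode above builds the new Data node with OpDescBuilder and now reports when the build or either attribute write fails. A condensed sketch of that creation path, using only names from the hunks (the two attribute checks are folded into one branch here for brevity; data_name and item come from the surrounding loop):

// Condensed from the AppendParallelNode hunk.
OpDescBuilder op_builder(data_name, DATA);
const OpDescPtr op_desc = op_builder.AddInput("x").AddOutput("y").Build();
if (op_desc == nullptr) {
  REPORT_CALL_ERROR("E19999", "Build op:%s(%s) failed", data_name.c_str(), DATA);
  GELOGE(OUT_OF_MEMORY, "Create multi-batch case desc failed");
  return OUT_OF_MEMORY;
}
uint32_t data_index = item.second - kCaseInputBase;
if (!AttrUtils::SetInt(op_desc, ATTR_NAME_INDEX, data_index) ||
    !AttrUtils::SetInt(op_desc, ATTR_NAME_PARENT_NODE_INDEX, item.second)) {
  REPORT_CALL_ERROR("E19999", "Set Attr:%s or Attr:%s to op:%s(%s) failed",
                    ATTR_NAME_INDEX.c_str(), ATTR_NAME_PARENT_NODE_INDEX.c_str(),
                    op_desc->GetName().c_str(), op_desc->GetType().c_str());
  return FAILED;
}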


+ 17
- 0
ge/graph/passes/subgraph_const_migration_pass.cc

@@ -141,6 +141,8 @@ Status SubgraphConstMigrationPass::ClassifyGraphNodes(const ComputeGraphPtr &gra
for (const auto &name : func_desc->GetSubgraphInstanceNames()) {
const auto &subgraph = graph->GetSubgraph(name);
if (subgraph == nullptr) {
REPORT_INNER_ERROR("E19999", "Get subgraph from graph:%s by name:%s failed",
graph->GetName().c_str(), name.c_str());
GELOGE(GE_GRAPH_EMPTY_SUBGRAPH, "Subgraph not found, name: %s", name.c_str());
return GE_GRAPH_EMPTY_SUBGRAPH;
}
@@ -152,6 +154,8 @@ Status SubgraphConstMigrationPass::ClassifyGraphNodes(const ComputeGraphPtr &gra
if (node->GetType() == DATA) {
uint32_t parent_index = kInvalidParent;
if (!AttrUtils::GetInt(node->GetOpDesc(), ATTR_NAME_PARENT_NODE_INDEX, parent_index)) {
REPORT_CALL_ERROR("E19999", "Get Attr:%s from op:%s(%s) failed", ATTR_NAME_PARENT_NODE_INDEX.c_str(),
node->GetName().c_str(), node->GetType().c_str());
return FAILED;
}

@@ -326,17 +330,22 @@ Status SubgraphConstMigrationPass::AppendParallelNode(const NodePtr &func_node,
OpDescBuilder op_builder(data_name, DATA);
const auto op_desc = op_builder.AddInput("x").AddOutput("y").Build();
if (op_desc == nullptr) {
REPORT_CALL_ERROR("E19999", "Build op:%s(%s) failed", data_name.c_str(), DATA);
GELOGE(OUT_OF_MEMORY, "Create multi-batch subgraph data desc failed");
return OUT_OF_MEMORY;
}

uint32_t data_index = parent_index - kCaseInputBase;
if (!AttrUtils::SetInt(op_desc, ATTR_NAME_INDEX, data_index)) {
REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed", ATTR_NAME_INDEX.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "Parent index not found, name: %s", op_desc->GetName().c_str());
return FAILED;
}

if (!AttrUtils::SetInt(op_desc, ATTR_NAME_PARENT_NODE_INDEX, parent_index)) {
REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed", ATTR_NAME_PARENT_NODE_INDEX.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "Parent index not found, name: %s", op_desc->GetName().c_str());
return FAILED;
}
@@ -460,6 +469,8 @@ Status SubgraphConstMigrationPass::MoveNodeToParent(const ComputeGraphPtr &graph
const map<ComputeGraphPtr, map<uint32_t, NodePtr>> &all_data_nodes,
const string &node_key, uint32_t parent_index) {
if (node_key.empty() || parent_index == kInvalidParent) {
REPORT_INNER_ERROR("E19999", "Param node_key is empty or param parent_index is 0x%X, check invalid",
kInvalidParent);
GELOGE(FAILED, "Graph: %s, node key: %s, parent index: %u invalid",
graph->GetName().c_str(), node_key.c_str(), parent_index);
return FAILED;
@@ -470,6 +481,8 @@ Status SubgraphConstMigrationPass::MoveNodeToParent(const ComputeGraphPtr &graph
const auto &subgraph = item.first;
const auto it_const = item.second.find(node_key);
if (it_const == item.second.end()) {
REPORT_INNER_ERROR("E19999", "Const node name:%s not found in graph:%s, check invalid",
node_key.c_str(), subgraph->GetName().c_str());
GELOGE(FAILED, "Graph: %s, Const: %s node not found", subgraph->GetName().c_str(), node_key.c_str());
return FAILED;
}
@@ -477,11 +490,15 @@ Status SubgraphConstMigrationPass::MoveNodeToParent(const ComputeGraphPtr &graph

const auto it_nodes = all_data_nodes.find(subgraph);
if (it_nodes == all_data_nodes.end()) {
REPORT_INNER_ERROR("E19999", "Const node name:%s not found in graph:%s, check invalid",
node_key.c_str(), subgraph->GetName().c_str());
GELOGE(FAILED, "Graph: %s, Const: %s node not found", subgraph->GetName().c_str(), node_key.c_str());
return FAILED;
}
const auto it_data = it_nodes->second.find(parent_index);
if (it_data == it_nodes->second.end()) {
REPORT_INNER_ERROR("E19999", "Const node name:%s not found in graph:%s, check invalid",
node_key.c_str(), subgraph->GetName().c_str());
GELOGE(FAILED, "Graph: %s, Const: %s node not found", subgraph->GetName().c_str(), node_key.c_str());
return FAILED;
}


+ 21
- 0
ge/graph/passes/subgraph_pass.cc

@@ -94,6 +94,8 @@ Status SubgraphPass::SubgraphInputNode(const ComputeGraphPtr &graph, const NodeP

uint32_t parent_index = 0;
if (!AttrUtils::GetInt(node->GetOpDesc(), ATTR_NAME_PARENT_NODE_INDEX, parent_index)) {
REPORT_CALL_ERROR("E19999", "Get Attr:%s from op:%s(%s) failed", ATTR_NAME_PARENT_NODE_INDEX.c_str(),
node->GetName().c_str(), node->GetType().c_str());
GELOGE(FAILED, "Get attr PARENT_NODE_INDEX failed, node:%s.", node->GetName().c_str());
return FAILED;
}
@@ -208,6 +210,8 @@ Status SubgraphPass::WhileBodySubgraph(const ComputeGraphPtr &graph, const NodeP
// index of body_subgraph is 1
ComputeGraphPtr while_body = NodeUtils::GetSubgraph(*node, 1);
if (while_body == nullptr) {
REPORT_INNER_ERROR("E19999", "While_body of node:%s(%s) is nullptr, check invalid",
node->GetName().c_str(), node->GetType().c_str());
GELOGE(FAILED, "while_body of %s is NULL.", node->GetName().c_str());
return FAILED;
}
@@ -242,12 +246,16 @@ Status SubgraphPass::WhileBodySubgraph(const ComputeGraphPtr &graph, const NodeP
if (output_node == nullptr) {
output_node = n;
} else {
REPORT_INNER_ERROR("E19999", "While_body graph:%s exists multi NetOutput nodes, check invalid",
while_body->GetName().c_str());
GELOGE(FAILED, "while_body %s exists multi NetOutput nodes.", while_body->GetName().c_str());
return FAILED;
}
}
}
if (output_node == nullptr) {
REPORT_INNER_ERROR("E19999", "While_body graph:%s has no output, check invalid",
while_body->GetName().c_str());
GELOGE(FAILED, "while_body %s has no output.", while_body->GetName().c_str());
return FAILED;
}
@@ -462,6 +470,10 @@ Status SubgraphPass::InsertMemcpyNode(const ComputeGraphPtr &graph, const OutDat
(void)AttrUtils::SetBool(op_desc, ATTR_NO_NEED_CONSTANT_FOLDING, false);
(void)AttrUtils::SetBool(op_desc, ATTR_NAME_CANNOT_BE_DELETED, true);
if (GraphUtils::InsertNodeAfter(out_anchor, in_anchors, graph->AddNode(op_desc)) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Insert Cast node %s(%s) after %s(%s) failed",
op_desc->GetName().c_str(), op_desc->GetType().c_str(),
out_anchor->GetOwnerNode()->GetName().c_str(),
out_anchor->GetOwnerNode()->GetType().c_str());
GELOGE(FAILED, "Insert IDENTITY node %s after %s failed.", name.c_str(), in_node->GetName().c_str());
return FAILED;
}
@@ -481,6 +493,9 @@ Status SubgraphPass::InsertMemcpyNode(const ComputeGraphPtr &graph, const OutDat
Status SubgraphPass::InsertNodeBetween(const OutDataAnchorPtr &src, const std::vector<InDataAnchorPtr> &dsts,
const NodePtr &insert_node, uint32_t input_index, uint32_t output_index) {
if (GraphUtils::AddEdge(src, insert_node->GetInDataAnchor(input_index)) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add edge between op:%s(%s)(index:%d) and op:%s(%s)(index:%u) failed",
src->GetOwnerNode()->GetName().c_str(), src->GetOwnerNode()->GetType().c_str(), src->GetIdx(),
insert_node->GetName().c_str(), insert_node->GetType().c_str(), input_index);
GELOGE(FAILED, "Add data_edge %s:%d->%s:%u failed.",
src->GetOwnerNode()->GetName().c_str(), src->GetIdx(), insert_node->GetName().c_str(), input_index);
return FAILED;
@@ -490,6 +505,12 @@ Status SubgraphPass::InsertNodeBetween(const OutDataAnchorPtr &src, const std::v
dst->GetOwnerNode()->GetName().c_str());
if ((GraphUtils::RemoveEdge(src, dst) != GRAPH_SUCCESS) ||
(GraphUtils::AddEdge(insert_node->GetOutDataAnchor(output_index), dst) != GRAPH_SUCCESS)) {
REPORT_CALL_ERROR("E19999", "Remove edge between op:%s(%s)(index:%d) and op:%s(%s)(index:%u) or "
"Add edge between op:%s(%s)(index:%d) and op:%s(%s)(index:%u) failed",
src->GetOwnerNode()->GetName().c_str(), src->GetOwnerNode()->GetType().c_str(), src->GetIdx(),
dst->GetOwnerNode()->GetName().c_str(), dst->GetOwnerNode()->GetType().c_str(), dst->GetIdx(),
insert_node->GetName().c_str(), insert_node->GetType().c_str(), output_index,
dst->GetOwnerNode()->GetName().c_str(), dst->GetOwnerNode()->GetType().c_str(), dst->GetIdx());
GELOGE(FAILED, "Replace data_edge %s:%d->%s:%d by %s:%u->%s:%d failed.",
src->GetOwnerNode()->GetName().c_str(), src->GetIdx(),
dst->GetOwnerNode()->GetName().c_str(), dst->GetIdx(),


+ 20
- 0
ge/graph/passes/switch_data_edges_bypass.cc

@@ -50,6 +50,8 @@ bool IsSwitchInWhileLoop(const NodePtr &node) {
std::vector<std::pair<NodePtr, InDataAnchorPtr>> GetOutDataNodesByIndex(const NodePtr &node, int index) {
auto out_anchor = node->GetOutDataAnchor(index);
if (out_anchor == nullptr) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s) has no index:%d out data anchor, check invalid",
node->GetName().c_str(), node->GetType().c_str(), index);
GELOGE(PARAM_INVALID, "Failed to get out data nodes of index %d from node %s, the anchor does not exists", index,
node->GetName().c_str());
return {};
@@ -84,18 +86,23 @@ NodePtr AddIdentityAfterNode(const NodePtr &node, int index) {

auto node_desc = node->GetOpDesc();
if (node_desc == nullptr) {
REPORT_INNER_ERROR("E19999", "OpDesc in node is nullptr, check invalid");
GELOGE(INTERNAL_ERROR, "Failed to add identity after node %s index %d, the op desc is null",
node->GetName().c_str(), index);
return nullptr;
}
auto tensor = node_desc->GetOutputDescPtr(index);
if (tensor == nullptr) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s) has no index:%d output tensor, check invalid",
node_desc->GetName().c_str(), node_desc->GetType().c_str(), index);
GELOGE(INTERNAL_ERROR, "Failed to find the tensor by index %d from node %s, can not add the identity node", index,
node->GetName().c_str());
return nullptr;
}
auto anchor = node->GetOutDataAnchor(index);
if (anchor == nullptr) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s) has no index:%d out data anchor, check invalid",
node->GetName().c_str(), node->GetType().c_str(), index);
GELOGE(OUT_OF_MEMORY, "Failed to add identity after node %s index %d, the out anchor does not exists",
node->GetName().c_str(), index);
return nullptr;
@@ -104,6 +111,7 @@ NodePtr AddIdentityAfterNode(const NodePtr &node, int index) {
auto identity_opdesc =
MakeShared<OpDesc>("SwitchDataEdgesByPass_Identity_" + std::to_string(identity_counter), IDENTITY);
if (identity_opdesc == nullptr) {
REPORT_CALL_ERROR("E19999", "New OpDesc failed");
GELOGE(OUT_OF_MEMORY, "Failed to add identity after node %s index %d", node->GetName().c_str(), index);
return nullptr;
}
@@ -111,6 +119,9 @@ NodePtr AddIdentityAfterNode(const NodePtr &node, int index) {
auto ret2 = identity_opdesc->AddOutputDesc("y", *tensor);
auto identity = node->GetOwnerComputeGraph()->AddNode(identity_opdesc);
if (ret1 != GRAPH_SUCCESS || ret2 != GRAPH_SUCCESS || identity == nullptr) {
REPORT_CALL_ERROR("E19999", "Add input ouput desc to op:%s(%s) failed or add it to graph:%s failed",
identity_opdesc->GetName().c_str(), identity_opdesc->GetType().c_str(),
node->GetOwnerComputeGraph()->GetName().c_str());
GELOGE(OUT_OF_MEMORY, "Failed to add identity after node %s index %d", node->GetName().c_str(), index);
return nullptr;
}
@@ -124,18 +135,23 @@ NodePtr AddMemcpyBeforeNode(const NodePtr &node, int index) {

auto node_desc = node->GetOpDesc();
if (node_desc == nullptr) {
REPORT_INNER_ERROR("E19999", "OpDesc in node is nullptr, check invalid");
GELOGE(INTERNAL_ERROR, "Failed to add memcpy before node %s index %d, null op desc", node->GetName().c_str(),
index);
return nullptr;
}
auto tensor = node_desc->GetInputDescPtr(index);
if (tensor == nullptr) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s) has no index:%d input tensor, check invalid",
node_desc->GetName().c_str(), node_desc->GetType().c_str(), index);
GELOGE(INTERNAL_ERROR, "Failed to find the tensor by index %d from node %s, can not add the memcpy node", index,
node->GetName().c_str());
return nullptr;
}
auto anchor = node->GetInDataAnchor(index);
if (anchor == nullptr) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s) has no index:%d in data anchor, check invalid",
node->GetName().c_str(), node->GetType().c_str(), index);
GELOGE(INTERNAL_ERROR, "Failed to add memcpy before node %s index %d, the in anchor does not exists",
node->GetName().c_str(), index);
return nullptr;
@@ -143,6 +159,7 @@ NodePtr AddMemcpyBeforeNode(const NodePtr &node, int index) {

auto memcpy_opdesc = MakeShared<OpDesc>("SwitchDataEdgesByPass_Memcpy_" + std::to_string(counter), MEMCPYASYNC);
if (memcpy_opdesc == nullptr) {
REPORT_CALL_ERROR("E19999", "New OpDesc failed");
GELOGE(OUT_OF_MEMORY, "Failed to add memcpy before node %s index %d", node->GetName().c_str(), index);
return nullptr;
}
@@ -150,6 +167,9 @@ NodePtr AddMemcpyBeforeNode(const NodePtr &node, int index) {
auto ret2 = memcpy_opdesc->AddOutputDesc(*tensor);
auto memcpy_node = node->GetOwnerComputeGraph()->AddNode(memcpy_opdesc);
if (ret1 != GRAPH_SUCCESS || ret2 != GRAPH_SUCCESS || memcpy_node == nullptr) {
REPORT_CALL_ERROR("E19999", "Add input ouput desc to op:%s(%s) failed or add it to graph:%s failed",
memcpy_opdesc->GetName().c_str(), memcpy_opdesc->GetType().c_str(),
node->GetOwnerComputeGraph()->GetName().c_str());
GELOGE(OUT_OF_MEMORY, "Failed to add memcpy before node %s index %d", node->GetName().c_str(), index);
return nullptr;
}
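AddIdentityAfterNode and AddMemcpyBeforeNode share the same construction sequence, and the commit now reports at each step: a null OpDesc from MakeShared, then a combined check over the two desc additions and AddNode. A trimmed sketch of the memcpy variant; the AddInputDesc call for ret1 sits outside the hunk shown and is assumed here, and the counter suffix is fixed for brevity:

// Trimmed sketch of AddMemcpyBeforeNode; "tensor" is the input desc fetched earlier.
auto memcpy_opdesc = MakeShared<OpDesc>("SwitchDataEdgesByPass_Memcpy_0", MEMCPYASYNC);
if (memcpy_opdesc == nullptr) {
  REPORT_CALL_ERROR("E19999", "New OpDesc failed");
  GELOGE(OUT_OF_MEMORY, "Failed to add memcpy before node %s index %d", node->GetName().c_str(), index);
  return nullptr;
}
auto ret1 = memcpy_opdesc->AddInputDesc(*tensor);  // assumed to mirror the output desc call below
auto ret2 = memcpy_opdesc->AddOutputDesc(*tensor);
auto memcpy_node = node->GetOwnerComputeGraph()->AddNode(memcpy_opdesc);
if (ret1 != GRAPH_SUCCESS || ret2 != GRAPH_SUCCESS || memcpy_node == nullptr) {
  REPORT_CALL_ERROR("E19999", "Add input or output desc to op:%s(%s) failed or add it to graph:%s failed",
                    memcpy_opdesc->GetName().c_str(), memcpy_opdesc->GetType().c_str(),
                    node->GetOwnerComputeGraph()->GetName().c_str());
  GELOGE(OUT_OF_MEMORY, "Failed to add memcpy before node %s index %d", node->GetName().c_str(), index);
  return nullptr;
}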


+ 13
- 0
ge/graph/passes/switch_dead_branch_elimination.cc

@@ -31,6 +31,7 @@ const int kDefaultInputIndex = -1;

bool ParsePred(const ConstGeTensorPtr &tensor) {
if (tensor == nullptr) {
REPORT_INNER_ERROR("E19999", "Param tensor is nullptr, check invalid");
GELOGE(FAILED, "parameter is null.");
return false;
}
@@ -65,6 +66,8 @@ bool ParseOutDataAnchors(const NodePtr &node, const NodePtr &pred_node, OutDataA
OutDataAnchorPtr &inactive_out_data_anchor) {
auto tensors = OpDescUtils::MutableWeights(pred_node);
if (tensors.empty()) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s) has no weight, check invalid",
pred_node->GetName().c_str(), pred_node->GetType().c_str());
return false;
}

@@ -72,6 +75,7 @@ bool ParseOutDataAnchors(const NodePtr &node, const NodePtr &pred_node, OutDataA
int inactive_output_index = pred_value ? 0 : 1;

if (node == nullptr) {
REPORT_INNER_ERROR("E19999", "Param node is nullptr, check invalid");
GELOGE(FAILED, "parameter is null.");
return false;
}
@@ -91,6 +95,7 @@ bool ParseOutDataAnchors(const NodePtr &node, const NodePtr &pred_node, OutDataA
Status SwitchDeadBranchElimination::DeleteSwitchNode(NodePtr &node, NodePtr &pred_node,
const OutDataAnchorPtr &active_out_data_anchor) {
if (node == nullptr || active_out_data_anchor == nullptr) {
REPORT_INNER_ERROR("E19999", "Param node or active_out_data_anchor is nullptr, check invalid");
GELOGE(FAILED, "parameter is null.");
return FAILED;
}
@@ -102,6 +107,9 @@ Status SwitchDeadBranchElimination::DeleteSwitchNode(NodePtr &node, NodePtr &pre

// link pred's in control nodes to switch
if (GraphUtils::CopyInCtrlEdges(pred_node, node) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Copy in control edge from node:%s(%s) to node:%s(%s) failed",
pred_node->GetName().c_str(), pred_node->GetType().c_str(),
node->GetName().c_str(), node->GetType().c_str());
return FAILED;
}
// Remove link between pred and switch
@@ -114,6 +122,8 @@ Status SwitchDeadBranchElimination::DeleteSwitchNode(NodePtr &node, NodePtr &pre
std::vector<int> switch_io_map = {kDefaultInputIndex, kDefaultInputIndex};
size_t out_index = static_cast<size_t>(active_out_data_anchor->GetIdx());
if (out_index >= switch_io_map.size()) {
REPORT_INNER_ERROR("E19999", "Out index:%zu of node:%s(%s) >= %zu, check invalid", out_index,
node->GetName().c_str(), node->GetType().c_str(), switch_io_map.size());
GELOGE(FAILED, "[%s] out index check failed, out_index:%zu.", node->GetName().c_str(), out_index);
return FAILED;
}
@@ -123,6 +133,7 @@ Status SwitchDeadBranchElimination::DeleteSwitchNode(NodePtr &node, NodePtr &pre

Status SwitchDeadBranchElimination::Run(NodePtr &node) {
if (node == nullptr) {
REPORT_INNER_ERROR("E19999", "Param node is nullptr, check invalid");
GELOGE(PARAM_INVALID, "Param [node] must not be null.");
return PARAM_INVALID;
}
@@ -168,6 +179,8 @@ Status SwitchDeadBranchElimination::Run(NodePtr &node) {
std::vector<NodePtr> end_nodes;
Status ret = PassUtils::RemoveInactiveBranchToMerge(inactive_out_data_anchor, del_nodes, end_nodes);
if (ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Remove inactive branch from node:%s(%s) to merge failed",
node->GetName().c_str(), node->GetType().c_str());
return ret;
}



+ 12
- 0
ge/graph/passes/switch_logic_remove_pass.cc

@@ -45,11 +45,15 @@ Status GetPredNode(const NodePtr &switch_node, PredNodeAndOut &pred_node_index)
GE_CHECK_NOTNULL(switch_node);
auto pred_in_anchor = switch_node->GetInDataAnchor(kSwitchPredIndex);
if (pred_in_anchor == nullptr) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s) has no index:%d in data anchor, check invalid",
switch_node->GetName().c_str(), switch_node->GetType().c_str(), kSwitchPredIndex);
GELOGE(INTERNAL_ERROR, "Failed to get pred node for switch %s, no pred anchor", switch_node->GetName().c_str());
return INTERNAL_ERROR;
}
auto pred_node_anchor = pred_in_anchor->GetPeerOutAnchor();
if (pred_node_anchor == nullptr) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s)'s index:%d in data anchor, its peer anchor is nullptr, check invalid",
switch_node->GetName().c_str(), switch_node->GetType().c_str(), kSwitchPredIndex);
GELOGE(INTERNAL_ERROR,
"Failed to get pred node for switch %s, node peer out anchor",
switch_node->GetName().c_str());
@@ -57,6 +61,8 @@ Status GetPredNode(const NodePtr &switch_node, PredNodeAndOut &pred_node_index)
}
auto pred_node = pred_node_anchor->GetOwnerNode();
if (pred_node == nullptr) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s)'s index:%d in data anchor, its peer node is nullptr, check invalid",
switch_node->GetName().c_str(), switch_node->GetType().c_str(), kSwitchPredIndex);
GELOGE(INTERNAL_ERROR,
"Failed to get pred node for switch %s, null node",
switch_node->GetName().c_str());
@@ -89,11 +95,15 @@ Status SwitchLogicRemovePass::Run(NodePtr &node) {
}
for (auto &in_anchor : out_anchor->GetPeerInDataAnchors()) {
if (in_anchor == nullptr) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s)'s index:%d out data anchor, its peer anchors has nullptr, "
"check invalid", node->GetName().c_str(), node->GetType().c_str(), i);
GELOGE(INTERNAL_ERROR, "The in-anchor from out anchor %d node %s is null", i, node->GetName().c_str());
return INTERNAL_ERROR;
}
auto dst_node = in_anchor->GetOwnerNode();
if (dst_node == nullptr) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s)'s index:%d out data anchor, its peer nodes has nullptr, "
"check invalid", node->GetName().c_str(), node->GetType().c_str(), i);
GELOGE(INTERNAL_ERROR, "The peer node from out anchor %d node %s is null", i, node->GetName().c_str());
return INTERNAL_ERROR;
}
@@ -143,6 +153,8 @@ Status SwitchLogicRemovePass::RemoveSwitchNodeLogically(int parent_index, NodePt
std::vector<NodePtr> end_nodes;
auto ret = PassUtils::RemoveInactiveBranchToMerge(out_anchor, deleted_nodes, end_nodes);
if (ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Remove inactive branch from node:%s(%s) to merge failed",
switch_node->GetName().c_str(), switch_node->GetType().c_str());
return ret;
}



+ 112
- 16
ge/graph/passes/switch_to_stream_switch_pass.cc

@@ -33,8 +33,14 @@ Status SwitchToStreamSwitchPass::Run(ComputeGraphPtr graph) {
GE_CHK_STATUS_RET(CombineSwitchNode(graph), "Combine StreamSwitch nodes failed.");

for (const auto &node : bypass_nodes_) {
GE_CHK_BOOL_EXEC(graph->IsolateNode(node) == GRAPH_SUCCESS, return FAILED, "Isolate node failed.");
GE_CHK_BOOL_EXEC(GraphUtils::RemoveNodeWithoutRelink(graph, node) == GRAPH_SUCCESS, return FAILED,
GE_CHK_BOOL_EXEC(graph->IsolateNode(node) == GRAPH_SUCCESS,
REPORT_CALL_ERROR("E19999", "Isolate node:%s(%s) in graph:%s failed",
node->GetName().c_str(), node->GetType().c_str(), graph->GetName().c_str());
return FAILED, "Isolate node failed.");
GE_CHK_BOOL_EXEC(GraphUtils::RemoveNodeWithoutRelink(graph, node) == GRAPH_SUCCESS,
REPORT_CALL_ERROR("E19999", "Remove node:%s(%s) without relink in graph:%s failed",
node->GetName().c_str(), node->GetType().c_str(), graph->GetName().c_str());
return FAILED,
"Remove switch node failed.");
}

@@ -159,7 +165,11 @@ Status SwitchToStreamSwitchPass::ReplaceSwitchNode(const ComputeGraphPtr &graph,
OpDescPtr cond_desc = peer_cond_anchor->GetOwnerNode()->GetOpDesc();
GE_CHECK_NOTNULL(cond_desc);
DataType cond_data_type = cond_desc->GetOutputDesc(peer_cond_anchor->GetIdx()).GetDataType();
GE_CHK_BOOL_EXEC(cond_data_type == DT_BOOL, return FAILED,
GE_CHK_BOOL_EXEC(cond_data_type == DT_BOOL,
REPORT_INNER_ERROR("E19999", "Pred_input of Switch node:%s(%s) only support DT_BOOL data_type, "
"but %s exactly", switch_node->GetName().c_str(), switch_node->GetType().c_str(),
TypeUtils::DataTypeToSerialString(cond_data_type).c_str());
return FAILED,
"pred_input of Switch only support DT_BOOL data_type, but %s exactly.",
TypeUtils::DataTypeToSerialString(cond_data_type).c_str());

@@ -176,6 +186,8 @@ Status SwitchToStreamSwitchPass::ReplaceSwitchNode(const ComputeGraphPtr &graph,
stream_switch = CreateStreamSwitchNode(graph, switch_node, true_branch_flag ? "_t" : "_f", peer_cond_anchor);
GE_CHK_BOOL_EXEC(stream_switch != nullptr, return FAILED, "Create stream_switch node failed.");
if (SetSwitchTrueBranchFlag(stream_switch, true_branch_flag) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Set switch true branch flag from node:%s(%s) failed",
stream_switch->GetName().c_str(), stream_switch->GetType().c_str());
GELOGE(FAILED, "SetSwitchTrueBranchFlag for node %s failed.", stream_switch->GetName().c_str());
return FAILED;
}
@@ -204,6 +216,8 @@ Status SwitchToStreamSwitchPass::ReplaceSwitchNode(const ComputeGraphPtr &graph,
MoveCtrlEdges(switch_node, stream_switch);
switch_node_map_[stream_switch] = out_node_list;
if (SetOriginalNodeName(stream_switch, switch_node->GetName()) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Set original node name:%s to node:%s(%s) failed", switch_node->GetName().c_str(),
stream_switch->GetName().c_str(), stream_switch->GetType().c_str());
GELOGE(FAILED, "SetOriginalNodeName for node %s failed.", stream_switch->GetName().c_str());
return FAILED;
}
@@ -230,6 +244,10 @@ Status SwitchToStreamSwitchPass::BypassSwitchNode(const NodePtr &switch_node, Ou
GE_CHECK_NOTNULL(peer_out_anchor);
// Remove Switch data input.
if (GraphUtils::RemoveEdge(peer_out_anchor, in_data_anchor) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Remove edge between op:%s(%s)(index:%d) and op:%s(%s)(index:%u) failed",
peer_out_anchor->GetOwnerNode()->GetName().c_str(),
peer_out_anchor->GetOwnerNode()->GetType().c_str(), peer_out_anchor->GetIdx(),
switch_node->GetName().c_str(), switch_node->GetType().c_str(), idx);
GELOGE(FAILED, "Remove data edge %s->%s failed.", peer_out_anchor->GetOwnerNode()->GetName().c_str(),
switch_node->GetName().c_str());
return FAILED;
@@ -284,8 +302,13 @@ NodePtr SwitchToStreamSwitchPass::CreateStreamSwitchNode(const ComputeGraphPtr &
const std::string &suffix,
const OutDataAnchorPtr &peer_cond_anchor) {
OpDescPtr switch_op_desc = switch_node->GetOpDesc();
GE_CHK_BOOL_EXEC(switch_op_desc != nullptr, return nullptr, "OpDesc of Switch node is invalid.");
GE_CHK_BOOL_EXEC(switch_op_desc != nullptr,
REPORT_INNER_ERROR("E19999", "OpDesc in node is nullptr, check invalid");
return nullptr, "OpDesc of Switch node is invalid.");
GE_IF_BOOL_EXEC(switch_op_desc->GetInputsSize() != SWITCH_INPUT_NUM, {
REPORT_INNER_ERROR("E19999", "Input desc size:%zu of node:%s(%s) not equal to %u, check invalid",
switch_op_desc->GetInputsSize(),
switch_op_desc->GetName().c_str(), switch_op_desc->GetType().c_str(), SWITCH_INPUT_NUM);
GELOGE(FAILED, "Switch input param invalid, input_size=%lu, should be %u.", switch_op_desc->GetInputsSize(),
SWITCH_INPUT_NUM);
return nullptr;
@@ -295,6 +318,7 @@ NodePtr SwitchToStreamSwitchPass::CreateStreamSwitchNode(const ComputeGraphPtr &
GELOGI("Create StreamSwitch, name=%s.", node_name.c_str());
OpDescPtr op_desc = MakeShared<OpDesc>(node_name, STREAMSWITCH);
if (op_desc == nullptr) {
REPORT_CALL_ERROR("E19999", "New OpDesc failed");
GELOGE(FAILED, "Create op_desc failed, StreamSwitch:%s.", node_name.c_str());
return nullptr;
}
@@ -316,6 +340,9 @@ NodePtr SwitchToStreamSwitchPass::CreateStreamSwitchNode(const ComputeGraphPtr &

if (!AttrUtils::SetInt(op_desc, ATTR_NAME_SWITCH_DATA_TYPE, RT_SWITCH_INT32) ||
!AttrUtils::SetInt(op_desc, ATTR_NAME_STREAM_SWITCH_COND, (int64_t)RT_EQUAL)) {
REPORT_CALL_ERROR("E19999", "Set Attr:%s or Attr:%s to op:%s(%s) failed",
ATTR_NAME_SWITCH_DATA_TYPE.c_str(), ATTR_NAME_STREAM_SWITCH_COND.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(INTERNAL_ERROR, "set int failed");
return nullptr;
}
@@ -323,13 +350,22 @@ NodePtr SwitchToStreamSwitchPass::CreateStreamSwitchNode(const ComputeGraphPtr &
// Already checked, first input is Variable will passed, second is condition will checked.
GeTensorDesc cond_input_desc = switch_op_desc->GetInputDesc(SWITCH_PRED_INPUT);
GeTensorDesc input_desc(GeShape(cond_input_desc.GetShape().GetDims()), cond_input_desc.GetFormat(), DT_INT32);
GE_CHK_BOOL_EXEC(op_desc->AddInputDesc(input_desc) == GRAPH_SUCCESS, return nullptr,
GE_CHK_BOOL_EXEC(op_desc->AddInputDesc(input_desc) == GRAPH_SUCCESS,
REPORT_CALL_ERROR("E19999", "Add input desc to op:%s(%s) failed",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
return nullptr,
"Create StreamSwitch node: add input desc failed.");
GE_CHK_BOOL_EXEC(op_desc->AddInputDesc(input_desc) == GRAPH_SUCCESS, return nullptr,
GE_CHK_BOOL_EXEC(op_desc->AddInputDesc(input_desc) == GRAPH_SUCCESS,
REPORT_CALL_ERROR("E19999", "Add ouput desc to op:%s(%s) failed",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
return nullptr,
"Create StreamSwitch node: add input desc failed.");

NodePtr stream_switch = graph->AddNode(op_desc);
GE_CHK_BOOL_EXEC(stream_switch != nullptr, return nullptr, "Insert StreamSwitch node failed.");
GE_CHK_BOOL_EXEC(stream_switch != nullptr,
REPORT_CALL_ERROR("E19999", "Add node:%s(%s) to graph:%s failed",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), graph->GetName().c_str());
return nullptr, "Insert StreamSwitch node failed.");
GE_CHK_STATUS(GraphUtils::AddEdge(peer_cond_anchor, stream_switch->GetInDataAnchor(0)),
"StreamSwitch node add cond edge failed.");

@@ -361,6 +397,8 @@ Status SwitchToStreamSwitchPass::MarkBranches(const OutDataAnchorPtr &peer_cond_
it->second[switch_group_id] = switch_list;
} else {
GE_IF_BOOL_EXEC(switch_group_it->second.size() != SWITCH_OUTPUT_NUM, {
REPORT_INNER_ERROR("E19999", "switch group size:%zu not equal to %u, group_id:%ld, check invalid",
switch_group_it->second.size(), SWITCH_OUTPUT_NUM, switch_group_id);
GELOGE(INTERNAL_ERROR, "Check size failed, node: %s", stream_switch->GetName().c_str());
return FAILED;
});
@@ -443,6 +481,8 @@ Status SwitchToStreamSwitchPass::CombineSwitchNode(const ComputeGraphPtr &graph)
GE_CHK_STATUS(GraphUtils::AddEdge(cast_node->GetOutControlAnchor(), active_node->GetInControlAnchor()),
"StreamActive add ctl edge failed.");
if (SetActiveLabelList(active_node, { cast_node->GetName() }) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Set active label list:%s to op:%s(%s) failed",
cast_node->GetName().c_str(), active_node->GetName().c_str(), active_node->GetType().c_str());
GELOGE(FAILED, "Set active_label_list attr for node %s failed.", active_node->GetName().c_str());
return FAILED;
}
@@ -456,7 +496,13 @@ Status SwitchToStreamSwitchPass::CombineSwitchNode(const ComputeGraphPtr &graph)
// select first stream_switch
NodePtr stream_switch = switch_list.front();
// set stream_label
GE_CHK_STATUS_RET(SetStreamLabel(stream_switch, cast_node->GetName()), "Set stream label failed.");
if (SetStreamLabel(stream_switch, cast_node->GetName()) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Set stream_label:%s to op:%s(%s) failed",
cast_node->GetName().c_str(), stream_switch->GetName().c_str(),
stream_switch->GetType().c_str());
GELOGE(FAILED, "Set stream label failed.");
return FAILED;
}
OpDescPtr switch_desc = stream_switch->GetOpDesc();
GE_CHECK_NOTNULL(switch_desc);
switch_desc->SetName(CheckDuplicateName(cond_group + "/" + STREAMSWITCH + (true_branch_flag ? "_t" : "_f")));
@@ -497,18 +543,27 @@ NodePtr SwitchToStreamSwitchPass::CreateActiveNode(const ComputeGraphPtr &graph,
GELOGI("Create StreamActive op:%s.", node_name.c_str());
OpDescPtr op_desc = MakeShared<OpDesc>(node_name, STREAMACTIVE);
if (op_desc == nullptr) {
REPORT_CALL_ERROR("E19999", "New OpDesc failed");
GELOGE(FAILED, "Create op_desc failed, StreamActive:%s.", node_name.c_str());
return nullptr;
}

NodePtr active_node = graph->AddNode(op_desc);
GE_CHK_BOOL_EXEC(active_node != nullptr, return nullptr, "Create StreamActive node failed.");
GE_CHK_BOOL_EXEC(active_node != nullptr,
REPORT_CALL_ERROR("E19999", "Add node:%s(%s) to graph:%s failed",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), graph->GetName().c_str());
return nullptr, "Create StreamActive node failed.");

GE_IF_BOOL_EXEC(GraphUtils::AddEdge(node->GetOutControlAnchor(), active_node->GetInControlAnchor()) != SUCCESS,
REPORT_CALL_ERROR("E19999", "Add control edge between op:%s(%s) and op:%s(%s) failed",
node->GetName().c_str(), node->GetType().c_str(),
active_node->GetName().c_str(), active_node->GetType().c_str());
GELOGE(INTERNAL_ERROR, "add edge failed");
return nullptr);

GE_IF_BOOL_EXEC(SetSwitchBranchNodeLabel(active_node, node_name) != SUCCESS,
REPORT_CALL_ERROR("E19999", "Set switch branch node label:%s to node:%s(%s) failed",
node_name.c_str(), active_node->GetName().c_str(), active_node->GetType().c_str());
GELOGE(INTERNAL_ERROR, "set switch branch node label failed");
return nullptr);

@@ -529,6 +584,7 @@ NodePtr SwitchToStreamSwitchPass::CreateCastOp(const ComputeGraphPtr &graph, con
GELOGI("Create cast_node: %s, input datatype:DT_BOOL, out datatype:DT_INT32", cast_name.c_str());
OpDescPtr cast_desc = MakeShared<OpDesc>(cast_name, CAST);
if (cast_desc == nullptr) {
REPORT_CALL_ERROR("E19999", "New OpDesc failed");
GELOGE(FAILED, "Create op_desc failed, Cast:%s.", cast_name.c_str());
return nullptr;
}
@@ -536,6 +592,10 @@ NodePtr SwitchToStreamSwitchPass::CreateCastOp(const ComputeGraphPtr &graph, con
AttrUtils::SetInt(cast_desc, CAST_ATTR_DSTT, (int64_t)DT_INT32) &&
AttrUtils::SetInt(cast_desc, CAST_ATTR_DST_TYPE, (int64_t)DT_INT32) &&
AttrUtils::SetBool(cast_desc, CAST_ATTR_TRUNCATE, false))) {
REPORT_CALL_ERROR("E19999", "Set Attr:%s or %s or %s or %s to op:%s(%s) failed",
CAST_ATTR_SRCT.c_str(), CAST_ATTR_DSTT.c_str(),
CAST_ATTR_DST_TYPE.c_str(), CAST_ATTR_TRUNCATE.c_str(),
cast_desc->GetName().c_str(), cast_desc->GetType().c_str());
GELOGE(FAILED, "Set CAST_ATTR_SRCT or CAST_ATTR_DSTT or CAST_ATTR_DST_TYPE or CAST_ATTR_TRUNCATE failed, node: %s.",
cast_name.c_str());
return nullptr;
@@ -543,14 +603,24 @@ NodePtr SwitchToStreamSwitchPass::CreateCastOp(const ComputeGraphPtr &graph, con

GeTensorDesc tensor_desc = cond_desc->GetOutputDesc(peer_cond_anchor->GetIdx());
tensor_desc.SetDataType(DT_BOOL);
GE_CHK_BOOL_EXEC(cast_desc->AddInputDesc(tensor_desc) == SUCCESS, return nullptr,
GE_CHK_BOOL_EXEC(cast_desc->AddInputDesc(tensor_desc) == SUCCESS,
REPORT_CALL_ERROR("E19999", "Add input desc to op:%s(%s) failed",
cast_desc->GetName().c_str(), cast_desc->GetType().c_str());
return nullptr,
"Cast_node add input desc failed.");
tensor_desc.SetDataType(DT_INT32);
GE_CHK_BOOL_EXEC(cast_desc->AddOutputDesc(tensor_desc) == SUCCESS, return nullptr,
GE_CHK_BOOL_EXEC(cast_desc->AddOutputDesc(tensor_desc) == SUCCESS,
REPORT_CALL_ERROR("E19999", "Add output desc to op:%s(%s) failed",
cast_desc->GetName().c_str(), cast_desc->GetType().c_str());
return nullptr,
"Cast_node add output desc failed.");

NodePtr cast_node = graph->AddNode(cast_desc);
GE_CHK_BOOL_EXEC(cast_node != nullptr, return nullptr, "Create cast_node failed.");
GE_CHK_BOOL_EXEC(cast_node != nullptr,
REPORT_CALL_ERROR("E19999", "Add node:%s(%s) to graph:%s failed",
cast_desc->GetName().c_str(), cast_desc->GetType().c_str(),
graph->GetName().c_str());
return nullptr, "Create cast_node failed.");
// Cast node has one and only one input
GE_CHK_STATUS(GraphUtils::AddEdge(peer_cond_anchor, cast_node->GetInDataAnchor(0)), "Cast add data edge failed.");

@@ -567,13 +637,18 @@ Status SwitchToStreamSwitchPass::AddConstNode(const ComputeGraphPtr &graph, cons
OpDescPtr op_desc = stream_switch->GetOpDesc();
GE_CHECK_NOTNULL(op_desc);
bool value = false;
GE_CHK_BOOL_EXEC(AttrUtils::GetBool(op_desc, ATTR_NAME_SWITCH_TRUE_BRANCH_FLAG, value), return FAILED,
GE_CHK_BOOL_EXEC(AttrUtils::GetBool(op_desc, ATTR_NAME_SWITCH_TRUE_BRANCH_FLAG, value),
REPORT_CALL_ERROR("E19999", "Get Attr:%s from op:%s(%s) failed",
ATTR_NAME_SWITCH_TRUE_BRANCH_FLAG.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
return FAILED,
"StreamSwitch get attr TRUE_BRANCH_STREAM failed.");

const std::string &const_node_name = op_desc->GetName() + "_Constant_" + (value ? "t" : "f");
GELOGI("Create const op: %s", const_node_name.c_str());
OpDescPtr const_op_desc = MakeShared<OpDesc>(const_node_name, CONSTANT);
if (const_op_desc == nullptr) {
REPORT_CALL_ERROR("E19999", "New OpDesc failed");
GELOGE(FAILED, "Create op_desc failed, Constant:%s.", const_node_name.c_str());
return FAILED;
}
@@ -583,15 +658,26 @@ Status SwitchToStreamSwitchPass::AddConstNode(const ComputeGraphPtr &graph, cons
GeTensorPtr const_value =
MakeShared<GeTensor>(data_desc, reinterpret_cast<uint8_t *>(&resize_value), sizeof(int32_t));
if (const_value == nullptr) {
REPORT_CALL_ERROR("E19999", "New GeTensor failed");
GELOGE(FAILED, "Create tensor failed.");
return FAILED;
}
GE_CHK_BOOL_EXEC(AttrUtils::SetTensor(const_op_desc, ATTR_NAME_WEIGHTS, const_value), return FAILED);
GE_CHK_BOOL_EXEC(const_op_desc->AddOutputDesc(data_desc) == GRAPH_SUCCESS, return FAILED,
GE_CHK_BOOL_EXEC(AttrUtils::SetTensor(const_op_desc, ATTR_NAME_WEIGHTS, const_value),
REPORT_CALL_ERROR("E19999", "Get Attr:%s from op:%s(%s) failed", ATTR_NAME_WEIGHTS.c_str(),
const_op_desc->GetName().c_str(), const_op_desc->GetType().c_str());
return FAILED);
GE_CHK_BOOL_EXEC(const_op_desc->AddOutputDesc(data_desc) == GRAPH_SUCCESS,
REPORT_CALL_ERROR("E19999", "Add output desc to op:%s(%s) failed",
const_op_desc->GetName().c_str(), const_op_desc->GetType().c_str());
return FAILED,
"Create Const op: add output desc failed.");

NodePtr const_node = graph->AddNode(const_op_desc);
GE_CHK_BOOL_EXEC(const_node != nullptr, return FAILED, "Insert Const node failed.");
GE_CHK_BOOL_EXEC(const_node != nullptr,
REPORT_CALL_ERROR("E19999", "Add node:%s(%s) to graph:%s failed",
const_op_desc->GetName().c_str(), const_op_desc->GetType().c_str(),
graph->GetName().c_str());
return FAILED, "Insert Const node failed.");
GE_CHK_STATUS(GraphUtils::AddEdge(const_node->GetOutDataAnchor(0), stream_switch->GetInDataAnchor(1)),
"StreamSwitch node add ctl edge failed.");

@@ -613,6 +699,8 @@ Status SwitchToStreamSwitchPass::ModifySwitchInCtlEdges(const NodePtr &switch_no
OpDescPtr switch_desc = switch_node->GetOpDesc();
GE_CHECK_NOTNULL(switch_desc);
if (!AttrUtils::GetStr(switch_desc, ATTR_NAME_ORIG_NODE_NAME, orig_switch_name) || orig_switch_name.empty()) {
REPORT_CALL_ERROR("E19999", "Get Attr:%s from op:%s(%s) failed", ATTR_NAME_ORIG_NODE_NAME.c_str(),
switch_desc->GetName().c_str(), switch_desc->GetType().c_str());
GELOGE(INTERNAL_ERROR, "Get attr ATTR_NAME_ORIG_NODE_NAME failed, node: %s", switch_desc->GetName().c_str());
return INTERNAL_ERROR;
}
@@ -634,6 +722,8 @@ Status SwitchToStreamSwitchPass::ModifySwitchInCtlEdges(const NodePtr &switch_no

auto find_res1 = switch_node_map_.find(in_ctrl_node);
GE_IF_BOOL_EXEC(find_res1 == switch_node_map_.end(), {
REPORT_INNER_ERROR("E19999", "Node:%s(%s) can't find in switch_node_map_, check invalid",
in_ctrl_node->GetName().c_str(), in_ctrl_node->GetType().c_str());
GELOGE(INTERNAL_ERROR, "StreamSwitch node %s not found in switch_node_map_.", in_ctrl_node->GetName().c_str());
return INTERNAL_ERROR;
});
@@ -662,10 +752,14 @@ Status SwitchToStreamSwitchPass::ModifySwitchOutCtlEdges(const NodePtr &switch_n
stream_switch->GetName().c_str(), active_node->GetName().c_str());
auto find_res = switch_node_map_.find(switch_node);
GE_IF_BOOL_EXEC(find_res == switch_node_map_.end(), {
REPORT_INNER_ERROR("E19999", "Node:%s(%s) can't find in switch_node_map_, check invalid",
switch_node->GetName().c_str(), switch_node->GetType().c_str());
GELOGE(INTERNAL_ERROR, "StreamSwitch node %s not found in switch_node_map_.", switch_node->GetName().c_str());
return INTERNAL_ERROR;
});
GE_IF_BOOL_EXEC(find_res->second.empty(), {
REPORT_INNER_ERROR("E19999", "True_nodes of StreamSwitch node:%s(%s) is empty, check invalid",
switch_node->GetName().c_str(), switch_node->GetType().c_str());
GELOGE(INTERNAL_ERROR, "true_nodes of StreamSwitch node %s is empty.", switch_node->GetName().c_str());
return INTERNAL_ERROR;
});
@@ -678,6 +772,8 @@ Status SwitchToStreamSwitchPass::ModifySwitchOutCtlEdges(const NodePtr &switch_n
std::string orig_name = op_desc->GetName();
GE_IF_BOOL_EXEC(op_desc->HasAttr(ATTR_NAME_ORIG_NODE_NAME), {
if (!AttrUtils::GetStr(op_desc, ATTR_NAME_ORIG_NODE_NAME, orig_name) || orig_name.empty()) {
REPORT_CALL_ERROR("E19999", "Get Attr:%s from op:%s(%s) failed", ATTR_NAME_ORIG_NODE_NAME.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(INTERNAL_ERROR, "Get attr ATTR_NAME_ORIG_NODE_NAME failed, node: %s.", op_desc->GetName().c_str());
return INTERNAL_ERROR;
}
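Two reporting macros are used across this file, and the hunks above apply them consistently: REPORT_INNER_ERROR for violated preconditions (null pointers, unexpected sizes, missing map entries) and REPORT_CALL_ERROR when a call into AttrUtils, GraphUtils or another GE interface returns failure. A minimal contrast, reusing checks that appear above:

// Precondition violated inside the pass itself: inner error.
if (op_desc == nullptr) {
  REPORT_INNER_ERROR("E19999", "OpDesc in node is nullptr, check invalid");
  return nullptr;
}
// A GE utility call failed: call error, carrying the attribute and op names.
if (!AttrUtils::SetInt(op_desc, ATTR_NAME_SWITCH_DATA_TYPE, RT_SWITCH_INT32)) {
  REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed", ATTR_NAME_SWITCH_DATA_TYPE.c_str(),
                    op_desc->GetName().c_str(), op_desc->GetType().c_str());
  GELOGE(INTERNAL_ERROR, "set int failed");
  return nullptr;
}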


+ 4
- 1
ge/graph/passes/transop_breadth_fusion_pass.cc

@@ -31,6 +31,7 @@ Status TransOpBreadthFusionPass::Run(ge::ComputeGraphPtr graph) {
// breadth fusion pass requires a new topological sorting
Status ret_topo = graph->TopologicalSorting();
if (ret_topo != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Topological sorting for graph:%s failed", graph->GetName().c_str());
GELOGE(ret_topo, "TopologicalSorting the merged graph failed.");
return ret_topo;
}
@@ -60,7 +61,9 @@ std::string TransOpBreadthFusionPass::GetNodeId(const int anchor_index, const No
bool trans_format = false;
bool trans_shape = false;

GE_IF_BOOL_EXEC(node == nullptr || node->GetOpDesc() == nullptr, GELOGE(FAILED, "node is null"); return "");
GE_IF_BOOL_EXEC(node == nullptr || node->GetOpDesc() == nullptr,
REPORT_INNER_ERROR("E19999", "Param node or its op_desc is nullptr, check invalid");
GELOGE(FAILED, "node is null"); return "");
if (node->GetType() == CAST) {
trans_data_type = true;
} else if (node->GetType() == TRANSPOSE || node->GetType() == TRANSPOSED || node->GetType() == EXPANDDIMS) {


+ 6
- 0
ge/graph/passes/transop_depth_fusion_pass.cc

@@ -82,6 +82,7 @@ graphStatus TransOpDepthFusionPass::RecursiveInDepth(const InDataAnchorPtr &dst_

if (dst_in_anchor == nullptr || dst_in_anchor->GetOwnerNode() == nullptr ||
dst_in_anchor->GetOwnerNode()->GetOpDesc() == nullptr) {
REPORT_INNER_ERROR("E19999", "Param dst_in_anchor related node info has nullptr, check invalid");
GELOGE(FAILED, "parameter is null.");
return GRAPH_FAILED;
}
@@ -257,11 +258,13 @@ graphStatus TransOpDepthFusionPass::RelinkEdges(const OutDataAnchorPtr &new_out_
const OutDataAnchorPtr &old_out_anchor,
const InDataAnchorPtr &in_data_anchor) {
if (new_out_anchor == nullptr || old_out_anchor == nullptr || in_data_anchor == nullptr) {
REPORT_INNER_ERROR("E19999", "Param anchor info has nullptr, check invalid");
GELOGE(INTERNAL_ERROR, "new_out_anchor or old_out_anchor or in_data_anchor is nullptr");
return GRAPH_FAILED;
}
if (new_out_anchor->GetOwnerNode() == nullptr || old_out_anchor->GetOwnerNode() == nullptr ||
in_data_anchor->GetOwnerNode() == nullptr) {
REPORT_INNER_ERROR("E19999", "Param anchor info owner node has nullptr, check invalid");
GELOGE(INTERNAL_ERROR, "anchor's owner node is nullptr");
return GRAPH_FAILED;
}
@@ -305,11 +308,14 @@ graphStatus TransOpDepthFusionPass::RemoveNode(const NodePtr &node, const ge::Co
return GRAPH_FAILED;
}
if (GraphUtils::IsolateNode(node, {0}) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Isolate node:%s(%s) failed", node->GetName().c_str(), node->GetType().c_str());
GELOGE(INTERNAL_ERROR, "Isolate removed node: %s, type: %s failed", node->GetName().c_str(),
node->GetType().c_str());
return GRAPH_FAILED;
}
if (GraphUtils::RemoveNodeWithoutRelink(graph, node) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Remove node:%s(%s) without relink in graph:%s failed",
node->GetName().c_str(), node->GetType().c_str(), graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Remove node: %s, type: %s without relink failed", node->GetName().c_str(),
node->GetType().c_str());
return GRAPH_FAILED;


+ 11
- 0
ge/graph/passes/transop_nearby_allreduce_fusion_pass.cc

@@ -99,6 +99,9 @@ Status TransOpNearbyAllreduceFusionPass::RemoveNearbyPairedTransOps(const NodePt
auto in_data_anchors = node->GetAllInDataAnchors();
auto out_data_anchors = node->GetAllOutDataAnchors();
if (in_data_anchors.size() != out_data_anchors.size()) {
REPORT_INNER_ERROR("E19999", "In data anchors size:%zu not equal to out data anchors size:%zu in node:%s(%s), "
"check invalid", in_data_anchors.size(), out_data_anchors.size(),
node->GetName().c_str(), node->GetType().c_str());
GELOGE(FAILED, "in and out data anchor size are not equal, node=%s, in_size=%zu, out_size=%zu",
node->GetName().c_str(), in_data_anchors.size(), out_data_anchors.size());
return FAILED;
@@ -143,6 +146,8 @@ Status TransOpNearbyAllreduceFusionPass::RemoveNearbyPairedTransOps(const NodePt

// delete in_node
if (IsolateAndDeleteNode(in_node, {0}) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Isolate and delete node:%s(%s) failed",
in_node->GetName().c_str(), in_node->GetType().c_str());
GELOGE(FAILED, "remove node %s failed", in_node->GetName().c_str());
return FAILED;
}
@@ -150,6 +155,8 @@ Status TransOpNearbyAllreduceFusionPass::RemoveNearbyPairedTransOps(const NodePt

// delete out_node
if (IsolateAndDeleteNode(out_node, {0}) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Isolate and delete node:%s(%s) failed",
out_node->GetName().c_str(), out_node->GetType().c_str());
GELOGE(FAILED, "remove node %s failed", out_node->GetName().c_str());
return FAILED;
}
@@ -162,9 +169,13 @@ Status TransOpNearbyAllreduceFusionPass::RemoveNearbyPairedTransOps(const NodePt
auto input_desc = in_node->GetOpDesc()->GetInputDesc(0);
auto output_desc = out_node->GetOpDesc()->GetOutputDesc(0);
if (node->GetOpDesc()->UpdateInputDesc(static_cast<uint32_t>(i), input_desc) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Update input:%zu desc in op:%s(%s) failed",
i, node->GetName().c_str(), node->GetType().c_str());
GELOGE(FAILED, "UpdateInputDesc fail.");
}
if (node->GetOpDesc()->UpdateOutputDesc(static_cast<uint32_t>(i), output_desc) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Update output:%zu desc in op:%s(%s) failed",
i, node->GetName().c_str(), node->GetType().c_str());
GELOGE(FAILED, "UpdateOutputDesc");
}
GELOGI("successfully remove paired transop (%s and %s) for node %s",


+ 24
- 0
ge/graph/passes/transop_symmetry_elimination_pass.cc

@@ -172,6 +172,12 @@ Status TransOpSymmetryEliminationPass::EliminateTransOp(NodePtr &src_node, const
// 1.Unlink T1->T2
auto ret = src_out_anchor->Unlink(dst_in_anchor);
if (ret != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999",
"Op:%s(%s) out index:%d unlink from op:%s(%s) in index:%d failed",
src_out_anchor->GetOwnerNode()->GetName().c_str(),
src_out_anchor->GetOwnerNode()->GetType().c_str(), src_out_anchor->GetIdx(),
dst_in_anchor->GetOwnerNode()->GetName().c_str(),
dst_in_anchor->GetOwnerNode()->GetType().c_str(), dst_in_anchor->GetIdx());
GELOGE(FAILED, "Unlink data anchor from %s to %s.", src_node->GetName().c_str(), dst_node->GetName().c_str());
return ret;
}
@@ -183,6 +189,11 @@ Status TransOpSymmetryEliminationPass::EliminateTransOp(NodePtr &src_node, const
auto pre_normal_node = in_anchor->GetPeerOutAnchor()->GetOwnerNode();
ret = GraphUtils::AddEdge(in_anchor->GetPeerOutAnchor(), dst_in_anchor);
if (ret != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add edge between op:%s(%s)(index:%d) and op:%s(%s)(index:%d) failed",
pre_normal_node->GetName().c_str(), pre_normal_node->GetType().c_str(),
in_anchor->GetPeerOutAnchor()->GetIdx(),
dst_in_anchor->GetOwnerNode()->GetName().c_str(),
dst_in_anchor->GetOwnerNode()->GetType().c_str(), dst_in_anchor->GetIdx());
GELOGE(FAILED, "Add data edge from %s to %s failed.", pre_normal_node->GetName().c_str(),
dst_node->GetName().c_str());
return ret;
@@ -190,6 +201,9 @@ Status TransOpSymmetryEliminationPass::EliminateTransOp(NodePtr &src_node, const
// 3.Copy in-control/data-in-control from T1->T2
ret = GraphUtils::CopyInCtrlEdges(src_node, dst_node);
if (ret != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Copy in control edge from node:%s(%s) to node:%s(%s) failed",
src_node->GetName().c_str(), src_node->GetType().c_str(),
dst_node->GetName().c_str(), dst_node->GetType().c_str());
GELOGE(FAILED, "Copy control edge from %s to %s failed.", src_node->GetName().c_str(), dst_node->GetName().c_str());
return ret;
}
@@ -198,6 +212,9 @@ Status TransOpSymmetryEliminationPass::EliminateTransOp(NodePtr &src_node, const
if (in_node->GetName() == pre_normal_node->GetName()) { continue; }
ret = GraphUtils::AddEdge(in_node->GetOutControlAnchor(), dst_node->GetInControlAnchor());
if (ret != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add control edge between op:%s(%s) and op:%s(%s) failed",
in_node->GetName().c_str(), in_node->GetType().c_str(),
dst_node->GetName().c_str(), dst_node->GetType().c_str());
GELOGE(FAILED, "Add control edge from %s to %s failed.", in_node->GetName().c_str(), dst_node->GetName().c_str());
return ret;
}
@@ -205,6 +222,8 @@ Status TransOpSymmetryEliminationPass::EliminateTransOp(NodePtr &src_node, const
// 5.IsolateAndDelete T2, A will link to B automatically, and all control edge will also relink.
ret = IsolateAndDeleteNode(dst_node, {0});
if (ret != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Isolate and delete node:%s(%s) failed",
dst_node->GetName().c_str(), dst_node->GetType().c_str());
GELOGE(INTERNAL_ERROR, "Isolate removed node: %s, type: %s failed", dst_node->GetName().c_str(),
dst_node->GetType().c_str());
return ret;
@@ -223,6 +242,9 @@ Status TransOpSymmetryEliminationPass::RemoveTransOpWithoutOutput(NodePtr &pre_n
// 6.1 Copy out control to pre normal node
Status ret = GraphUtils::CopyOutCtrlEdges(trans_node, pre_node);
if (ret != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Copy out control edge from node:%s(%s) to node:%s(%s) failed",
trans_node->GetName().c_str(), trans_node->GetType().c_str(),
pre_node->GetName().c_str(), pre_node->GetType().c_str());
GELOGE(FAILED, "Copy control edge from %s to %s failed.", trans_node->GetName().c_str(),
pre_node->GetName().c_str());
return ret;
@@ -230,6 +252,8 @@ Status TransOpSymmetryEliminationPass::RemoveTransOpWithoutOutput(NodePtr &pre_n
// 6.2 Isolate and delete T1
ret = IsolateAndDeleteNode(trans_node, {});
if (ret != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Isolate and delete node:%s(%s) failed",
trans_node->GetName().c_str(), trans_node->GetType().c_str());
GELOGE(INTERNAL_ERROR, "Isolate removed node: %s, type: %s failed", trans_node->GetName().c_str(),
trans_node->GetType().c_str());
return ret;
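
A minimal standalone sketch of the two-step error path used throughout these pass hunks: REPORT_CALL_ERROR records a structured error for the error manager, GELOGE writes the runtime log, then the function returns early. The two macros and IsolateAndDeleteNode below are simplified stand-ins so the sketch compiles on its own; they are not the real GE definitions.

#include <cstdio>

// Simplified stand-ins for the GE reporting/logging macros.
#define REPORT_CALL_ERROR(code, fmt, ...) \
  std::fprintf(stderr, "[%s] " fmt "\n", code, __VA_ARGS__)
#define GELOGE(status, fmt, ...) \
  std::fprintf(stderr, "ERROR(%d) " fmt "\n", static_cast<int>(status), __VA_ARGS__)

enum Status { SUCCESS = 0, FAILED = 1 };

// Stand-in for the graph utility call; always fails so the error path runs.
Status IsolateAndDeleteNode(const char *name) { (void)name; return FAILED; }

Status RemovePairedTransOp(const char *name, const char *type) {
  if (IsolateAndDeleteNode(name) != SUCCESS) {
    REPORT_CALL_ERROR("E19999", "Isolate and delete node:%s(%s) failed", name, type);  // structured report
    GELOGE(FAILED, "remove node %s failed", name);                                     // runtime log
    return FAILED;                                                                     // early return
  }
  return SUCCESS;
}

int main() { return RemovePairedTransOp("transpose_1", "Transpose") == SUCCESS ? 0 : 1; }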


+ 107
- 7
ge/graph/passes/transop_without_reshape_fusion_pass.cc

@@ -63,7 +63,10 @@ void TransOpWithoutReshapeFusionPass::SetRemainNode(
continue;
}
GELOGI("SetRemainNode node is %s", op_desc->GetName().c_str());
GE_IF_BOOL_EXEC(!op_desc->SetExtAttr(kRemainNode, true), GELOGE(INTERNAL_ERROR, "set ext attr failed"); return);
GE_IF_BOOL_EXEC(!op_desc->SetExtAttr(kRemainNode, true),
REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed", kRemainNode,
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(INTERNAL_ERROR, "set ext attr failed"); return);
}
}

@@ -74,17 +77,29 @@ bool TransOpWithoutReshapeFusionPass::FormatContinuousCheck(const OutDataAnchorP
return false;
}
auto in_node = in_anchor->GetOwnerNode();
GE_IF_BOOL_EXEC(in_node == nullptr, GELOGE(INTERNAL_ERROR, "in_node is null"); return false);
GE_IF_BOOL_EXEC(in_node == nullptr,
REPORT_INNER_ERROR("E19999", "Param in_anchor's owner node is nullptr, check invalid");
GELOGE(INTERNAL_ERROR, "in_node is null"); return false);
auto in_op = in_node->GetOpDesc();
auto out_owner_node = out_anchor->GetOwnerNode();
GE_IF_BOOL_EXEC(out_owner_node == nullptr, GELOGE(INTERNAL_ERROR, "out_owner_node is null"); return false);
GE_IF_BOOL_EXEC(out_owner_node == nullptr,
REPORT_INNER_ERROR("E19999", "Param out_anchor's owner node is nullptr, check invalid");
GELOGE(INTERNAL_ERROR, "out_owner_node is null"); return false);
auto out_op = out_owner_node->GetOpDesc();
GE_IF_BOOL_EXEC(in_op == nullptr, GELOGE(INTERNAL_ERROR, "in_op is null"); return false);
GE_IF_BOOL_EXEC(out_op == nullptr, GELOGE(INTERNAL_ERROR, "out_op is null"); return false);
GE_IF_BOOL_EXEC(in_op == nullptr,
REPORT_INNER_ERROR("E19999", "Param in_anchor's owner op_desc is nullptr, check invalid");
GELOGE(INTERNAL_ERROR, "in_op is null"); return false);
GE_IF_BOOL_EXEC(out_op == nullptr,
REPORT_INNER_ERROR("E19999", "Param out_anchor's owner op_desc is nullptr, check invalid");
GELOGE(INTERNAL_ERROR, "out_op is null"); return false);
auto in_op_desc = in_op->GetInputDescPtr(in_anchor->GetIdx());
auto out_op_desc = out_op->GetOutputDescPtr(out_anchor->GetIdx());
GE_IF_BOOL_EXEC(in_op_desc == nullptr, GELOGE(INTERNAL_ERROR, "in_op_desc is null"); return false);
GE_IF_BOOL_EXEC(out_op_desc == nullptr, GELOGE(INTERNAL_ERROR, "out_op_desc is null"); return false);
GE_IF_BOOL_EXEC(in_op_desc == nullptr,
REPORT_INNER_ERROR("E19999", "Param in_anchor corresponding tensor is nullptr, check invalid");
GELOGE(INTERNAL_ERROR, "in_op_desc is null"); return false);
GE_IF_BOOL_EXEC(out_op_desc == nullptr,
REPORT_INNER_ERROR("E19999", "Param out_anchor corresponding tensor is nullptr, check invalid");
GELOGE(INTERNAL_ERROR, "out_op_desc is null"); return false);
if (!ShapeEqualCheck(in_op_desc->GetShape(), out_op_desc->GetShape())) {
return false;
}
@@ -357,6 +372,9 @@ graphStatus TransOpWithoutReshapeFusionPass::RelinkSubGraphControlEdges(
GELOGI("add control edge.src:%s, dst:%s", out_owner_node->GetName().c_str(), in_owner_node->GetName().c_str());
if (GraphUtils::AddEdge(out_owner_node->GetOutControlAnchor(), in_owner_node->GetInControlAnchor()) !=
GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add control edge between op:%s(%s) and op:%s(%s) failed",
out_owner_node->GetName().c_str(), out_owner_node->GetType().c_str(),
in_owner_node->GetName().c_str(), in_owner_node->GetType().c_str());
return GRAPH_FAILED;
}
}
@@ -365,6 +383,9 @@ graphStatus TransOpWithoutReshapeFusionPass::RelinkSubGraphControlEdges(
GELOGI("add out data 2 in contorl edge.src:%s, dst:%s", out_owner_node->GetName().c_str(),
in_owner_node->GetName().c_str());
if (GraphUtils::AddEdge(out_anchor, in_owner_node->GetInControlAnchor()) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add control edge between op:%s(%s) and op:%s(%s) failed",
out_owner_node->GetName().c_str(), out_owner_node->GetType().c_str(),
in_owner_node->GetName().c_str(), in_owner_node->GetType().c_str());
return GRAPH_FAILED;
}
}
@@ -392,6 +413,10 @@ graphStatus TransOpWithoutReshapeFusionPass::RelinkControlEdgesWhenDescNotChange
GELOGI("add control edge.src:%s, dst:%s, dst idx:%d", out_owner_node->GetName().c_str(),
peer_in_anchor->GetOwnerNode()->GetName().c_str(), peer_in_anchor->GetIdx());
if (GraphUtils::AddEdge(out_owner_node->GetOutControlAnchor(), peer_in_anchor) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add control edge between op:%s(%s) and op:%s(%s) failed",
out_owner_node->GetName().c_str(), out_owner_node->GetType().c_str(),
peer_in_anchor->GetOwnerNode()->GetName().c_str(),
peer_in_anchor->GetOwnerNode()->GetType().c_str());
return GRAPH_FAILED;
}
}
@@ -401,6 +426,10 @@ graphStatus TransOpWithoutReshapeFusionPass::RelinkControlEdgesWhenDescNotChange
GELOGI("add control edge.src:%s, src idx:%d, dst:%s", peer_out_anchor->GetOwnerNode()->GetName().c_str(),
peer_out_anchor->GetIdx(), in_owner_node->GetName().c_str());
if (GraphUtils::AddEdge(peer_out_anchor, in_owner_node->GetInControlAnchor()) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add control edge between op:%s(%s) and op:%s(%s) failed",
peer_out_anchor->GetOwnerNode()->GetName().c_str(),
peer_out_anchor->GetOwnerNode()->GetType().c_str(),
in_owner_node->GetName().c_str(), in_owner_node->GetType().c_str());
return GRAPH_FAILED;
}
}
@@ -410,6 +439,10 @@ graphStatus TransOpWithoutReshapeFusionPass::RelinkControlEdgesWhenDescNotChange
GELOGI("add out control 2 in data edge.src:%s, dst:%s, dst idx:%d", out_owner_node->GetName().c_str(),
peer_in_anchor->GetOwnerNode()->GetName().c_str(), peer_in_anchor->GetIdx());
if (GraphUtils::AddEdge(out_owner_node->GetOutControlAnchor(), peer_in_anchor) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add control edge between op:%s(%s) and op:%s(%s) failed",
out_owner_node->GetName().c_str(), out_owner_node->GetType().c_str(),
peer_in_anchor->GetOwnerNode()->GetName().c_str(),
peer_in_anchor->GetOwnerNode()->GetType().c_str());
return GRAPH_FAILED;
}
}
@@ -419,6 +452,10 @@ graphStatus TransOpWithoutReshapeFusionPass::RelinkControlEdgesWhenDescNotChange
GELOGI("add out data 2 in control edge.src:%s, dst:%s, dst idx:%d", out_owner_node->GetName().c_str(),
peer_in_anchor->GetOwnerNode()->GetName().c_str(), peer_in_anchor->GetIdx());
if (GraphUtils::AddEdge(out_anchor, peer_in_anchor) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add control edge between op:%s(%s) and op:%s(%s) failed",
out_owner_node->GetName().c_str(), out_owner_node->GetType().c_str(),
peer_in_anchor->GetOwnerNode()->GetName().c_str(),
peer_in_anchor->GetOwnerNode()->GetType().c_str());
return GRAPH_FAILED;
}
}
@@ -443,6 +480,9 @@ graphStatus TransOpWithoutReshapeFusionPass::RelinkNodesWhenDescNotChanged(
GELOGI("relink node.src node:%s, src idx:%d, dst node:%s, dst idx:%d", out_owner_node->GetName().c_str(),
out_anchor->GetIdx(), in_owner_node->GetName().c_str(), in_anchor->GetIdx());
if (GraphUtils::AddEdge(out_anchor, in_anchor) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add edge between op:%s(%s)(index:%d) and op:%s(%s)(index:%d) failed",
out_owner_node->GetName().c_str(), out_owner_node->GetType().c_str(), out_anchor->GetIdx(),
in_owner_node->GetName().c_str(), in_owner_node->GetType().c_str(), in_anchor->GetIdx());
GELOGE(GRAPH_FAILED, "add edge failed!src:%s, src idx:%d, dst:%s, dst idx:%d", out_owner_node->GetName().c_str(),
out_anchor->GetIdx(), in_owner_node->GetName().c_str(), in_anchor->GetIdx());
return GRAPH_FAILED;
@@ -466,16 +506,21 @@ OpDescPtr TransOpWithoutReshapeFusionPass::GetFormatTransferOp(const GeTensorDes
format_transfer_op_name << "fusion_format_transfer_" << fusion_format_transfer_op_count;
OpDescPtr format_transfer_op = MakeShared<OpDesc>(format_transfer_op_name.str().c_str(), TRANSDATA);
if (format_transfer_op == nullptr) {
REPORT_CALL_ERROR("E19999", "New GeTensor failed");
GELOGE(INTERNAL_ERROR, "new format transfer op failed!");
return nullptr;
}

GE_IF_BOOL_EXEC(!AttrUtils::SetInt(format_transfer_op, ATTR_NAME_INPUT_FORMAT,
static_cast<int64_t>(format_trans_input_desc.GetFormat())),
REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed", ATTR_NAME_INPUT_FORMAT.c_str(),
format_transfer_op->GetName().c_str(), format_transfer_op->GetType().c_str());
GELOGE(INTERNAL_ERROR, "set ATTR_NAME_INPUT_FORMAT failed");
return nullptr);
GE_IF_BOOL_EXEC(!AttrUtils::SetInt(format_transfer_op, ATTR_NAME_OUTPUT_FORMAT,
static_cast<int64_t>(format_trans_output_desc.GetFormat())),
REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed", ATTR_NAME_OUTPUT_FORMAT.c_str(),
format_transfer_op->GetName().c_str(), format_transfer_op->GetType().c_str());
GELOGE(INTERNAL_ERROR, "set ATTR_NAME_OUTPUT_FORMAT failed");
return nullptr);

@@ -483,22 +528,32 @@ OpDescPtr TransOpWithoutReshapeFusionPass::GetFormatTransferOp(const GeTensorDes
string dst_format = TypeUtils::FormatToSerialString(format_trans_output_desc.GetFormat());

GE_IF_BOOL_EXEC(!AttrUtils::SetStr(format_transfer_op, kAttrNameSrcFormat, src_format),
REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed", kAttrNameSrcFormat,
format_transfer_op->GetName().c_str(), format_transfer_op->GetType().c_str());
GELOGE(INTERNAL_ERROR, "set kAttrNameSrcFormat failed");
return nullptr);

GE_IF_BOOL_EXEC(!AttrUtils::SetStr(format_transfer_op, kAttrNameDstFormat, dst_format),
REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed", kAttrNameDstFormat,
format_transfer_op->GetName().c_str(), format_transfer_op->GetType().c_str());
GELOGE(INTERNAL_ERROR, "set kAttrNameDstFormat failed");
return nullptr);

GE_IF_BOOL_EXEC(format_transfer_op->AddInputDesc(format_trans_input_desc) != GRAPH_SUCCESS,
REPORT_CALL_ERROR("E19999", "Add input desc to op:%s(%s) failed",
format_transfer_op->GetName().c_str(), format_transfer_op->GetType().c_str());
GELOGE(INTERNAL_ERROR, "add input desc failed");
return nullptr);

GE_IF_BOOL_EXEC(format_transfer_op->AddOutputDesc(format_trans_output_desc) != GRAPH_SUCCESS,
REPORT_CALL_ERROR("E19999", "Add ouput desc to op:%s(%s) failed",
format_transfer_op->GetName().c_str(), format_transfer_op->GetType().c_str());
GELOGE(INTERNAL_ERROR, "add output desc failed");
return nullptr);

GE_IF_BOOL_EXEC(!ge::AttrUtils::SetBool(format_transfer_op, ATTR_NEED_COMPILE, true),
REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed", ATTR_NEED_COMPILE.c_str(),
format_transfer_op->GetName().c_str(), format_transfer_op->GetType().c_str());
GELOGE(INTERNAL_ERROR, "set ext attr failed");
return nullptr);
return format_transfer_op;
@@ -515,6 +570,7 @@ OpDescPtr TransOpWithoutReshapeFusionPass::GetCastOp(const GeTensorDesc &cast_in
auto cast_op = ge::OpDescUtils::GetOpDescFromOperator(node_op);
node_op.BreakConnect();
if (cast_op == nullptr) {
REPORT_CALL_ERROR("E19999", "Create operator:%s(%s) failed", cast_op_name.str().c_str(), CAST);
GELOGE(INTERNAL_ERROR, "new cast op failed!");
return nullptr;
}
@@ -522,29 +578,41 @@ OpDescPtr TransOpWithoutReshapeFusionPass::GetCastOp(const GeTensorDesc &cast_in
const int default_output_index = 0;
if (cast_op->GetInputsSize() == 0) {
GE_IF_BOOL_EXEC(cast_op->AddInputDesc(cast_input_desc) != GRAPH_SUCCESS,
REPORT_CALL_ERROR("E19999", "Add input desc to op:%s(%s) failed",
cast_op->GetName().c_str(), cast_op->GetType().c_str());
GELOGE(INTERNAL_ERROR, "add input desc failed");
return nullptr);
} else {
GE_IF_BOOL_EXEC(cast_op->UpdateInputDesc(default_input_index, cast_input_desc) != GRAPH_SUCCESS,
REPORT_CALL_ERROR("E19999", "Update input:%d desc of op:%s(%s) failed", default_input_index,
cast_op->GetName().c_str(), cast_op->GetType().c_str());
GELOGE(INTERNAL_ERROR, "update input desc failed");
return nullptr);
}

if (cast_op->GetOutputsSize() == 0) {
GE_IF_BOOL_EXEC(cast_op->AddOutputDesc(cast_output_desc) != GRAPH_SUCCESS,
REPORT_CALL_ERROR("E19999", "Add output desc to op:%s(%s) failed",
cast_op->GetName().c_str(), cast_op->GetType().c_str());
GELOGE(INTERNAL_ERROR, "add output desc failed");
return nullptr);
} else {
GE_IF_BOOL_EXEC(cast_op->UpdateOutputDesc(default_output_index, cast_output_desc) != GRAPH_SUCCESS,
REPORT_CALL_ERROR("E19999", "Update output:%d desc of op:%s(%s) failed", default_output_index,
cast_op->GetName().c_str(), cast_op->GetType().c_str());
GELOGE(INTERNAL_ERROR, "update output desc failed");
return nullptr);
}

if (!AttrUtils::SetInt(cast_op, CAST_ATTR_DST_TYPE, static_cast<int64_t>(cast_output_desc.GetDataType()))) {
REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed", CAST_ATTR_DST_TYPE.c_str(),
cast_op->GetName().c_str(), cast_op->GetType().c_str());
GELOGE(INTERNAL_ERROR, "set dst_type attr failed");
return nullptr;
}
if (!AttrUtils::SetBool(cast_op, ATTR_NEED_COMPILE, true)) {
REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed", ATTR_NEED_COMPILE.c_str(),
cast_op->GetName().c_str(), cast_op->GetType().c_str());
GELOGE(INTERNAL_ERROR, "set need_compile attr failed");
return nullptr;
}
@@ -879,6 +947,8 @@ graphStatus TransOpWithoutReshapeFusionPass::AddTransNode(const ComputeGraphPtr

trans_node = graph->AddNode(transop);
if (trans_node == nullptr) {
REPORT_CALL_ERROR("E19999", "Add node:%s(%s) to graph:%s failed",
transop->GetName().c_str(), transop->GetType().c_str(), graph->GetName().c_str());
GELOGE(GRAPH_FAILED, "add node failed!");
return GRAPH_FAILED;
}
@@ -945,6 +1015,9 @@ graphStatus TransOpWithoutReshapeFusionPass::InsertNewTransOp(const ComputeGraph
GELOGI("add edge.src:%s, src idx:%d, dst:%s", out_anchor->GetOwnerNode()->GetName().c_str(), out_anchor->GetIdx(),
new_trans_nodes.front()->GetName().c_str());
if (GraphUtils::AddEdge(out_anchor, new_trans_nodes.front()->GetInAnchor(0)) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add edge between op:%s(%s)(index:%d) and op:%s(%s)(index:0) failed",
out_owner_node->GetName().c_str(), out_owner_node->GetType().c_str(), out_anchor->GetIdx(),
new_trans_nodes.front()->GetName().c_str(), new_trans_nodes.front()->GetType().c_str());
return GRAPH_FAILED;
} else {
auto old_peer_in_anchor = begin_out.second;
@@ -957,6 +1030,9 @@ graphStatus TransOpWithoutReshapeFusionPass::InsertNewTransOp(const ComputeGraph
new_trans_nodes.back()->GetName().c_str());
if (GraphUtils::AddEdge(new_trans_nodes.front()->GetOutAnchor(0), new_trans_nodes.back()->GetInAnchor(0)) !=
GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add edge between op:%s(%s)(index:0) and op:%s(%s)(index:0) failed",
new_trans_nodes.front()->GetName().c_str(), new_trans_nodes.front()->GetType().c_str(),
new_trans_nodes.back()->GetName().c_str(), new_trans_nodes.back()->GetType().c_str());
return GRAPH_FAILED;
} else {
auto old_peer_out_anchor = end_in.first;
@@ -967,6 +1043,9 @@ graphStatus TransOpWithoutReshapeFusionPass::InsertNewTransOp(const ComputeGraph
GELOGI("add edge.src:%s, dst:%s, dst idx:%d", new_trans_nodes.back()->GetName().c_str(),
in_anchor->GetOwnerNode()->GetName().c_str(), in_anchor->GetIdx());
if (GraphUtils::AddEdge(new_trans_nodes.back()->GetOutAnchor(0), in_anchor) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add edge between op:%s(%s)(index:0) and op:%s(%s)(index:%d) failed",
new_trans_nodes.front()->GetName().c_str(), new_trans_nodes.front()->GetType().c_str(),
in_owner_node->GetName().c_str(), in_owner_node->GetType().c_str(), in_anchor->GetIdx());
return GRAPH_FAILED;
}

@@ -977,6 +1056,7 @@ graphStatus TransOpWithoutReshapeFusionPass::RelinkControlEdge(const int index,
const vector<NodePtr> &new_trans_nodes) {
GE_CHECK_NOTNULL(out_anchor);
if (new_trans_nodes.front() == nullptr || new_trans_nodes.back() == nullptr) {
REPORT_INNER_ERROR("E19999", "Param new_trans_nodes front or back is nullptr, check invalid");
return GRAPH_FAILED;
}
if (sub_graph_has_control_edge_[index]) {
@@ -984,6 +1064,9 @@ graphStatus TransOpWithoutReshapeFusionPass::RelinkControlEdge(const int index,
new_trans_nodes.front()->GetName().c_str());
if (GraphUtils::AddEdge(out_anchor->GetOwnerNode()->GetOutControlAnchor(),
new_trans_nodes.front()->GetInControlAnchor()) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add control edge between op:%s(%s) and op:%s(%s) failed",
out_anchor->GetOwnerNode()->GetName().c_str(), out_anchor->GetOwnerNode()->GetType().c_str(),
new_trans_nodes.front()->GetName().c_str(), new_trans_nodes.front()->GetType().c_str());
return GRAPH_FAILED;
}
}
@@ -993,6 +1076,10 @@ graphStatus TransOpWithoutReshapeFusionPass::RelinkControlEdge(const int index,
GELOGI("add control edge.src:%s, dst:%s", new_trans_nodes.back()->GetName().c_str(),
peer_in_anchor->GetOwnerNode()->GetName().c_str());
if (GraphUtils::AddEdge(new_trans_nodes.back()->GetOutControlAnchor(), peer_in_anchor) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add control edge between op:%s(%s) and op:%s(%s) failed",
new_trans_nodes.back()->GetName().c_str(), new_trans_nodes.back()->GetType().c_str(),
peer_in_anchor->GetOwnerNode()->GetName().c_str(),
peer_in_anchor->GetOwnerNode()->GetType().c_str());
return GRAPH_FAILED;
}
}
@@ -1002,6 +1089,10 @@ graphStatus TransOpWithoutReshapeFusionPass::RelinkControlEdge(const int index,
GELOGI("add control edge.src:%s, dst:%s", peer_out_anchor->GetOwnerNode()->GetName().c_str(),
new_trans_nodes.front()->GetName().c_str());
if (GraphUtils::AddEdge(peer_out_anchor, new_trans_nodes.front()->GetInControlAnchor()) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add control edge between op:%s(%s) and op:%s(%s) failed",
peer_out_anchor->GetOwnerNode()->GetName().c_str(),
peer_out_anchor->GetOwnerNode()->GetType().c_str(),
new_trans_nodes.front()->GetName().c_str(), new_trans_nodes.front()->GetType().c_str());
return GRAPH_FAILED;
}
}
@@ -1011,6 +1102,10 @@ graphStatus TransOpWithoutReshapeFusionPass::RelinkControlEdge(const int index,
GELOGI("add control edge.src:%s, dst:%s", new_trans_nodes.back()->GetName().c_str(),
peer_in_anchor->GetOwnerNode()->GetName().c_str());
if (GraphUtils::AddEdge(new_trans_nodes.back()->GetOutControlAnchor(), peer_in_anchor) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add control edge between op:%s(%s) and op:%s(%s) failed",
new_trans_nodes.back()->GetName().c_str(), new_trans_nodes.back()->GetType().c_str(),
peer_in_anchor->GetOwnerNode()->GetName().c_str(),
peer_in_anchor->GetOwnerNode()->GetType().c_str());
return GRAPH_FAILED;
}
}
@@ -1020,6 +1115,10 @@ graphStatus TransOpWithoutReshapeFusionPass::RelinkControlEdge(const int index,
GELOGI("add control edge.src:%s, dst:%s", new_trans_nodes.back()->GetName().c_str(),
peer_in_anchor->GetOwnerNode()->GetName().c_str());
if (GraphUtils::AddEdge(new_trans_nodes.back()->GetOutDataAnchor(0), peer_in_anchor) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add edge between op:%s(%s)(index:0) and op:%s(%s)(index:%d) failed",
new_trans_nodes.back()->GetName().c_str(), new_trans_nodes.back()->GetType().c_str(),
peer_in_anchor->GetOwnerNode()->GetName().c_str(),
peer_in_anchor->GetOwnerNode()->GetType().c_str(), peer_in_anchor->GetIdx());
return GRAPH_FAILED;
}
}
@@ -1081,6 +1180,7 @@ graphStatus TransOpWithoutReshapeFusionPass::GetSubGraphsBetweenNormalNode(
vector<std::pair<OutDataAnchorPtr, InDataAnchorPtr>> &nodes_list) {
graphStatus ret = GRAPH_SUCCESS;
if (out_anchor == nullptr) {
REPORT_INNER_ERROR("E19999", "Param out_anchor is nullptr, check invalid");
return GRAPH_FAILED;
}
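
Several hunks above fold the report, the log, and an early return into one GE_IF_BOOL_EXEC branch. The sketch below shows how a macro of that shape can carry several statements as its second argument; IF_BOOL_EXEC and SetAttr are simplified stand-ins, not the real GE macro or AttrUtils API.

#include <cstdio>

// Simplified stand-in: executes exec_expr (which may be several statements,
// including a return) when expr is true.
#define IF_BOOL_EXEC(expr, exec_expr) \
  do {                                \
    if (expr) {                       \
      exec_expr;                      \
    }                                 \
  } while (0)

// Pretend the attribute set fails so the branch is taken.
bool SetAttr(const char *name) { (void)name; return false; }

const char *GetFormatTransferOp() {
  IF_BOOL_EXEC(!SetAttr("src_format"),
               std::fprintf(stderr, "E19999: Set Attr:src_format to op failed\n");
               std::fprintf(stderr, "set kAttrNameSrcFormat failed\n");
               return nullptr);
  return "fusion_format_transfer_0";
}

int main() { return GetFormatTransferOp() == nullptr ? 0 : 1; }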



+ 11
- 0
ge/graph/passes/transpose_transdata_pass.cc

@@ -34,11 +34,13 @@ const char *const kAttrNameSrcFormat = "src_format";
namespace ge {
Status TransposeTransDataPass::Run(NodePtr &node) {
if (node == nullptr) {
REPORT_INNER_ERROR("E19999", "Param node is nullptr, check invalid");
GELOGE(PARAM_INVALID, "param [node] must not be null.");
return PARAM_INVALID;
}
auto op_desc = node->GetOpDesc();
if (op_desc == nullptr) {
REPORT_INNER_ERROR("E19999", "Param node's op_desc is nullptr, check invalid");
GELOGE(PARAM_INVALID, "OpDesc of param [node] must not be null.");
return PARAM_INVALID;
}
@@ -77,6 +79,7 @@ Status TransposeTransDataPass::Run(NodePtr &node) {
GE_CHECK_NOTNULL(out_node);
OpDescPtr out_op_desc = out_node->GetOpDesc();
if (out_op_desc == nullptr) {
REPORT_INNER_ERROR("E19999", "OpDesc in node is nullptr, check invalid");
GELOGE(FAILED, "OpDesc of out data node of [%s] must not be null.", node->GetName().c_str());
return FAILED;
}
@@ -111,6 +114,10 @@ Status TransposeTransDataPass::CheckOneInAndOneOutDataAnchor(NodePtr &node) cons
// Trans op has one input data node, maybe has N output data nodes
uint32_t in_data_node_nums = node->GetInDataNodes().size();
if (in_data_anchor_nums != 1 || out_data_anchor_nums != 1 || in_data_node_nums != 1) {
REPORT_INNER_ERROR("E19999", "In data anchor num:%u, out data anchor num:%u, in data node num:%u of node:%s(%s) "
"must be all equal to 1, check invalid",
in_data_anchor_nums, out_data_anchor_nums, in_data_node_nums,
node->GetName().c_str(), node->GetType().c_str());
GELOGE(FAILED, "[%s] %s has %u in %u out data anchor, has %u in data node.", node->GetType().c_str(),
node->GetName().c_str(), in_data_anchor_nums, out_data_anchor_nums, in_data_node_nums);
return FAILED;
@@ -122,6 +129,8 @@ Status TransposeTransDataPass::RemoveTranspose(NodePtr &node) {
GE_CHECK_NOTNULL(node);
ComputeGraphPtr graph = node->GetOwnerComputeGraph();
if (graph == nullptr) {
REPORT_INNER_ERROR("E19999", "Owner graph of node:%s(%s) is nullptr, check invalid",
node->GetName().c_str(), node->GetType().c_str());
GELOGE(FAILED, "[%s] The owner graph must not be null.", node->GetName().c_str());
return FAILED;
}
@@ -146,6 +155,8 @@ Status TransposeTransDataPass::RemoveTranspose(NodePtr &node) {
}
AddNodeDeleted(node);
if (GraphUtils::RemoveNodeWithoutRelink(graph, node) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Remove node:%s(%s) without relink in graph:%s failed",
node->GetName().c_str(), node->GetType().c_str(), graph->GetName().c_str());
GELOGE(FAILED, "[%s] RemoveNodeWithoutRelink failed.", node->GetName().c_str());
return FAILED;
}


+ 6
- 0
ge/graph/passes/unused_args_clean_pass.cc

@@ -101,6 +101,8 @@ Status UnusedArgsCleanPass::ClassifyDataNodes(const ComputeGraphPtr &graph, cons
for (const auto &name : func_desc->GetSubgraphInstanceNames()) {
const auto &subgraph = graph->GetSubgraph(name);
if (subgraph == nullptr) {
REPORT_CALL_ERROR("E19999", "Get subgraph from graph:%s by name:%s failed",
graph->GetName().c_str(), name.c_str());
GELOGE(GE_GRAPH_EMPTY_SUBGRAPH, "Subgraph not found, name: %s", name.c_str());
return GE_GRAPH_EMPTY_SUBGRAPH;
}
@@ -113,6 +115,8 @@ Status UnusedArgsCleanPass::ClassifyDataNodes(const ComputeGraphPtr &graph, cons

uint32_t parent_index = 0;
if (!AttrUtils::GetInt(data->GetOpDesc(), ATTR_NAME_PARENT_NODE_INDEX, parent_index)) {
REPORT_CALL_ERROR("E19999", "Get Attr:%s from op:%s(%s) failed", ATTR_NAME_PARENT_NODE_INDEX.c_str(),
data->GetName().c_str(), data->GetType().c_str());
GELOGE(FAILED, "Parent index not found, name: %s", data->GetName().c_str());
return FAILED;
}
@@ -150,6 +154,8 @@ Status UnusedArgsCleanPass::UpdateInputTensor(const map<ComputeGraphPtr, map<uin
const auto data = it->second;

if (!AttrUtils::SetInt(data->GetOpDesc(), ATTR_NAME_PARENT_NODE_INDEX, update_index)) {
REPORT_CALL_ERROR("E19999", "Get Attr:%s from op:%s(%s) failed", ATTR_NAME_PARENT_NODE_INDEX.c_str(),
data->GetName().c_str(), data->GetType().c_str());
GELOGE(FAILED, "Set parent index failed, name: %s", data->GetName().c_str());
return FAILED;
}


+ 2
- 0
ge/graph/passes/unused_const_pass.cc

@@ -27,10 +27,12 @@ namespace ge {
///
Status UnusedConstPass::Run(NodePtr &node) {
if (node == nullptr) {
REPORT_INNER_ERROR("E19999", "Param node is nullptr, check invalid");
GELOGE(FAILED, "parameter is null.");
return FAILED;
}
if (node->GetOpDesc() == nullptr) {
REPORT_INNER_ERROR("E19999", "Param node's op_desc is nullptr, check invalid");
GELOGE(PARAM_INVALID, "param [opDesc] must not be null.");
return PARAM_INVALID;
}


+ 36
- 0
ge/graph/passes/var_is_initialized_op_pass.cc

@@ -61,6 +61,8 @@ Status VarIsInitializedOpPass::CheckSrcNode(const NodePtr &node, bool &inited) c
GE_CHECK_NOTNULL(node);
auto input_nodes = node->GetInDataNodes();
if (input_nodes.size() != kVarIsInitializedIOCnt) {
REPORT_INNER_ERROR("E19999", "In data node num:%zu of node:%s(%s) not equal to %d, check invalid",
input_nodes.size(), node->GetName().c_str(), node->GetType().c_str(), kVarIsInitializedIOCnt);
GELOGE(FAILED,
"[%s] Node input data nodes size [%zu] is not equal 1.",
node->GetName().c_str(),
@@ -73,6 +75,9 @@ Status VarIsInitializedOpPass::CheckSrcNode(const NodePtr &node, bool &inited) c
auto input_node_name = input_node->GetName();
auto input_node_type = input_node->GetType();
if (input_node_type != VARIABLE) {
REPORT_INNER_ERROR("E19999", "Index:%d In data node of node:%s(%s), type:%s not %s, check invalid",
kVarIsInitVarInputIndex, node->GetName().c_str(), node->GetType().c_str(),
input_node_type.c_str(), VARIABLE);
GELOGE(FAILED, "[%s] Src node %s is not Variable, is %s.", node->GetName().c_str(), input_node_name.c_str(),
input_node_type.c_str());
return FAILED;
@@ -95,6 +100,7 @@ Status VarIsInitializedOpPass::CreateConstant(NodePtr &node, OpDescPtr &op_desc,
// 1. create Constant OpDesc
op_desc = MakeShared<OpDesc>(node->GetName().c_str(), CONSTANT);
if (op_desc == nullptr) {
REPORT_CALL_ERROR("E19999", "New OpDesc failed");
GELOGE(FAILED, "[%s] Make shared of Constant op desc failed.", node->GetName().c_str());
return FAILED;
}
@@ -102,6 +108,7 @@ Status VarIsInitializedOpPass::CreateConstant(NodePtr &node, OpDescPtr &op_desc,
// 2. get OpDesc of VarIsInitializedOp
OpDescPtr original_op_desc = node->GetOpDesc();
if (original_op_desc == nullptr) {
REPORT_INNER_ERROR("E19999", "OpDesc in node is nullptr, check invalid");
GELOGE(FAILED, "[%s] Op desc must not be null.", node->GetName().c_str());
return FAILED;
}
@@ -111,10 +118,13 @@ Status VarIsInitializedOpPass::CreateConstant(NodePtr &node, OpDescPtr &op_desc,
bool val = inited;
GeTensorPtr const_tensor_ptr = MakeShared<GeTensor>(original_desc, reinterpret_cast<uint8_t *>(&val), sizeof(bool));
if (const_tensor_ptr == nullptr) {
REPORT_CALL_ERROR("E19999", "New GeTensor failed");
GELOGE(FAILED, "[%s] Make shared of Constant tensor failed.", node->GetName().c_str());
return FAILED;
}
if (!AttrUtils::SetTensor(op_desc, ATTR_NAME_WEIGHTS, const_tensor_ptr)) {
REPORT_CALL_ERROR("E19999", "Set Attr:%s to op:%s(%s) failed", ATTR_NAME_WEIGHTS.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(INTERNAL_ERROR, "get ATTR_NAME_WEIGHTS failed");
return FAILED;
}
@@ -131,6 +141,9 @@ Status VarIsInitializedOpPass::ProcessInAnchor(NodePtr &node, NodePtr &new_node)
auto out_anchors = node->GetAllOutDataAnchors();
if ((in_anchors.size() != kVarIsInitializedIOCnt) ||
(out_anchors.size() != kVarIsInitializedIOCnt)) {
REPORT_INNER_ERROR("E19999", "In data anchor num:%zu and out data anchor num:%zu of node:%s(%s), "
"must botch equal to %d, check invalid", in_anchors.size(), out_anchors.size(),
node->GetName().c_str(), node->GetType().c_str(), kVarIsInitializedIOCnt);
GELOGE(FAILED,
"[%s] Node input/output data anchors"
" size [%lu][%lu] is not all equal 1.",
@@ -144,22 +157,36 @@ Status VarIsInitializedOpPass::ProcessInAnchor(NodePtr &node, NodePtr &new_node)
auto peer_out_anchor = in_anchor->GetPeerOutAnchor();
GE_CHECK_NOTNULL(peer_out_anchor);
if (GraphUtils::RemoveEdge(in_anchor, peer_out_anchor) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Remove edge between op:%s(%s)(index:%d) and op:%s(%s)(index:%d) failed",
in_anchor->GetOwnerNode()->GetName().c_str(), in_anchor->GetOwnerNode()->GetType().c_str(),
in_anchor->GetIdx(),
peer_out_anchor->GetOwnerNode()->GetName().c_str(),
peer_out_anchor->GetOwnerNode()->GetType().c_str(), peer_out_anchor->GetIdx());
GELOGE(FAILED, "[%s] Remove in data edge failed.", node->GetName().c_str());
return FAILED;
}
auto src_node = peer_out_anchor->GetOwnerNode();
if (GraphUtils::AddEdge(src_node->GetOutControlAnchor(), new_node->GetInControlAnchor()) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add control edge between op:%s(%s) and op:%s(%s) failed",
src_node->GetName().c_str(), src_node->GetType().c_str(),
new_node->GetName().c_str(), new_node->GetType().c_str());
GELOGE(FAILED, "Failed to link control edges from var %s to new const %s",
src_node->GetName().c_str(), new_node->GetName().c_str());
return FAILED;
}

if (GraphUtils::MoveInCtrlEdges(node, new_node) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Move in control edge from node:%s(%s) to node:%s(%s) failed",
node->GetName().c_str(), node->GetType().c_str(),
new_node->GetName().c_str(), new_node->GetType().c_str());
GELOGE(FAILED, "Failed to move in ctrl edges from %s to new const", node->GetName().c_str());
return FAILED;
}

if (GraphUtils::MoveOutCtrlEdges(node, new_node) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Move out control edge from node:%s(%s) to node:%s(%s) failed",
node->GetName().c_str(), node->GetType().c_str(),
new_node->GetName().c_str(), new_node->GetType().c_str());
GELOGE(FAILED, "Failed to move out ctrl edges from %s to new const", node->GetName().c_str());
return FAILED;
}
@@ -177,6 +204,9 @@ Status VarIsInitializedOpPass::ChangeNodeToConstant(NodePtr &node, bool inited)

NodePtr const_node = graph->AddNodeFront(constant_op_desc);
if (const_node == nullptr) {
REPORT_CALL_ERROR("E19999", "Add node:%s(%s) to graph:%s front failed",
constant_op_desc->GetName().c_str(), constant_op_desc->GetType().c_str(),
graph->GetName().c_str());
return FAILED;
}

@@ -185,11 +215,16 @@ Status VarIsInitializedOpPass::ChangeNodeToConstant(NodePtr &node, bool inited)
}

if (NodeUtils::MoveOutputEdges(node, const_node) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Move out edge from node:%s(%s) to node:%s(%s) failed",
node->GetName().c_str(), node->GetType().c_str(),
const_node->GetName().c_str(), const_node->GetType().c_str());
GELOGE(FAILED, "[%s] Move output edges to new node failed.", node->GetName().c_str());
return FAILED;
}

if (GraphUtils::RemoveNodeWithoutRelink(graph, node) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Remove node:%s(%s) without relink in graph:%s failed",
node->GetName().c_str(), node->GetType().c_str(), graph->GetName().c_str());
GELOGE(FAILED, "[%s] RemoveNodeWithoutRelink failed.", node->GetName().c_str());
return FAILED;
}
@@ -263,6 +298,7 @@ Status VarIsInitializedOpPass::UpdateInitedVars(const NodePtr &node) {
std::set<int64_t> *VarIsInitializedOpPass::CreateInitedVars() {
std::unique_ptr<std::set<int64_t>> inited_vars_keeper(new(std::nothrow) std::set<int64_t>());
if (inited_vars_keeper == nullptr) {
REPORT_CALL_ERROR("E19999", "New set failed");
GELOGE(OUT_OF_MEMORY, "Failed to alloc set memory");
return nullptr;
}


+ 38
- 0
ge/graph/passes/variable_op_pass.cc

@@ -47,6 +47,9 @@ Status ByPassTransNode(NodePtr &trans_node, NodePtr &ref_node) {
GELOGD("Begin to bypass trans node %s", trans_node->GetName().c_str());
auto ret = GraphUtils::CopyInCtrlEdges(trans_node, ref_node);
if (ret != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Copy in control edge from node:%s(%s) to node:%s(%s) failed",
trans_node->GetName().c_str(), trans_node->GetType().c_str(),
ref_node->GetName().c_str(), ref_node->GetType().c_str());
GELOGE(INTERNAL_ERROR,
"Failed to move control edges from trans "
"node %s to var-ref %s",
@@ -55,6 +58,8 @@ Status ByPassTransNode(NodePtr &trans_node, NodePtr &ref_node) {
}
auto ref_in_anchor = ref_node->GetInDataAnchor(0);
if (ref_in_anchor == nullptr) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s) has no input anchor, check invalid",
ref_node->GetName().c_str(), ref_node->GetType().c_str());
GELOGE(INTERNAL_ERROR,
"The variable ref node %s does not have an "
"input anchor",
@@ -64,6 +69,8 @@ Status ByPassTransNode(NodePtr &trans_node, NodePtr &ref_node) {
ref_in_anchor->UnlinkAll();
auto trans_in_anchor = trans_node->GetInDataAnchor(0);
if (trans_in_anchor == nullptr) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s) has no input anchor, check invalid",
trans_node->GetName().c_str(), trans_node->GetType().c_str());
GELOGE(INTERNAL_ERROR,
"Failed to get the in data anchor from trans"
" node %s type %s",
@@ -79,6 +86,11 @@ Status ByPassTransNode(NodePtr &trans_node, NodePtr &ref_node) {
} else {
ret = GraphUtils::AddEdge(prev_trans_node_out_anchor, ref_in_anchor);
if (ret != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add edge between op:%s(%s)(index:%d) and op:%s(%s)(index:0) failed",
prev_trans_node_out_anchor->GetOwnerNode()->GetName().c_str(),
prev_trans_node_out_anchor->GetOwnerNode()->GetType().c_str(),
prev_trans_node_out_anchor->GetIdx(),
ref_node->GetName().c_str(), ref_node->GetType().c_str());
GELOGE(INTERNAL_ERROR,
"Failed to add edge between ref node %s "
"and the prev node of trans node %s",
@@ -115,6 +127,7 @@ bool IsTransSupport(const TransNodeInfo &trans_info) {

Status VariableOpPass::Run(ge::ComputeGraphPtr graph) {
if (graph == nullptr) {
REPORT_INNER_ERROR("E19999", "Param graph is nullptr, check invalid");
GELOGE(INTERNAL_ERROR, "Failed to run variable op pass, null graph");
return INTERNAL_ERROR;
}
@@ -124,6 +137,7 @@ Status VariableOpPass::Run(ge::ComputeGraphPtr graph) {
GetContext().SessionId(), graph_id);

if (var_accelerate_ctrl_ == nullptr) {
REPORT_INNER_ERROR("E19999", "The variable accelerate control is nullptr, check invalid");
GELOGE(INTERNAL_ERROR, "Failed to run var op pass, the variable accelerate control is null");
return INTERNAL_ERROR;
}
@@ -174,11 +188,15 @@ Status VariableOpPass::Run(ge::ComputeGraphPtr graph) {

ret = VarManager::Instance(graph->GetSessionID())->SetTransRoad(node->GetName(), fusion_road);
if (ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Set Trans road for node:%s(%s) failed, session_id:%lu",
node->GetName().c_str(), node->GetType().c_str(), graph->GetSessionID());
GELOGE(INTERNAL_ERROR, "Failed to update the format fusion road for var %s", node->GetName().c_str());
return INTERNAL_ERROR;
}
ret = VarManager::Instance(graph->GetSessionID())->SetChangedGraphId(node->GetName(), graph_id);
if (ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Update graph_id:%u for node:%s(%s) failed, session_id:%lu",
graph_id, node->GetName().c_str(), node->GetType().c_str(), graph->GetSessionID());
GELOGE(INTERNAL_ERROR, "Failed to update the graph id for var %s", node->GetName().c_str());
return INTERNAL_ERROR;
}
@@ -210,10 +228,14 @@ Status VariableOpPass::DealFusion(const ge::NodePtr &var_node) {
trans_node->GetType().c_str(), var_node->GetName().c_str());

if (GraphUtils::IsolateNode(trans_node, {0}) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Isolate node:%s(%s) failed",
trans_node->GetName().c_str(), trans_node->GetType().c_str());
return GE_GRAPH_VARIABLE_OP_PASS_FAILED;
}

if (GraphUtils::RemoveNodeWithoutRelink(graph, trans_node) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Remove node:%s(%s) without relink in graph:%s failed",
trans_node->GetName().c_str(), trans_node->GetType().c_str(), graph->GetName().c_str());
return GE_GRAPH_VARIABLE_OP_PASS_FAILED;
}
}
@@ -245,9 +267,13 @@ Status VariableOpPass::DealFusion(const ge::NodePtr &var_node) {
" one output data nodes, isolate and remove it.",
trans_node->GetName().c_str(), trans_node->GetType().c_str(), ref_node->GetName().c_str());
if (GraphUtils::IsolateNode(trans_node, {0}) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Isolate node:%s(%s) failed",
trans_node->GetName().c_str(), trans_node->GetType().c_str());
return GE_GRAPH_VARIABLE_OP_PASS_FAILED;
}
if (GraphUtils::RemoveNodeWithoutRelink(graph, trans_node) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Remove node:%s(%s) without relink in graph:%s failed",
trans_node->GetName().c_str(), trans_node->GetType().c_str(), graph->GetName().c_str());
return GE_GRAPH_VARIABLE_OP_PASS_FAILED;
}
}
@@ -365,6 +391,7 @@ Status VariableOpPass::CheckVariableRefLegally(const ge::NodePtr &var_node, bool

Status VariableOpPass::UpdateVarAndRefOutputFormatInfo(const GeTensorDesc &final_output, const ge::NodePtr &node) {
if (node == nullptr || node->GetOpDesc() == nullptr) {
REPORT_INNER_ERROR("E19999", "Param node or its op_desc is nullptr, check invalid");
GELOGE(FAILED, "node or opdesc is nullptr");
return FAILED;
}
@@ -377,6 +404,8 @@ Status VariableOpPass::UpdateVarAndRefOutputFormatInfo(const GeTensorDesc &final
auto node_desc = node->GetOpDesc()->GetOutputDesc(0);
CopyVariableFormatDataTypeAndShape(final_output, node_desc);
if (node->GetOpDesc()->UpdateOutputDesc(0, node_desc) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Update ouput:0 desc in op:%s(%s) failed",
node->GetName().c_str(), node->GetType().c_str());
GELOGE(FAILED, "update output desc fail.");
return FAILED;
}
@@ -460,6 +489,10 @@ Status VariableOpPass::CheckVarAndVarRefAreAlike(const NodePtr &var_node, const
GELOGD("var_ref_node_trans_nodes size is %zu.", var_ref_node_trans_nodes.size());

if (var_ref_node_trans_nodes.size() > 1) {
REPORT_INNER_ERROR("E19999", "In data node num:%zu of node:%s(%s) bigger than 1, check invalid",
var_ref_node_trans_nodes.size(),
var_ref_node->GetName().c_str(), var_ref_node->GetType().c_str());

GELOGE(GE_GRAPH_VARIABLE_OP_PASS_FAILED, "var_ref_node_trans_nodes.size() > 1.");
return GE_GRAPH_VARIABLE_OP_PASS_FAILED;
}
@@ -525,6 +558,7 @@ void VariableOpPass::CopyVariableFormatDataTypeAndShape(const GeTensorDesc &src_

Status VariableOpPass::CheckIfCouldBeOptimized(const ge::NodePtr &node, bool &flag, VarTransRoad &fusion_road) {
if (node == nullptr) {
REPORT_INNER_ERROR("E19999", "Param node is nullptr, check invalid");
return FAILED;
}
bool is_matched = false;
@@ -602,6 +636,8 @@ Status VariableOpPass::RenewVarDesc(ge::ComputeGraphPtr &graph) {
GE_CHECK_NOTNULL(node->GetOpDesc());
ret = ge::VarManager::Instance(graph->GetSessionID())->RenewCurVarDesc(node->GetName(), node->GetOpDesc());
if (ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Renew descriptor for node:%s(%s) failed, session_id:%lu",
node->GetName().c_str(), node->GetType().c_str(), graph->GetSessionID());
GELOGE(FAILED, "var manager renew var[%s] descriptor failed!", node->GetName().c_str());
return FAILED;
}
@@ -626,6 +662,8 @@ Status VariableOpPass::RenewVarDesc(uint64_t session_id, const NodePtr &node, co
GE_CHECK_NOTNULL(node->GetOpDesc());
Status ret = ge::VarManager::Instance(session_id)->RenewCurVarDesc(node->GetName(), node->GetOpDesc());
if (ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Renew descriptor for node:%s(%s) failed, session_id:%lu",
node->GetName().c_str(), node->GetType().c_str(), session_id);
GELOGE(FAILED, "var manager renew var[%s] descriptor failed!", node->GetName().c_str());
return FAILED;
}


+ 10
- 0
ge/graph/passes/variable_ref_delete_op_pass.cc

@@ -35,6 +35,8 @@ Status VariableRefDeleteOpPass::Run(ge::ComputeGraphPtr graph) {
continue;
}
if (all_var_names.count(ref_var_src_var_name) == 0) {
REPORT_INNER_ERROR("E19999", "Can not find source variable[%s] of variable ref[%s], check invalid",
ref_var_src_var_name.c_str(), node->GetName().c_str());
GELOGE(FAILED, "Can not find source variable[%s] of variable ref[%s]", ref_var_src_var_name.c_str(),
node->GetName().c_str());
return FAILED;
@@ -53,6 +55,8 @@ Status VariableRefDeleteOpPass::DealVariableRef(ge::ComputeGraphPtr &graph, ge::
GE_CHECK_NOTNULL(variable_ref);
auto inAnchor0 = variable_ref->GetInDataAnchor(0);
if (inAnchor0 == nullptr) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s) has no input anchor, check invalid",
variable_ref->GetName().c_str(), variable_ref->GetType().c_str());
GELOGE(FAILED, "variable_ref [%s] no input", variable_ref->GetName().c_str());
return FAILED;
}
@@ -73,17 +77,23 @@ Status VariableRefDeleteOpPass::DealVariableRef(ge::ComputeGraphPtr &graph, ge::
GELOGI("[%s-%d]: add attr [REF_VAR_SRC_VAR_NAME: %s ] ", peer_node->GetName().c_str(), index,
ref_var_src_var_name.c_str());
} else {
REPORT_CALL_ERROR("E19999", "Set Attr:%s to output:%d desc of op:%s(%s) failed", REF_VAR_SRC_VAR_NAME.c_str(),
index, op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "[%s-%d]: add attr [REF_VAR_SRC_VAR_NAME: %s ] failed", peer_node->GetName().c_str(), index,
ref_var_src_var_name.c_str());
return FAILED;
}
// remove variable_ref
if (GraphUtils::IsolateNode(variable_ref, {0}) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Isolate node:%s(%s) failed",
variable_ref->GetName().c_str(), variable_ref->GetType().c_str());
GELOGE(INTERNAL_ERROR, "Isolate removed node: %s, type: %s failed", variable_ref->GetName().c_str(),
variable_ref->GetType().c_str());
return FAILED;
}
if (GraphUtils::RemoveNodeWithoutRelink(graph, variable_ref) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Remove node:%s(%s) without relink in graph:%s failed",
variable_ref->GetName().c_str(), variable_ref->GetType().c_str(), graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Remove node: %s, type: %s without relink failed", variable_ref->GetName().c_str(),
variable_ref->GetType().c_str());
return FAILED;


+ 22
- 0
ge/hybrid/executor/hybrid_execution_context.cc

@@ -63,5 +63,27 @@ Status GraphExecutionContext::Synchronize(rtStream_t rt_stream) {
REPORT_CALL_ERROR("E19999", "invoke rtStreamSynchronize failed, ret = %d", rt_ret);
return RT_FAILED;
}

Status GraphExecutionContext::DumpExceptionInfo(const std::vector<rtExceptionInfo> &exception_infos) {
if (exception_infos.empty()) {
GELOGI("[Dump][ExceptionInfo] Exception info is null.");
return SUCCESS;
}
GELOGI("[Dump][ExceptionInfo] Start to search dynamic op info and to dump.");
if (exception_dumper.DumpExceptionInfo(exception_infos) != SUCCESS) {
GELOGE(FAILED, "[Dump][Exception] Dump dynamic op exception info failed.");
return FAILED;
}
GELOGI("[Dump][ExceptionInfo] Start to search static op info and to dump.");
for (const auto &iter : davinci_model) {
if (iter != nullptr) {
if (iter->DumpExceptionInfo(exception_infos) != SUCCESS) {
GELOGE(FAILED, "[Dump][ExceptionInfo] Dump static op exception info failed.");
return FAILED;
}
}
}
return SUCCESS;
}
} // namespace hybrid
} // namespace ge
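
The DumpExceptionInfo flow added above consults the dynamic-op exception dumper first and then walks every loaded static model. A condensed, self-contained sketch of that ordering follows; ExceptionDumper and StaticModel are hypothetical stand-ins for the real ExceptionDumper and DavinciModel types.

#include <memory>
#include <vector>

enum Status { SUCCESS = 0, FAILED = 1 };
struct ExceptionInfo { unsigned task_id = 0; unsigned stream_id = 0; };

struct ExceptionDumper {  // stand-in for the dynamic-op dumper
  Status DumpExceptionInfo(const std::vector<ExceptionInfo> &) const { return SUCCESS; }
};
struct StaticModel {      // stand-in for a loaded DavinciModel
  Status DumpExceptionInfo(const std::vector<ExceptionInfo> &) const { return SUCCESS; }
};

Status DumpAll(const ExceptionDumper &dumper,
               const std::vector<std::shared_ptr<StaticModel>> &models,
               const std::vector<ExceptionInfo> &infos) {
  if (infos.empty()) {
    return SUCCESS;  // nothing to dump
  }
  if (dumper.DumpExceptionInfo(infos) != SUCCESS) {
    return FAILED;   // dynamic op info first
  }
  for (const auto &model : models) {
    if (model != nullptr && model->DumpExceptionInfo(infos) != SUCCESS) {
      return FAILED; // then each loaded static model
    }
  }
  return SUCCESS;
}

int main() { return DumpAll(ExceptionDumper{}, {}, {}); }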

+ 4
- 0
ge/hybrid/executor/hybrid_execution_context.h

@@ -23,6 +23,7 @@
#include "common/properties_manager.h"
#include "framework/common/debug/ge_log.h"
#include "graph/ge_local_context.h"
#include "graph/load/model_manager/davinci_model.h"
#include "hybrid/common/npu_memory_allocator.h"
#include "hybrid/common/tensor_value.h"
#include "hybrid/executor/hybrid_profiler.h"
@@ -54,6 +55,7 @@ struct GraphExecutionContext {
void SetErrorCode(Status error_code);
Status GetStatus() const;
Status Synchronize(rtStream_t rt_stream);
Status DumpExceptionInfo(const std::vector<rtExceptionInfo> &exception_infos);

uint64_t session_id = 0;
uint64_t context_id = 0;
@@ -68,6 +70,8 @@ struct GraphExecutionContext {
DumpProperties dump_properties;
bool trace_enabled = false;
bool dump_enabled = false;
ExceptionDumper exception_dumper;
std::vector<std::shared_ptr<ge::DavinciModel>> davinci_model;
std::atomic_bool is_eos_{false};
long profiling_level = 0;
long iteration = 0;


+ 6
- 1
ge/hybrid/executor/hybrid_model_async_executor.cc

@@ -144,8 +144,12 @@ Status HybridModelAsyncExecutor::RunInternal() {
GE_MAKE_GUARD(not_used_var, [&] { GE_CHK_RT(rtDeviceReset(device_id)); });

while (run_flag_) {
// The model has not actually started running until data is received
SetRunningFlag(false);
std::shared_ptr<InputDataWrapper> data_wrapper;
Status ret = data_inputer_->Pop(data_wrapper);
// Data has been received, so the model is now actually running
SetRunningFlag(true);
if (data_wrapper == nullptr || ret != SUCCESS) {
GELOGI("data_wrapper is null!, ret = %u", ret);
continue;
@@ -185,7 +189,8 @@ Status HybridModelAsyncExecutor::RunInternal() {

RECORD_MODEL_EXECUTION_EVENT(executor_->GetContext(), "[RunInternal] [iteration = %d] End", iterator_count_);
iterator_count_++;
GELOGI("run iterator count is %lu", iterator_count_);
SetRunningFlag(false);
GELOGI("run iterator count is %lu, model_id:%u", iterator_count_, model_id_);
}

CsaInteract::GetInstance().WriteInternalErrorCode();


+ 10
- 0
ge/hybrid/executor/hybrid_model_async_executor.h

@@ -55,6 +55,14 @@ class HybridModelAsyncExecutor {

Status EnqueueData(const std::shared_ptr<InputDataWrapper> &data);

uint32_t GetDataInputerSize() { return data_inputer_->Size(); }

bool GetRunningFlag() const { return running_flag_; }

void SetRunningFlag(bool flag) { running_flag_ = flag; }

const GraphExecutionContext * GeContext() { return executor_->GetContext(); }

private:
Status InitInputDesc();

@@ -84,6 +92,8 @@ class HybridModelAsyncExecutor {
uint32_t device_id_ = 0U;
uint32_t model_id_ = 0U;
std::atomic_bool run_flag_;
// indicates whether the model is currently running with input data
bool running_flag_ = false;
std::unique_ptr<DataInputer> data_inputer_;
std::unique_ptr<HybridModelExecutor> executor_;
std::unique_ptr<HybridModelPipelineExecutor> pipe_executor_;
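
The running_flag_ bookkeeping above is cleared while RunInternal blocks on the input queue and set once data has actually been popped, so callers can tell a loaded-but-idle model apart from one that is mid-iteration. A self-contained sketch of the same idea follows; BlockingQueue and Worker are illustrative names, and the flag is atomic here whereas the executor itself stores a plain bool.

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <queue>
#include <utility>

template <typename T>
class BlockingQueue {
 public:
  void Push(T value) {
    { std::lock_guard<std::mutex> lk(mu_); q_.push(std::move(value)); }
    cv_.notify_one();
  }
  T Pop() {  // blocks until an element is available
    std::unique_lock<std::mutex> lk(mu_);
    cv_.wait(lk, [this] { return !q_.empty(); });
    T value = std::move(q_.front());
    q_.pop();
    return value;
  }
 private:
  std::mutex mu_;
  std::condition_variable cv_;
  std::queue<T> q_;
};

class Worker {
 public:
  bool GetRunningFlag() const { return running_flag_.load(); }
  void RunOnce(BlockingQueue<int> &inputs) {
    running_flag_ = false;  // idle: blocked waiting for input data
    int data = inputs.Pop();
    running_flag_ = true;   // data arrived: the model is really running
    (void)data;             // ... execute one iteration here ...
    running_flag_ = false;  // iteration finished
  }
 private:
  std::atomic<bool> running_flag_{false};
};

int main() {
  BlockingQueue<int> q;
  q.Push(1);
  Worker w;
  w.RunOnce(q);
  return w.GetRunningFlag() ? 1 : 0;
}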


+ 12
- 1
ge/hybrid/executor/hybrid_model_executor.cc

@@ -18,6 +18,7 @@
#include "graph/ge_context.h"
#include "graph/runtime_inference_context.h"
#include "graph/utils/tensor_utils.h"
#include "graph/load/model_manager/model_manager.h"
#include "common/dump/dump_manager.h"
#include "common/profiling/profiling_manager.h"

@@ -102,7 +103,17 @@ Status HybridModelExecutor::ExecuteGraphInternal(SubgraphExecutor &executor,
}

if (!model_->IsSingleOp()) {
HYBRID_CHK_STATUS_RET(executor.Synchronize(), "Failed to sync root graph.");
Status ret = executor.Synchronize();
if (ret != ge::SUCCESS) {
auto model_manager = ModelManager::GetInstance();
GE_CHECK_NOTNULL(model_manager);
auto exception_infos = model_manager->GetExceptionInfos();
if (!exception_infos.empty()) {
HYBRID_CHK_STATUS_RET(context_.DumpExceptionInfo(exception_infos),
"[Execute][GraphInternal] Dump exception info failed.");
}
GELOGE(ret, "[Execute][GraphInternal] Synchronize failed.");
}
RECORD_MODEL_EXECUTION_EVENT(&context_, "[Synchronize] End");
}
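
The failure path added above dumps exception records only when Synchronize fails and the model manager reports pending exceptions. A generic sketch of that control flow follows; the std::function parameters stand in for executor.Synchronize(), ModelManager::GetExceptionInfos() and context.DumpExceptionInfo(), which are not reproduced here.

#include <functional>
#include <vector>

enum Status { SUCCESS = 0, FAILED = 1 };
struct ExceptionInfo { unsigned task_id = 0; unsigned stream_id = 0; };

Status SynchronizeWithExceptionDump(
    const std::function<Status()> &sync,
    const std::function<std::vector<ExceptionInfo>()> &get_exceptions,
    const std::function<Status(const std::vector<ExceptionInfo> &)> &dump) {
  Status ret = sync();
  if (ret != SUCCESS) {
    auto infos = get_exceptions();  // pending device exception records, if any
    if (!infos.empty() && dump(infos) != SUCCESS) {
      return FAILED;                // the dump itself failed
    }
  }
  return ret;                       // propagate the original synchronize status
}

int main() {
  auto ok = []() -> Status { return SUCCESS; };
  auto none = [] { return std::vector<ExceptionInfo>{}; };
  auto dump = [](const std::vector<ExceptionInfo> &) -> Status { return SUCCESS; };
  return SynchronizeWithExceptionDump(ok, none, dump);
}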



+ 8
- 0
ge/hybrid/executor/hybrid_model_pipeline_executor.cc

@@ -4,6 +4,7 @@
#include "common/dump/dump_manager.h"
#include "graph/ge_context.h"
#include "graph/runtime_inference_context.h"
#include "graph/load/model_manager/model_manager.h"

namespace ge {
namespace hybrid {
@@ -266,6 +267,13 @@ Status HybridModelPipelineExecutor::Execute(HybridModelExecutor::ExecuteArgs &ar
ret = stage_executors_[i]->Synchronize();

if (ret != SUCCESS) {
auto model_manager = ModelManager::GetInstance();
GE_CHECK_NOTNULL(model_manager);
auto exception_infos = model_manager->GetExceptionInfos();
if (!exception_infos.empty()) {
HYBRID_CHK_STATUS_RET(context_.DumpExceptionInfo(exception_infos),
"[Execute][GraphInternal] Dump exception info failed.");
}
GELOGE(ret, "[Invoke][Synchronize] failed for [Executor: %zu].", i);
REPORT_CALL_ERROR("E19999", "[Executor: %zu] failed to Synchronize result.", i);
has_error = true;


+ 42
- 0
ge/hybrid/executor/worker/execution_engine.cc

@@ -19,6 +19,7 @@
#include "graph/utils/tensor_utils.h"
#include "graph/utils/tensor_adapter.h"
#include "graph/debug/ge_attr_define.h"
#include "graph/load/model_manager/model_manager.h"
#include "hybrid/node_executor/node_executor.h"
#include "hybrid/executor//worker//shape_inference_engine.h"
#include "common/dump/dump_op.h"
@@ -70,6 +71,7 @@ class NodeDoneCallback {
Status PrepareConstInputs(const NodeItem &node_item);
Status DumpDynamicNode();
Status ProfilingReport();
Status SaveDumpOpInfo();
Status GetTaskDescInfo(const NodePtr node, const HybridModel *model,
std::vector<TaskDescInfo> &task_desc_info);
GraphExecutionContext *graph_context_;
@@ -266,6 +268,40 @@ Status NodeDoneCallback::DumpDynamicNode() {
return SUCCESS;
}

Status NodeDoneCallback::SaveDumpOpInfo() {
GE_CHECK_NOTNULL(graph_context_);
GE_CHECK_NOTNULL(graph_context_->model);

auto node = context_->GetNodeItem().node;
if (node == nullptr) {
GELOGE(PARAM_INVALID, "[Save][DumpOpInfo] Get node is nullptr.");
return PARAM_INVALID;
}
auto op_desc = node->GetOpDesc();
GE_CHECK_NOTNULL(op_desc);

vector<void *> input_addrs;
vector<void *> output_addrs;
for (int i = 0; i < context_->NumInputs(); i++) {
auto tensor_value = context_->GetInput(i);
GE_CHK_BOOL_RET_STATUS(tensor_value != nullptr, PARAM_INVALID, "[Save][DumpOpInfo] Tensor value is nullptr.");
void *input_addr = const_cast<void *>(tensor_value->GetData());
input_addrs.emplace_back(input_addr);
}
for (int j = 0; j < context_->NumOutputs(); j++) {
auto tensor_value = context_->GetOutput(j);
GE_CHK_BOOL_RET_STATUS(tensor_value != nullptr, PARAM_INVALID, "[Save][DumpOpInfo] Tensor value is nullptr.");
void *output_addr = const_cast<void *>(tensor_value->GetData());
output_addrs.emplace_back(output_addr);
}

uint32_t stream_id = context_->GetStreamId();
uint32_t task_id = context_->GetTaskId();
graph_context_->exception_dumper.SaveDumpOpInfo(op_desc, task_id, stream_id, input_addrs, output_addrs);

return SUCCESS;
}

Status NodeDoneCallback::OnNodeDone() {
auto &node_item = context_->GetNodeItem();
GELOGI("[%s] Start callback process.", node_item.NodeName().c_str());
@@ -278,6 +314,12 @@ Status NodeDoneCallback::OnNodeDone() {
GE_CHK_STATUS_RET(DumpDynamicNode(), "[Call][DumpDynamicNode] Failed.");
}

auto model_manager = ModelManager::GetInstance();
GE_CHECK_NOTNULL(model_manager);
if (model_manager->IsDumpExceptionOpen()) {
GE_CHK_STATUS_RET(SaveDumpOpInfo(), "[Save][DumpOpInfo] Failed to dump op info.");
}

if (ProfilingManager::Instance().ProfilingModelExecuteOn()) {
GE_CHK_STATUS_RET(ProfilingReport(), "[Report][Profiling] of node[%s] failed.", node_item.NodeName().c_str());
}
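
SaveDumpOpInfo above records the finished node's raw input and output buffer addresses under its task and stream ids, so a later device exception can be traced back to the op that owned those buffers. A compact sketch of that bookkeeping follows; every type below is a simplified stand-in rather than the real ExceptionDumper or OpDescInfo interface.

#include <cstdint>
#include <map>
#include <string>
#include <utility>
#include <vector>

struct OpDumpRecord {
  std::string op_name;
  std::vector<void *> input_addrs;
  std::vector<void *> output_addrs;
};

class ExceptionDumper {  // stand-in store keyed by (task_id, stream_id)
 public:
  void SaveDumpOpInfo(const std::string &op_name, uint32_t task_id, uint32_t stream_id,
                      std::vector<void *> inputs, std::vector<void *> outputs) {
    records_[{task_id, stream_id}] = {op_name, std::move(inputs), std::move(outputs)};
  }
  bool GetOpDescInfo(uint32_t stream_id, uint32_t task_id, OpDumpRecord &out) const {
    auto it = records_.find({task_id, stream_id});
    if (it == records_.end()) {
      return false;
    }
    out = it->second;
    return true;
  }
 private:
  std::map<std::pair<uint32_t, uint32_t>, OpDumpRecord> records_;
};

int main() {
  ExceptionDumper dumper;
  int in_buf = 0;
  int out_buf = 0;
  dumper.SaveDumpOpInfo("conv_1", /*task_id=*/7, /*stream_id=*/3, {&in_buf}, {&out_buf});
  OpDumpRecord record;
  return (dumper.GetOpDescInfo(3, 7, record) && record.op_name == "conv_1") ? 0 : 1;
}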


+ 55
- 3
ge/hybrid/hybrid_davinci_model.cc

@@ -19,6 +19,7 @@
#include "hybrid/model/hybrid_model.h"
#include "hybrid/executor/hybrid_model_async_executor.h"
#include "hybrid/node_executor/node_executor.h"
#include "graph/manager/graph_manager_utils.h"

namespace ge {
namespace hybrid {
@@ -32,9 +33,10 @@ class HybridDavinciModel::Impl {
}

Status Init() {
GE_CHK_STATUS_RET(NodeExecutorManager::GetInstance().EnsureInitialized(), "Failed to initialize executors");
GE_CHK_STATUS_RET(model_.Init(), "Failed to init model.")
GE_CHK_STATUS_RET(executor_.Init(), "Failed to init model executor.")
GE_CHK_STATUS_RET(NodeExecutorManager::GetInstance().EnsureInitialized(),
"[Initialize][NodeExecutorManager] failed");
GE_CHK_STATUS_RET(model_.Init(), "[Init][HybridModel] failed.")
GE_CHK_STATUS_RET(executor_.Init(), "[Init][HybridModelAsyncExecutor] failed.")
return SUCCESS;
}

@@ -80,6 +82,12 @@ class HybridDavinciModel::Impl {
model_.SetOmName(model_name);
}

uint32_t GetDeviceId() {
return model_.GetDeviceId();
}

const GraphExecutionContext * GeContext() { return executor_.GeContext(); }

uint64_t GetSessionId() {
return model_.GetSessionId();
}
@@ -107,6 +115,17 @@ class HybridDavinciModel::Impl {
model_.SetModelDescVersion(is_new_model_desc);
}

uint32_t GetDataInputerSize() { return executor_.GetDataInputerSize(); }

bool GetRunningFlag() const { return executor_.GetRunningFlag(); }

Status SetRunAsyncListenerCallback(const RunAsyncCallback &callback) {
auto listener = dynamic_cast<RunAsyncListener *>(listener_.get());
GE_CHECK_NOTNULL(listener);
listener->SetCallback(callback);
return SUCCESS;
}

private:
std::shared_ptr<ModelListener> listener_;
HybridModel model_;
@@ -186,6 +205,11 @@ void HybridDavinciModel::SetOmName(const string &om_name) {
}
}

uint32_t HybridDavinciModel::GetDeviceId() const {
GE_CHECK_NOTNULL(impl_);
return impl_->GetDeviceId();
}

Status HybridDavinciModel::GetDynamicBatchInfo(std::vector<std::vector<int64_t>> &batch_info, int32_t &dynamic_type) {
GE_CHECK_NOTNULL(impl_);
return impl_->GetDynamicBatchInfo(batch_info, dynamic_type);
@@ -221,5 +245,33 @@ uint64_t HybridDavinciModel::GetSessionId() {
GE_CHECK_NOTNULL(impl_);
return impl_->GetSessionId();
}

uint32_t HybridDavinciModel::GetDataInputerSize() {
GE_CHECK_NOTNULL(impl_);
return impl_->GetDataInputerSize();
}

bool HybridDavinciModel::GetRunningFlag() const { return impl_ != nullptr && impl_->GetRunningFlag(); }

Status HybridDavinciModel::SetRunAsyncListenerCallback(const RunAsyncCallback &callback) {
GE_CHECK_NOTNULL(impl_);
return impl_->SetRunAsyncListenerCallback(callback);
}

bool HybridDavinciModel::GetOpDescInfo(uint32_t stream_id, uint32_t task_id, OpDescInfo &op_desc_info) const {
if (impl_ == nullptr) {
return false;
}
auto context = impl_->GeContext();
GE_CHECK_NOTNULL(context);
bool ret = context->exception_dumper.GetOpDescInfo(stream_id, task_id, op_desc_info);
if (!ret) {
for (const auto &iter : context->davinci_model) {
if (iter != nullptr && iter->GetOpDescInfo(stream_id, task_id, op_desc_info)) {
return true;
}
}
}
return ret;
}
} // namespace hybrid
} // namespace ge

+ 10
- 0
ge/hybrid/hybrid_davinci_model.h

@@ -61,6 +61,8 @@ class HybridDavinciModel {

uint64_t GetSessionId();

uint32_t GetDeviceId() const;

Status GetDynamicBatchInfo(std::vector<std::vector<int64_t>> &batch_info, int32_t &dynamic_type);

void GetUserDesignateShapeOrder(std::vector<std::string> &user_input_shape_order);
@@ -74,6 +76,14 @@ class HybridDavinciModel {

void SetModelDescVersion(bool is_new_model_desc);

uint32_t GetDataInputerSize();

bool GetRunningFlag() const;

Status SetRunAsyncListenerCallback(const RunAsyncCallback &callback);

bool GetOpDescInfo(uint32_t stream_id, uint32_t task_id, OpDescInfo &op_desc_info) const;

private:
HybridDavinciModel() = default;
class Impl;


+ 20
- 0
ge/hybrid/hybrid_davinci_model_stub.cc

@@ -68,6 +68,14 @@ uint64_t HybridDavinciModel::GetSessionId() {
return 0;
}

uint32_t HybridDavinciModel::GetDataInputerSize() {
return 0;
}

uint32_t HybridDavinciModel::GetDeviceId() const {
return 0;
}

Status HybridDavinciModel::GetDynamicBatchInfo(std::vector<std::vector<int64_t>> &batch_info, int32_t &dynamic_type) {
return UNSUPPORTED;
}
@@ -87,5 +95,17 @@ Status HybridDavinciModel::GetInputOutputDescInfo(vector<InputOutputDescInfo> &i

void HybridDavinciModel::SetModelDescVersion(bool is_new_model_desc) {
}

bool HybridDavinciModel::GetRunningFlag() const {
return false;
}

Status HybridDavinciModel::SetRunAsyncListenerCallback(const RunAsyncCallback &callback) {
return UNSUPPORTED;
}

bool HybridDavinciModel::GetOpDescInfo(uint32_t stream_id, uint32_t task_id, OpDescInfo &op_desc_info) const {
return true;
}
} // namespace hybrid
} // namespace ge

+ 11
- 5
ge/hybrid/model/hybrid_model_builder.cc

@@ -134,7 +134,7 @@ HybridModelBuilder::HybridModelBuilder(HybridModel &hybrid_model)

Status HybridModelBuilder::Build() {
GE_CHK_STATUS_RET(ValidateParams(), "[Invoke][ValidateParams] failed, model_name_:[%s]", GetGraphName());
hybrid_model_.model_name_ = ge_root_model_->GetRootGraph()->GetName();
hybrid_model_.model_name_ = ge_root_model_->GetModelName();
GELOGI("[%s] Start to build hybrid model.", GetGraphName());
GE_CHK_STATUS_RET(InitRuntimeParams(), "[Invoke][InitRuntimeParams] failed, model_name_:[%s]", GetGraphName());
GE_CHK_STATUS_RET(RecoverGraphUnknownFlag(),
@@ -277,7 +277,7 @@ Status HybridModelBuilder::ParseForceInfershapeNodes(const NodePtr &node, NodeIt
auto op_desc = node->GetOpDesc();
GE_CHECK_NOTNULL(op_desc);
// not care result, if no this attr, stand for the op does not need force infershape
(void)AttrUtils::GetBool(op_desc, kForceInfershape, node_item.is_need_force_infershape);
(void) AttrUtils::GetBool(op_desc, kForceInfershape, node_item.is_need_force_infershape);
GELOGD("node [%s] is need do infershape, flag is %d",
op_desc->GetName().c_str(),
node_item.is_need_force_infershape);
@@ -1540,14 +1540,20 @@ Status HybridModelBuilder::IdentifyVariableOutputs(NodeItem &node_item) {
in_data_anchor->GetIdx(),
src_node->GetName().c_str(),
src_op_type.c_str());
uint32_t parent_index = 0;
GE_CHK_STATUS_RET_NOLOG(GetParentNodeOutputIndex(*net_output_desc, in_data_anchor->GetIdx(), parent_index));
GELOGD("Got parent output index = %u", parent_index);
if (src_op_type == DATA) {
int ref_i = 0;
(void)AttrUtils::GetInt(src_node->GetOpDesc(), ATTR_NAME_PARENT_NODE_INDEX, ref_i);
node_item.reuse_inputs.emplace(static_cast<int>(parent_index), ref_i);
GELOGD("[%s] output[%u] resues input[%d]", node_item.NodeName().c_str(), parent_index, ref_i);
}

if (src_op_type != CONSTANTOP && src_op_type != CONSTANT && src_op_type != VARIABLE) {
continue;
}

uint32_t parent_index = 0;
GE_CHK_STATUS_RET_NOLOG(GetParentNodeOutputIndex(*net_output_desc, in_data_anchor->GetIdx(), parent_index));
GELOGD("Got parent output index = %u", parent_index);
GE_CHECK_LE(parent_index, INT32_MAX);
node_item.ref_outputs.emplace(static_cast<int>(parent_index), src_node);
if (src_op_type == CONSTANTOP || src_op_type == CONSTANT) {
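The DATA branch added above records that parent output parent_index of the subgraph reuses input ref_i rather than owning a buffer of its own. A small sketch of how such a reuse map can be consulted when resolving an output buffer (plain containers stand in for NodeItem and the tensor values; the names are illustrative):

#include <cstddef>
#include <map>
#include <vector>

// reuse[output_index] == input_index means the output aliases that input's buffer.
const void *ResolveOutputBuffer(int output_index,
                                const std::map<int, int> &reuse,
                                const std::vector<const void *> &input_buffers,
                                const std::vector<const void *> &owned_output_buffers) {
  if (output_index < 0 || static_cast<size_t>(output_index) >= owned_output_buffers.size()) {
    return nullptr;  // index out of range
  }
  auto it = reuse.find(output_index);
  if (it != reuse.end() && it->second >= 0 &&
      static_cast<size_t>(it->second) < input_buffers.size()) {
    return input_buffers[it->second];  // reuse the input's buffer, no extra allocation
  }
  return owned_output_buffers[output_index];  // otherwise use the output's own buffer
}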


+2 -0  ge/hybrid/node_executor/aicore/aicore_node_executor.cc

@@ -208,6 +208,8 @@ Status AiCoreNodeTask::ExecuteAsync(TaskContext &context, std::function<void()>
REPORT_CALL_ERROR("E19999", "rtGetTaskIdAndStreamID failed, ret: 0x%X.", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
context.SetTaskId(task_id);
context.SetStreamId(stream_id);
GELOGD("Aicore node[%s] task_id: %u, stream_id: %u.", context.GetNodeName(), task_id, stream_id);
(void)context.SaveProfilingTaskDescInfo(task_id, stream_id, kTaskTypeAicore, (*it)->GetBlockDim());
RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(), "[AiCoreNodeLaunchKernel] End");
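This hunk (and the matching AICPU one below) stashes the (task_id, stream_id) pair reported by rtGetTaskIdAndStreamID into the task context right after launch, so a later device exception carrying those IDs can be traced back to the node. A sketch of that bookkeeping under assumed names; in GE the record ultimately lands in the exception dumper rather than a map like this:

#include <cstdint>
#include <string>
#include <unordered_map>

// Pack the two runtime-assigned identifiers into a single map key.
static uint64_t MakeKey(uint32_t stream_id, uint32_t task_id) {
  return (static_cast<uint64_t>(stream_id) << 32) | task_id;
}

class LaunchRecords {
 public:
  // Called right after a kernel launch, once the runtime has assigned the IDs.
  void Record(uint32_t stream_id, uint32_t task_id, std::string node_name) {
    records_[MakeKey(stream_id, task_id)] = std::move(node_name);
  }
  // Called on the exception path to map a failing task back to its node.
  bool Resolve(uint32_t stream_id, uint32_t task_id, std::string &node_name) const {
    auto it = records_.find(MakeKey(stream_id, task_id));
    if (it == records_.end()) {
      return false;
    }
    node_name = it->second;
    return true;
  }
 private:
  std::unordered_map<uint64_t, std::string> records_;
};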


+2 -0  ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc

@@ -208,6 +208,8 @@ Status AicpuNodeTaskBase::ExecuteAsync(TaskContext &context, std::function<void(
REPORT_CALL_ERROR("E19999", "rtGetTaskIdAndStreamID failed, ret: 0x%X.", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
context.SetTaskId(task_id);
context.SetStreamId(stream_id);
GELOGD("Aicpu node[%s] task_id: %u, stream_id: %u.", context.GetNodeName(), task_id, stream_id);
(void)context.SaveProfilingTaskDescInfo(task_id, stream_id, kTaskTypeAicpu, 0);
auto callback = [=, &context]() {


+31 -15  ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc

@@ -30,7 +30,7 @@ namespace ge {
namespace hybrid {
REGISTER_NODE_EXECUTOR_BUILDER(NodeExecutorManager::ExecutorType::COMPILED_SUBGRAPH, KnownNodeExecutor);

Status KnownNodeTask:: ExecuteAsync(TaskContext &context, std::function<void()> done_callback) {
Status KnownNodeTask::ExecuteAsync(TaskContext &context, std::function<void()> done_callback) {
RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(), "[KnownNodeTaskExecuteAsync] Start");
GELOGD("[%s] KnownNodeTask::ExecuteAsync in.", context.GetNodeName());
if (davinci_model_->GetTaskList().empty()) {
@@ -56,7 +56,9 @@ Status KnownNodeTask:: ExecuteAsync(TaskContext &context, std::function<void()
RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(), "[KnownNodertModelExecute] Start");
rt_ret = rtModelExecute(davinci_model_->GetRtModelHandle(), context.GetStream(), 0);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
GELOGE(rt_ret, "rtModelExecute error, ret: hybrid_model_executorOx%X", rt_ret); return FAILED;);
REPORT_CALL_ERROR("E19999", "rtModelExecute error, ret:Ox%X", rt_ret);
GELOGE(rt_ret, "[Invoke][rtModelExecute] error, ret:Ox%X", rt_ret);
return FAILED;);
RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(), "[KnownNodertModelExecute] End");

GE_CHK_STATUS_RET_NOLOG(context.RegisterCallback(done_callback));
@@ -87,7 +89,7 @@ Status KnownNodeTask::UpdateArgs(TaskContext &context) {
}

GE_CHK_STATUS_RET(davinci_model_->UpdateKnownNodeArgs(inputs, outputs),
"known node task update known node args failed.");
"[Update][KnownNodeArgs] failed for %s.", context.GetNodeName());
GELOGD("[%s] KnownNodeExecutor::UpdateArgs success, task_size = %zu", context.GetNodeName(),
davinci_model_->GetTaskList().size());
return SUCCESS;
@@ -95,15 +97,15 @@ Status KnownNodeTask::UpdateArgs(TaskContext &context) {

Status KnownNodeTask::Init(TaskContext &context) {
// allocate output mem
GE_CHK_STATUS_RET(context.AllocateOutputs(), "known node task allocate output failed.");
GE_CHK_STATUS_RET(context.AllocateOutputs(), "[Allocate][Outputs] failed for %s.", context.GetNodeName());
// allocate mem base
void *buffer = nullptr;
if (davinci_model_->TotalMemSize() != 0) {
RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(),
"[KnownNodeTask_AllocateWorkspace] Start");
GE_CHK_STATUS_RET(
context.AllocateWorkspace(davinci_model_->TotalMemSize(), &buffer, davinci_model_->GetRuntimeParam().mem_base),
"known node task allocate workspace failed.");
GE_CHK_STATUS_RET(context.AllocateWorkspace(davinci_model_->TotalMemSize(), &buffer,
davinci_model_->GetRuntimeParam().mem_base),
"[Allocate][Workspace] failed for %s.", context.GetNodeName());
RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(),
"[KnownNodeTask_AllocateWorkspace] End, size %zu", davinci_model_->TotalMemSize());
// update mem base
@@ -112,8 +114,18 @@ Status KnownNodeTask::Init(TaskContext &context) {
davinci_model_->GetRuntimeParam().mem_base, davinci_model_->GetRuntimeParam().mem_size);
}
GE_CHK_STATUS_RET(ModelManager::GetInstance()->DestroyAicpuKernel(davinci_model_->GetSessionId(),
davinci_model_->Id(), davinci_model_->SubModelId()),
"KnownNodeTask::Init destroy aicpu kernel failed.");
davinci_model_->Id(),
davinci_model_->SubModelId()),
"[Destroy][AicpuKernel] failed, session_id:%lu, model_id:%u, sub_model_id:%u",
davinci_model_->GetSessionId(), davinci_model_->Id(), davinci_model_->SubModelId());
if (!load_flag_) {
auto execution_context = const_cast<GraphExecutionContext *>(context.GetExecutionContext());
GE_CHECK_NOTNULL(execution_context);
auto &davinci_model = execution_context->davinci_model;
davinci_model.emplace_back(davinci_model_);
load_flag_ = true;
}

GELOGI("[%s] KnownNodeExecutor::Init success.", context.GetNodeName());
return SUCCESS;
}
@@ -121,7 +133,8 @@ Status KnownNodeTask::Init(TaskContext &context) {
Status KnownNodeTask::InitDavinciModel(const HybridModel &model, TensorBuffer *weight_buffer) {
GELOGD("[Init][DavinciModel] start");
davinci_model_->InitRuntimeParams();
GE_CHK_STATUS_RET(davinci_model_->InitVariableMem(), "init variable mem failed");
GE_CHK_STATUS_RET(davinci_model_->InitVariableMem(),
"[Init][VariableMem] failed");
int32_t device_id = 0;
GE_CHK_RT_RET(rtGetDevice(&device_id));
davinci_model_->SetDeviceId(static_cast<uint32_t>(device_id));
@@ -153,11 +166,13 @@ Status KnownNodeExecutor::PrepareTask(NodeTask &task, TaskContext &context) cons
GELOGD("[%s] KnownNodeExecutor::PrepareTask in.", context.GetNodeName());
RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(), "[KnownNodeExecutorPrepareTask] Start");
RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(), "[KnownNodeExecutorTaskInit] Start");
GE_CHK_STATUS_RET(task.Init(context), "known node init davinci model failed.");
GE_CHK_STATUS_RET(task.Init(context), "[Invoke][Init] %s known node init davinci model failed.",
context.GetNodeName());
RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(), "[KnownNodeExecutorTaskInit] End");

RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(), "[KnownNodeExecutorUpdateArgs] Start");
GE_CHK_STATUS_RET(task.UpdateArgs(context), "known node task update args failed.");
GE_CHK_STATUS_RET(task.UpdateArgs(context), "[Invoke][UpdateArgs] %s known node task update args failed.",
context.GetNodeName());
RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(), "[KnownNodeExecutorUpdateArgs] End");
RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(), "[KnownNodeExecutorPrepareTask] End");
GELOGD("[%s] KnownNodeExecutor::PrepareTask success.", context.GetNodeName());
@@ -188,7 +203,9 @@ Status KnownNodeExecutor::LoadTask(const HybridModel &model, const NodePtr &node
davinci_model->SetSubModelId(node->GetOpDesc()->GetId());
GELOGD("KnownNodeExecutor::LoadTask node id %ld.", node->GetOpDesc()->GetId());

GE_CHK_STATUS_RET(davinci_model->Assign(ge_model), "KnownNodeExecutor::LoadTask davincimodel assign failed.");
GE_CHK_STATUS_RET(davinci_model->Assign(ge_model),
"[Invoke][Assign]KnownNodeExecutor::LoadTask davincimodel assign failed for node:%s.",
node->GetName().c_str());

auto known_node_task = MakeShared<KnownNodeTask>(davinci_model);
GE_CHECK_NOTNULL(known_node_task);
@@ -201,8 +218,7 @@ Status KnownNodeExecutor::LoadTask(const HybridModel &model, const NodePtr &node
Status KnownNodeExecutor::ExecuteTask(NodeTask &task, TaskContext &context,
const std::function<void()> &callback) const {
RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(), "[KnownNodeExecutorExecuteTask] Start");
GE_CHK_STATUS_RET(task.ExecuteAsync(context, callback),
"Failed to execute task. node = %s",
GE_CHK_STATUS_RET(task.ExecuteAsync(context, callback), "[Invoke][ExecuteAsync]Failed to execute task. node = %s",
context.GetNodeItem().NodeName().c_str());
RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(), "[KnownNodeExecutorExecuteTask] End");
return SUCCESS;
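The new load_flag_ branch in KnownNodeTask::Init registers the wrapped DavinciModel with the shared execution context exactly once, which is what lets GetOpDescInfo (earlier in this change) iterate over every model that was actually initialized. A tiny sketch of the register-once guard, with illustrative types:

#include <memory>
#include <vector>

struct Model {};

struct ExecutionContext {
  std::vector<std::shared_ptr<Model>> models;  // shared registry, like davinci_model above
};

class Task {
 public:
  explicit Task(std::shared_ptr<Model> model) : model_(std::move(model)) {}

  // Init may run more than once; the flag keeps the registry free of duplicates.
  void Init(ExecutionContext &ctx) {
    if (!registered_) {
      ctx.models.emplace_back(model_);
      registered_ = true;  // plays the role of load_flag_
    }
  }

 private:
  std::shared_ptr<Model> model_;
  bool registered_ = false;
};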


+1 -0  ge/hybrid/node_executor/compiledsubgraph/known_node_executor.h

@@ -42,6 +42,7 @@ class KnownNodeTask : public NodeTask {
virtual Status DoInitDavinciModel(void *weight, size_t weight_size);
private:
std::shared_ptr<DavinciModel> davinci_model_ = nullptr;
bool load_flag_ = false;
};

class KnownNodeExecutor : public NodeExecutor {


+30 -33  ge/hybrid/node_executor/controlop/control_op_executor.cc

@@ -43,8 +43,7 @@ Status ControlOpNodeTask::ExecuteSubgraph(const GraphItem *subgraph,
auto executor = MakeShared<SubgraphExecutor>(subgraph, execution_context);
GE_CHECK_NOTNULL(executor);
GE_CHK_STATUS_RET(executor->ExecuteAsync(task_context),
"[%s] Failed to execute partitioned call.",
subgraph->GetName().c_str());
"[Invoke][ExecuteAsync][%s] Failed to execute partitioned call.", subgraph->GetName().c_str());

auto callback = [executor, done_callback]() mutable {
if (done_callback != nullptr) {
@@ -127,7 +126,7 @@ Status IfOpNodeTask::DoExecuteAsync(TaskContext &task_context, const std::functi
auto cond_tensor = task_context.GetInput(kIfCondIndex);
GE_CHECK_NOTNULL(cond_tensor);
GE_CHK_STATUS_RET(ToBool(*cond_tensor, data_type, cond_val),
"[%s] Failed to get cond value.",
"[Invoke][ToBool][%s] Failed to get cond value.",
task_context.GetNodeName());
} else {
// true if num elements is non-zero
@@ -141,9 +140,7 @@ Status IfOpNodeTask::DoExecuteAsync(TaskContext &task_context, const std::functi
auto subgraph = cond_val ? then_ : else_;
GELOGD("[%s] Taking subgraph [%s] by cond = [%d]", task_context.GetNodeName(), subgraph->GetName().c_str(), cond_val);
GE_CHK_STATUS_RET(ExecuteSubgraph(subgraph, task_context, done_callback),
"[%s] Failed to execute subgraph. cond = %d",
task_context.GetNodeName(),
cond_val);
"[Execute][Subgraph] failed for [%s]. cond = %d", task_context.GetNodeName(), cond_val);

GELOGD("[%s] Done executing with cond = %d successfully.", task_context.GetNodeName(), cond_val);
return SUCCESS;
@@ -201,8 +198,7 @@ Status CaseOpNodeTask::DoExecuteAsync(TaskContext &task_context, const std::func
}

GE_CHK_STATUS_RET(ExecuteSubgraph(subgraph, task_context, done_callback),
"[%s] Failed to execute else-subgraph.",
task_context.GetNodeName());
"[Execute][Subgraph] failed for [%s].", task_context.GetNodeName());

GELOGD("[%s] Done executing subgraph[%d] successfully.", task_context.GetNodeName(), branch_index);
return SUCCESS;
@@ -228,18 +224,18 @@ Status WhileOpNodeTask::Init(const NodePtr &node, const HybridModel &model) {

Status WhileOpNodeTask::DoExecuteAsync(TaskContext &task_context, const std::function<void()> &done_callback) const {
if (task_context.NumInputs() != task_context.NumOutputs()) {
REPORT_INNER_ERROR("E19999",
"[%s] Invalid while args. num_inputs = %d not equal num_outputs = %d",
task_context.GetNodeName(), task_context.NumInputs(), task_context.NumOutputs());
GELOGE(INTERNAL_ERROR,
"[%s] Invalid while args. num_inputs = %d, num_outputs = %d",
task_context.GetNodeName(),
task_context.NumInputs(),
task_context.NumOutputs());
"[Check][Param:task_context][%s] Invalid while args. num_inputs = %d, num_outputs = %d",
task_context.GetNodeName(), task_context.NumInputs(), task_context.NumOutputs());
return INTERNAL_ERROR;
}

bool is_continue = false;
GE_CHK_STATUS_RET(ExecuteCond(task_context, is_continue),
"[%s] Failed to execute cond-subgraph",
task_context.GetNodeName());
"[Execute][Cond] failed for [%s]", task_context.GetNodeName());
if (!is_continue) {
for (int i = 0; i < task_context.NumInputs(); ++i) {
auto input_tensor = task_context.GetInput(i);
@@ -269,9 +265,8 @@ Status WhileOpNodeTask::DoExecuteAsync(TaskContext &task_context, const std::fun
++iteration;
GELOGD("[%s] Start to execute, iteration = %d", task_context.GetNodeName(), iteration);
GE_CHK_STATUS_RET(ExecuteOneLoop(task_context, is_continue),
"[%s] Failed to execute iteration %d.",
task_context.GetNodeName(),
iteration);
"[Invoke][ExecuteOneLoop][%s] Failed to execute iteration %d.",
task_context.GetNodeName(), iteration);
}
GELOGD("[%s] Quit from loop. current iteration = %d", task_context.GetNodeName(), iteration);
if (done_callback) {
@@ -299,24 +294,27 @@ Status WhileOpNodeTask::ExecuteCond(TaskContext &task_context, bool &is_continue
auto executor = MakeShared<SubgraphExecutor>(cond_, execution_context, task_context.IsForceInferShape());
GE_CHECK_NOTNULL(executor);
GELOGD("[%s] Start to execute cond-subgraph.", task_context.GetNodeName());
GE_CHK_STATUS_RET(executor->ExecuteAsync(inputs, input_desc), "Failed to execute partitioned call.");
GE_CHK_STATUS_RET(executor->ExecuteAsync(inputs, input_desc),
"[Invoke][ExecuteAsync] %s Failed to execute partitioned call.", task_context.GetNodeName());
GELOGD("[%s] Done executing cond-subgraph successfully.", cond_->GetName().c_str());
GE_CHK_STATUS_RET_NOLOG(task_context.RegisterCallback([executor]() mutable {
executor.reset();
}));

// get cond output
GE_CHK_STATUS_RET(executor->Synchronize(), "[%s] Failed to sync cond-subgraph result.", cond_->GetName().c_str());
GE_CHK_STATUS_RET(executor->Synchronize(),
"[Invoke][Synchronize][%s] Failed to sync cond-subgraph result.", cond_->GetName().c_str());
std::vector<TensorValue> cond_outputs;
std::vector<ConstGeTensorDescPtr> cond_output_desc_list;
GE_CHK_STATUS_RET(executor->GetOutputs(cond_outputs, cond_output_desc_list),
"[%s] Failed to get cond-output.",
cond_->GetName().c_str());
"[Invoke][GetOutputs][%s] Failed to get cond-output.", cond_->GetName().c_str());
if (cond_outputs.size() != kCondOutputSize || cond_output_desc_list.size() != kCondOutputSize) {
REPORT_INNER_ERROR("E19999", "[%s] Number of cond outputs(%zu) or size of cond output desc(%zu)"
"not equal %zu, check invalid", task_context.GetNodeName(), cond_outputs.size(),
cond_output_desc_list.size(), kCondOutputSize);
GELOGE(INTERNAL_ERROR,
"[%s] Number of cond outputs is invalid. number = %zu",
task_context.GetNodeName(),
cond_outputs.size());
"[Check][Size][%s] Number of cond outputs(%zu) or Number of cond output desc(%zu) not equal %zu",
task_context.GetNodeName(), cond_outputs.size(), cond_output_desc_list.size(), kCondOutputSize);
return INTERNAL_ERROR;
}

@@ -325,8 +323,7 @@ Status WhileOpNodeTask::ExecuteCond(TaskContext &task_context, bool &is_continue
if (shape.IsScalar()) {
auto data_type = cond_tensor_desc->GetDataType();
GE_CHK_STATUS_RET(ToBool(cond_outputs[0], data_type, is_continue),
"[%s] Failed to get cond value.",
task_context.GetNodeName());
"[Invoke][ToBool][%s] Failed to get cond value.", task_context.GetNodeName());
} else {
// true if num elements is non-zero
is_continue = shape.GetShapeSize() > 0;
@@ -367,17 +364,15 @@ Status WhileOpNodeTask::MoveOutputs2Inputs(TaskContext &task_context) {
Status WhileOpNodeTask::ExecuteOneLoop(TaskContext &task_context, bool &is_continue) const {
GELOGD("[%s] Start to execute body-subgraph.", task_context.GetNodeName());
GE_CHK_STATUS_RET(ExecuteSubgraph(body_, task_context, nullptr),
"[%s] Failed to execute cond-subgraph", task_context.GetNodeName());
"[Execute][Subgraph] failed for [%s]", task_context.GetNodeName());
GELOGD("[%s] Done executing body-subgraph successfully.", task_context.GetNodeName());

// set outputs to inputs for next iteration
GE_CHK_STATUS_RET(MoveOutputs2Inputs(task_context),
"[%s] Failed to move outputs to inputs",
task_context.GetNodeName());
"[Move][Outputs2Inputs] failed for [%s]", task_context.GetNodeName());

GE_CHK_STATUS_RET(ExecuteCond(task_context, is_continue),
"[%s] Failed to execute cond-subgraph",
task_context.GetNodeName());
"[Invoke][ExecuteCond][%s] Failed to execute cond-subgraph", task_context.GetNodeName());

if (!is_continue) {
for (int i = 0; i < task_context.NumInputs(); ++i) {
@@ -404,12 +399,14 @@ Status ControlOpNodeExecutor::LoadTask(const HybridModel &model,
} else if (node_type == WHILE || node_type == STATELESSWHILE) {
node_task.reset(new(std::nothrow) WhileOpNodeTask());
} else {
GELOGE(PARAM_INVALID, "[%s] Unsupported type: %s", node->GetName().c_str(), node_type.c_str());
REPORT_INNER_ERROR("E19999", "[%s] Unsupported type: %s", node->GetName().c_str(), node_type.c_str());
GELOGE(PARAM_INVALID, "[Check][NodeType][%s] Unsupported type: %s", node->GetName().c_str(), node_type.c_str());
return PARAM_INVALID;
}

GE_CHECK_NOTNULL(node_task);
GE_CHK_STATUS_RET(node_task->Init(node, model), "[%s] Failed to init ControlOpNodeTask.", node->GetName().c_str());
GE_CHK_STATUS_RET(node_task->Init(node, model),
"[Invoke][Init][%s] Failed to init ControlOpNodeTask.", node->GetName().c_str());

task = std::move(node_task);
return SUCCESS;
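For orientation, WhileOpNodeTask above keeps evaluating the cond subgraph and, while it yields true, runs the body and copies the body's outputs back over the inputs for the next round (ExecuteOneLoop plus MoveOutputs2Inputs). A compact sketch of that loop with the two subgraphs reduced to callables; this is not the GE executor API, only the control flow:

#include <functional>
#include <vector>

using Tensors = std::vector<int>;  // stand-in for the real tensor values

// cond: state -> keep going?    body: state -> next iteration's state
void RunWhile(const std::function<bool(const Tensors &)> &cond,
              const std::function<Tensors(const Tensors &)> &body,
              Tensors &state) {
  int iteration = 0;
  while (cond(state)) {
    ++iteration;
    // One loop body, then its outputs become the next inputs.
    state = body(state);
  }
  (void)iteration;  // the real task logs the final iteration count
}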


+33 -13  ge/hybrid/node_executor/ge_local/ge_local_node_executor.cc

@@ -47,7 +47,9 @@ Status RefInputTask::UpdateArgs(TaskContext &) {
Status RefInputTask::Execute(TaskContext &context) {
auto iter = out_ref_input_index_.find(node_type_);
if (iter == out_ref_input_index_.end()) {
GELOGE(UNSUPPORTED, "node %s type %s can not use RefInputTask.",
REPORT_INNER_ERROR("E19999", "node %s type %s can not use RefInputTask.",
node_name_.c_str(), node_type_.c_str());
GELOGE(UNSUPPORTED, "[Find][Node]node %s type %s can not use RefInputTask.",
node_name_.c_str(), node_type_.c_str());
return UNSUPPORTED;
}
@@ -65,7 +67,9 @@ Status RefInputTask::RefOneByOne(TaskContext &context) {
int input_num = context.NumInputs();
int output_num = context.NumOutputs();
if (output_num > input_num) {
GELOGE(INTERNAL_ERROR, "node %s type %s has %d outputs but only %d inputs, can't ref one by one.",
REPORT_INNER_ERROR("E19999", "node %s type %s has %d outputs but only %d inputs, can't ref one by one.",
node_name_.c_str(), node_type_.c_str(), output_num, input_num);
GELOGE(INTERNAL_ERROR, "[Check][Size]node %s type %s has %d outputs but only %d inputs, can't ref one by one.",
node_name_.c_str(), node_type_.c_str(), output_num, input_num);
return INTERNAL_ERROR;
}
@@ -84,7 +88,9 @@ Status RefInputTask::RefByOrder(const std::vector<uint32_t> &ref_order, TaskCont
GELOGI("node %s type %s ref input by order begin.", node_name_.c_str(), node_type_.c_str());
int32_t output_num = context.NumOutputs();
if (ref_order.size() != static_cast<size_t>(output_num)) {
GELOGE(INTERNAL_ERROR, "node %s type %s has %d outputs but only has %zu out ref index.",
REPORT_INNER_ERROR("E19999", "node %s type %s has %d outputs but only has %zu out ref index.",
node_name_.c_str(), node_type_.c_str(), output_num, ref_order.size());
GELOGE(INTERNAL_ERROR, "[Check][Size]node %s type %s has %d outputs but only has %zu out ref index.",
node_name_.c_str(), node_type_.c_str(), output_num, ref_order.size());
return INTERNAL_ERROR;
}
@@ -102,7 +108,7 @@ Status RefInputTask::RefByOrder(const std::vector<uint32_t> &ref_order, TaskCont

Status RefInputTask::ExecuteAsync(TaskContext &context, std::function<void()> done_callback) {
RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(), "[RefInputTaskExecuteAsync] Start");
GE_CHK_STATUS_RET(Execute(context), "node:%s type:%s ref input task execute failed",
GE_CHK_STATUS_RET(Execute(context), "[Invoke][Execute]node:%s type:%s ref input task execute failed",
node_name_.c_str(), node_type_.c_str());
if (done_callback != nullptr) {
// host cpu no need register callback, call it directly.
@@ -126,20 +132,26 @@ Status DependInputShapeTask::Execute(TaskContext &context) {
std::string node_type = node_->GetType();
auto kernel = factory.Create(node_type);
if (kernel == nullptr) {
GELOGE(UNSUPPORTED, "node %s type %s is not supported by host kernel.",
REPORT_CALL_ERROR("E19999", "create failed for node %s type %s is not supported by host kernel.",
node_->GetName().c_str(), node_type.c_str());
GELOGE(UNSUPPORTED, "[Invoke][Create]node %s type %s is not supported by host kernel.",
node_->GetName().c_str(), node_type.c_str());
return UNSUPPORTED;
}
std::vector<GeTensorPtr> outputs;
Status compute_ret = kernel->Compute(node_, outputs);
if (compute_ret != SUCCESS) {
GELOGE(compute_ret, "node %s type %s compute failed or not imply.",
REPORT_CALL_ERROR("E19999", "node %s type %s compute failed.", node_->GetName().c_str(), node_type.c_str());
GELOGE(compute_ret, "[Invoke][Compute]node %s type %s compute failed or not imply.",
node_->GetName().c_str(), node_type.c_str());
return compute_ret;
}
int32_t output_num = context.NumOutputs();
if (static_cast<size_t>(output_num) != outputs.size()) {
GELOGE(INTERNAL_ERROR, "node %s type %s has %d output, but kernel compute only has %zu output.",
REPORT_INNER_ERROR("E19999", "node %s type %s has %d output,"
"but kernel compute only has %zu output. check invalid",
node_->GetName().c_str(), node_type.c_str(), output_num, outputs.size());
GELOGE(INTERNAL_ERROR, "[Check][Size]node %s type %s has %d output, but kernel compute only has %zu output.",
node_->GetName().c_str(), node_type.c_str(), output_num, outputs.size());
return INTERNAL_ERROR;
}
@@ -155,7 +167,11 @@ Status DependInputShapeTask::Execute(TaskContext &context) {
auto tensor_value = context.MutableOutput(i);
GE_CHECK_NOTNULL(tensor_value);
if (tensor_data.GetSize() > tensor_value->GetSize()) {
GELOGE(INTERNAL_ERROR, "node:%s type:%s [%d]th compute data size=%zu, but context data size=%zu.",
REPORT_INNER_ERROR("E19999", "node:%s type:%s [%d]th compute data size=%zu, but context data size=%zu."
"check invalid",
node_->GetName().c_str(), node_type.c_str(), i,
tensor_data.GetSize(), tensor_value->GetSize());
GELOGE(INTERNAL_ERROR, "[Check][Size]node:%s type:%s [%d]th compute data size=%zu, but context data size=%zu.",
node_->GetName().c_str(), node_type.c_str(), i, tensor_data.GetSize(), tensor_value->GetSize());
return INTERNAL_ERROR;
}
@@ -180,7 +196,7 @@ Status DependInputShapeTask::Execute(TaskContext &context) {
Status DependInputShapeTask::ExecuteAsync(TaskContext &context, std::function<void()> done_callback) {
RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(),
"[DependInputShapeTaskExecuteAsync] Start");
GE_CHK_STATUS_RET(Execute(context), "node:%s type:%s depend input shape task execute failed",
GE_CHK_STATUS_RET(Execute(context), "[Invoke][Execute]node:%s type:%s depend input shape task execute failed",
node_->GetName().c_str(), node_->GetType().c_str());
if (done_callback != nullptr) {
// host cpu no need register callback, call it directly.
@@ -213,7 +229,8 @@ Status GeLocalNodeExecutor::LoadTask(const HybridModel &model,
node->GetName().c_str(), node_type.c_str());
task = MakeShared<RefInputTask>(node);
if (task == nullptr) {
GELOGE(MEMALLOC_FAILED, "create RefInputTask for node %s failed.", node->GetName().c_str());
REPORT_CALL_ERROR("E19999", "Create RefInputTask failed for node %s.", node->GetName().c_str());
GELOGE(MEMALLOC_FAILED, "[Create][RefInputTask] failed for node %s.", node->GetName().c_str());
return MEMALLOC_FAILED;
}
} else if (DependInputShapeTask::IsBelong(node_type)) {
@@ -221,7 +238,9 @@ Status GeLocalNodeExecutor::LoadTask(const HybridModel &model,
node->GetName().c_str(), node_type.c_str());
task = MakeShared<DependInputShapeTask>(node);
if (task == nullptr) {
GELOGE(MEMALLOC_FAILED, "create DependInputShapeTask for node %s type %s failed.",
REPORT_CALL_ERROR("E19999", "Create DependInputShapeTask failed for node %s type %s.",
node->GetName().c_str(), node_type.c_str());
GELOGE(MEMALLOC_FAILED, "[Create][DependInputShapeTask]failed for node %s type %s.",
node->GetName().c_str(), node_type.c_str());
return MEMALLOC_FAILED;
}
@@ -229,7 +248,8 @@ Status GeLocalNodeExecutor::LoadTask(const HybridModel &model,
GELOGI("node %s type %s, use ConstantNodeTask.", node->GetName().c_str(), node_type.c_str());
auto tensor = model.GetTensor(node);
if (tensor == nullptr) {
GELOGE(INTERNAL_ERROR, "Failed to get tensor by name: %s", node->GetName().c_str());
REPORT_CALL_ERROR("E19999", "GetTensor failed for name: %s", node->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Get][Tensor] failed for name: %s", node->GetName().c_str());
return INTERNAL_ERROR;
}

@@ -251,7 +271,7 @@ Status ConstantNodeTask::UpdateArgs(TaskContext &context) {

Status ConstantNodeTask::ExecuteAsync(TaskContext &context, std::function<void()> done_callback) {
GELOGD("[%s] Start execute.", context.GetNodeName());
GE_CHK_STATUS_RET(context.SetOutput(0, *tensor_), "[%s] Failed to set output.", context.GetNodeName());
GE_CHK_STATUS_RET(context.SetOutput(0, *tensor_), "[Set][Output] failed for [%s].", context.GetNodeName());
if (done_callback) {
GELOGD("[%s] Start invoke callback.", context.GetNodeName());
done_callback();
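RefInputTask above never copies tensor data: each output is bound to one of the node's inputs, either one-by-one or through an explicit index list. A minimal sketch of the ref-by-order case with plain vectors in place of TaskContext (the size mismatch is exactly the condition the hunk now reports through REPORT_INNER_ERROR):

#include <cstddef>
#include <vector>

// Bind output i to input ref_order[i]; false means the indices do not line up
// and the caller should fail the task.
bool RefByOrder(const std::vector<const void *> &inputs,
                const std::vector<size_t> &ref_order,
                std::vector<const void *> &outputs) {
  if (ref_order.size() != outputs.size()) {
    return false;
  }
  for (size_t i = 0; i < outputs.size(); ++i) {
    if (ref_order[i] >= inputs.size()) {
      return false;
    }
    outputs[i] = inputs[ref_order[i]];  // share the buffer, no copy
  }
  return true;
}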


+67 -25  ge/hybrid/node_executor/hccl/hccl_node_executor.cc

@@ -43,13 +43,15 @@ REGISTER_NODE_EXECUTOR_BUILDER(NodeExecutorManager::ExecutorType::HCCL, HcclNode
Status HcclNodeTask::ExecuteAsync(TaskContext &context, std::function<void()> done_callback) {
GELOGI("[%s] HcclNodeTask::ExecuteAsync in.", context.GetNodeName());
if (context.handle_ == nullptr) {
GELOGE(FAILED, "hccl handle is nullptr! ");
REPORT_INNER_ERROR("E19999", " %s invalid, hccl handle is nullptr!", context.GetNodeName());
GELOGE(FAILED, "[Check][Param:context] %s hccl handle is nullptr!", context.GetNodeName());
return FAILED;
}
auto HcomExecEnqueueOperation = (HcclResult(*)(HcomOperation, std::function<void(HcclResult status)>))dlsym(
context.handle_, "HcomExecEnqueueOperation");
if (HcomExecEnqueueOperation == nullptr) {
GELOGE(FAILED, "Failed to invoke HcomExecEnqueueOperation hcom unknown node function.");
GELOGE(FAILED, "[Invoke][HcomExecEnqueueOperation] failed for %s hcom unknown node function.",
context.GetNodeName());
if (dlclose(context.handle_) != 0) {
GELOGW("Failed to close handle %s", dlerror());
}
@@ -83,24 +85,35 @@ Status HcclNodeTask::ExecuteAsync(TaskContext &context, std::function<void()> do
ge::DataType src_data_type = input_desc->GetDataType();
auto iter = kConstOpHcclDataType.find(static_cast<int64_t>(src_data_type));
if (iter == kConstOpHcclDataType.end()) {
GELOGE(PARAM_INVALID, "kConstOpHcclDataType find failed.");
REPORT_INNER_ERROR("E19999", "%s inputdesc0 datatype:%s not support.",
op_desc->GetName().c_str(),
TypeUtils::DataTypeToSerialString(src_data_type).c_str());
GELOGE(PARAM_INVALID, "[Find][DataType]%s inputdesc0 datatype:%s not support.",
op_desc->GetName().c_str(),
TypeUtils::DataTypeToSerialString(src_data_type).c_str());
return PARAM_INVALID;
}
op_info.dataType = iter->second;
HcclReduceOp op_type = HCCL_REDUCE_SUM;
if (op_desc->GetType() == HCOMALLREDUCE || op_desc->GetType() == HCOMREDUCESCATTER ||
op_desc->GetType() == HVDCALLBACKALLREDUCE || op_desc->GetType() == HCOMREDUCE) {
GE_CHK_STATUS_RET(HcomOmeUtil::GetHcclOperationType(op_desc, op_type), "GetHcclOperationType failed");
GE_CHK_STATUS_RET(HcomOmeUtil::GetHcclOperationType(op_desc, op_type),
"[Get][HcclOperationType] failed for %s type:%s", op_desc->GetName().c_str(),
op_desc->GetType().c_str());
op_info.opType = op_type;
}
int64_t root_id = 0;
if (op_desc->GetType() == HCOMBROADCAST) {
GE_CHK_STATUS_RET(HcomOmeUtil::GetHcclRootId(op_desc, root_id), "GetHcclRootId failed");
GE_CHK_STATUS_RET(HcomOmeUtil::GetHcclRootId(op_desc, root_id),
"[Get][HcclRootId] failed for %s type:%s", op_desc->GetName().c_str(),
op_desc->GetType().c_str());
}
op_info.root = root_id;
auto callback = [op_desc, done_callback](HcclResult status) {
if (status != HCCL_SUCCESS) {
GELOGE(HCCL_E_INTERNAL, "node %s call HcomExecEnqueueOperation failed, ret: 0x%X",
REPORT_CALL_ERROR("E19999", "call HcomExecEnqueueOperation failed for node %s, ret: 0x%X",
op_desc->GetName().c_str(), status);
GELOGE(HCCL_E_INTERNAL, "[Call][HcomExecEnqueueOperation] failed for node %s, ret: 0x%X",
op_desc->GetName().c_str(), status);
}

@@ -110,14 +123,18 @@ Status HcclNodeTask::ExecuteAsync(TaskContext &context, std::function<void()> do
int32_t count = 0;
GE_CHK_STATUS_RET(HcomOmeUtil::GetHcomCount(op_desc, static_cast<HcclDataType>(op_info.dataType),
op_desc->GetType() == HCOMALLGATHER, count),
"GetHcomCount failed");
"[Get][HcomCount] failed for %s type:%s", op_desc->GetName().c_str(),
op_desc->GetType().c_str());
GELOGI("[%s] HcclNodeTask::ExecuteAsync hccl_type %s, count %d, data_type %d, op_type %d, root %d.",
context.GetNodeName(), op_info.hcclType.c_str(), count, op_info.dataType, op_info.opType, op_info.root);
op_info.count = count;

HcclResult hccl_ret = HcomExecEnqueueOperation(op_info, callback);
if (hccl_ret != HCCL_SUCCESS) {
GELOGE(HCCL_E_INTERNAL, "Call HcomExecInitialize failed, ret: 0x%X", hccl_ret);
REPORT_CALL_ERROR("E19999", "Call HcomExecEnqueueOperation failed for node:%s(%s), ret: 0x%X",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), hccl_ret);
GELOGE(HCCL_E_INTERNAL, "[Call][HcomExecEnqueueOperation] failed for node:%s(%s), ret: 0x%X",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), hccl_ret);
return HCCL_E_INTERNAL;
}

@@ -173,13 +190,23 @@ Status RdmaNodeTask::ExtractTensor(TaskContext &context, vector<HcomRemoteAccess
GELOGD("data is null, no need to do rdma read/write, node=%s", context.GetNodeName());
return SUCCESS;
} else {
GELOGE(FAILED, "Tensor data is nullptr.");
REPORT_INNER_ERROR("E19999", "Tensor data is nullptr. and kRdmaScatterTypes not contain %s",
context.GetNodeItem().NodeType().c_str());
GELOGE(FAILED, "[Find][NodeType]Tensor data is nullptr. and kRdmaScatterTypes not contain %s",
context.GetNodeItem().NodeType().c_str());
return FAILED;
}
}
auto dims = remote_tensor.GetTensorDesc().GetShape().GetDims();
if (dims.size() != kVarTableDims && dims.back() != kVarTableRowCnt) {
GELOGE(PARAM_INVALID, "Variable table shape check failed");
REPORT_INNER_ERROR("E19999", "Variable table shape check failed, number of shape dims:%zu not equal expect:%zu"
"and shape dims back:%zu not equal expect:%zu, node:%s(%s)",
dims.size(), kVarTableDims, dims.back(), kVarTableRowCnt,
context.GetNodeName(), context.GetNodeItem().NodeType().c_str());
GELOGE(PARAM_INVALID, "[Check][Param]Variable table shape check failed,"
"number of shape dims:%zu not equal expect:%zu and shape dims back:%zu not equal expect:%zu, node:%s(%s)",
dims.size(), kVarTableDims, dims.back(), kVarTableRowCnt,
context.GetNodeName(), context.GetNodeItem().NodeType().c_str());
return PARAM_INVALID;
}

@@ -222,7 +249,11 @@ Status RdmaNodeTask::ExtractTensor(TaskContext &context, vector<HcomRemoteAccess
Tensor offset_tensor;
GE_CHK_STATUS_RET(ctx->GetTensor(offset_index_.first, offset_index_.second, offset_tensor))
if (static_cast<int64_t>(offset_tensor.GetSize() / GetSizeByDataType(data_type)) != row_num) {
GELOGE(PARAM_INVALID, "num of offset and remote addr mismatch, offset size=%zu, remote_addr size=%ld, dtype=%s",
REPORT_INNER_ERROR("E19999", "num of offset and remote addr mismatch, check invalid"
"offset size=%zu, remote_addr size=%ld, dtype=%s", offset_tensor.GetSize(), row_num,
TypeUtils::DataTypeToSerialString(data_type).c_str());
GELOGE(PARAM_INVALID, "[Check][Size]num of offset and remote addr mismatch,"
"offset size=%zu, remote_addr size=%ld, dtype=%s",
offset_tensor.GetSize(), row_num, TypeUtils::DataTypeToSerialString(data_type).c_str());
return PARAM_INVALID;
}
@@ -244,7 +275,9 @@ Status RdmaNodeTask::ExtractTensor(TaskContext &context, vector<HcomRemoteAccess
auto local_addr = reinterpret_cast<uint64_t>(reinterpret_cast<uintptr_t>(tv->MutableData()));
auto device_len = tv->GetSize() / row_num;
if (device_len <= 0 || device_len > data[kVarTableIdxLen]) {
GELOGE(FAILED, "Local embedding length is out of range, expect %ld, but %ld exactly.",
REPORT_INNER_ERROR("E19999", "Local embedding length is out of range, expect %ld, but %ld exactly.",
data[kVarTableIdxLen], device_len);
GELOGE(FAILED, "[Check][Size]Local embedding length is out of range, expect %ld, but %ld exactly.",
data[kVarTableIdxLen], device_len);
return FAILED;
}
@@ -267,7 +300,8 @@ Status RdmaNodeTask::ExecuteAsync(TaskContext &context, std::function<void()> do
(HcclResult(*)(const string &, const vector<HcomRemoteAccessAddrInfo> &,
std::function<void(HcclResult status)>))dlsym(context.handle_, "HcomExecEnqueueRemoteAccess");
if (HcomExecEnqueueRemoteAccess == nullptr) {
GELOGE(FAILED, "Failed to invoke HcomExecEnqueueRemoteAccess hcom unknown node function.");
GELOGE(FAILED, "[Invoke][HcomExecEnqueueRemoteAccess] failed for node:%s(%s) hcom unknown node function.",
context.GetNodeName(), context.GetNodeItem().NodeType().c_str());
if (dlclose(context.handle_) != 0) {
GELOGW("Failed to close handle %s", dlerror());
}
@@ -283,7 +317,8 @@ Status RdmaNodeTask::ExecuteAsync(TaskContext &context, std::function<void()> do
TaskContext *p_ctx = &context;
auto callback = [p_ctx, done_callback](HcclResult status) {
if (status != HCCL_SUCCESS) {
GELOGE(HCCL_E_INTERNAL, "Call HcomExcutorInitialize failed, ret: 0x%X", status);
GELOGE(HCCL_E_INTERNAL, "[Call][HcomExcutorInitialize] failed for node:%s(%s), ret: 0x%X",
p_ctx->GetNodeName(), p_ctx->GetNodeItem().NodeType().c_str(), status);
p_ctx->SetStatus(FAILED);
}
done_callback();
@@ -296,7 +331,8 @@ Status RdmaNodeTask::ExecuteAsync(TaskContext &context, std::function<void()> do
}
HcclResult hccl_ret = HcomExecEnqueueRemoteAccess(context.GetNodeItem().NodeType(), addr_infos, callback);
if (hccl_ret != HCCL_SUCCESS) {
GELOGE(HCCL_E_INTERNAL, "Call HcomExcutorInitialize failed, ret: 0x%X", hccl_ret);
GELOGE(HCCL_E_INTERNAL, "[Call][HcomExecEnqueueRemoteAccess] failed for node:%s(%s), ret: 0x%X",
context.GetNodeName(), context.GetNodeItem().NodeType().c_str(), hccl_ret);
return HCCL_E_INTERNAL;
}

@@ -314,13 +350,17 @@ Status HcclNodeTask::Init(TaskContext &context) {
Status HcclNodeExecutor::PrepareTask(NodeTask &task, TaskContext &context) const {
GELOGI("[%s] HcclNodeExecutor::PrepareTask in.", context.GetNodeName());

GE_CHK_STATUS_RET(task.Init(context), "hccl node load hccl so failed.");
GE_CHK_STATUS_RET(task.Init(context), "[Invoke][Init]hccl node %s(%s) load hccl so failed.",
context.GetNodeName(), context.GetNodeItem().NodeType().c_str());
// allocate output mem, output mem or remote read will be calculated when node execute.
if (kRdmaReadTypes.count(context.GetNodeItem().NodeType()) == 0) {
GE_CHK_STATUS_RET(context.AllocateOutputs(), "hccl node task allocate output failed.");
GE_CHK_STATUS_RET(context.AllocateOutputs(),
"[Invoke][AllocateOutputs]hccl node %s(%s) task allocate output failed.",
context.GetNodeName(), context.GetNodeItem().NodeType().c_str());
}

GE_CHK_STATUS_RET(task.UpdateArgs(context), "hccl node task update args failed.");
GE_CHK_STATUS_RET(task.UpdateArgs(context), "[Update][Args] failed for hccl node %s(%s).",
context.GetNodeName(), context.GetNodeItem().NodeType().c_str());
GELOGI("[%s] HcclNodeExecutor::PrepareTask success.", context.GetNodeName());
return SUCCESS;
}
@@ -341,8 +381,9 @@ Status HcclNodeExecutor::LoadTask(const HybridModel &model, const NodePtr &node,
Status HcclNodeExecutor::ExecuteTask(NodeTask &task, TaskContext &context,
const std::function<void()> &callback) const {
context.handle_ = handle_;
GE_CHK_STATUS_RET(task.ExecuteAsync(context, callback), "Failed to execute task. node = %s",
context.GetNodeItem().NodeName().c_str());
GE_CHK_STATUS_RET(task.ExecuteAsync(context, callback),
"[Invoke][ExecuteAsync] failed to execute task. node:%s(%s)",
context.GetNodeItem().NodeName().c_str(), context.GetNodeItem().NodeType().c_str());
return SUCCESS;
}

@@ -359,12 +400,13 @@ Status HcclNodeExecutor::Initialize() {
GELOGI("FileName:%s, Path:%s.", file_name.c_str(), canonical_path.c_str());
handle_ = dlopen(canonical_path.c_str(), RTLD_NOW | RTLD_GLOBAL);
if (handle_ == nullptr) {
GELOGE(GE_PLGMGR_SO_NOT_EXIST, "Failed in dlopen %s! ", dlerror());
REPORT_CALL_ERROR("E19999", "Open SoFile %s failed, error:%s! ", canonical_path.c_str(), dlerror());
GELOGE(GE_PLGMGR_SO_NOT_EXIST, "[Open][SoFile] %s failed, error:%s! ", canonical_path.c_str(), dlerror());
return FAILED;
}
auto HcomExecInitialize = (HcclResult(*)())dlsym(handle_, "HcomExecInitialize");
if (HcomExecInitialize == nullptr) {
GELOGE(FAILED, "Failed to invoke HcomExecInitialize hcom unknown node function.");
GELOGE(FAILED, "[Invoke][HcomExecInitialize] Failed for hcom unknown node function.");
return FAILED;
}
HcclResult hccl_ret = HcomExecInitialize();
@@ -373,7 +415,7 @@ Status HcclNodeExecutor::Initialize() {
} else if (hccl_ret == HCCL_SUCCESS) {
GELOGI("Hcom executor initialize success.");
} else {
GELOGE(FAILED, "Call HcomExecInitialize failed, ret: 0x%X", hccl_ret);
GELOGE(FAILED, "[Call][HcomExecInitialize] failed, ret: 0x%X", hccl_ret);
return FAILED;
}
return SUCCESS;
@@ -382,12 +424,12 @@ Status HcclNodeExecutor::Initialize() {
Status HcclNodeExecutor::Finalize() {
auto HcomExecFinalize = (HcclResult(*)())dlsym(handle_, "HcomExecFinalize");
if (HcomExecFinalize == nullptr) {
GELOGE(FAILED, "Failed to invoke HcomExecFinalize hcom unknown node function.");
GELOGE(FAILED, "[Invoke][HcomExecFinalize] failed for hcom unknown node function.");
return FAILED;
}
HcclResult hccl_ret = HcomExecFinalize();
if (hccl_ret != HCCL_SUCCESS) {
GELOGE(FAILED, "Call HcomExecFinalize failed, ret: 0x%X", hccl_ret);
GELOGE(FAILED, "[Call][HcomExecFinalize] failed, ret: 0x%X", hccl_ret);
return FAILED;
}
// dlclose file handle
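HcclNodeExecutor resolves the HCCL entry points at runtime with dlopen/dlsym, which is why every failed dlsym above now logs dlerror() and, on the ExecuteAsync paths, closes the handle. A stripped-down sketch of that loading pattern; the library path and the symbol name are placeholders, not the real HCCL ones:

#include <dlfcn.h>
#include <cstdio>

// Signature we expect the shared object to export (placeholder).
using InitFunc = int (*)();

int LoadAndInit(const char *so_path) {
  void *handle = dlopen(so_path, RTLD_NOW | RTLD_GLOBAL);
  if (handle == nullptr) {
    std::fprintf(stderr, "dlopen %s failed: %s\n", so_path, dlerror());
    return -1;
  }
  auto init = reinterpret_cast<InitFunc>(dlsym(handle, "ExampleInitialize"));
  if (init == nullptr) {
    std::fprintf(stderr, "dlsym failed: %s\n", dlerror());
    dlclose(handle);  // release the handle on the error path, as the hunks above do
    return -1;
  }
  const int ret = init();
  // A real executor keeps the handle open for later calls and dlcloses it in Finalize().
  dlclose(handle);
  return ret;
}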


+13 -5  ge/hybrid/node_executor/host_cpu/host_cpu_node_executor.cc

@@ -33,7 +33,7 @@ Status HostNodeTaskBase::UpdateArgs(TaskContext &) {

Status HostNodeTaskBase::ExecuteAsync(TaskContext &context, std::function<void()> done_callback) {
GELOGD("[%s] Start execute.", context.GetNodeName());
GE_CHK_STATUS_RET(Execute(context), "node:%s type:%s, task execute failed.",
GE_CHK_STATUS_RET(Execute(context), "[Invoke][Execute] failed for node:%s type:%s.",
node_->GetName().c_str(), node_->GetType().c_str())
if (done_callback) {
GELOGD("[%s] Start invoke callback.", context.GetNodeName());
@@ -70,7 +70,8 @@ Status CpuKernelNodeTask::Execute(TaskContext &context) {
AllocationAttr attr;
attr.SetMemType(HOST_DDR);
if (context.AllocateOutput(i, output_desc, nullptr, &attr) != SUCCESS) {
GELOGE(FAILED, "node:%s Failed to allocate output %d", context.GetNodeName(), i);
REPORT_CALL_ERROR("E19999", "node:%s Failed to allocate output %d", context.GetNodeName(), i);
GELOGE(FAILED, "[Invoke][AllocateOutput]node:%s Failed to allocate output %d", context.GetNodeName(), i);
return FAILED;
}
auto tensor = context.GetOutput(i);
@@ -92,14 +93,18 @@ Status HostCpuNodeTask::Execute(TaskContext &context) {
RunContext run_context;
auto host_kernel = hybrid::host_cpu::KernelFactory::Instance().CreateKernel(node_);
if (host_kernel == nullptr) {
GELOGE(UNSUPPORTED, "node %s type %s is not supported by host kernel.",
REPORT_CALL_ERROR("E19999", "CreateKernel failed for node %s type %s is not supported by host kernel.",
node_->GetName().c_str(), node_->GetType().c_str());
GELOGE(UNSUPPORTED, "[Create][Kernel]node %s type %s is not supported by host kernel.",
node_->GetName().c_str(), node_->GetType().c_str());
return UNSUPPORTED;
}

Status compute_ret = host_kernel->Compute(context);
if (compute_ret != SUCCESS) {
GELOGE(compute_ret, "node %s type %s compute failed or not imply.",
REPORT_CALL_ERROR("E19999", "node %s type %s compute failed.",
node_->GetName().c_str(), node_->GetType().c_str());
GELOGE(compute_ret, "[Invoke][Compute]node %s type %s compute failed or not imply.",
node_->GetName().c_str(), node_->GetType().c_str());
return compute_ret;
}
@@ -131,7 +136,10 @@ Status HostCpuNodeExecutor::LoadTask(const HybridModel &model, const NodePtr &no
task = MakeShared<HostCpuNodeTask>(node);
GE_CHECK_NOTNULL(task);
} else {
GELOGE(UNSUPPORTED, "node %s type %s is not support in HostCpuNodeExecutor now.", name.c_str(), type.c_str());
REPORT_INNER_ERROR("E19999", "Create NodeTask failed for node %s type %s.",
name.c_str(), type.c_str());
GELOGE(UNSUPPORTED, "[Create][NodeTask]node %s type %s is not support in HostCpuNodeExecutor now.",
name.c_str(), type.c_str());
return UNSUPPORTED;
}
return SUCCESS;
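Nearly every hunk in this change follows the convention visible above: the user-facing error goes through REPORT_CALL_ERROR / REPORT_INNER_ERROR("E19999", ...) while the runtime log keeps a bracketed [Action][Object] GELOGE message. A hedged sketch of what a single helper doing both could look like; the real code calls the two macros separately and the sinks here are only stand-ins:

#include <cstdarg>
#include <cstdio>

// Illustrative stand-ins for the two reporting sinks.
static void ReportToErrorManager(const char *code, const char *msg) {
  std::fprintf(stderr, "[error-manager] %s: %s\n", code, msg);
}
static void WriteRuntimeLog(const char *msg) {
  std::fprintf(stderr, "[log] %s\n", msg);
}

// One call fans out to both sinks, mirroring the paired REPORT_* + GELOGE lines above.
static void DualReport(const char *error_code, const char *fmt, ...) {
  char buffer[512];
  va_list args;
  va_start(args, fmt);
  std::vsnprintf(buffer, sizeof(buffer), fmt, args);
  va_end(args);
  ReportToErrorManager(error_code, buffer);
  WriteRuntimeLog(buffer);
}

// Usage: DualReport("E19999", "node %s type %s is not supported", name, type);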


+4 -2  ge/hybrid/node_executor/host_cpu/kernel/assign_kernel.cc

@@ -34,7 +34,9 @@ Status AssignKernel::Compute(TaskContext& context) {
const auto value_tensor = context.GetInput(kAssignValueInputIndex);
GE_CHECK_NOTNULL(value_tensor);
if (value_tensor->GetSize() > ref_tensor->GetSize()) {
GELOGE(INTERNAL_ERROR, "[%s] value_input_size=%zu, but ref_input_size=%zu.",
REPORT_INNER_ERROR("E19999", "[%s] value_input_size=%zu bigger than ref_input_size=%zu. check invalid",
node_->GetName().c_str(), value_tensor->GetSize(), ref_tensor->GetSize());
GELOGE(INTERNAL_ERROR, "[Check][Size][%s] value_input_size=%zu, but ref_input_size=%zu.",
node_->GetName().c_str(), value_tensor->GetSize(), ref_tensor->GetSize());
return INTERNAL_ERROR;
}
@@ -46,7 +48,7 @@ Status AssignKernel::Compute(TaskContext& context) {
value_tensor->GetSize(), RT_MEMCPY_HOST_TO_HOST));
}
GE_CHK_STATUS_RET(context.SetOutput(kAssignRefOutputIndex, *ref_tensor),
"[%s] Failed to set output.", context.GetNodeName());
"[Set][Output] failed for[%s].", context.GetNodeName());

GELOGD("[%s] compute success.", node_->GetName().c_str());
return SUCCESS;


+2 -1  ge/hybrid/node_executor/host_cpu/kernel/data_kernel.cc

@@ -30,7 +30,8 @@ namespace host_cpu {
Status DataKernel::Compute(TaskContext& context) {
auto input = context.MutableInput(kDataInputIndex);
GE_CHECK_NOTNULL(input);
GE_CHK_STATUS_RET(context.SetOutput(kDataOutputIndex, *input), "[%s] Failed to set output.", context.GetNodeName())
GE_CHK_STATUS_RET(context.SetOutput(kDataOutputIndex, *input),
"[Set][Output] failed for [%s].", context.GetNodeName())
GELOGD("[%s] compute success.", node_->GetName().c_str());
return SUCCESS;
}


+20 -10  ge/hybrid/node_executor/host_cpu/kernel/random_uniform_kernel.cc

@@ -36,31 +36,41 @@ Status RandomUniformKernel::Compute(TaskContext& context) {
(void)AttrUtils::GetInt(node_->GetOpDesc(), "seed2", seed2);
DataType data_type = DT_FLOAT;
if (!AttrUtils::GetDataType(node_->GetOpDesc(), kAttrDtype, data_type)) {
GELOGE(PARAM_INVALID, "[%s] get attr dtype failed.", node_->GetName().c_str());
REPORT_CALL_ERROR("E19999", "GetDataType failed for [%s].", node_->GetName().c_str());
GELOGE(PARAM_INVALID, "[Get][DataType] failed for [%s].", node_->GetName().c_str());
return PARAM_INVALID;
}
switch (data_type) {
case DT_FLOAT16:
if (GenerateFP16(node_->GetOpDesc(), seed, seed2, context) != SUCCESS) {
GELOGE(FAILED, "Generate random_distribution failed, data_type=DT_FLOAT");
GELOGE(FAILED, "[Invoke][GenerateFP16]Generate random_distribution failed for %s, data_type=DT_FLOAT16",
node_->GetName().c_str());
return FAILED;
}
break;
case DT_FLOAT:
if (Generate<float>(node_->GetOpDesc(), seed, seed2, context) != SUCCESS) {
GELOGE(FAILED, "Generate random_distribution failed, data_type=DT_FLOAT");
GELOGE(FAILED, "[Invoke][Generate]Generate random_distribution failed for %s, data_type=DT_FLOAT",
node_->GetName().c_str());
return FAILED;
}
break;
case DT_DOUBLE:
if (Generate<double>(node_->GetOpDesc(), seed, seed2, context) != SUCCESS) {
GELOGE(FAILED, "Generate random_distribution failed, data_type=DT_DOUBLE");
GELOGE(FAILED, "[Invoke][Generate]Generate random_distribution failed for %s, data_type=DT_DOUBLE",
node_->GetName().c_str());
return FAILED;
}
break;
default:
GELOGE(UNSUPPORTED, "Supported DataType is DT_FLOAT16 / DT_FLOAT / DT_DOUBLE, but data_type=%s",
TypeUtils::DataTypeToSerialString(data_type).c_str());
REPORT_INNER_ERROR("E19999", "[Check][DataType]Supported DataType is DT_FLOAT16 / DT_FLOAT / DT_DOUBLE,"
"but data_type=%s, node:%s",
TypeUtils::DataTypeToSerialString(data_type).c_str(),
node_->GetName().c_str());
GELOGE(UNSUPPORTED, "[Check][DataType]Supported DataType is DT_FLOAT16 / DT_FLOAT / DT_DOUBLE,"
"but data_type=%s, node:%s",
TypeUtils::DataTypeToSerialString(data_type).c_str(),
node_->GetName().c_str());
return UNSUPPORTED;
}

@@ -79,7 +89,7 @@ Status RandomUniformKernel::Generate(const ge::OpDescPtr &op_desc_ptr, int64_t s
auto tensor_size = data_num * sizeof(T);
TensorValue tensor;
GE_CHK_STATUS_RET(context.AllocateTensor(tensor_size, tensor, &attr),
"[%s] Failed to allocate output of size %zu",
"[Invoke][AllocateTensor][%s] Failed to allocate output of size %zu",
context.GetNodeName(),
tensor_size);

@@ -101,7 +111,7 @@ Status RandomUniformKernel::Generate(const ge::OpDescPtr &op_desc_ptr, int64_t s
*(buf + i) = distribution(gen);
}

GE_CHK_STATUS_RET(context.SetOutput(0, tensor), "[%s] Failed to set output.", context.GetNodeName());
GE_CHK_STATUS_RET(context.SetOutput(0, tensor), "[Set][Output] failed for [%s].", context.GetNodeName());
return SUCCESS;
}

@@ -115,7 +125,7 @@ Status RandomUniformKernel::GenerateFP16(const ge::OpDescPtr &op_desc_ptr, int64
auto tensor_size = data_num * sizeof(fp16_t);
TensorValue tensor;
GE_CHK_STATUS_RET(context.AllocateTensor(tensor_size, tensor, &attr),
"[%s] Failed to allocate output of size %zu",
"[Invoke][AllocateTensor][%s] Failed to allocate output of size %zu",
context.GetNodeName(),
tensor_size);

@@ -137,7 +147,7 @@ Status RandomUniformKernel::GenerateFP16(const ge::OpDescPtr &op_desc_ptr, int64
*(buf + i) = static_cast<fp16_t>(distribution(gen));
}

GE_CHK_STATUS_RET(context.SetOutput(0, tensor), "[%s] Failed to set output.", context.GetNodeName());
GE_CHK_STATUS_RET(context.SetOutput(0, tensor), "[Set][Output]failed for [%s].", context.GetNodeName());
return SUCCESS;
}
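RandomUniformKernel seeds a Mersenne-Twister engine from the op's seed/seed2 attributes and fills the output tensor element by element; the FP16 path simply narrows each generated value. A self-contained sketch of one way to implement the float path — the engine choice, the [0, 1) range, and the way the two seeds are combined are assumptions for illustration, not GE's exact scheme:

#include <cstdint>
#include <random>
#include <vector>

// Fill `out` with uniform values in [0, 1). When both seeds are zero the engine
// is seeded non-deterministically, the usual contract for this kind of op.
void FillRandomUniform(int64_t seed, int64_t seed2, std::vector<float> &out) {
  std::mt19937_64 gen;
  if (seed == 0 && seed2 == 0) {
    std::random_device rd;
    gen.seed(rd());
  } else {
    gen.seed(static_cast<uint64_t>(seed) ^ (static_cast<uint64_t>(seed2) << 1));
  }
  std::uniform_real_distribution<float> distribution(0.0f, 1.0f);
  for (auto &value : out) {
    value = distribution(gen);
  }
}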



+3 -2  ge/hybrid/node_executor/host_cpu/kernel/variable_kernel.cc

@@ -25,11 +25,12 @@ namespace host_cpu {
Status VariableKernel::Compute(TaskContext& context) {
auto tensor = context.GetVariable(node_->GetName());
if (tensor == nullptr) {
GELOGE(PARAM_INVALID, "tensor is NULL.");
REPORT_INNER_ERROR("E19999", "Get Variable from task context for node:%s failed.", context.GetNodeName());
GELOGE(PARAM_INVALID, "[Check][Param]Get Variable from task context for node:%s failed.", context.GetNodeName());
return PARAM_INVALID;
}
// Constant & Variable Op has and only has one output
GE_CHK_STATUS_RET(context.SetOutput(0, *tensor), "[%s] Failed to set output.", context.GetNodeName());
GE_CHK_STATUS_RET(context.SetOutput(0, *tensor), "[Set][Output] failed for [%s].", context.GetNodeName());
GELOGD("[%s] compute success.", node_->GetName().c_str());
return SUCCESS;
}


+4 -1  ge/hybrid/node_executor/host_cpu/kernel_factory.cc

@@ -34,7 +34,10 @@ std::shared_ptr<Kernel> KernelFactory::CreateKernel(const NodePtr &node) {
if (iter != kernel_creator_map_.end()) {
return iter->second(node);
}
GELOGE(FAILED, "Not supported, type = %s, name = %s", node->GetType().c_str(), node->GetName().c_str());
REPORT_INNER_ERROR("E19999", "Not supported because kernel_creator_map_ not contain type:%s, name = %s",
node->GetType().c_str(), node->GetName().c_str());
GELOGE(FAILED, "[Find][NodeType]Not supported because kernel_creator_map_ not contain type = %s, name = %s",
node->GetType().c_str(), node->GetName().c_str());
return nullptr;
}
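KernelFactory::CreateKernel above is a plain registry: a map from op type to a creator callable, returning nullptr (now with an E19999 report as well) when the type was never registered. A minimal sketch of such a creator map:

#include <functional>
#include <memory>
#include <string>
#include <unordered_map>

struct Kernel {
  virtual ~Kernel() = default;
};

class KernelRegistry {
 public:
  using Creator = std::function<std::shared_ptr<Kernel>()>;

  void Register(const std::string &op_type, Creator creator) {
    creators_[op_type] = std::move(creator);
  }

  // Unknown types yield nullptr, matching the behaviour of the hunk above.
  std::shared_ptr<Kernel> Create(const std::string &op_type) const {
    auto it = creators_.find(op_type);
    return it == creators_.end() ? nullptr : it->second();
  }

 private:
  std::unordered_map<std::string, Creator> creators_;
};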



+17 -9  ge/hybrid/node_executor/node_executor.cc

@@ -45,8 +45,7 @@ Status NodeExecutor::PrepareTask(NodeTask &task, TaskContext &context) const {

Status NodeExecutor::ExecuteTask(NodeTask &task, TaskContext &context, const std::function<void()> &callback) const {
HYBRID_CHK_STATUS_RET(task.ExecuteAsync(context, callback),
"Failed to execute task. node = %s",
context.GetNodeItem().NodeName().c_str());
"[Execute][Task] failed. node = %s", context.GetNodeItem().NodeName().c_str());
return SUCCESS;
}

@@ -106,7 +105,10 @@ NodeExecutorManager::ExecutorType NodeExecutorManager::ResolveExecutorType(Node
const auto &lib_name = op_desc->GetOpKernelLibName();
auto it = engine_mapping_.find(lib_name);
if (it == engine_mapping_.end()) {
GELOGE(UNSUPPORTED, "KernelLib not supported. node = %s, lib_name = %s", node.GetName().c_str(), lib_name.c_str());
REPORT_INNER_ERROR("E19999", "Failed to get ExecutorType by lib_name:%s, node:%s",
lib_name.c_str(), node.GetName().c_str());
GELOGE(UNSUPPORTED, "[Find][ExecutorType]Failed to get ExecutorType by lib_name:%s, node:%s",
lib_name.c_str(), node.GetName().c_str());
return ExecutorType::RESERVED;
}

@@ -117,7 +119,10 @@ Status NodeExecutorManager::GetExecutor(Node &node, const NodeExecutor **executo
auto executor_type = ResolveExecutorType(node);
const auto it = executors_.find(executor_type);
if (it == executors_.end()) {
GELOGE(INTERNAL_ERROR, "Failed to get executor by type: %d.", static_cast<int>(executor_type));
REPORT_INNER_ERROR("E19999", "Failed to get executor by type: %d.",
static_cast<int>(executor_type));
GELOGE(INTERNAL_ERROR, "[Check][ExecutorType]Failed to get executor by type: %d.",
static_cast<int>(executor_type));
return INTERNAL_ERROR;
}

@@ -155,16 +160,16 @@ Status NodeExecutorManager::CalcOpRunningParam(Node &node) const {
GeShape output_shape = output_tensor.GetShape();
int64_t output_mem_size = 0;
GE_CHK_STATUS_RET(TensorUtils::CalcTensorMemSize(output_shape, format, data_type, output_mem_size),
"hccl calc tensor mem size failed.");
"[Calc][TensorMemSize] failed, node:%s.", node.GetName().c_str());
GE_CHK_STATUS_RET(CheckInt64AddOverflow(output_mem_size, MEMORY_ALIGN_RATIO * MEMORY_ALIGN_SIZE - 1),
"[%s] Invalid output mem size: %ld",
"[Check][Overflow][%s] Invalid output mem size: %ld",
node.GetName().c_str(),
output_mem_size);
output_mem_size = ((output_mem_size +
MEMORY_ALIGN_RATIO * MEMORY_ALIGN_SIZE - 1) / MEMORY_ALIGN_SIZE) * MEMORY_ALIGN_SIZE;
TensorUtils::SetSize(output_tensor, output_mem_size);
GE_CHK_STATUS_RET(op_desc->UpdateOutputDesc(static_cast<uint32_t>(i), output_tensor),
"hccl update output size failed.");
"[Update][OutputDesc] failed, node:%s.", node.GetName().c_str());
GELOGD("%s output desc[%zu], dim_size: %zu, mem_size: %ld.", node.GetName().c_str(), i,
output_tensor.GetShape().GetDimNum(), output_mem_size);
}
@@ -189,14 +194,17 @@ Status NodeExecutorManager::InitializeExecutors() {
GE_CHECK_NOTNULL(build_fn);
auto executor = std::unique_ptr<NodeExecutor>(build_fn());
if (executor == nullptr) {
GELOGE(INTERNAL_ERROR, "Failed to create executor for engine type = %d", static_cast<int>(engine_type));
REPORT_CALL_ERROR("E19999", "Create NodeExecutor failed for engine type = %d",
static_cast<int>(engine_type));
GELOGE(INTERNAL_ERROR, "[Create][NodeExecutor] failed for engine type = %d", static_cast<int>(engine_type));
return INTERNAL_ERROR;
}

GELOGD("Executor of engine type = %d was created successfully", static_cast<int>(engine_type));
auto ret = executor->Initialize();
if (ret != SUCCESS) {
GELOGE(ret, "Failed to initialize NodeExecutor of type = %d, clear executors", static_cast<int>(engine_type));
REPORT_CALL_ERROR("E19999", "Initialize NodeExecutor failed for type = %d", static_cast<int>(engine_type));
GELOGE(ret, "[Initialize][NodeExecutor] failed for type = %d", static_cast<int>(engine_type));
for (auto &executor_it : executors_) {
executor_it.second->Finalize();
}
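CalcOpRunningParam above first checks that the padding cannot overflow and then rounds every output size up with ((size + MEMORY_ALIGN_RATIO * MEMORY_ALIGN_SIZE - 1) / MEMORY_ALIGN_SIZE) * MEMORY_ALIGN_SIZE. A small sketch of that round-up with a worked value; the ratio and alignment passed in are illustrative, not the GE constants:

#include <cstdint>
#include <limits>

// Pad by (ratio * align - 1), then round down to a multiple of `align`.
// With ratio == 1 this is the usual align-up. Returns -1 when the padding
// would overflow, the case guarded by CheckInt64AddOverflow above.
int64_t AlignSize(int64_t size, int64_t align, int64_t ratio) {
  const int64_t pad = ratio * align - 1;
  if (size > std::numeric_limits<int64_t>::max() - pad) {
    return -1;
  }
  return ((size + pad) / align) * align;
}

// Example: AlignSize(100, 32, 2) pads 100 by 63 -> 163, then 163 / 32 * 32 == 160.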


+3 -4  ge/hybrid/node_executor/partitioned_call/partitioned_call_node_executor.cc

@@ -38,15 +38,14 @@ Status PartitionedCallNodeTask::Init(TaskContext &context) {

Status PartitionedCallNodeTask::ExecuteAsync(TaskContext &context, std::function<void()> done_callback) {
GE_CHK_STATUS_RET(subgraph_executor_->ExecuteAsync(context),
"[%s] Failed to set inputs", graph_item_->GetName().c_str());
"[Invoke][ExecuteAsync] failed for[%s]", graph_item_->GetName().c_str());

auto callback = [=]() {
Callback(done_callback);
};

GE_CHK_STATUS_RET(context.RegisterCallback(callback),
"[%s] Failed to register callback",
graph_item_->GetName().c_str());
"[Register][Callback] failed for [%s]", graph_item_->GetName().c_str());
GELOGD("[%s] Done executing subgraph successfully.", graph_item_->GetName().c_str());
return SUCCESS;
}
@@ -83,7 +82,7 @@ Status PartitionedCallNodeExecutor::LoadTask(const ge::hybrid::HybridModel &mode

Status PartitionedCallNodeExecutor::PrepareTask(NodeTask &task, TaskContext &context) const {
RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(), "[PartitionedCallPrepareTask] Start");
GE_CHK_STATUS_RET(task.Init(context), "[%s] Failed to init task.", context.GetNodeName());
GE_CHK_STATUS_RET(task.Init(context), "[Init][Task] failed for [%s].", context.GetNodeName());
RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(), "[PartitionedCallPrepareTask] End");
return SUCCESS;
}


+67 -21  ge/hybrid/node_executor/task_context.cc

@@ -63,17 +63,22 @@ std::unique_ptr<TaskContext> TaskContext::Create(NodeState *node_state,
node_item.output_start,
node_item.num_outputs);
if (node_item.input_start < 0 || node_item.output_start < 0) {
REPORT_INNER_ERROR("E19999", "NodeItem:%s(%s) not property initialized."
"input_start:%d or output_start:%d less than 0",
node_item.NodeName().c_str(), node_item.NodeType().c_str(),
node_item.input_start, node_item.output_start);
GELOGE(INTERNAL_ERROR,
"NodeItem not property initialized. input_start = %d, output_start = %d",
node_item.input_start,
node_item.output_start);
"[Check][Param]NodeItem:%s(%s) not property initialized. input_start = %d, output_start = %d",
node_item.NodeName().c_str(), node_item.NodeType().c_str(),
node_item.input_start, node_item.output_start);
return nullptr;
}

auto task_context = std::unique_ptr<TaskContext>(
new(std::nothrow)TaskContext(execution_context, node_state, subgraph_context));
if (task_context == nullptr) {
GELOGE(MEMALLOC_FAILED, "[%s] Failed to create instance of TaskContext.", node_item.NodeName().c_str());
REPORT_CALL_ERROR("E19999", "Create TaskContext failed for [%s].", node_item.NodeName().c_str());
GELOGE(MEMALLOC_FAILED, "[Create][TaskContext] failed for [%s].", node_item.NodeName().c_str());
return nullptr;
}

@@ -94,7 +99,12 @@ int TaskContext::NumOutputs() const {

TensorValue *TaskContext::MutableInput(int index) {
if (index < 0 || index >= node_item_->num_inputs) {
GELOGE(PARAM_INVALID, "Index out of range. index = %d, num_inputs = %d", index, node_item_->num_inputs);
REPORT_INNER_ERROR("E19999", "Index out of range, check invalid. index = %d, num_inputs = %d, node:%s(%s)",
index, node_item_->num_inputs,
node_item_->NodeName().c_str(), node_item_->NodeType().c_str());
GELOGE(PARAM_INVALID, "[Check][Param]Index out of range. index = %d, num_inputs = %d, node:%s(%s)",
index, node_item_->num_inputs,
node_item_->NodeName().c_str(), node_item_->NodeType().c_str());
return nullptr;
}

@@ -103,7 +113,12 @@ TensorValue *TaskContext::MutableInput(int index) {

const TensorValue *TaskContext::GetOutput(int index) const {
if (index < 0 || index >= node_item_->num_outputs) {
GELOGE(PARAM_INVALID, "Index out of range. index = %d, num_outputs = %d", index, node_item_->num_outputs);
REPORT_INNER_ERROR("E19999", "Index out of range, check invalid. index = %d, num_outputs = %d, node:%s(%s)",
index, node_item_->num_outputs,
node_item_->NodeName().c_str(), node_item_->NodeType().c_str());
GELOGE(PARAM_INVALID, "[Check][Param]Index out of range. index = %d, num_outputs = %d, node:%s(%s)",
index, node_item_->num_outputs,
node_item_->NodeName().c_str(), node_item_->NodeType().c_str());
return nullptr;
}

@@ -112,7 +127,12 @@ const TensorValue *TaskContext::GetOutput(int index) const {

TensorValue *TaskContext::MutableOutput(int index) {
if (index < 0 || index >= node_item_->num_outputs) {
GELOGE(PARAM_INVALID, "Index out of range. index = %d, num_outputs = %d", index, node_item_->num_outputs);
REPORT_INNER_ERROR("E19999", "Index out of range, check invalid. index = %d, num_outputs = %d, node:%s(%s)",
index, node_item_->num_outputs,
node_item_->NodeName().c_str(), node_item_->NodeType().c_str());
GELOGE(PARAM_INVALID, "[Check][Param]Index out of range. index = %d, num_outputs = %d, node:%s(%s)",
index, node_item_->num_outputs,
node_item_->NodeName().c_str(), node_item_->NodeType().c_str());
return nullptr;
}

@@ -125,7 +145,10 @@ std::size_t TaskContext::NumWorkspaces() const {

void *TaskContext::MutableWorkspace(int index) {
if (index < 0 || static_cast<size_t>(index) >= workspaces_.size()) {
GELOGE(PARAM_INVALID, "Index out of range. index = %d, num_workspaces = %d", index, node_item_->num_outputs);
REPORT_INNER_ERROR("E19999", "Index:%d out of range, check invalid. number:%zu of workspaces_, node:%s(%s)",
index, workspaces_.size(), node_item_->NodeName().c_str(), node_item_->NodeType().c_str());
GELOGE(PARAM_INVALID, "[Check][Param]Index:%d out of range. number:%zu of workspaces_, node:%s(%s)",
index, workspaces_.size(), node_item_->NodeName().c_str(), node_item_->NodeType().c_str());
return nullptr;
}

@@ -134,7 +157,11 @@ void *TaskContext::MutableWorkspace(int index) {

const TensorValue *TaskContext::GetInput(int index) const {
if (index < 0 || index >= node_item_->num_inputs) {
GELOGE(PARAM_INVALID, "Index out of range. index = %d, num_inputs = %d", index, node_item_->num_inputs);
REPORT_INNER_ERROR("E19999", "Index:%d out of range, check invalid. num_inputs:%d node:%s(%s)",
index, node_item_->num_inputs, node_item_->NodeName().c_str(),
node_item_->NodeType().c_str());
GELOGE(PARAM_INVALID, "[Check][Param]Index:%d out of range. num_inputs:%d node:%s(%s)",
index, node_item_->num_inputs, node_item_->NodeName().c_str(), node_item_->NodeType().c_str());
return nullptr;
}

@@ -146,7 +173,10 @@ Status TaskContext::AllocateWorkspaces() {
for (auto size : workspace_sizes) {
void *workspace = execution_context_->allocator->Allocate(size);
if (workspace == nullptr) {
GELOGE(MEMALLOC_FAILED, "Failed to allocate workspace of size: %ld", size);
REPORT_CALL_ERROR("E19999", "node:%s(%s) Allocate workspace failed, size: %ld",
node_item_->NodeName().c_str(), node_item_->NodeType().c_str(), size);
GELOGE(MEMALLOC_FAILED, "[Allocate][workspace] failed for node:%s(%s), size: %ld",
node_item_->NodeName().c_str(), node_item_->NodeType().c_str(), size);
return MEMALLOC_FAILED;
}

@@ -162,7 +192,8 @@ Status TaskContext::RegisterCallback(const std::function<void()> &callback_fun)
}
auto ret = execution_context_->callback_manager->RegisterCallback(GetStream(), callback_fun);
if (ret != SUCCESS) {
GELOGE(ret, "[%s] Failed to register callback", GetNodeName());
REPORT_CALL_ERROR("E19999", "RegisterCallback failed for [%s]", GetNodeName());
GELOGE(ret, "[Register][Callback] failed for [%s]", GetNodeName());
execution_context_->callback_manager->Destroy();
return ret;
}
@@ -187,7 +218,8 @@ string TaskContext::TensorDesc2String(const GeTensorDesc &desc) {
Status TaskContext::AllocateTensor(const GeTensorDesc &tensor_desc, TensorValue &tensor, AllocationAttr *attr) {
int64_t size = 0;
if (ge::TensorUtils::GetSize(tensor_desc, size) != GRAPH_SUCCESS) {
GELOGE(INTERNAL_ERROR, "Failed to get tensor size");
REPORT_CALL_ERROR("E19999", "Get TensorSize failed, tensor:%s", tensor_desc.GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Get][TensorSize] failed, tensor:%s", tensor_desc.GetName().c_str());
return INTERNAL_ERROR;
}

@@ -211,7 +243,12 @@ Status TaskContext::AllocateOutput(int index,
TensorDesc2String(tensor_desc).c_str());

if (index < 0 || index >= node_item_->num_outputs) {
GELOGE(PARAM_INVALID, "output index out of range. num_output = %d, index = %d", node_item_->num_outputs, index);
REPORT_INNER_ERROR("E19999", "%s(%s) output index out of range check invalid. num_output = %d, index = %d",
node_item_->NodeName().c_str(), node_item_->NodeType().c_str(),
node_item_->num_outputs, index);
GELOGE(PARAM_INVALID, "[Check][Param] %s(%s) output index out of range. num_output = %d, index = %d",
node_item_->NodeName().c_str(), node_item_->NodeType().c_str(),
node_item_->num_outputs, index);
return PARAM_INVALID;
}

@@ -289,7 +326,10 @@ Status TaskContext::AllocateOutputs(AllocationAttr *attr) {
Status TaskContext::AllocateTensor(size_t size, TensorValue &tensor, AllocationAttr *attr) {
auto buffer = TensorBuffer::Create(execution_context_->allocator, size, attr);
if (buffer == nullptr) {
GELOGE(MEMALLOC_FAILED, "Failed to allocate buffer of size: %zu", size);
REPORT_CALL_ERROR("E19999", "%s(%s) Allocate buffer failed, size: %zu",
node_item_->NodeName().c_str(), node_item_->NodeType().c_str(), size);
GELOGE(MEMALLOC_FAILED, "[Allocate][buffer] failed for %s(%s), size: %zu",
node_item_->NodeName().c_str(), node_item_->NodeType().c_str(), size);
return MEMALLOC_FAILED;
}

@@ -303,7 +343,12 @@ const NodeItem &TaskContext::GetNodeItem() const {

Status TaskContext::SetOutput(int index, const TensorValue &tensor) {
if (index < 0 || index >= node_item_->num_outputs) {
GELOGE(PARAM_INVALID, "output index out of range. num_output = %d, index = %d", node_item_->num_outputs, index);
REPORT_INNER_ERROR("E19999", "%s(%s) output index out of range check invalid. num_output = %d, index = %d",
node_item_->NodeName().c_str(), node_item_->NodeType().c_str(),
node_item_->num_outputs, index);
GELOGE(PARAM_INVALID, "[Check][Param]%s(%s) output index out of range. num_output = %d, index = %d",
node_item_->NodeName().c_str(), node_item_->NodeType().c_str(),
node_item_->num_outputs, index);
return PARAM_INVALID;
}

@@ -368,7 +413,8 @@ Status TaskContext::AllocateWorkspace(size_t size, void **buffer, void *ori_addr
}

if (*buffer == nullptr) {
GELOGE(MEMALLOC_FAILED, "Failed to allocate workspace of size = %zu", size);
REPORT_CALL_ERROR("E19999", "Allocate Workspace failed, size = %zu", size);
GELOGE(MEMALLOC_FAILED, "[Allocate][Workspace] failed, size = %zu", size);
return MEMALLOC_FAILED;
}

@@ -400,11 +446,11 @@ Status TaskContext::PropagateOutputs() {
input_offset);

if (subgraph_context_->all_inputs_.size() <= static_cast<size_t>(input_offset)) {
GELOGE(INTERNAL_ERROR,
"[%s] input index out of range. index = %d, total input num = %zu",
GetNodeName(),
input_offset,
subgraph_context_->all_inputs_.size());
REPORT_INNER_ERROR("E19999",
"[%s] input index out of range check invalid. index = %d, total input num = %zu",
GetNodeName(), input_offset, subgraph_context_->all_inputs_.size());
GELOGE(INTERNAL_ERROR, "[Check][Size][%s] input index out of range. index = %d, total input num = %zu",
GetNodeName(), input_offset, subgraph_context_->all_inputs_.size());
return INTERNAL_ERROR;
}



+ 10
- 0
ge/model/ge_model.cc View File

@@ -85,4 +85,14 @@ ProtoAttrMapHelper GeModel::MutableAttrMap() { return attrs_; }
ConstProtoAttrMapHelper GeModel::GetAttrMap() const {
return ConstProtoAttrMapHelper(attrs_.GetProtoOwner(), attrs_.GetProtoMsg());
}

Status GeModel::GetSessionId(uint32_t model_id, uint64_t &session_id) const {
auto it = model_id_to_session_id_map_.find(model_id);
if (it != model_id_to_session_id_map_.end()) {
session_id = it->second;
return SUCCESS;
}
GELOGW("No session id were found with model id [%u].", model_id);
return INTERNAL_ERROR;
}
} // namespace ge

+ 6
- 0
ge/model/ge_model.h View File

@@ -71,6 +71,11 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY GeModel : public AttrHolder
void SetModelId(uint32_t model_id) { model_id_ = model_id; }
uint32_t GetModelId() const { return model_id_; }

Status GetSessionId(uint32_t model_id, uint64_t &session_id) const;
void InsertSessionMap(uint32_t model_id, uint64_t session_id) {
model_id_to_session_id_map_.insert({model_id, session_id});
}

protected:
ConstProtoAttrMapHelper GetAttrMap() const override;

@@ -90,6 +95,7 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY GeModel : public AttrHolder
std::string platform_version_;
uint8_t platform_type_ = {0};
uint32_t model_id_ = INVALID_MODEL_ID;
std::map<uint32_t, uint64_t> model_id_to_session_id_map_;
};
} // namespace ge
using GeModelPtr = std::shared_ptr<ge::GeModel>;
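
The two members added to GeModel above let callers record which session a loaded model id belongs to and recover it later. A minimal usage sketch, assuming this repository's headers are on the include path; the helper name and include path are illustrative, not part of the change:

#include <cstdint>
#include "model/ge_model.h"  // path relative to ge/, adjust to your include roots

void TrackSession(const GeModelPtr &ge_model, uint32_t model_id, uint64_t session_id) {
  // Remember which session loaded this model id.
  ge_model->InsertSessionMap(model_id, session_id);

  // Later (e.g. when releasing per-session resources), look the session up again.
  uint64_t recorded_session_id = 0U;
  if (ge_model->GetSessionId(model_id, recorded_session_id) != ge::SUCCESS) {
    // GetSessionId warns and returns INTERNAL_ERROR for an unknown model id.
    return;
  }
}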


+ 27
- 1
ge/model/ge_root_model.h View File

@@ -33,17 +33,43 @@ class GeRootModel {
};

const ComputeGraphPtr &GetRootGraph() const { return root_graph_; };
void SetModelId(uint32_t model_id) { model_id_ = model_id; }
void SetModelId(uint32_t model_id) {
model_id_ = model_id;
// cached for removal
model_ids_.emplace_back(model_id);
}

void SetIsSpecificStream(bool is_specific_stream) { is_specific_stream_ = is_specific_stream; }
bool CheckIsSpecificStream() {return is_specific_stream_; }

uint32_t GetModelId() const { return model_id_; }

std::vector<uint32_t> GetAllModelId() const { return model_ids_; }

void SetModelName(const std::string &model_name) { model_name_ = model_name; }
const std::string &GetModelName() const { return model_name_; }
Status CheckIsUnknownShape(bool &is_dynamic_shape);

void SetRootGraph(ComputeGraphPtr graph) { root_graph_ = graph; }

void SetTrainFlag(bool flag) { train_flag_ = flag; }

bool GetTrainFlag() const { return train_flag_; }

private:
ComputeGraphPtr root_graph_ = nullptr;
std::map<std::string, GeModelPtr> subgraph_instance_name_to_model_;
uint32_t model_id_ = 0;
bool is_specific_stream_ = false;
// In the multithread online scenario, the same graph can own different davinci_models for concurrency
std::vector<uint32_t> model_ids_;
bool train_flag_ = false;
std::string model_name_;
};
} // namespace ge
using GeRootModelPtr = std::shared_ptr<ge::GeRootModel>;
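
SetModelId now also caches every id in model_ids_, so code that tears a graph down can walk all concurrently loaded davinci models instead of only the last one. A hedged sketch of that teardown loop; UnloadOneModel is a hypothetical wrapper, not a GE API:

#include <cstdint>
#include "model/ge_root_model.h"  // path relative to ge/, adjust to your include roots

// Hypothetical per-id unload wrapper supplied by the caller.
extern ge::Status UnloadOneModel(uint32_t model_id);

ge::Status UnloadAllInstances(const GeRootModelPtr &root_model) {
  // GetAllModelId() returns every id recorded by SetModelId(), one per loaded instance.
  for (uint32_t model_id : root_model->GetAllModelId()) {
    if (UnloadOneModel(model_id) != ge::SUCCESS) {
      return ge::INTERNAL_ERROR;
    }
  }
  return ge::SUCCESS;
}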


+ 5
- 0
tests/ut/ge/CMakeLists.txt View File

@@ -166,6 +166,7 @@ set(COMMON_SRC_FILES
"${GE_CODE_DIR}/ge/common/dump/dump_properties.cc"
"${GE_CODE_DIR}/ge/common/helper/model_helper.cc"
"${GE_CODE_DIR}/ge/common/dump/dump_manager.cc"
"${GE_CODE_DIR}/ge/common/dump/exception_dumper.cc"
"${GE_CODE_DIR}/ge/common/dump/opdebug_register.cc"
"${GE_CODE_DIR}/ge/common/dump/dump_op.cc"
"${GE_CODE_DIR}/ge/common/helper/om_file_helper.cc"
@@ -593,6 +594,7 @@ set(SINGLE_OP_SRC_FILES
"${GE_CODE_DIR}/ge/hybrid/executor/hybrid_model_executor.cc"
"${GE_CODE_DIR}/ge/hybrid/executor/hybrid_model_async_executor.cc"
"${GE_CODE_DIR}/ge/hybrid/executor/hybrid_execution_context.cc"
"${GE_CODE_DIR}/ge/hybrid/executor/hybrid_model_pipeline_executor.cc"
"${GE_CODE_DIR}/ge/hybrid/executor/subgraph_context.cc"
"${GE_CODE_DIR}/ge/hybrid/executor/subgraph_executor.cc"
"${GE_CODE_DIR}/ge/hybrid/executor/worker/task_compile_engine.cc"
@@ -755,6 +757,7 @@ set(MULTI_PARTS_TEST_FILES
"common/datatype_transfer_unittest.cc"
"common/dump_manager_unittest.cc"
"common/dump_op_unittest.cc"
"common/dump_exception_unittest.cc"
"common/opdebug_register_unittest.cc"
"common/format_transfer_unittest.cc"
"common/format_transfer_transpose_unittest.cc"
@@ -780,10 +783,12 @@ set(MULTI_PARTS_TEST_FILES
"graph/build/mem_assigner_unittest.cc"
"graph/build/task_generator_unittest.cc"
"graph/build/buffer_pool_mem_assigner_unittest.cc"
"graph/execute/graph_execute_unittest.cc"
"graph/preprocess/graph_preprocess_unittest.cc"
"graph/manager/hcom_util_unittest.cc"
"graph/manager/graph_caching_allocator_unittest.cc"
"graph/partition/dynamic_shape_partition_unittest.cc"
"graph/manager/graph_manager_unittest.cc"
"session/omg_omg_unittest.cc"
)



+ 54
- 0
tests/ut/ge/common/dump_exception_unittest.cc View File

@@ -0,0 +1,54 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <gtest/gtest.h>

#define protected public
#define private public
#include "common/dump/exception_dumper.h"
#include "common/debug/log.h"
#include "common/ge_inner_error_codes.h"
#undef private
#undef protected

namespace ge {
class UTEST_dump_exception : public testing::Test {
protected:
void SetUp() {}
void TearDown() {}
};

TEST_F(UTEST_dump_exception, save_dump_op_info_success) {
OpDescPtr op_desc = std::make_shared<OpDesc>("GatherV2", "GatherV2");
uint32_t task_id = 1;
uint32_t stream_id = 233;
vector<void *> input_addr;
vector<void *> output_addr;
ExceptionDumper exception_dumper;
exception_dumper.SaveDumpOpInfo(op_desc, task_id, stream_id, input_addr, output_addr);
}

TEST_F(UTEST_dump_exception, dump_exception_info) {
rtExceptionInfo exception_info = {1, 2, 3, 4, 5};
std::vector<rtExceptionInfo> exception_infos = { exception_info };
OpDescInfo op_desc_info = {"Save", "Save", 1, 2, {FORMAT_NCHW}, {{1}}, {DT_FLOAT}, {}, {2},
{FORMAT_NCHW}, {{1}}, {DT_FLOAT}, {}, {2}};

ExceptionDumper exception_dumper;
exception_dumper.op_desc_info_ = { op_desc_info };
exception_dumper.DumpExceptionInfo(exception_infos);
}
} // namespace ge

+ 41
- 1
tests/ut/ge/graph/build/mem_assigner_unittest.cc View File

@@ -191,6 +191,30 @@ class UtestMemoryAssignerTest : public testing::Test {
return builder.GetGraph();
}

ComputeGraphPtr MakeRefNodeGraph() {
ge::ut::GraphBuilder builder("graph");
auto var_input = builder.AddNode("var", "Variable", 1, 1);
auto const_input = builder.AddNode("const", "Const", 1, 1);
auto assign = builder.AddNode("assgin", "Assign", 2, 1);
// add link
builder.AddDataEdge(var_input, 0, assign, 0);
builder.AddDataEdge(const_input, 0, assign, 1);
// set offset
assign->GetOpDesc()->SetInputOffset({100, 0});
assign->GetOpDesc()->SetOutputOffset({10000});
var_input->GetOpDesc()->SetOutputOffset({10000});
const_input->GetOpDesc()->SetOutputOffset({1000});
// set mem type
ge::AttrUtils::SetListInt(assign->GetOpDesc(), ATTR_NAME_INPUT_MEM_TYPE_LIST, {RT_MEMORY_HBM, RT_MEMORY_L1});
// set ref
auto output_tensordesc = assign->GetOpDesc()->MutableOutputDesc(0);
ge::TensorUtils::SetReuseInput(*output_tensordesc, true);
uint32_t reuse_input_index = 0;
ge::TensorUtils::SetReuseInputIndex(*output_tensordesc, reuse_input_index);

return builder.GetGraph();
}

protected:
void SetUp() {}

@@ -298,4 +322,20 @@ TEST_F(UtestMemoryAssignerTest, graph_memory_assign_ref_var_not_found) {
size_t zero_memory_size = 0;
VarManager::Instance(0)->Init(0, 0, 0, 0);
EXPECT_NE(memory_assigner.AssignMemory(false, mem_offset, zero_memory_size), GRAPH_SUCCESS);
}
}

TEST_F(UtestMemoryAssignerTest, graph_memory_assign_set_input_offset) {
ge::ComputeGraphPtr graph = MakeRefNodeGraph();
auto assign = graph->FindNode("assign");
EXPECT_EQ(assign->GetOpDesc()->GetOutputOffset()[0], 10000);
EXPECT_EQ(assign->GetOpDesc()->GetInputOffset()[0], 100);
EXPECT_EQ(assign->GetOpDesc()->GetInputOffset()[1], 0);
GraphMemoryAssigner memoryAssigner(graph);
MemoryOffset memory_offset(RT_MEMORY_HBM, 0);
memoryAssigner.memory_offset_.emplace(RT_MEMORY_HBM, memory_offset);
EXPECT_EQ(memoryAssigner.SetInputOffset(), GRAPH_SUCCESS);
EXPECT_EQ(assign->GetOpDesc()->GetOutputOffset()[0], 10100);
EXPECT_EQ(assign->GetOpDesc()->GetInputOffset()[0], 10100);
EXPECT_EQ(assign->GetOpDesc()->GetInputOffset()[1], 0);
EXPECT_EQ(memoryAssigner.CheckOffset(), GRAPH_SUCCESS);
}

+ 129
- 0
tests/ut/ge/graph/execute/graph_execute_unittest.cc View File

@@ -0,0 +1,129 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <gtest/gtest.h>
#include <memory>

#define protected public
#define private public
#include "graph/execute/graph_execute.h"
#include "graph/load/model_manager/model_manager.h"
#include "graph/load/model_manager/davinci_model.h"
#include "omm/csa_interact.h"
#undef private
#undef protected


#include <pthread.h>
#include <algorithm>
#include <future>
#include <set>
#include <sstream>
#include <string>
#include <thread>

using namespace std;
using namespace testing;
using namespace ge;
using namespace domi;

namespace ge {
namespace {
const uint32_t kInvalidModelId = UINT32_MAX;
}

class UtestGraphExecuteTest : public testing::Test {
protected:
void SetUp() {}

void TearDown() {}
};

TEST_F(UtestGraphExecuteTest, get_execute_model_id_invalid) {
GraphExecutor executor;
ComputeGraphPtr graph = MakeShared<ComputeGraph>("test");
GeRootModelPtr ge_root_model = MakeShared<GeRootModel>(graph);
auto model_id = executor.GetExecuteModelId(ge_root_model);
EXPECT_EQ(model_id, kInvalidModelId);
}

TEST_F(UtestGraphExecuteTest, get_execute_model_id_1) {
GraphExecutor executor;
ComputeGraphPtr graph = MakeShared<ComputeGraph>("test");
GeRootModelPtr ge_root_model = MakeShared<GeRootModel>(graph);
auto model_manager = ModelManager::GetInstance();
shared_ptr<DavinciModel> davinci_model1 = MakeShared<DavinciModel>(1, nullptr);
davinci_model1->SetId(1);
model_manager->InsertModel(1, davinci_model1);
ge_root_model->SetModelId(1);
auto model_id = executor.GetExecuteModelId(ge_root_model);
EXPECT_EQ(model_id, 1);
}

TEST_F(UtestGraphExecuteTest, get_execute_model_id_2) {
GraphExecutor executor;
ComputeGraphPtr graph = MakeShared<ComputeGraph>("test");
GeRootModelPtr ge_root_model = MakeShared<GeRootModel>(graph);
auto model_manager = ModelManager::GetInstance();
// model 1 with 2 loads
shared_ptr<DavinciModel> davinci_model1 = MakeShared<DavinciModel>(1, nullptr);
davinci_model1->SetId(1);
davinci_model1->data_inputer_ = new DataInputer();
auto data = MakeShared<InputDataWrapper>();
davinci_model1->data_inputer_->Push(data);
davinci_model1->data_inputer_->Push(data);
model_manager->InsertModel(1, davinci_model1);
// model 2 with 3 loads
shared_ptr<DavinciModel> davinci_model2 = MakeShared<DavinciModel>(1, nullptr);
davinci_model2->SetId(2);
davinci_model2->data_inputer_ = new DataInputer();
davinci_model2->data_inputer_->Push(data);
davinci_model2->data_inputer_->Push(data);
davinci_model2->data_inputer_->Push(data);
model_manager->InsertModel(2, davinci_model2);
// model 3 with 1 load
shared_ptr<DavinciModel> davinci_model3 = MakeShared<DavinciModel>(1, nullptr);
davinci_model3->SetId(3);
davinci_model3->data_inputer_ = new DataInputer();
davinci_model3->data_inputer_->Push(data);
model_manager->InsertModel(3, davinci_model3);

ge_root_model->SetModelId(1);
ge_root_model->SetModelId(2);
ge_root_model->SetModelId(3);

auto model_id = executor.GetExecuteModelId(ge_root_model);
// model 3 is picked for having the fewest pending loads
EXPECT_EQ(model_id, 3);
}

TEST_F(UtestGraphExecuteTest, test_set_callback) {
GraphExecutor executor;
ComputeGraphPtr graph = MakeShared<ComputeGraph>("test");
// is_unknown_shape_graph_ = false
GeRootModelPtr ge_root_model = MakeShared<GeRootModel>(graph);
RunAsyncCallback callback = [](Status, std::vector<ge::OutputTensorInfo> &) {};

auto model_manager = ModelManager::GetInstance();
auto listener = MakeShared<RunAsyncListener>();
shared_ptr<DavinciModel> davinci_model1 = MakeShared<DavinciModel>(1, listener);
davinci_model1->SetId(1);
model_manager->InsertModel(1, davinci_model1);
auto status = executor.SetCallback(1, ge_root_model, callback);
EXPECT_EQ(status, SUCCESS);
}
} // namespace ge
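
The get_execute_model_id_2 test above expects GetExecuteModelId to pick the model whose DataInputer has the fewest queued inputs. A self-contained sketch of that selection rule, with names assumed for illustration (it is not the GraphExecutor implementation):

#include <cstdint>
#include <functional>
#include <limits>
#include <vector>

// Picks the id with the smallest pending-input count; returns UINT32_MAX
// (the test's kInvalidModelId) when model_ids is empty.
uint32_t PickLeastLoadedModel(const std::vector<uint32_t> &model_ids,
                              const std::function<size_t(uint32_t)> &pending_count) {
  uint32_t picked = std::numeric_limits<uint32_t>::max();
  size_t least_pending = std::numeric_limits<size_t>::max();
  for (uint32_t id : model_ids) {
    const size_t pending = pending_count(id);  // e.g. the model's DataInputer queue size
    if (pending < least_pending) {
      least_pending = pending;
      picked = id;
    }
  }
  return picked;
}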

+ 12
- 0
tests/ut/ge/graph/load/davinci_model_unittest.cc View File

@@ -1034,4 +1034,16 @@ TEST_F(UtestDavinciModel, NnExecute) {
model.task_list_.resize(1);
EXPECT_EQ(model.NnExecute(stream, false, input_data, output_data), SUCCESS);
}
TEST_F(UtestDavinciModel, update_io_addr_success) {
DavinciModel model(0, nullptr);
uint32_t task_id = 1;
uint32_t stream_id = 2;
model.fixed_mem_base_ = 0x22;
model.mem_base_ = reinterpret_cast<uint8_t *>(&task_id);
OpDescInfo op_desc_info = {"Save", "Save", 1, 2, {FORMAT_NCHW}, {{1}}, {DT_FLOAT}, {nullptr}, {2},
{FORMAT_NCHW}, {{1}}, {DT_FLOAT}, {nullptr}, {2}};
model.exception_dumper_.op_desc_info_ = { op_desc_info };
vector<void *> io_addr = {nullptr, nullptr};
model.UpdateOpIOAddrs(task_id, stream_id, io_addr);
}
} // namespace ge

+ 375
- 0
tests/ut/ge/graph/manager/graph_manager_unittest.cc View File

@@ -0,0 +1,375 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <gtest/gtest.h>
#include <memory>
#define protected public
#define private public
#include "graph/manager/graph_manager.h"
#include "graph/load/model_manager/model_manager.h"
#include "graph/load/model_manager/davinci_model.h"
#define const
#include "common/helper/model_cache_helper.h"
#undef const
#include "init/gelib.h"
#undef private
#undef protected

#include <pthread.h>
#include <algorithm>
#include <future>
#include <set>
#include <sstream>
#include <string>
#include <thread>

#include "common/math/math_util.h"
#include "common/thread_pool.h"
#include "common/dump/dump_manager.h"
#include "analyzer/analyzer.h"
#include "graph/common/ge_call_wrapper.h"
#include "graph/common/local_context.h"
#include "graph/common/transop_util.h"
#include "graph/ge_context.h"
#include "graph/ge_global_options.h"
#include "graph/manager/util/rt_context_util.h"
#include "graph/partition/dynamic_shape_partition.h"
#include "graph/passes/enter_pass.h"
#include "graph/partition/stage_partition.h"
#include "graph/passes/addn_pass.h"
#include "graph/passes/bitcast_pass.h"
#include "graph/passes/assign_remove_pass.h"
#include "graph/passes/inplace_support_check_pass.h"
#include "graph/passes/atomic_addr_clean_pass.h"
#include "graph/passes/attach_stream_label_pass.h"
#include "graph/passes/cast_remove_pass.h"
#include "graph/passes/common_subexpression_elimination_pass.h"
#include "graph/passes/compile_nodes_pass.h"
#include "graph/passes/cond_remove_pass.h"
#include "graph/passes/constant_folding_pass.h"
#include "graph/passes/constant_fuse_same_pass.h"
#include "graph/passes/control_trigger_pass.h"
#include "graph/passes/ctrl_edge_transfer_pass.h"
#include "graph/passes/dimension_adjust_pass.h"
#include "graph/passes/dimension_compute_pass.h"
#include "graph/passes/flow_ctrl_pass.h"
#include "graph/passes/fuse_data_nodes_with_common_input_pass.h"
#include "graph/passes/identity_pass.h"
#include "graph/passes/input_output_connection_identify_pass.h"
#include "graph/passes/iterator_op_pass.h"
#include "graph/passes/link_gen_mask_nodes_pass.h"
#include "graph/passes/mark_graph_unknown_status_pass.h"
#include "graph/passes/merge_pass.h"
#include "graph/passes/merge_input_memcpy_pass.h"
#include "graph/passes/merge_to_stream_merge_pass.h"
#include "graph/passes/multi_batch_pass.h"
#include "graph/passes/next_iteration_pass.h"
#include "graph/passes/permute_pass.h"
#include "graph/passes/prune_pass.h"
#include "graph/passes/ref_identity_delete_op_pass.h"
#include "graph/passes/remove_same_const_pass.h"
#include "graph/passes/reshape_recovery_pass.h"
#include "graph/passes/reshape_remove_pass.h"
#include "graph/passes/same_transdata_breadth_fusion_pass.h"
#include "graph/passes/subgraph_pass.h"
#include "graph/passes/switch_data_edges_bypass.h"
#include "graph/passes/switch_dead_branch_elimination.h"
#include "graph/passes/switch_logic_remove_pass.h"
#include "graph/passes/switch_to_stream_switch_pass.h"
#include "graph/passes/transop_breadth_fusion_pass.h"
#include "graph/passes/transop_nearby_allreduce_fusion_pass.h"
#include "graph/passes/transop_symmetry_elimination_pass.h"
#include "graph/passes/transop_without_reshape_fusion_pass.h"
#include "graph/passes/transpose_transdata_pass.h"
#include "graph/passes/useless_control_out_remove_pass.h"
#include "graph/passes/variable_op_pass.h"
#include "graph/passes/variable_ref_delete_op_pass.h"
#include "graph/passes/variable_ref_useless_control_out_delete_pass.h"
#include "graph/passes/end_of_sequence_add_control_pass.h"
#include "graph/passes/subexpression_migration_pass.h"
#include "graph/passes/subgraph_const_migration_pass.h"
#include "graph/passes/unused_args_clean_pass.h"
#include "graph/passes/global_step_insert_pass.h"
#include "graph/passes/memcpy_addr_async_pass.h"
#include "graph/passes/hccl_continuous_memcpy_pass.h"
#include "graph/build/label_allocator.h"
#include "graph/utils/tensor_adapter.h"
#include "inc/pass_manager.h"
#include "ir_build/atc_ir_common.h"
#include "graph/common/local_context.h"
#include "graph/common/omg_util.h"
#include "common/formats/utils/formats_trans_utils.h"
#include "register/custom_pass_helper.h"
#include "graph/ops_stub.h"

using namespace std;
using namespace testing;
using namespace ge;
using namespace domi;

namespace {
const uint32_t kNotAdded = 0;
const uint32_t kStartAdd = 1;
const uint32_t kDoneAdded = 2;
}
class UtestGraphManagerTest : public testing::Test {
protected:
void SetUp() {}

void TearDown() {}
};

void CreateGraph(Graph &graph) {
TensorDesc desc(ge::Shape({1, 3, 224, 224}));
uint32_t size = desc.GetShape().GetShapeSize();
desc.SetSize(size);
auto data = op::Data("Data").set_attr_index(0);
data.update_input_desc_data(desc);
data.update_output_desc_out(desc);

auto flatten = op::Flatten("Flatten").set_input_x(data, data.name_out_out());

std::vector<Operator> inputs{data};
std::vector<Operator> outputs{flatten};
std::vector<Operator> targets{flatten};
// Graph graph("test_graph");
graph.SetInputs(inputs).SetOutputs(outputs).SetTargets(targets);
}

TEST_F(UtestGraphManagerTest, set_and_get_add_graph_flag) {
GraphId graph_id = 1;
GraphManager graph_manager;
graph_manager.SetAddGraphCondition(graph_id, 1);
uint32_t res = graph_manager.GetAddGraphCondition(graph_id);
EXPECT_EQ(res, 1);
}

TEST_F(UtestGraphManagerTest, test_add_graph_1) {
GraphId graph_id = 1;
GraphManager graph_manager;
// create graph
Graph graph("test_graph");
CreateGraph(graph);

std::map<std::string, std::string> options;
OmgContext context;
Status status = graph_manager.AddGraph(graph_id, graph, options, context);
EXPECT_EQ(status, ge::SUCCESS);
}

TEST_F(UtestGraphManagerTest, test_add_graph_2) {
GraphId graph_id = 1;
GraphManager graph_manager;
GraphNodePtr graph_node = MakeShared<ge::GraphNode>(graph_id);
graph_manager.AddGraphNode(graph_id, graph_node);
graph_manager.SetAddGraphCondition(graph_id, kDoneAdded);
Graph graph("test_graph");
CreateGraph(graph);
std::map<std::string, std::string> options;
OmgContext context;
Status status = graph_manager.AddGraph(graph_id, graph, options, context);
EXPECT_EQ(status, ge::SUCCESS);
}

TEST_F(UtestGraphManagerTest, test_add_graph_3) {
GraphId graph_id = 1;
GraphManager graph_manager;
Graph graph("test_graph");
CreateGraph(graph);

std::map<std::string, std::string> options;
OmgContext context;

std::future<Status> fut1 = std::async(std::launch::async,
&GraphManager::AddGraph, &graph_manager, graph_id, graph, options, context);
std::future<Status> fut2 = std::async(std::launch::async,
&GraphManager::AddGraph, &graph_manager, graph_id, graph, options, context);
fut1.wait();
fut2.wait();
Status status1 = fut1.get();
Status status2 = fut2.get();
EXPECT_EQ(status1, ge::SUCCESS);
EXPECT_EQ(status2, ge::SUCCESS);
}

TEST_F(UtestGraphManagerTest, test_remove_graph_1) {
GraphId graph_id = 1;
GraphManager graph_manager;
GraphNodePtr graph_node = MakeShared<ge::GraphNode>(graph_id);
Status status = graph_manager.RemoveGraph(graph_id);
EXPECT_EQ(status, ge::GE_GRAPH_GRAPH_NOT_EXIST);
graph_manager.AddGraphNode(graph_id, graph_node);
graph_node->SetRunFlag(true);
status = graph_manager.RemoveGraph(graph_id);
EXPECT_EQ(status, ge::SUCCESS);
}

TEST_F(UtestGraphManagerTest, test_remove_graph_2) {
GraphId graph_id = 1;
GraphManager graph_manager;
GraphNodePtr graph_node = MakeShared<ge::GraphNode>(graph_id);
Graph graph("test_graph");
CreateGraph(graph);
auto compute_graph = GraphUtils::GetComputeGraph(graph);
GeRootModelPtr ge_root_model = MakeShared<GeRootModel>(compute_graph);
auto model_manager = ModelManager::GetInstance();
auto listener = MakeShared<RunAsyncListener>();
shared_ptr<DavinciModel> davinci_model1 = MakeShared<DavinciModel>(1, listener);
davinci_model1->SetId(1);
shared_ptr<DavinciModel> davinci_model2 = MakeShared<DavinciModel>(2, listener);
davinci_model2->SetId(2);
model_manager->InsertModel(1, davinci_model1);
model_manager->InsertModel(2, davinci_model2);
ge_root_model->SetModelId(1);
ge_root_model->SetModelId(2);
graph_node->SetGeRootModel(ge_root_model);
graph_node->SetLoadFlag(true);
graph_manager.AddGraphNode(graph_id, graph_node);
Status status = graph_manager.RemoveGraph(graph_id);
EXPECT_EQ(status, ge::SUCCESS);
}

TEST_F(UtestGraphManagerTest, test_pre_run_thread) {
GraphManager graph_manager;
graph_manager.thread_run_flag_ = true;

GraphId graph_id = 1;
std::vector<ge::InputTensorInfo> input_tensor;
uint64_t session_id = 0;
ErrorMessage::Context error_context;
GEThreadLocalContext context;
RunAsyncCallback callback;
// PreRunArgs args{graph_id, input_tensor, session_id, error_context, context, callback};
bool ret = graph_manager.prerun_args_q_.Push({graph_id, input_tensor, session_id, error_context, context, callback});
EXPECT_EQ(ret, true);

GraphNodePtr graph_node = MakeShared<ge::GraphNode>(graph_id);
graph_manager.AddGraphNode(graph_id, graph_node);
graph_manager.PreRunThread(&graph_manager);
// ends with failure
}

TEST_F(UtestGraphManagerTest, test_pre_run_thread_2) {
GraphManager graph_manager;
graph_manager.thread_run_flag_ = true;

GraphId graph_id = 1;
GraphNodePtr graph_node_1 = MakeShared<ge::GraphNode>(graph_id);
graph_manager.AddGraphNode(graph_id, graph_node_1);
graph_manager.IncreaseGraphCount(graph_id);
graph_manager.IncreaseGraphCount(graph_id);
graph_node_1->SetBuildFlag(true);
std::vector<ge::InputTensorInfo> input_tensor;
uint64_t session_id = 0;
ErrorMessage::Context error_context;
GEThreadLocalContext context;
RunAsyncCallback callback;
// PreRunArgs args{graph_id, input_tensor, session_id, error_context, context, callback};
bool ret = graph_manager.prerun_args_q_.Push({graph_id, input_tensor, session_id, error_context, context, callback});
EXPECT_EQ(ret, true);
graph_id = 2;
GraphNodePtr graph_node_2 = MakeShared<ge::GraphNode>(graph_id);
graph_manager.AddGraphNode(graph_id, graph_node_2);
ret = graph_manager.prerun_args_q_.Push({graph_id, input_tensor, session_id, error_context, context, callback});
EXPECT_EQ(ret, true);
graph_manager.PreRunThread(&graph_manager);
// ends with failure
}

TEST_F(UtestGraphManagerTest, test_check_and_release_memory) {
GraphManager graph_manager;
GeModelPtr ge_model = make_shared<GeModel>();
int64_t memory_size = 25 * 1024UL * 1024UL * 1024UL;
int64_t weight_size = 25 * 1024UL * 1024UL * 1024UL;
uint64_t session_id = 0;
ge::AttrUtils::SetInt(ge_model, ATTR_MODEL_MEMORY_SIZE, memory_size);
ge::AttrUtils::SetInt(ge_model, ATTR_MODEL_WEIGHT_SIZE, weight_size);
ge::AttrUtils::SetInt(ge_model, MODEL_ATTR_SESSION_ID, session_id);

GraphId graph_id = 1;
GraphNodePtr graph_node = MakeShared<ge::GraphNode>(graph_id);
graph_manager.AddGraphNode(graph_id, graph_node);
graph_manager.IncreaseGraphCount(graph_id);
graph_manager.IncreaseGraphCount(graph_id);

auto model_manager = ModelManager::GetInstance();
auto listener = MakeShared<RunAsyncListener>();
shared_ptr<DavinciModel> davinci_model1 = MakeShared<DavinciModel>(1, listener);
davinci_model1->SetId(1);
shared_ptr<DavinciModel> davinci_model2 = MakeShared<DavinciModel>(2, listener);
davinci_model2->SetId(2);
model_manager->InsertModel(1, davinci_model1);
model_manager->InsertModel(2, davinci_model2);
ComputeGraphPtr compute_graph = MakeShared<ComputeGraph>("test_graph");
bool is_dynamic_shape = false;
(void)AttrUtils::GetBool(compute_graph, ATTR_NAME_DYNAMIC_SHAPE_PARTITIONED, is_dynamic_shape);
GeRootModelPtr ge_root_model = MakeShared<GeRootModel>(compute_graph);
ge_root_model->SetModelId(1);
ge_root_model->SetModelId(2);
graph_node->SetGeRootModel(ge_root_model);
graph_node->SetLoadFlag(true);
Status status = graph_manager.CheckAndReleaseMemory(ge_model, graph_node);
EXPECT_EQ(status, ge::SUCCESS);
}

TEST_F(UtestGraphManagerTest, test_check_incre_build_and_pre_run_1) {
// no need to build
GraphId graph_id = 1;
GraphManager graph_manager;
ComputeGraphPtr compute_graph = MakeShared<ComputeGraph>("test_graph");
GeRootModelPtr ge_root_model = MakeShared<GeRootModel>(compute_graph);
GraphManager::PreRunArgs arg;
GraphNodePtr graph_node = MakeShared<ge::GraphNode>(graph_id);
graph_node->SetBuildFlag(true);
Status status = graph_manager.CheckIncreBuildAndPreRun(&graph_manager, arg, graph_node, ge_root_model);
EXPECT_EQ(status, ge::SUCCESS);
}

TEST_F(UtestGraphManagerTest, test_check_incre_build_and_pre_run_2) {
// need build while buildflag is true, var format changed
GraphId graph_id = 1;
GraphManager graph_manager;
ComputeGraphPtr compute_graph = MakeShared<ComputeGraph>("test_graph");
GeRootModelPtr ge_root_model = MakeShared<GeRootModel>(compute_graph);
GraphManager::PreRunArgs arg;
arg.callback = [](Status, std::vector<ge::OutputTensorInfo> &) {};
GraphNodePtr graph_node = MakeShared<ge::GraphNode>(graph_id);
graph_node->SetBuildFlag(true);
graph_node->Lock();
graph_manager.var_acc_ctrl_.graph_ids_need_rebuild_.insert(graph_id);
Status status = graph_manager.CheckIncreBuildAndPreRun(&graph_manager, arg, graph_node, ge_root_model);
EXPECT_EQ(status, ge::PARAM_INVALID);
}

TEST_F(UtestGraphManagerTest, test_check_incre_build_and_pre_run_3) {
// need build while buildflag is false, var format unchanged
GraphId graph_id = 1;
GraphManager graph_manager;
ComputeGraphPtr compute_graph = MakeShared<ComputeGraph>("test_graph");
GeRootModelPtr ge_root_model = MakeShared<GeRootModel>(compute_graph);
GraphManager::PreRunArgs arg;
arg.callback = [](Status, std::vector<ge::OutputTensorInfo> &) {};
GraphNodePtr graph_node = MakeShared<ge::GraphNode>(graph_id);
graph_node->SetBuildFlag(false);
graph_node->Lock();
Status status = graph_manager.CheckIncreBuildAndPreRun(&graph_manager, arg, graph_node, ge_root_model);
EXPECT_NE(status, ge::SUCCESS);
}
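
Tests such as set_and_get_add_graph_flag and test_add_graph_3 above exercise the kNotAdded/kStartAdd/kDoneAdded condition that serializes concurrent AddGraph calls on the same graph id: the first caller performs the add, later callers wait until it is done and also return SUCCESS. The sketch below only illustrates that guard pattern under those assumptions; it is not the GraphManager implementation:

#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <unordered_map>

class AddGraphGuard {
 public:
  // Returns true if the caller should perform the actual add; otherwise blocks
  // until another thread finishes adding the same graph id.
  bool TryStartAdd(uint32_t graph_id) {
    std::unique_lock<std::mutex> lock(mu_);
    uint32_t &state = states_[graph_id];
    if (state == 0U) {  // kNotAdded
      state = 1U;       // kStartAdd
      return true;
    }
    cv_.wait(lock, [this, graph_id] { return states_[graph_id] == 2U; });  // kDoneAdded
    return false;
  }

  void FinishAdd(uint32_t graph_id) {
    {
      std::lock_guard<std::mutex> lock(mu_);
      states_[graph_id] = 2U;  // kDoneAdded
    }
    cv_.notify_all();
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  std::unordered_map<uint32_t, uint32_t> states_;
};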

+ 21
- 1
tests/ut/ge/graph/passes/dimension_adjust_pass_unittest.cc View File

@@ -28,6 +28,7 @@
#include "graph/types.h"
#include "graph/utils/graph_utils.h"
#include "graph/utils/op_desc_utils.h"
#include "inc/kernel.h"
#include "inc/kernel_factory.h"
#undef protected
#undef private
@@ -37,11 +38,27 @@ using namespace testing;

namespace ge {

class TestExpandDimKernel : public Kernel {
public:
Status Compute(const NodePtr &node_ptr) override {
return SUCCESS;
}
};
REGISTER_KERNEL(EXPANDDIMS, TestExpandDimKernel);
class TestExpandDimKernelNotChange : public Kernel {
public:
Status Compute(const NodePtr &node_ptr) override {
return NOT_CHANGED;
}
};

class UtestGraphPassesDimensionAdjustPass : public testing::Test {
protected:
void SetUp() {}

void TearDown() {}
void TearDown() {
KernelFactory::Instance().creator_map_.clear();
}
};

TEST_F(UtestGraphPassesDimensionAdjustPass, succ) {
@@ -96,8 +113,11 @@ TEST_F(UtestGraphPassesDimensionAdjustPass, succ) {
GraphUtils::AddEdge(op_node->GetOutDataAnchor(0), netoutput_node->GetInDataAnchor(0));

std::shared_ptr<DimensionAdjustPass> pass = make_shared<DimensionAdjustPass>();
NamesToPass names_to_passes;
EXPECT_EQ(4, graph->GetDirectNodesSize());
ge::Status ret = pass->Run(op_node);
EXPECT_EQ(SUCCESS, ret);
EXPECT_EQ(2, op_node->GetOwnerComputeGraph()->GetDirectNodesSize());
}

TEST_F(UtestGraphPassesDimensionAdjustPass, input_node_is_nullptr) {


+ 17
- 0
tests/ut/ge/graph/passes/net_output_pass_unittest.cc View File

@@ -631,6 +631,23 @@ TEST_F(UtestGraphPassesNetOutputPass, no_output_no_target_no_retval_success) {
EXPECT_EQ(status, ge::SUCCESS);
}

TEST_F(UtestGraphPassesNetOutputPass, no_output_no_target_no_retval_no_outnodes_success) {
ge::ComputeGraphPtr compute_graph = build_graph();

ge::PassManager pass_managers;
pass_managers.AddPass("", new (std::nothrow) NetOutputPass);
Status status = pass_managers.Run(compute_graph);
EXPECT_EQ(status, ge::SUCCESS);

NodePtr net_out_node = compute_graph->FindNode(NODE_NAME_NET_OUTPUT);
EXPECT_NE(net_out_node, nullptr);
EXPECT_EQ(net_out_node->GetInControlNodes().size(), 2);

int stream_label = -1;
EXPECT_TRUE(ge::AttrUtils::GetInt(net_out_node->GetOpDesc(), ATTR_NAME_TRUE_BRANCH_STREAM, stream_label));
EXPECT_EQ(stream_label, 0);
}

TEST_F(UtestGraphPassesNetOutputPass, user_out_node_success) {
ge::ComputeGraphPtr compute_graph = build_graph();



+ 34
- 2
tests/ut/ge/hybrid/ge_hybrid_unittest.cc View File

@@ -39,7 +39,7 @@
#include "hybrid/common/npu_memory_allocator.h"
#include "graph/types.h"
#include "graph/utils/tensor_utils.h"
#include "graph/testcase/ge_graph/graph_builder_utils.h"
#undef private
#undef protected

@@ -154,9 +154,11 @@ TEST_F(UtestGeHybrid, index_taskdefs_failed) {

ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
ge_root_model->SetModelName("test_name");
HybridModel hybrid_model(ge_root_model);
HybridModelBuilder hybrid_model_builder(hybrid_model);

ASSERT_EQ(hybrid_model_builder.Build(), INTERNAL_ERROR);
ASSERT_EQ(hybrid_model_builder.IndexTaskDefs(graph, ge_model), INTERNAL_ERROR);
}

@@ -173,6 +175,36 @@ TEST_F(UtestGeHybrid, parse_force_infershape_nodes) {
HybridModelBuilder hybrid_model_builder(hybrid_model);
ASSERT_EQ(hybrid_model_builder.ParseForceInfershapeNodes(node, *new_node), SUCCESS);
}
static ComputeGraphPtr BuildDataDirectConnectGraph() {
const char *kRefIndex = "_parent_node_index";
ge::ut::GraphBuilder builder("subgraph");
auto data = builder.AddNode("Data", "Data", 1, 1);
auto netoutput = builder.AddNode("NetOutput", "NetOutput", 1, 1);
(void)AttrUtils::SetInt(netoutput->GetOpDesc()->MutableInputDesc(0), kRefIndex, 0);

builder.AddDataEdge(data, 0, netoutput, 0);
return builder.GetGraph();
}
TEST_F(UtestGeHybrid, data_direct_connect) {
std::unique_ptr<NodeItem> node_item;
auto root_graph = make_shared<ComputeGraph>("root_graph");
OpDescPtr op_desc = CreateOpDesc("PartitionedCall", "PartitionedCall");
auto node = root_graph->AddNode(op_desc);
node->SetOwnerComputeGraph(root_graph);
auto sub_graph = BuildDataDirectConnectGraph();
sub_graph->SetParentGraph(root_graph);
sub_graph->SetParentNode(node);
node->GetOpDesc()->AddSubgraphName("subgraph");
node->GetOpDesc()->SetSubgraphInstanceName(0, "subgraph");
root_graph->AddSubgraph("subgraph", sub_graph);
std::unique_ptr<NodeItem> new_node;
NodeItem::Create(node, new_node);
GeRootModelPtr ge_root_model = make_shared<GeRootModel>(root_graph);
HybridModel hybrid_model(ge_root_model);
HybridModelBuilder hybrid_model_builder(hybrid_model);
auto ret = hybrid_model_builder.IdentifyVariableOutputs(*new_node.get());
ASSERT_EQ(ret, SUCCESS);
}

TEST_F(UtestGeHybrid, index_taskdefs_success) {
// build aicore task
@@ -625,4 +657,4 @@ TEST_F(UtestGeHybrid, TestParseDependentInputNodesForHccl) {
ASSERT_EQ(builder.ParseDependentInputNodes(*node_item_1, deps), SUCCESS);
ASSERT_TRUE(model.GetNodeItem(node)->has_observer);
ASSERT_EQ(node_item_1->dependents_for_execution.size(), 1);
}
}
