Browse Source

Fix GELOG format

pull/1002/head
zhangxiaokun 5 years ago
parent
commit
24cc769305
5 changed files with 16 additions and 16 deletions
  1. +2
    -2
      ge/common/profiling/ge_profiling.cc
  2. +4
    -4
      ge/common/profiling/profiling_manager.cc
  3. +3
    -3
      ge/hybrid/executor/hybrid_model_async_executor.cc
  4. +1
    -1
      ge/hybrid/model/hybrid_model_builder.cc
  5. +6
    -6
      ge/hybrid/node_executor/node_executor.cc

+ 2
- 2
ge/common/profiling/ge_profiling.cc View File

@@ -181,7 +181,7 @@ ge::Status ProfCommandHandle(ProfCommandHandleType type, void *data, uint32_t le
if (type != kProfCommandhandleFinalize) {
command.module_index = prof_config_param->profSwitch;
}
-GELOGI("GE commandhandle execute, Command Type: %s, data type config: 0x%llx", iter->second.c_str(),
+GELOGI("GE commandhandle execute, Command Type: %s, data type config: 0x%lx", iter->second.c_str(),
command.module_index);
if (type == kProfCommandhandleStart || type == kProfCommandhandleStop) {
GELOGI("Profiling device nums:%s , deviceID:[%s]", prof_params[0].c_str(), prof_params[kDeviceListIndex].c_str());
@@ -192,7 +192,7 @@ ge::Status ProfCommandHandle(ProfCommandHandleType type, void *data, uint32_t le
return ge::FAILED;
}

-GELOGI("Successfully execute profiling command type: %d, command 0x%llx.", type, command.module_index);
+GELOGI("Successfully execute profiling command type: %d, command 0x%lx.", type, command.module_index);
return ge::SUCCESS;
}


+ 4
- 4
ge/common/profiling/profiling_manager.cc View File

@@ -540,7 +540,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfFi
for (auto device_id_module : device_id_module_map_) {
if (device_id_module.second != 0) {
uint32_t device_id = static_cast<uint32_t>(device_id_module.first);
-GELOGI("Prof finalize: device_id: %u, module: 0x%llx.", device_id, device_id_module.second);
+GELOGI("Prof finalize: device_id: %u, module: 0x%lx.", device_id, device_id_module.second);
rt_ret = rtProfilerStop(device_id_module.second, 1, &device_id);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(FAILED, "Runtime profiler stop failed.");
@@ -629,7 +629,7 @@ Status ProfilingManager::ProfParseParam(const std::map<std::string, std::string>
}

if (device_num == 0 || device_num > kMaxDeviceNum || device_num != static_cast<int32_t>(device_list.size())) {
-GELOGE(FAILED, "Config para device num: %d not equal to device list size: %d.", device_num, device_list.size());
+GELOGE(FAILED, "Config para device num: %d not equal to device list size: %zu.", device_num, device_list.size());
return FAILED;
}
#endif
@@ -659,7 +659,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfSt
for (int32_t i = 0; i < device_num; i++) {
device_id_ptr[i] = static_cast<uint32_t>(device_list[i]);
}
-GELOGI("Runtime config param: 0x%llx, device num: %d.", module, device_num);
+GELOGI("Runtime config param: 0x%lx, device num: %d.", module, device_num);

rtError_t rt_ret = rtProfilerStart(module, device_num, device_id_ptr.get());
if (rt_ret != RT_ERROR_NONE) {
@@ -701,7 +701,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfSt
for (int32_t i = 0; i < device_num; i++) {
device_id_ptr[i] = static_cast<uint32_t>(device_list[i]);
}
-GELOGI("Prof stop: runtime config param: 0x%llx, device num: %d", module, device_num);
+GELOGI("Prof stop: runtime config param: 0x%lx, device num: %d", module, device_num);
rtError_t rt_ret = rtProfilerStop(module, device_num, device_id_ptr.get());
if (rt_ret != RT_ERROR_NONE) {
GELOGE(FAILED, "Prof stop: runtime profiler config proc failed.");


+ 3
- 3
ge/hybrid/executor/hybrid_model_async_executor.cc View File

@@ -221,7 +221,7 @@ Status HybridModelAsyncExecutor::PrepareInputs(const InputData &current_data, Hy
auto &tensor_desc = input_tensor_desc_[input_index];
tensor_desc->SetShape(GeShape(current_data.shapes[input_index]));
args.input_desc[input_index] = tensor_desc;
-GELOGD("Update shape of input[%u] to [%s]", input_index, tensor_desc->MutableShape().ToString().c_str());
+GELOGD("Update shape of input[%zu] to [%s]", input_index, tensor_desc->MutableShape().ToString().c_str());
GE_CHK_GRAPH_STATUS_RET(TensorUtils::GetTensorMemorySizeInBytes(*tensor_desc, tensor_size),
"Failed to calc tensor size, index = %zu, shape = [%s]",
input_index,
@@ -238,7 +238,7 @@ Status HybridModelAsyncExecutor::PrepareInputs(const InputData &current_data, Hy
GE_CHECK_NOTNULL(tensor_buffer);
args.inputs.emplace_back(std::shared_ptr<TensorBuffer>(tensor_buffer.release()));

-GELOGD("To copy input data for input[%u]", input_index);
+GELOGD("To copy input data for input[%zu]", input_index);
const DataBuffer &data_buf = blobs[input_index];
auto mem_size = static_cast<uint64_t>(tensor_size);
GE_CHK_BOOL_RET_STATUS(mem_size >= data_buf.length,
@@ -247,7 +247,7 @@ Status HybridModelAsyncExecutor::PrepareInputs(const InputData &current_data, Hy
data_buf.length,
mem_size);

-GELOGI("[IMAS]CopyPlainData memcpy graph_%u type[F] output[%u] memaddr[%p] mem_size[%u] datasize[%lu]",
+GELOGI("[IMAS]CopyPlainData memcpy graph_%u type[F] output[%zu] memaddr[%p] mem_size[%zu] datasize[%lu]",
model_->root_runtime_param_.graph_id,
input_index,
args.inputs[input_index].GetData(),


+ 1
- 1
ge/hybrid/model/hybrid_model_builder.cc View File

@@ -939,7 +939,7 @@ Status HybridModelBuilder::InitVariableTensors() {
GELOGE(MEMALLOC_FAILED, "Malloc host memory for an existed GeTensor failed.");
return MEMALLOC_FAILED;
}
-GELOGD("Host variable [%s] malloc success, size=%lld.", it.first.c_str(), tensor_size);
+GELOGD("Host variable [%s] malloc success, size=%ld.", it.first.c_str(), tensor_size);

std::unique_ptr<TensorValue> tensor(new (std::nothrow) TensorValue(mem_info.host_aligned_ptr->MutableGet(),
tensor_size));


+ 6
- 6
ge/hybrid/node_executor/node_executor.cc View File

@@ -117,11 +117,11 @@ Status NodeExecutorManager::GetExecutor(Node &node, const NodeExecutor **executo
auto executor_type = ResolveExecutorType(node);
const auto it = executors_.find(executor_type);
if (it == executors_.end()) {
-GELOGE(INTERNAL_ERROR, "Failed to get executor by type: %d.", executor_type);
+GELOGE(INTERNAL_ERROR, "Failed to get executor by type: %d.", static_cast<int>(executor_type));
return INTERNAL_ERROR;
}

-GELOGD("[%s] Set node executor by type: %d.", node.GetName().c_str(), executor_type);
+GELOGD("[%s] Set node executor by type: %d.", node.GetName().c_str(), static_cast<int>(executor_type));
*executor = it->second.get();
return SUCCESS;
}
@@ -165,7 +165,7 @@ Status NodeExecutorManager::CalcOpRunningParam(Node &node) const {
TensorUtils::SetSize(output_tensor, output_mem_size);
GE_CHK_STATUS_RET(op_desc->UpdateOutputDesc(static_cast<uint32_t>(i), output_tensor),
"hccl update output size failed.");
-GELOGD("%s output desc[%u], dim_size: %zu, mem_size: %ld.", node.GetName().c_str(), i,
+GELOGD("%s output desc[%zu], dim_size: %zu, mem_size: %ld.", node.GetName().c_str(), i,
output_tensor.GetShape().GetDimNum(), output_mem_size);
}
return SUCCESS;
@@ -189,14 +189,14 @@ Status NodeExecutorManager::InitializeExecutors() {
GE_CHECK_NOTNULL(build_fn);
auto executor = std::unique_ptr<NodeExecutor>(build_fn());
if (executor == nullptr) {
-GELOGE(INTERNAL_ERROR, "Failed to create executor for engine type = %d", engine_type);
+GELOGE(INTERNAL_ERROR, "Failed to create executor for engine type = %d", static_cast<int>(engine_type));
return INTERNAL_ERROR;
}

-GELOGD("Executor of engine type = %d was created successfully", engine_type);
+GELOGD("Executor of engine type = %d was created successfully", static_cast<int>(engine_type));
auto ret = executor->Initialize();
if (ret != SUCCESS) {
-GELOGE(ret, "Failed to initialize NodeExecutor of type = %d, clear executors", engine_type);
+GELOGE(ret, "Failed to initialize NodeExecutor of type = %d, clear executors", static_cast<int>(engine_type));
for (auto &executor_it : executors_) {
executor_it.second->Finalize();
}


Loading…
Cancel
Save