Browse Source

Modify log level for PyNative mode: downgrade verbose per-op INFO logs to DEBUG and remove redundant start/finish INFO messages

tags/v1.3.0
chujinjin 4 years ago
parent
commit
1dd950218b
4 changed files with 19 additions and 22 deletions
  1. +7
    -10
      mindspore/ccsrc/backend/session/session_basic.cc
  2. +9
    -9
      mindspore/ccsrc/pipeline/pynative/pynative_execute.cc
  3. +2
    -2
      mindspore/ccsrc/runtime/device/kernel_runtime.cc
  4. +1
    -1
      mindspore/core/ir/primitive.h

+ 7
- 10
mindspore/ccsrc/backend/session/session_basic.cc View File

@@ -244,10 +244,10 @@ BaseRef CreateNodeOutputTensors(const AnfNodePtr &anf, const KernelGraphPtr &gra
MS_EXCEPTION_IF_NULL(anf);
MS_EXCEPTION_IF_NULL(tensor_to_node);
MS_EXCEPTION_IF_NULL(node_to_tensor);
MS_LOG(INFO) << "Create tensor for output[" << anf->DebugString() << "]";
MS_LOG(DEBUG) << "Create tensor for output[" << anf->DebugString() << "]";
auto item_with_index = AnfAlgo::VisitKernelWithReturnType(anf, 0);
MS_EXCEPTION_IF_NULL(item_with_index.first);
MS_LOG(INFO) << "Create tensor for output after visit:" << item_with_index.first->DebugString();
MS_LOG(DEBUG) << "Create tensor for output after visit:" << item_with_index.first->DebugString();
// special handle for maketuple
if (AnfAlgo::CheckPrimitiveType(item_with_index.first, prim::kPrimMakeTuple)) {
auto cnode = item_with_index.first->cast<CNodePtr>();
@@ -370,8 +370,8 @@ BaseRef CreateNodeOutputPlaceholder(const session::KernelWithIndex &node_output_
MS_EXCEPTION_IF_NULL(node);
MS_EXCEPTION_IF_NULL(graph);
MS_EXCEPTION_IF_NULL(output_indexes);
MS_LOG(INFO) << "Create placeholder for output[" << node->DebugString() << "] index[" << node_output_pair.second
<< "]";
MS_LOG(DEBUG) << "Create placeholder for output[" << node->DebugString() << "] index[" << node_output_pair.second
<< "]";
// if node is a value node, no need sync addr from device to host
if (node->isa<ValueNode>()) {
auto value_node = node->cast<ValueNodePtr>();
@@ -400,10 +400,10 @@ BaseRef CreateNodeOutputPlaceholder(const AnfNodePtr &anf, const KernelGraphPtr
std::map<KernelWithIndex, std::vector<std::vector<size_t>>> *output_indexes) {
MS_EXCEPTION_IF_NULL(anf);
MS_EXCEPTION_IF_NULL(output_indexes);
MS_LOG(INFO) << "Create placeholder for output[" << anf->DebugString() << "]";
MS_LOG(DEBUG) << "Create placeholder for output[" << anf->DebugString() << "]";
auto item_with_index = AnfAlgo::VisitKernelWithReturnType(anf, 0);
MS_EXCEPTION_IF_NULL(item_with_index.first);
MS_LOG(INFO) << "Create placeholder for output after visit:" << item_with_index.first->DebugString();
MS_LOG(DEBUG) << "Create placeholder for output after visit:" << item_with_index.first->DebugString();
// special handle for maketuple
if (AnfAlgo::CheckPrimitiveType(item_with_index.first, prim::kPrimMakeTuple)) {
auto cnode = item_with_index.first->cast<CNodePtr>();
@@ -1499,7 +1499,7 @@ void SessionBasic::UpdateOutputs(const std::shared_ptr<KernelGraph> &kernel_grap
auto anf_outputs = kernel_graph->outputs();
for (auto &item : anf_outputs) {
MS_EXCEPTION_IF_NULL(item);
MS_LOG(INFO) << "Update output[" << item->DebugString() << "]";
MS_LOG(DEBUG) << "Update output[" << item->DebugString() << "]";
outputs->emplace_back(CreateNodeOutputTensors(item, kernel_graph, input_tensors, &tensor_to_node, &node_to_tensor));
}

@@ -1945,7 +1945,6 @@ CNodePtr SessionBasic::ConstructOutput(const AnfNodePtrList &outputs, const std:
}

void SessionBasic::CreateOutputNode(const CNodePtr &cnode, const std::shared_ptr<KernelGraph> &graph) {
MS_LOG(INFO) << "Start!";
std::vector<AnfNodePtr> make_tuple_inputs;
make_tuple_inputs.push_back(NewValueNode(prim::kPrimMakeTuple));
MS_EXCEPTION_IF_NULL(graph);
@@ -1967,7 +1966,6 @@ void SessionBasic::CreateOutputNode(const CNodePtr &cnode, const std::shared_ptr
// create output
auto g_output = graph->NewCNode(make_tuple_inputs);
graph->set_output(g_output);
MS_LOG(INFO) << "Finish!";
}

std::shared_ptr<KernelGraph> SessionBasic::ConstructSingleOpGraph(const OpRunInfo &op_run_info,
@@ -1983,7 +1981,6 @@ std::shared_ptr<KernelGraph> SessionBasic::ConstructSingleOpGraph(const OpRunInf
MS_EXCEPTION_IF_NULL(op_prim);
inputs.push_back(std::make_shared<ValueNode>(op_prim));
// set input parameter
MS_LOG(INFO) << "Input tensor size: " << input_tensors.size();
if (input_tensors.size() != tensors_mask.size()) {
MS_LOG(EXCEPTION) << "Input tensors size " << input_tensors.size() << " should be equal to tensors mask size "
<< tensors_mask.size();


+ 9
- 9
mindspore/ccsrc/pipeline/pynative/pynative_execute.cc View File

@@ -468,7 +468,7 @@ void ConstructInputTensor(const OpExecInfoPtr &op_run_info, std::vector<int64_t>
bool reg_exist = opt::ConstInputToAttrInfoRegistry::Instance().GetRegisterByOpName(op_run_info->op_name, &reg);
if (op_run_info->is_dynamic_shape &&
dynamic_shape_const_input_to_attr.find(op_run_info->op_name) == dynamic_shape_const_input_to_attr.end()) {
MS_LOG(INFO) << "current node is dynamic shape: " << op_run_info->op_name;
MS_LOG(DEBUG) << "current node is dynamic shape: " << op_run_info->op_name;
reg_exist = false;
}
auto ms_context = MsContext::GetInstance();
@@ -1560,14 +1560,14 @@ py::object ForwardExecutor::RunOpWithBackendPolicy(MsBackendPolicy backend_polic
switch (backend_policy) {
case kMsBackendVmOnly: {
// use vm only
MS_LOG(INFO) << "RunOp use VM only backend";
MS_LOG(DEBUG) << "RunOp use VM only backend";
result = RunOpInVM(op_exec_info, status);
break;
}
case kMsBackendGePrior: {
#ifdef ENABLE_GE
// use GE first, use vm when GE fails
MS_LOG(INFO) << "RunOp use GE first backend";
MS_LOG(DEBUG) << "RunOp use GE first backend";
result = RunOpInGE(op_exec_info, status);
if (*status != PYNATIVE_SUCCESS) {
result = RunOpInVM(op_exec_info, status);
@@ -1577,7 +1577,7 @@ py::object ForwardExecutor::RunOpWithBackendPolicy(MsBackendPolicy backend_polic
}
case kMsBackendMsPrior: {
// use Ms first,use others when ms failed
MS_LOG(INFO) << "RunOp use Ms first backend";
MS_LOG(DEBUG) << "RunOp use Ms first backend";
result = RunOpInMs(op_exec_info, status);
if (*status != PYNATIVE_SUCCESS) {
MS_LOG(ERROR) << "RunOp use Ms backend failed!!!";
@@ -1591,7 +1591,7 @@ py::object ForwardExecutor::RunOpWithBackendPolicy(MsBackendPolicy backend_polic
}

py::object ForwardExecutor::RunOpInVM(const OpExecInfoPtr &op_exec_info, PynativeStatusCode *status) {
MS_LOG(INFO) << "RunOpInVM start";
MS_LOG(DEBUG) << "RunOpInVM start";
MS_EXCEPTION_IF_NULL(status);
MS_EXCEPTION_IF_NULL(op_exec_info);
MS_EXCEPTION_IF_NULL(op_exec_info->py_primitive);
@@ -1617,14 +1617,14 @@ py::object ForwardExecutor::RunOpInVM(const OpExecInfoPtr &op_exec_info, Pynativ
}
}
*status = PYNATIVE_SUCCESS;
MS_LOG(INFO) << "RunOpInVM end";
MS_LOG(DEBUG) << "RunOpInVM end";
return std::move(result);
}

auto primitive = op_exec_info->py_primitive;
MS_EXCEPTION_IF_NULL(primitive);
auto result = primitive->RunPyComputeFunction(op_inputs);
MS_LOG(INFO) << "RunOpInVM end";
MS_LOG(DEBUG) << "RunOpInVM end";
if (py::isinstance<py::none>(result)) {
MS_LOG(ERROR) << "VM got the result none, please check whether it is failed to get func";
*status = PYNATIVE_OP_NOT_IMPLEMENTED_ERR;
@@ -1642,7 +1642,7 @@ py::object ForwardExecutor::RunOpInVM(const OpExecInfoPtr &op_exec_info, Pynativ
py::object ForwardExecutor::RunOpInMs(const OpExecInfoPtr &op_exec_info, PynativeStatusCode *status) {
MS_EXCEPTION_IF_NULL(op_exec_info);
MS_EXCEPTION_IF_NULL(status);
MS_LOG(INFO) << "Start run op [" << op_exec_info->op_name << "] with backend policy ms";
MS_LOG(DEBUG) << "Start run op [" << op_exec_info->op_name << "] with backend policy ms";
auto ms_context = MsContext::GetInstance();
ms_context->set_param<bool>(MS_CTX_ENABLE_PYNATIVE_INFER, true);

@@ -1696,7 +1696,7 @@ py::object ForwardExecutor::RunOpInMs(const OpExecInfoPtr &op_exec_info, Pynativ
auto result = BaseRefToPyData(outputs);
ms_context->set_param<bool>(MS_CTX_ENABLE_PYNATIVE_INFER, false);
*status = PYNATIVE_SUCCESS;
MS_LOG(INFO) << "End run op [" << op_exec_info->op_name << "] with backend policy ms";
MS_LOG(DEBUG) << "End run op [" << op_exec_info->op_name << "] with backend policy ms";
return result;
}



+ 2
- 2
mindspore/ccsrc/runtime/device/kernel_runtime.cc View File

@@ -717,7 +717,7 @@ void KernelRuntime::AssignValueNodeTensor(const ValueNodePtr &value_node, const
void KernelRuntime::AssignStaticMemoryValueNode(session::KernelGraph *graph) {
MS_EXCEPTION_IF_NULL(graph);
MS_EXCEPTION_IF_NULL(mem_manager_);
MS_LOG(INFO) << "AssignStaticMemoryValueNode start";
MS_LOG(DEBUG) << "AssignStaticMemoryValueNode start";
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
// order the value nodes
@@ -757,7 +757,7 @@ void KernelRuntime::AssignStaticMemoryValueNode(session::KernelGraph *graph) {
}
}
}
MS_LOG(INFO) << "AssignStaticMemoryValueNode end";
MS_LOG(DEBUG) << "AssignStaticMemoryValueNode end";
}

void KernelRuntime::AssignDynamicMemory(session::KernelGraph *graph) {


+ 1
- 1
mindspore/core/ir/primitive.h View File

@@ -85,7 +85,7 @@ class Primitive : public Named {
const std::unordered_map<std::string, ValuePtr> &evaluate_added_attrs() const { return evaluate_added_attrs_; }
void set_evaluate_added_attrs(const std::unordered_map<std::string, ValuePtr> &attrs) {
for (auto &attr : attrs) {
MS_LOG(INFO) << " set evalu attrl " << name() << attr.first;
MS_LOG(DEBUG) << " set evalu attrl " << name() << attr.first;
attrs_[attr.first] = attr.second;
}
}


Loading…
Cancel
Save