diff --git a/mindspore/ccsrc/pynative/base.h b/mindspore/ccsrc/pynative/base.h
index 37ff000b04..fc143da3c1 100644
--- a/mindspore/ccsrc/pynative/base.h
+++ b/mindspore/ccsrc/pynative/base.h
@@ -31,7 +31,6 @@
 
 namespace mindspore {
 namespace pynative {
-
 namespace py = pybind11;
 
 enum PynativeStatusCode {
@@ -61,7 +60,6 @@
 using OpExecInfoPtr = std::shared_ptr<OpExecInfo>;
 OpExecInfoPtr GenerateOpExecInfo(const py::args &args);
 
 const std::set<std::string> ignore_infer_prim = {"partial", "make_ref"};
-
 }  // namespace pynative
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/pynative/pynative_execute_ge.cc b/mindspore/ccsrc/pynative/pynative_execute_ge.cc
index 0bf2a391f9..7357bdd710 100644
--- a/mindspore/ccsrc/pynative/pynative_execute_ge.cc
+++ b/mindspore/ccsrc/pynative/pynative_execute_ge.cc
@@ -33,7 +33,6 @@ const char SINGLE_OP_GRAPH[] = "single_op_graph";
 
 namespace mindspore {
 namespace pynative {
-
 using MeTensor = mindspore::tensor::Tensor;
 using MeTensorPtr = mindspore::tensor::TensorPtr;
 using GeOperator = ge::Operator;
@@ -307,5 +306,4 @@ py::object RunOpInGE(const OpExecInfoPtr &op_exec_info, PynativeStatusCode *stat
   return std::move(result);
 }
 }  // namespace pynative
-
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/session/session_basic.cc b/mindspore/ccsrc/session/session_basic.cc
index 0ef0ad97ea..b2daa07d71 100755
--- a/mindspore/ccsrc/session/session_basic.cc
+++ b/mindspore/ccsrc/session/session_basic.cc
@@ -226,8 +226,7 @@ void ConvertValueTupleToTensor(const py::object &input_object, std::vector<tenso
   auto value_tuple = input_value->cast<ValueTuplePtr>();
   MS_EXCEPTION_IF_NULL(value_tuple);
-  tensor::TensorPtr tensor_ptr = nullptr;
-  tensor_ptr = opt::CreateTupleTensor(value_tuple);
+  tensor::TensorPtr tensor_ptr = opt::CreateTupleTensor(value_tuple);
   MS_EXCEPTION_IF_NULL(tensor_ptr);
   input_tensor->push_back(tensor_ptr);
 }
@@ -583,12 +582,9 @@ void SessionBasic::LoadInputData(const std::shared_ptr<KernelGraph> &kernel_grap
   MS_EXCEPTION_IF_NULL(kernel_graph);
   auto input_nodes = kernel_graph->inputs();
   if ((inputs.size() + input_ctrl_size) - 1 != input_nodes.size()) {
-    MS_LOG(EXCEPTION) << "tensor input size:" << inputs.size()
-                      << " is not equal graph inputs size:" << input_nodes.size()
+    MS_LOG(EXCEPTION) << "tensor input:" << inputs.size() << " is not equal graph inputs:" << input_nodes.size()
                       << ", input_ctrl_size:" << input_ctrl_size;
   }
-  auto ms_context = MsContext::GetInstance();
-  MS_EXCEPTION_IF_NULL(ms_context);
   for (size_t i = 0; i < inputs.size(); ++i) {
     auto tensor = inputs[i];
     MS_EXCEPTION_IF_NULL(tensor);
@@ -598,7 +594,8 @@ void SessionBasic::LoadInputData(const std::shared_ptr<KernelGraph> &kernel_grap
       auto pk_node = input_node->cast<ParameterPtr>();
       auto device_address = AnfAlgo::GetMutableOutputAddr(pk_node, 0);
       bool need_sync = false;
-      if (ms_context->enable_pynative_infer()) {
+      MS_EXCEPTION_IF_NULL(MsContext::GetInstance());
+      if (MsContext::GetInstance()->enable_pynative_infer()) {
         if (tensor->device_address().get() == nullptr || tensor->device_address() != device_address) {
           need_sync = true;
         }
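
Note on the LoadInputData change above: rather than caching the context singleton in a local (`ms_context`) at the top of the function, the patch null-checks and dereferences `MsContext::GetInstance()` at the point of use. The following is a minimal, self-contained C++ sketch of that access pattern only; the `Context` class is a hypothetical stand-in for MindSpore's MsContext, not its real API.

#include <cassert>
#include <iostream>
#include <memory>

// Hypothetical stand-in for a framework context singleton such as MsContext.
// Only the pieces needed to show the "check-then-use at the call site" idiom.
class Context {
 public:
  static const std::shared_ptr<Context> &GetInstance() {
    static std::shared_ptr<Context> instance = std::make_shared<Context>();
    return instance;
  }
  bool enable_pynative_infer() const { return enable_pynative_infer_; }

 private:
  bool enable_pynative_infer_ = true;  // illustrative default, not a real MsContext setting
};

int main() {
  // Same shape as the patched code: validate the singleton immediately
  // before use instead of holding it in an earlier local variable.
  assert(Context::GetInstance() != nullptr);
  if (Context::GetInstance()->enable_pynative_infer()) {
    std::cout << "pynative infer enabled" << std::endl;
  }
  return 0;
}

A cached local would avoid calling GetInstance() twice; the patch favors the shorter form, which is a reasonable trade-off when the accessor is a cheap singleton lookup.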