From: @Margaret_wangrui
Reviewed-by: @ginfung, @zh_qh
Signed-off-by: @zh_qh
Ref: pull/15696/MERGE
@@ -29,7 +29,7 @@ int64_t FusionIdAllocator::AllocateFusionId() {
   return fusion_id;
 }
-bool FusionIdAllocator::HasFusionIdAttr(const AnfNodePtr &node) {
+bool FusionIdAllocator::HasFusionIdAttr(const AnfNodePtr &node) const {
   MS_EXCEPTION_IF_NULL(node);
   if (!node->isa<CNode>()) {
     return false;
@@ -30,7 +30,7 @@ class FusionIdAllocator {
   void Init();
   int64_t AllocateFusionId();
-  bool HasFusionIdAttr(const AnfNodePtr &node);
+  bool HasFusionIdAttr(const AnfNodePtr &node) const;
   int64_t GetFusionId(const AnfNodePtr &node);
   void SetFusionId(const AnfNodePtr &node, int64_t id);
@@ -1468,7 +1468,7 @@ class ExecuteOrderGenerator {
     return input;
   }
-  void RemoveSameInputsAssigns(std::vector<CNodePtr> *exec_order) {
+  void RemoveSameInputsAssigns(std::vector<CNodePtr> *exec_order) const {
     for (auto iter = exec_order->begin(); iter != exec_order->end();) {
       auto &node = *iter;
       auto &inputs = node->inputs();
@@ -704,7 +704,7 @@ AnfNodePtr SessionBasic::CreateNewParameterFromCNode(const AnfNodePtr &anf, Kern
   return CreateParameterFromTuple(anf, graph);
 }
-void SessionBasic::GetCNodeInfo(const CNodePtr &cnode, std::vector<AnfNodePtr> *cnode_inputs) {
+void SessionBasic::GetCNodeInfo(const CNodePtr &cnode, std::vector<AnfNodePtr> *cnode_inputs) const {
   MS_EXCEPTION_IF_NULL(cnode);
   MS_EXCEPTION_IF_NULL(cnode_inputs);
   auto prim = AnfAlgo::GetCNodePrimitive(cnode);
@@ -151,7 +151,7 @@ class SessionBasic : public std::enable_shared_from_this<SessionBasic> {
   std::vector<AnfNodePtr> CreateValueNode(const CNodePtr &cnode, KernelGraph *graph);
   void CreateCNodeInputs(const CNodePtr &cnode, KernelGraph *graph, std::vector<AnfNodePtr> *cnode_inputs);
   std::vector<AnfNodePtr> CreateCallSwitchInputs(const CNodePtr &cnode, KernelGraph *graph);
-  void GetCNodeInfo(const CNodePtr &cnode, std::vector<AnfNodePtr> *cnode_inputs);
+  void GetCNodeInfo(const CNodePtr &cnode, std::vector<AnfNodePtr> *cnode_inputs) const;
   void GetNewCNodeInputs(const CNodePtr &cnode, KernelGraph *graph, std::vector<AnfNodePtr> *cnode_inputs,
                          std::unordered_map<AnfNodePtr, AnfNodePtr> *other_graph_cnode);
   std::vector<AnfNodePtr> CreateCallSwitchLayerInputs(const CNodePtr &cnode, KernelGraph *graph);
@@ -28,6 +28,5 @@ class LoadEliminater : public AnfVisitor {
  public:
   AnfNodePtr operator()(const OptimizerPtr &, const AnfNodePtr &node) override;
 };
 }  // namespace mindspore::opt::irpass
 #endif  // MINDSPORE_CCSRC_FRONTEND_OPTIMIZER_IRPASS_LOAD_ELIMINATE_H_
@@ -67,7 +67,7 @@ AnfNodePtr AddMonadParameter(const FuncGraphPtr &func_graph, const std::string &
   // If io monad parameter added before u monad parameter, should insert u monad before io monad in parameters
   if (io_monad_location != params_size && abs->isa<abstract::AbstractUMonad>()) {
     std::vector<AnfNodePtr> params = func_graph->parameters();
-    (void)params.insert(params.begin() + io_monad_location, para);
+    (void)params.insert(params.begin() + SizeToInt(io_monad_location), para);
     func_graph->set_parameters(params);
   } else {
     func_graph->add_parameter(para);
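The hunk above converts the unsigned index to a signed value before adding it to an iterator: `params.begin() + io_monad_location` mixes a size_t with the iterator's signed difference_type, which trips sign-conversion static checks. A rough standalone sketch of the idea, assuming a checked conversion helper similar in spirit to the project's SizeToInt (SizeToIntChecked below is a hypothetical stand-in, not the MindSpore utility):

#include <cassert>
#include <cstddef>
#include <limits>
#include <vector>

// Hypothetical checked size_t -> int conversion: guard against overflow, then
// hand the iterator arithmetic an explicitly signed offset.
inline int SizeToIntChecked(size_t v) {
  assert(v <= static_cast<size_t>(std::numeric_limits<int>::max()));
  return static_cast<int>(v);
}

int main() {
  std::vector<int> params = {1, 2, 3, 4};
  size_t insert_location = 2;  // analogous to io_monad_location, an unsigned index
  // Explicit signed offset keeps the iterator arithmetic warning-free.
  params.insert(params.begin() + SizeToIntChecked(insert_location), 42);
  return 0;
}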
@@ -80,7 +80,7 @@ void KernelRuntime::RunOpAssignMemory(const std::vector<tensor::TensorPtr> &inpu
   UpdateRefNodeOutputMem(graph);
 }
-void KernelRuntime::RunOpClearMemory(const session::KernelGraph *graph) {
+void KernelRuntime::RunOpClearMemory(const session::KernelGraph *graph) const {
   MS_EXCEPTION_IF_NULL(graph);
   // clear input parameter memory resource
   for (const auto &input_node : graph->inputs()) {
@@ -54,7 +54,7 @@ class KernelRuntime {
   virtual bool Init() = 0;
   virtual void AssignMemory(session::KernelGraph *graph);
   void RunOpAssignMemory(const std::vector<tensor::TensorPtr> &input_tensors, session::KernelGraph *graph);
-  void RunOpClearMemory(const session::KernelGraph *graph);
+  void RunOpClearMemory(const session::KernelGraph *graph) const;
   static bool DumpDataEnabled();
   static bool DumpDataEnabledIteration();
   virtual bool LoadData(session::KernelGraph *graph);