You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

kernel_graph.h 19 kB

adapt to remove inline merge me commit for remove inline deal with multiple cases of switch in ConstructKernelGraph deal with switch and call cases in ConstructKernelGraph fix bug and rebase master ConstructKernelGraph adapt to remove inline fix InsertMultipleAssignToGraph bug add graph input to new graph which is created for switch input replace CreateNewParameterFromCNode with NewParameter in order to set new parameter's abstract and kernel_info avoid creating a new switch repeatedly when the cnode is a call switch without real input null pointer check update frontend code Revert "update frontend code" This reverts commit ce1f600d1e9b4b47d9b81122f981bbbe505dd250. update frontend code PR_2948 fix bug of CheckLabalIndex handle switch_layer in ConstructKernelGraph add attr for assign node to avoid erasing by cse pass cherry-pick ms commit[59b35f690ddcc94ff35a4f4eaf3816121b32235b]:temporarily avoid list getitem problem rebase master Revert "cherry-pick ms commit[59b35f690ddcc94ff35a4f4eaf3816121b32235b]:temporary avoid list getitem problem" This reverts commit 74c258f94260ca0769a1ef69c6ef8e831c301dbf. Revert "handle switch_layer in ConstructKernelGraph" This reverts commit cb5367f02d69facbca8d39e9234c501608aee27f. Revert "update frontend code PR_2948" This reverts commit 234ac583400a96a8ddd641f7a722e1ccd5e056c6. Revert "merge me commit for remove inline" This reverts commit 55c0ebd42b6699c7686f5ce585e745f87dd42280. fix diff after rebase master doing remove inline in me overwrite FindNodePrimitive Revert "doing remove inline in me" This reverts commit b42e893125bc624d323e855ac6ae615333c06e65.
5 years ago
6 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387
  1. /**
  2. * Copyright 2019-2021 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #ifndef MINDSPORE_CCSRC_BACKEND_SESSION_KERNEL_GRAPH_H
  17. #define MINDSPORE_CCSRC_BACKEND_SESSION_KERNEL_GRAPH_H
  18. #include <vector>
  19. #include <memory>
  20. #include <utility>
  21. #include <string>
  22. #include <queue>
  23. #include <map>
  24. #include <unordered_map>
  25. #include <set>
  26. #include <unordered_set>
  27. #include <stack>
  28. #include <atomic>
  29. #include "ir/func_graph.h"
  30. #include "ir/anf.h"
  31. #include "ir/graph_utils.h"
  32. #include "utils/contract.h"
  33. #include "runtime/device/kernel_info.h"
  34. namespace mindspore {
  35. namespace session {
  36. using AnfWithOutIndex = std::pair<AnfNodePtr, size_t>;
  37. class KernelGraph : public FuncGraph {
  38. public:
  39. KernelGraph() : graph_id_(0), start_label_(nullptr), end_goto_(nullptr), current_epoch_(0), is_dynamic_shape_(false) {
  40. inputs_ = std::make_shared<std::vector<AnfNodePtr>>();
  41. execution_order_ = {};
  42. mem_reuse_exec_order_ = {};
  43. executable_ = true;
  44. summary_node_exist_ = false;
  45. stream_distinction_label_ = kInvalidDistincLabel;
  46. }
  47. KernelGraph(const KernelGraph &graph) : FuncGraph(graph) {
  48. inputs_ = graph.inputs_;
  49. child_graph_result_ = graph.child_graph_result_;
  50. execution_order_ = graph.execution_order_;
  51. mem_reuse_exec_order_ = graph.mem_reuse_exec_order_;
  52. graph_id_ = graph.graph_id_;
  53. stream_distinction_label_ = graph.stream_distinction_label_;
  54. front_backend_anf_map_ = graph.front_backend_anf_map_;
  55. backend_front_anf_map_ = graph.backend_front_anf_map_;
  56. tensor_to_value_node_map_ = graph.tensor_to_value_node_map_;
  57. graph_value_nodes_ = graph.graph_value_nodes_;
  58. node_input_num_ = graph.node_input_num_;
  59. node_input_edges_ = graph.node_input_edges_;
  60. ref_out_in_map_ = graph.ref_out_in_map_;
  61. node_output_edges_ = graph.node_output_edges_;
  62. summary_nodes_ = graph.summary_nodes_;
  63. executable_ = graph.executable_;
  64. summary_node_exist_ = graph.summary_node_exist_;
  65. valid_inputs_ = graph.valid_inputs_;
  66. child_graph_order_ = graph.child_graph_order_;
  67. input_ctrl_tensors_ = graph.input_ctrl_tensors_;
  68. parent_graph_ = graph.parent_graph_;
  69. start_label_ = graph.start_label_;
  70. end_goto_ = graph.end_goto_;
  71. front_to_internal_outputs_map_ = graph.front_to_internal_outputs_map_;
  72. internal_outputs_to_front_map_ = graph.internal_outputs_to_front_map_;
  73. internal_outputs_tensor_map_ = graph.internal_outputs_tensor_map_;
  74. current_epoch_ = graph.current_epoch_;
  75. tuple_parameter_to_make_tuple_map_ = graph.tuple_parameter_to_make_tuple_map_;
  76. visited_nodes_ = graph.visited_nodes_;
  77. edge_to_ = graph.edge_to_;
  78. loop_nodes_ = graph.loop_nodes_;
  79. input_nodes_ = graph.input_nodes_;
  80. pre_graphs_ = graph.pre_graphs_;
  81. post_graphs_ = graph.post_graphs_;
  82. size_t pre_graph_finished_count = graph.pre_graph_finished_count_;
  83. pre_graph_finished_count_ = pre_graph_finished_count;
  84. size_t post_graph_finished_count = graph.post_graph_finished_count_;
  85. post_graph_finished_count_ = post_graph_finished_count;
  86. first_step_ = graph.first_step_;
  87. has_optimizer_ = graph.has_optimizer_;
  88. is_dynamic_shape_ = graph.is_dynamic_shape_;
  89. }
  90. ~KernelGraph() override;
  91. MS_DECLARE_PARENT(KernelGraph, FuncGraph);
  92. const std::vector<AnfNodePtr> &inputs() const;
  93. std::vector<AnfNodePtr> *MutableInputs() const { return inputs_.get(); }
  94. void ReplaceGraphInput(const AnfNodePtr &old_parameter, const AnfNodePtr &new_parameter);
  95. std::vector<AnfNodePtr> outputs() const;
  96. CNodePtr NewCNode(const std::vector<AnfNodePtr> &inputs) override;
  97. void CreateKernelInfoFromNewParameter(const CNodePtr &cnode);
  98. CNodePtr NewCNode(const CNodePtr &cnode);
  99. void ResetAssignInputFeaatureMapFlag(const CNodePtr &cnode) const;
  100. ParameterPtr NewParameter(const ParameterPtr &parameter = nullptr);
  101. ParameterPtr NewParameter(const abstract::AbstractBasePtr &abstract);
  102. ValueNodePtr NewValueNode(const AbstractBasePtr &abstract, const ValuePtr &value);
  103. ValueNodePtr NewValueNode(const ValueNodePtr &value_node = nullptr);
  104. // trans tuple output to maketuple + no_tuple out
  105. AnfNodePtr TransTupleToMakeTuple(const AnfNodePtr &node);
  106. void set_execution_order(const std::vector<CNodePtr> &order) { execution_order_ = order; }
  107. void set_execution_order(std::vector<CNodePtr> &&order) { execution_order_ = std::move(order); }
  108. const std::vector<CNodePtr> &execution_order() const { return execution_order_; }
  109. // Set new exec_order for mem_reuse
  110. void set_mem_reuse_exec_order(const std::vector<CNodePtr> &order) { mem_reuse_exec_order_ = order; }
  111. const std::vector<CNodePtr> &mem_reuse_exec_order() const { return mem_reuse_exec_order_; }
  112. void SetExecOrderByDefault();
  113. uint32_t graph_id() const { return graph_id_; }
  114. void set_graph_id(uint32_t graph_id) { graph_id_ = graph_id; }
  115. // and a new front to backend anf relation to maop
  116. void FrontBackendlMapAdd(const AnfNodePtr &front_anf, const AnfNodePtr &backend_anf);
  117. // replace old backend anf with new backend anf
  118. void FrontBackendlMapUpdate(const AnfNodePtr &old_backend_anf, const AnfNodePtr &new_backend_anf);
  119. // get backend anf by front anf
  120. AnfNodePtr GetBackendAnfByFrontAnf(const AnfNodePtr &front_anf);
  121. // get front anf by backend anf
  122. AnfNodePtr GetFrontAnfByBackendAnf(const AnfNodePtr &backend_anf);
  123. // check backend node whether exist in map
  124. bool BackendNodeExistInFrontBackendMap(const AnfNodePtr &backend_anf);
  125. // get value node by tensor
  126. ValueNodePtr GetValueNodeByTensor(const tensor::TensorPtr &tensor);
  127. // add value node tensor relation map
  128. void TensorValueNodeMapAdd(const tensor::TensorPtr &tensor, const ValueNodePtr &value_node);
  129. // get all value nodes of graph
  130. const std::unordered_set<ValueNodePtr> graph_value_nodes() const { return graph_value_nodes_; }
  131. // add value node to graph
  132. void AddValueNodeToGraph(const ValueNodePtr &value_node);
  133. // ref output is in map
  134. bool IsInRefOutputMap(const AnfWithOutIndex &pair) const;
  135. // get ref correspond pairs
  136. AnfWithOutIndex GetRefCorrespondOutput(const AnfWithOutIndex &out_pair) const;
  137. // add ref correspond pairs
  138. void AddRefCorrespondPairs(const AnfWithOutIndex &final_pair, const AnfWithOutIndex &origin_pair);
  139. // get map
  140. std::map<AnfWithOutIndex, AnfWithOutIndex> GetRefMap() const { return ref_out_in_map_; }
  141. // check whether graph is executable
  142. bool executable() const { return executable_; }
  143. // set executable of graph
  144. void set_executable(bool executable) { executable_ = executable; }
  145. // set summary_node of graph
  146. void set_summary_node_exist(bool summary_node_exist) { summary_node_exist_ = summary_node_exist; }
  147. // check whether exist summary node in graph
  148. bool summary_node_exist() const { return summary_node_exist_; }
  149. // set invalid inputs for control sink
  150. std::vector<bool> *MutableValidInputs() { return &valid_inputs_; }
  151. std::vector<bool> valid_inputs() const { return valid_inputs_; }
  152. // replace node in graph
  153. void ReplaceNode(NotNull<AnfNodePtr> old_anf_node, NotNull<AnfNodePtr> new_anf_node);
  154. // set stream label of graph
  155. void set_stream_distinction_label(uint32_t stream_label) { stream_distinction_label_ = stream_label; }
  156. // get stream label of graph
  157. uint32_t stream_distinction_label() { return stream_distinction_label_; }
  158. // refresh execute kernel stream label
  159. void UpdateExecuteKernelStreamLabel();
  160. // calculate the leaf graph order of root graph
  161. std::vector<std::shared_ptr<KernelGraph>> GetLeafGraphOrder();
  162. // the child graph of current graph
  163. const std::vector<std::weak_ptr<KernelGraph>> &child_graph_order() const { return child_graph_order_; }
  164. void set_child_graph_order(const std::vector<std::weak_ptr<KernelGraph>> &order) { child_graph_order_ = order; }
  165. // checkout whether current graph is leaf graph
  166. bool IsLeafGraph() const;
  167. // set input_tensors pointer of control parameter
  168. void set_input_ctrl_tensors(const std::shared_ptr<std::vector<tensor::TensorPtr>> &input_tensors_ptr) {
  169. input_ctrl_tensors_ = input_tensors_ptr;
  170. }
  171. // get input_tensors pointer of control parameter
  172. std::shared_ptr<std::vector<tensor::TensorPtr>> input_ctrl_tensors() const { return input_ctrl_tensors_; }
  173. // get parent kernel graph
  174. std::weak_ptr<KernelGraph> parent_graph() const { return parent_graph_; }
  175. // set parent kernel graph
  176. void set_parent_graph(const std::weak_ptr<KernelGraph> &parent_graph) { parent_graph_ = parent_graph; }
  177. // find anf node in graph
  178. std::vector<CNodePtr> FindNodeByPrimitive(const PrimitivePtr &primitive) const;
  179. std::vector<CNodePtr> FindNodeByPrimitive(const std::vector<PrimitivePtr> &primitive_list) const;
  180. // used to dump ir
  181. std::string ToString() const override;
  182. void set_start_label(const CNodePtr &start_label) { start_label_ = start_label; }
  183. CNodePtr get_start_label() { return start_label_; }
  184. void set_end_goto(const CNodePtr &end_goto) { end_goto_ = end_goto; }
  185. CNodePtr get_end_goto() { return end_goto_; }
  186. void PrintGraphExecuteOrder() const;
  187. const std::map<std::string, std::pair<AnfNodePtr, int>> &summary_nodes() const { return summary_nodes_; }
  188. void set_summary_nodes(const std::map<std::string, std::pair<AnfNodePtr, int>> &nodes) { summary_nodes_ = nodes; }
  189. void AddInternalOutput(const AnfNodePtr &front_node, const AnfNodePtr &node, int output_idx = 0,
  190. bool unique_target = false);
  191. void ReplaceInternalOutput(const AnfNodePtr &node, const AnfNodePtr &new_node, int src_output_idx = -1,
  192. int dst_output_idx = -1);
  193. AnfNodePtr GetInternalOutputByFrontNode(const AnfNodePtr &front_node) const;
  194. bool IsInternalOutput(const AnfNodePtr &node, int output_idx = -1) const;
  195. bool IsUniqueTargetInternalOutput(const AnfNodePtr &node, int output_idx) const;
  196. void AddInternalOutputTensor(const AnfNodePtr &node, int output_idx, const tensor::TensorPtr &tensor);
  197. tensor::TensorPtr GetInternalOutputTensor(const AnfNodePtr &node, int output_idx);
  198. uint32_t current_epoch() const { return current_epoch_; }
  199. void set_current_epoch(uint32_t epoch) { current_epoch_ = epoch; }
  200. void UpdateChildGraphOrder();
  201. const std::vector<AnfNodePtr> &child_graph_result() const { return child_graph_result_; }
  202. void AddChildGraphResult(const AnfNodePtr &parameter) { child_graph_result_.push_back(parameter); }
  203. void set_child_graph_result(const std::vector<AnfNodePtr> &child_graph_result) {
  204. child_graph_result_ = child_graph_result;
  205. }
  206. void InsertTupleParameterToMakeTupleMap(const AnfNodePtr &param, const AnfNodePtr &make_tuple) {
  207. if (tuple_parameter_to_make_tuple_map_.find(param) != tuple_parameter_to_make_tuple_map_.end()) {
  208. return;
  209. }
  210. tuple_parameter_to_make_tuple_map_[param] = make_tuple;
  211. }
  212. AnfNodePtr FindTupleParameterToMakeTupleMap(const AnfNodePtr &param) {
  213. if (tuple_parameter_to_make_tuple_map_.find(param) != tuple_parameter_to_make_tuple_map_.end()) {
  214. return tuple_parameter_to_make_tuple_map_[param];
  215. } else {
  216. return nullptr;
  217. }
  218. }
  219. void RemoveNodeFromGraph(const AnfNodePtr &node);
  220. void UpdateGraphDynamicAttr();
  221. bool is_dynamic_shape() const { return is_dynamic_shape_; }
  222. void SetOptimizerFlag();
  223. void SetInputNodes();
  224. const std::vector<AnfNodePtr> &input_nodes() const { return input_nodes_; }
  225. bool has_optimizer() const { return has_optimizer_; }
  226. // handle graph dependency
  227. void AddPreGraph(const std::shared_ptr<session::KernelGraph> &graph) {
  228. if (graph != nullptr) {
  229. pre_graphs_[graph->graph_id()] = graph;
  230. }
  231. }
  232. void AddPostGraph(const std::shared_ptr<session::KernelGraph> &graph) {
  233. if (graph != nullptr) {
  234. post_graphs_[graph->graph_id()] = graph;
  235. }
  236. }
  237. bool IsPreGraphFinished() { return pre_graphs_.size() == pre_graph_finished_count_; }
  238. bool IsPostGraphFinished() {
  239. if (first_step_) {
  240. return true;
  241. }
  242. return post_graphs_.size() == post_graph_finished_count_;
  243. }
  244. void IncPreGraphFinishedCount() { pre_graph_finished_count_++; }
  245. void IncPostGraphFinishedCount() { post_graph_finished_count_++; }
  246. void ResetGraphRunningStatus() {
  247. first_step_ = false;
  248. post_graph_finished_count_ = 0;
  249. pre_graph_finished_count_ = 0;
  250. }
  251. void OnRunGraphFinished() {
  252. for (auto post_graph : post_graphs_) {
  253. auto post_graph_ptr = post_graph.second.lock();
  254. if (post_graph_ptr != nullptr) {
  255. post_graph_ptr->IncPreGraphFinishedCount();
  256. }
  257. }
  258. for (auto pre_graph : pre_graphs_) {
  259. auto pre_graph_ptr = pre_graph.second.lock();
  260. if (pre_graph_ptr != nullptr) {
  261. pre_graph_ptr->IncPostGraphFinishedCount();
  262. }
  263. }
  264. }
  265. // end of handle graph dependency
  266. uint32_t label_num() const { return label_num_; }
  267. void set_label_num(uint32_t num) { label_num_ = num; }
  268. // The graphs has recursion.
  269. bool recursive_call() const { return has_recursive_call_; }
  270. // The graphs has subgraph multi-call.
  271. bool subgraph_multi_call() const { return has_subgraph_multicall_; }
  272. // set flag to indicate whether has recursion.
  273. void set_recursive_call(bool flag) { has_recursive_call_ = flag; }
  274. // set flag to indicate whether has multi-call.
  275. void set_subgraph_multi_call(bool flag) { has_subgraph_multicall_ = flag; }
  276. bool is_all_nop_node() const { return is_all_nop_node_; }
  277. void set_is_all_nop_node(bool is_all_nop_node) { is_all_nop_node_ = is_all_nop_node; }
  278. private:
  279. // remove value node form graph
  280. bool RemoveValueNodeFromGraph(const ValueNodePtr &value_node);
  281. void SetKernelInfoForNode(const AnfNodePtr &node) const;
  282. AnfNodePtr MakeValueNode(const AnfNodePtr &node) const;
  283. void EnqueueActiveNodes(const AnfNodePtr &node, std::queue<AnfNodePtr> *visit_queue,
  284. std::unordered_set<AnfNodePtr> *visited_nodes, bool comm_first = true);
  285. // update node edge list
  286. void UpdateNodeEdgeList(std::queue<AnfNodePtr> *seed_nodes);
  287. // add node depend edge by data edge
  288. void AddDependEdge(const AnfNodePtr &node, const AnfNodePtr &input, size_t depend_edge_num);
  289. void UpdateNodeInputOutputEdges(const std::vector<AnfNodePtr> &real_prior_nodes,
  290. const std::vector<AnfNodePtr> &real_depend_nodes);
  291. std::vector<AnfNodePtr> GetOutputNodes(const AnfNodePtr &node);
  292. AnfNodePtr TransValueNodeTuple(const AbstractBasePtr abstract, const ValuePtr &value);
  293. AnfNodePtr TransParameterTuple(const AbstractBasePtr &abstract);
  294. AnfNodePtr TransCNodeTuple(const CNodePtr &node);
  295. AnfNodePtr CreatTupleGetItemNode(const AnfNodePtr &node, size_t output_idx);
  296. std::vector<CNodePtr> SortStartLabelAndEndGoto();
  297. // checkout whether loop exist in graph
  298. void CheckLoop();
  299. uint32_t GetLoopNum(std::map<AnfNodePtr, size_t> none_zero_nodes);
  300. void GetLoopNodesByDFS(const AnfNodePtr &node, uint32_t *loop_num);
  301. // members
  302. std::shared_ptr<std::vector<AnfNodePtr>> inputs_;
  303. std::vector<AnfNodePtr> child_graph_result_;
  304. std::vector<CNodePtr> execution_order_;
  305. std::vector<CNodePtr> mem_reuse_exec_order_;
  306. uint32_t graph_id_;
  307. uint32_t stream_distinction_label_;
  308. // record map bettween front anf and backend anf,use two map implement bidirectional map
  309. std::unordered_map<AnfNodePtr, AnfNodePtr> front_backend_anf_map_;
  310. std::unordered_map<AnfNodePtr, AnfNodePtr> backend_front_anf_map_;
  311. // there may be a tensor from ME backend ,a value ndoe will be create according the tensor,map record
  312. std::unordered_map<tensor::TensorPtr, ValueNodePtr> tensor_to_value_node_map_;
  313. // include all value nodes
  314. std::unordered_set<ValueNodePtr> graph_value_nodes_;
  315. std::unordered_map<AnfNodePtr, size_t> node_input_num_;
  316. std::unordered_map<AnfNodePtr, std::vector<std::pair<AnfNodePtr, size_t>>> node_input_edges_;
  317. // record map between ref final output anf with index and ref origin input with index
  318. std::map<AnfWithOutIndex, AnfWithOutIndex> ref_out_in_map_;
  319. std::unordered_map<AnfNodePtr, std::vector<std::pair<AnfNodePtr, size_t>>> node_output_edges_;
  320. std::map<std::string, std::pair<AnfNodePtr, int>> summary_nodes_;
  321. // graph needn't execute
  322. bool executable_{false};
  323. // exist summary node in graph
  324. bool summary_node_exist_{false};
  325. // valid inputs
  326. std::vector<bool> valid_inputs_;
  327. // child graph execute order in parent graph
  328. std::vector<std::weak_ptr<KernelGraph>> child_graph_order_;
  329. // input_tensors of control parameter
  330. std::shared_ptr<std::vector<tensor::TensorPtr>> input_ctrl_tensors_;
  331. // parameter graph
  332. std::weak_ptr<KernelGraph> parent_graph_;
  333. CNodePtr start_label_;
  334. CNodePtr end_goto_;
  335. std::unordered_map<AnfNodePtr, AnfNodePtr> front_to_internal_outputs_map_;
  336. std::unordered_map<AnfNodePtr, std::unordered_map<int, std::pair<AnfNodePtr, bool>>> internal_outputs_to_front_map_;
  337. std::unordered_map<AnfNodePtr, std::unordered_map<int, tensor::TensorPtr>> internal_outputs_tensor_map_;
  338. uint32_t current_epoch_;
  339. std::unordered_map<AnfNodePtr, AnfNodePtr> tuple_parameter_to_make_tuple_map_;
  340. std::set<AnfNodePtr> visited_nodes_;
  341. std::map<AnfNodePtr, AnfNodePtr> edge_to_;
  342. std::stack<AnfNodePtr> loop_nodes_;
  343. std::vector<AnfNodePtr> input_nodes_;
  344. std::unordered_map<uint32_t, std::weak_ptr<session::KernelGraph>> pre_graphs_;
  345. std::unordered_map<uint32_t, std::weak_ptr<session::KernelGraph>> post_graphs_;
  346. std::atomic<size_t> pre_graph_finished_count_{0};
  347. std::atomic<size_t> post_graph_finished_count_{0};
  348. bool first_step_{true};
  349. bool has_optimizer_{false};
  350. bool is_dynamic_shape_{false};
  351. // Indicate the graphs has recursion or multi-call or not as the root graph.
  352. bool has_recursive_call_{false};
  353. bool has_subgraph_multicall_{false};
  354. // Number of labels. This is also the 'batch_num' for DavinciModel,
  355. // It should be 1 if no labels used for control flow.
  356. uint32_t label_num_ = 1;
  357. // If all the nodes of graph is the nop node.
  358. bool is_all_nop_node_{false};
  359. };
  360. } // namespace session
  361. using KernelGraphPtr = std::shared_ptr<session::KernelGraph>;
  362. } // namespace mindspore
  363. #endif // MINDSPORE_CCSRC_BACKEND_SESSION_KERNEL_GRAPH_H