You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

session_basic.h 5.3 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123
  1. /**
  2. * Copyright 2019 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #ifndef MINDSPORE_CCSRC_SESSION_SESSION_BASIC_H
  17. #define MINDSPORE_CCSRC_SESSION_SESSION_BASIC_H
  18. #include <vector>
  19. #include <string>
  20. #include <unordered_map>
  21. #include <utility>
  22. #include <memory>
  23. #include <map>
  24. #include "session/session_context.h"
  25. #include "session/kernel_graph.h"
  26. #include "ir/anf.h"
  27. #include "ir/meta_tensor.h"
  28. #include "utils/any.h"
  29. #include "utils/base_ref.h"
  30. #include "pynative/pynative_execute.h"
  31. #include "device/kernel_info.h"
namespace mindspore {
// Identifier assigned to a compiled backend graph.
using GraphId = uint32_t;
// String key used to cache compiled single-op graphs (see SessionBasic::run_op_graphs_).
using GraphInfo = std::string;
namespace session {
// NOTE(review): presumably clears a module-level map of parameters populated
// from Python; implementation lives in the .cc — confirm before relying on it.
void ClearPythonParasMap();
// Summary callback: invoked with a graph id and a name -> tensor map of summary
// values; returns a status code.
using CallBackFunc = uint32_t (*)(uint32_t graph_id,
                                  const std::map<std::string, mindspore::tensor::TensorPtr> &params_list);
using AnyList = std::vector<Any>;
using AnyListPtr = std::shared_ptr<AnyList>;
// Session-level name for PyNative's per-op execution descriptor.
using OpRunInfo = pynative::OpExecInfo;
using OpRunInfoPtr = std::shared_ptr<OpRunInfo>;
  43. class SessionBasic {
  44. public:
  45. SessionBasic() : device_id_(0) {
  46. graphs_ = {};
  47. run_op_graphs_ = {};
  48. summary_callback_ = nullptr;
  49. }
  50. virtual void Init(uint32_t device_id) { device_id_ = device_id; }
  51. virtual ~SessionBasic() { summary_callback_ = nullptr; }
  52. virtual GraphId CompileGraph(const AnfNodePtrList &lst, const AnfNodePtrList &outputs) = 0;
  53. // build graph, used to handle multiple child graphs
  54. virtual void BuildGraph(GraphId) {}
  55. virtual void RunGraph(const GraphId &graph_id, const std::vector<tensor::TensorPtr> &inputs, VectorRef *outputs) = 0;
  56. virtual void BuildOp(const OpRunInfo &, const GraphInfo &, const std::vector<tensor::TensorPtr> &input_tensors,
  57. const std::vector<bool> &tensors_mask) {}
  58. virtual py::tuple RunOp(const OpRunInfo &, const GraphInfo &, const std::vector<tensor::TensorPtr> &input_tensors) {
  59. return py::tuple();
  60. }
  61. virtual void RegisterSummaryCallBackFunc(const CallBackFunc &callback);
  62. std::shared_ptr<KernelGraph> ConstructKernelGraph(const AnfNodePtrList &lst, const AnfNodePtrList &outputs);
  63. CNodePtr CreateNewCNode(const CNodePtr &cnode, bool valid_input, KernelGraph *graph, bool *from_other_graph,
  64. std::unordered_map<AnfNodePtr, AnfNodePtr> *other_graph_cnode);
  65. // set parameters of final graph
  66. virtual GraphId SetFinalGraphInput(const std::vector<AnfNodePtr> &) { return kInvalidGraphId; }
  67. // set output of final graph
  68. virtual void SetFinalGraphOutput(const BaseRef &) {}
  69. // insert switch and set the relative active ops
  70. virtual void SwitchCompile(GraphId, GraphId, GraphId, const AnfNodePtr &) {}
  71. // set args of child graph.the arg maybe come from a output of other child graphs,or from final graph's parameter
  72. virtual void SetChildGraphInput(GraphId, const VectorRef &) {}
  73. // get graph id in child graphs by ME front anf node pointer
  74. virtual GraphId GetGraphIdByNode(const AnfNodePtr &) const { return kInvalidGraphId; }
  75. virtual GraphId GetFinalRunGraph() const { return kInvalidGraphId; }
  76. virtual void SetActive(GraphId, GraphId) {}
  77. protected:
  78. virtual void LoadInputData(const std::shared_ptr<KernelGraph> &kernel_graph,
  79. const std::vector<tensor::TensorPtr> &inputs_const) const;
  80. void UpdateOutputs(const std::shared_ptr<KernelGraph> &kernel_graph, VectorRef *const outputs,
  81. const std::vector<tensor::TensorPtr> &input_tensors) const;
  82. void Reorder(std::vector<CNodePtr> *node_list);
  83. void Summary(KernelGraph *graph);
  84. // create graph output for RunOp
  85. void CreateOutputNode(const CNodePtr &cnode, const std::shared_ptr<KernelGraph> &graph);
  86. CNodePtr ConstructOutput(const AnfNodePtrList &outputs, const std::shared_ptr<KernelGraph> &graph);
  87. // create a single run op graph
  88. std::shared_ptr<KernelGraph> ConstructSingleOpGraph(const OpRunInfo &op_run_info,
  89. const std::vector<tensor::TensorPtr> &input_tensors,
  90. const std::vector<bool> &tensors_mask);
  91. // trans BaseRef list to py::tuple
  92. BaseRef TransformBaseRefListToTuple(const BaseRef &base_ref);
  93. // create a new kernel graph and update the graph sum
  94. KernelGraphPtr NewKernelGraph();
  95. ParameterPtr CreateNewParameterFromParameter(const AnfNodePtr &anf, bool valid_input, KernelGraph *graph);
  96. std::unordered_map<GraphId, std::shared_ptr<KernelGraph>> graphs_;
  97. std::unordered_map<GraphInfo, std::shared_ptr<KernelGraph>> run_op_graphs_;
  98. std::shared_ptr<Context> context_;
  99. CallBackFunc summary_callback_;
  100. static GraphId graph_sum_;
  101. uint32_t device_id_;
  102. };
// Owning shared handle to any concrete SessionBasic implementation.
using SessionPtr = std::shared_ptr<session::SessionBasic>;
}  // namespace session
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_SESSION_SESSION_BASIC_H