You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

kernel_runtime.h 4.7 kB

6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113
/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
  16. #ifndef MINDSPORE_CCSRC_DEVICE_KERNEL_RUNTIME_H_
  17. #define MINDSPORE_CCSRC_DEVICE_KERNEL_RUNTIME_H_
  18. #include <vector>
  19. #include <memory>
  20. #include <string>
  21. #include <map>
  22. #include "device/device_address.h"
  23. #include "ir/tensor.h"
  24. #include "predict/generator/utils/ir_model_util.h"
  25. #ifdef ENABLE_DUMP_E2E
  26. #include "debug/e2e_dump.h"
  27. #endif
  28. #include "session/kernel_graph.h"
  29. #include "session/anf_runtime_algorithm.h"
  30. #include "kernel/kernel.h"
  31. #include "utils/context/ms_context.h"
  32. #include "device/memory_manager.h"
  33. using mindspore::tensor::Tensor;
  34. using TensorPtr = std::shared_ptr<Tensor>;
  35. using mindspore::kernel::AddressPtr;
  36. using AddressPtrList = std::vector<mindspore::kernel::AddressPtr>;
  37. namespace mindspore {
  38. namespace device {
  39. class KernelRuntime {
  40. public:
  41. KernelRuntime() = default;
  42. virtual ~KernelRuntime();
  43. virtual bool Init() = 0;
  44. virtual void AssignMemory(session::KernelGraph *graph);
  45. void RunOpAssignMemory(const std::vector<tensor::TensorPtr> &input_tensors, session::KernelGraph *graph);
  46. virtual bool Run(session::KernelGraph *graph);
  47. virtual bool DumpData(session::KernelGraph *graph);
  48. virtual bool RunTask(const session::KernelGraph *graph);
  49. virtual bool GenTask(const session::KernelGraph *graph);
  50. bool LaunchKernel(const session::KernelGraph *graph);
  51. virtual void AssignStaticMemoryInput(const session::KernelGraph *graph);
  52. virtual void AssignStaticMemoryValueNode(session::KernelGraph *graph);
  53. virtual void ClearGraphRuntimeResource(uint32_t graph_id);
  54. virtual bool SyncStream() = 0;
  55. #ifdef ENABLE_DUMP_E2E
  56. DumpConfPtr GetDumpConf();
  57. #endif
  58. virtual bool LoadTask(const session::KernelGraph *graph);
  59. // for GPU and D to impl
  60. virtual void ReleaseDeviceRes() {}
  61. void set_device_id(uint32_t device_id) { device_id_ = device_id; }
  62. protected:
  63. virtual DeviceAddressPtr CreateDeviceAddress(void *device_ptr, size_t device_size, const string &format,
  64. TypeId type_id) = 0;
  65. virtual bool NodeOutputDeviceAddressExist(const AnfNodePtr &node, size_t index);
  66. void AssignStaticMemory(session::KernelGraph *graph);
  67. void AssignDynamicMemory(session::KernelGraph *graph);
  68. void ReuseAssignDynamicMemory(session::KernelGraph *graph);
  69. void AssignNodeOutputMem(int flag, const AnfNodePtr &node, int index);
  70. void AssignWorkSpaceMem(int flag, const AnfNodePtr &node);
  71. void AssignReuseWorkSpaceMem(const AnfNodePtr &node);
  72. void UpdateRefNodeOutputMem(const session::KernelGraph *graph);
  73. void AssignCommunicationNodeOutputMem(int flag, const AnfNodePtr &node);
  74. void AssignCommunicationNodeInputMem(const AnfNodePtr &node);
  75. void AssignCommunicationNodeMem(int flag, const AnfNodePtr &node);
  76. #ifdef ENABLE_DUMP_E2E
  77. bool SetDumpConf();
  78. #endif
  79. private:
  80. void AssignStaticMemoryOutput(const session::KernelGraph *graph);
  81. void GenLaunchArgs(const mindspore::kernel::KernelMod &kernel_mod, const AnfNodePtr &kernel,
  82. AddressPtrList *kernel_inputs, AddressPtrList *kernel_workspaces, AddressPtrList *kernel_outputs);
  83. bool LaunchKernelMod(const session::KernelGraph &graph);
  84. void GenAddrCleanLaunchArgs(const CNodePtr &cnode, AddressPtrList *kernel_inputs);
  85. size_t CountNodeDeviceMemorySize(const AnfNodePtr &node, size_t output_index);
  86. void RunOpAssignInputMemory(const std::vector<tensor::TensorPtr> &input_tensors, const session::KernelGraph *graph);
  87. void RunOpAssignOutputMemory(const AnfNodePtr &kernel);
  88. void RunOpAssignWorkSpaceMemory(const AnfNodePtr &kernel);
  89. void AssignValueNodeTensor(const ValueNodePtr &value_node, const ValuePtr &node_value, size_t output_idx);
  90. DeviceAddressPtr PreAssignCNodeMemory(const AnfNodePtr &anf_node, size_t index);
  91. protected:
  92. uint32_t device_id_{0};
  93. #ifdef ENABLE_DUMP_E2E
  94. DumpConfPtr dump_conf_ptr_;
  95. #endif
  96. void *stream_ = nullptr;
  97. std::shared_ptr<MemoryManager> mem_manager_{nullptr};
  98. };
  99. using KernelRuntimePtr = std::shared_ptr<KernelRuntime>;
  100. } // namespace device
  101. } // namespace mindspore
  102. #endif // MINDSPORE_CCSRC_DEVICE_KERNEL_RUNTIME_H_