You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

kernel_runtime.h 4.4 kB

6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107
  1. /**
  2. * Copyright 2019 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #ifndef MINDSPORE_CCSRC_DEVICE_KERNEL_RUNTIME_H_
  17. #define MINDSPORE_CCSRC_DEVICE_KERNEL_RUNTIME_H_
  18. #include <vector>
  19. #include <memory>
  20. #include <string>
  21. #include <map>
  22. #include "device/device_address.h"
  23. #include "ir/meta_tensor.h"
  24. #include "predict/generator/utils/ir_model_util.h"
  25. #ifdef ENABLE_DUMP_E2E
  26. #include "debug/e2e_dump.h"
  27. #endif
  28. #include "session/kernel_graph.h"
  29. #include "session/anf_runtime_algorithm.h"
  30. #include "kernel/kernel.h"
  31. #include "utils/context/ms_context.h"
  32. #include "device/memory_manager.h"
  33. using mindspore::tensor::Tensor;
  34. using TensorPtr = std::shared_ptr<Tensor>;
  35. using mindspore::kernel::AddressPtr;
  36. using AddressPtrList = std::vector<mindspore::kernel::AddressPtr>;
  37. namespace mindspore {
  38. namespace device {
  39. class KernelRuntime {
  40. public:
  41. KernelRuntime() = default;
  42. virtual ~KernelRuntime();
  43. virtual bool Init() = 0;
  44. virtual void AssignMemory(session::KernelGraph *graph);
  45. void RunOpAssignMemory(const std::vector<tensor::TensorPtr> &input_tensors, session::KernelGraph *graph);
  46. virtual bool Run(session::KernelGraph *graph);
  47. virtual bool DumpData(session::KernelGraph *graph);
  48. virtual bool RunTask(const session::KernelGraph *graph);
  49. virtual bool GenTask(const session::KernelGraph *graph);
  50. bool LaunchKernel(const session::KernelGraph *graph);
  51. virtual void AssignStaticMemoryInput(const session::KernelGraph *graph);
  52. #ifdef ENABLE_DUMP_E2E
  53. DumpConfPtr GetDumpConf();
  54. #endif
  55. virtual bool LoadTask(const session::KernelGraph *graph);
  56. // for GPU and D to impl
  57. virtual void ReleaseDeviceRes() {}
  58. void set_device_id(uint32_t device_id) { device_id_ = device_id; }
  59. protected:
  60. virtual DeviceAddressPtr CreateDeviceAddress(void *device_ptr, size_t device_size, const string &format,
  61. TypeId type_id) = 0;
  62. virtual bool SyncStream() = 0;
  63. void AssignStaticMemory(session::KernelGraph *graph);
  64. void AssignStaticMemoryValueNode(session::KernelGraph *graph);
  65. void AssignDynamicMemory(session::KernelGraph *graph);
  66. void ReuseAssignDynamicMemory(session::KernelGraph *graph);
  67. void AssignNodeOutputMem(int flag, const AnfNodePtr &node, int index);
  68. void AssignWorkSpaceMem(int flag, const AnfNodePtr &node);
  69. void AssignReuseWorkSpaceMem(const AnfNodePtr &node);
  70. void AssignCommunicationNodeOutputMem(int flag, const AnfNodePtr &node);
  71. void UpdateRefNodeOutputMem(const session::KernelGraph *graph);
  72. void UpdateCommunicationOpInputMem(const AnfNodePtr &node);
  73. #ifdef ENABLE_DUMP_E2E
  74. bool SetDumpConf();
  75. #endif
  76. private:
  77. void AssignStaticMemoryOutput(const session::KernelGraph *graph);
  78. void GenLaunchArgs(const mindspore::kernel::KernelMod &kernel_mod, const AnfNodePtr &kernel,
  79. AddressPtrList *kernel_inputs, AddressPtrList *kernel_workspaces, AddressPtrList *kernel_outputs);
  80. bool LaunchKernelMod(const session::KernelGraph &graph);
  81. void GenAddrCleanLaunchArgs(const CNodePtr &cnode, AddressPtrList *kernel_inputs);
  82. size_t CountNodeDeviceMemorySize(const AnfNodePtr &node, size_t output_index);
  83. void RunOpAssignInputMemory(const std::vector<tensor::TensorPtr> &input_tensors, const session::KernelGraph *graph);
  84. void RunOpAssignOutputMemory(const AnfNodePtr &kernel);
  85. void RunOpAssignWorkSpaceMemory(const AnfNodePtr &kernel);
  86. void AssignValueNodeTensor(const ValueNodePtr &value_node, const ValuePtr &node_value, size_t output_idx);
  87. protected:
  88. uint32_t device_id_{0};
  89. #ifdef ENABLE_DUMP_E2E
  90. DumpConfPtr dump_conf_ptr_;
  91. #endif
  92. void *stream_ = nullptr;
  93. std::shared_ptr<MemoryManager> mem_manager_{nullptr};
  94. };
  95. using KernelRuntimePtr = std::shared_ptr<KernelRuntime>;
  96. } // namespace device
  97. } // namespace mindspore
  98. #endif // MINDSPORE_CCSRC_DEVICE_KERNEL_RUNTIME_H_