
device_context.h

/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_RUNTIME_HARDWARE_DEVICE_CONTEXT_H_
#define MINDSPORE_CCSRC_RUNTIME_HARDWARE_DEVICE_CONTEXT_H_

#include <string>
#include <vector>
#include <memory>
#include "runtime/device/device_address.h"
#include "runtime/device/bucket.h"
#include "backend/session/kernel_graph.h"
#include "backend/session/anf_runtime_algorithm.h"
namespace mindspore {
namespace device {
using mindspore::kernel::AddressPtr;
using mindspore::kernel::KernelMod;

const size_t kDeviceContextsNumOne = 1;
const size_t kDeviceContextsNumTwo = 2;

struct DeviceContextKey {
  // Device type name, such as 'GPU', 'Ascend' or 'CPU'.
  std::string device_name_;
  uint32_t device_id_{0};

  // The result of ToString() is used as the key to look up a DeviceContext
  // in the cache map that maintains the created DeviceContext objects.
  std::string ToString() const { return device_name_ + "_" + std::to_string(device_id_); }
};
// DeviceContext is the unified interface for interacting with a device.
class DeviceContext {
 public:
  explicit DeviceContext(const DeviceContextKey &device_context_key) : device_context_key_(device_context_key) {}
  virtual ~DeviceContext() = default;

  // Initialize the device context.
  virtual void Initialize() = 0;

  // Destroy the device context and release device resources.
  virtual void Destroy() {}

  // Functions for allocating and freeing device memory.
  virtual bool AllocateMemory(DeviceAddress *const &address, size_t size) const = 0;
  virtual void FreeMemory(DeviceAddress *const &address) const = 0;

  // Allocate continuous device memory end to end into 'addr_list'.
  // Communication operators may need continuous memory for their inputs and outputs
  // to optimize communication performance.
  virtual bool AllocateContinuousMemory(const std::vector<DeviceAddressPtr> &addr_list, size_t total_size,
                                        const std::vector<size_t> &size_list) const {
    return true;
  }

  // Create a concrete device address according to the device type.
  virtual DeviceAddressPtr CreateDeviceAddress(void *const device_ptr, size_t device_size, const string &format,
                                               TypeId type_id) const = 0;

  // Get the device address type of the device, such as GPU or Ascend.
  virtual DeviceAddressType GetDeviceAddressType() const = 0;

  // Optimize the kernel graph for graph mode.
  virtual void OptimizeGraph(const KernelGraphPtr &graph) const {}

  // Optimize the single-operator graph for PyNative mode.
  virtual void OptimizeSingleOpGraph(const KernelGraphPtr &graph) const {}

  // Select the matching backend kernels according to the data types and formats of the inputs and outputs of all
  // execution operators, and set the final device data type and format information on the backend kernels. The
  // device data type and format that replace the original ones are used when executing the kernels.
  virtual void SetOperatorInfo(const std::vector<CNodePtr> &nodes) const = 0;

  // Generate a 'KernelMod' for each kernel and set it into the kernel;
  // 'KernelMod' is the real executable object of a kernel.
  virtual void CreateKernel(const std::vector<CNodePtr> &nodes) const = 0;

  // Adjust the kernel graph before running it; used in graph mode.
  virtual void PreprocessBeforeRunGraph(const KernelGraphPtr &graph) const {}

  // Adjust the single-op kernel graph before running it; used in PyNative mode.
  virtual void PreprocessBeforeRunSingleOpGraph(const KernelGraphPtr &graph) const {}

  // Infer the kernel shape and update the abstract info for dynamic-shape kernels.
  virtual void UpdateDynamicShape(const CNodePtr &kernel) const { AnfAlgo::InferShape(kernel); }

  // Launch a kernel via its 'KernelMod'.
  virtual bool LaunchKernel(const CNodePtr &kernel, const std::vector<AddressPtr> &inputs,
                            const std::vector<AddressPtr> &workspace, const std::vector<AddressPtr> &outputs,
                            bool is_dynamic_shape = false) const = 0;

  // Synchronize the stream. Devices such as GPU and Ascend need a stream to launch kernels asynchronously;
  // 'SyncStream' blocks the calling thread until all tasks in the stream have completed.
  // Devices that do not need a stream can ignore this function.
  virtual bool SyncStream(size_t stream_id = 0) const { return true; }

  // Get device_context_key_ to obtain the device name and device id.
  const DeviceContextKey &device_context_key() const { return device_context_key_; }

  // Get the rank id for distributed training.
  virtual uint32_t GetRankID() const { return 0; }

  // Create and initialize a bucket for every allreduce operator. Buckets are used in PyNative distributed training
  // mode; one bucket handles all the resources needed to launch and synchronize an allreduce operator.
  virtual std::shared_ptr<Bucket> CreateBucket(uint32_t bucket_id, uint32_t bucket_size) const { return nullptr; }

 protected:
  DeviceContextKey device_context_key_;
};
using DeviceContextPtr = std::shared_ptr<DeviceContext>;
}  // namespace device
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_RUNTIME_HARDWARE_DEVICE_CONTEXT_H_
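
Taken together, these hooks outline the life cycle a graph executor drives on each backend: initialize the context, optimize the graph, select kernel info, create the 'KernelMod' objects, preprocess, then launch the kernels and synchronize the stream. The sketch below is illustrative only and is not part of the MindSpore sources; the helper name RunGraphOnce and the pre-collected kernel and address lists are assumptions made for the example, which simply calls the interface above in the order implied by its comments.

// Illustrative sketch, not MindSpore source: drives a DeviceContext through the
// call order implied by the interface above. RunGraphOnce and its pre-collected
// kernel/address lists are assumptions made for this example only.
#include <vector>
#include "runtime/hardware/device_context.h"

namespace mindspore {
namespace device {
bool RunGraphOnce(DeviceContext *ctx, const KernelGraphPtr &graph, const std::vector<CNodePtr> &kernels,
                  const std::vector<std::vector<AddressPtr>> &inputs,
                  const std::vector<std::vector<AddressPtr>> &workspaces,
                  const std::vector<std::vector<AddressPtr>> &outputs) {
  ctx->Initialize();                     // Prepare the backend runtime, stream and memory pool.
  ctx->OptimizeGraph(graph);             // Graph-mode optimization pass.
  ctx->SetOperatorInfo(kernels);         // Choose the device data type/format for each kernel.
  ctx->CreateKernel(kernels);            // Build the executable 'KernelMod' for each kernel.
  ctx->PreprocessBeforeRunGraph(graph);  // Backend-specific adjustments before execution.

  for (size_t i = 0; i < kernels.size(); ++i) {
    if (!ctx->LaunchKernel(kernels[i], inputs[i], workspaces[i], outputs[i])) {
      return false;
    }
  }
  // Launches may be asynchronous on GPU/Ascend; block until the stream drains.
  return ctx->SyncStream();
}
}  // namespace device
}  // namespace mindspore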