You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

cuda_driver.cc 8.1 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241
  1. /**
  2. * Copyright 2019 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "runtime/device/gpu/cuda_driver.h"
  17. #include <iostream>
  18. #include "utils/log_adapter.h"
  19. #include "utils/convert_utils.h"
  20. namespace mindspore {
  21. namespace device {
  22. namespace gpu {
  23. size_t CudaDriver::AllocDeviceMem(size_t size, DeviceMemPtr *addr) {
  24. size_t retreat_count = 0;
  25. auto ret = cudaMalloc(reinterpret_cast<void **>(addr), size);
  26. // If free memory is not enough, then retry with mem_malloc_retry_rate_.
  27. while (ret == cudaErrorMemoryAllocation) {
  28. size = FloatToSize(size * mem_malloc_retry_rate_);
  29. size = (size / mem_malloc_align_size_) * mem_malloc_align_size_;
  30. ret = cudaMalloc(reinterpret_cast<void **>(addr), size);
  31. retreat_count++;
  32. if (retreat_count > mem_malloc_retry_conut_max_) {
  33. break;
  34. }
  35. }
  36. if (ret != cudaSuccess) {
  37. MS_LOG(ERROR) << "cudaMalloc failed, ret[" << static_cast<int>(ret) << "], " << cudaGetErrorString(ret);
  38. return 0;
  39. }
  40. return size;
  41. }
  42. bool CudaDriver::FreeDeviceMem(const DeviceMemPtr &addr) {
  43. auto ret = cudaFree(addr);
  44. if (ret != cudaSuccess) {
  45. MS_LOG(ERROR) << "cudaFree failed, ret[" << static_cast<int>(ret) << "], " << cudaGetErrorString(ret);
  46. return false;
  47. }
  48. return true;
  49. }
  50. size_t CudaDriver::AllocHostPinnedMem(size_t size, void **addr) {
  51. if (size == 0) {
  52. MS_LOG(EXCEPTION) << "The memory allocate size is 0";
  53. }
  54. auto ret = cudaHostAlloc(addr, size, cudaHostAllocDefault);
  55. if (ret != cudaSuccess) {
  56. MS_LOG(ERROR) << "cudaHostAlloc failed, ret[" << static_cast<int>(ret) << "], " << cudaGetErrorString(ret);
  57. return 0;
  58. }
  59. return size;
  60. }
  61. void CudaDriver::FreeHostPinnedMem(void *addr) {
  62. if (addr) {
  63. auto ret = cudaFreeHost(addr);
  64. if (ret != cudaSuccess) {
  65. MS_LOG(EXCEPTION) << "cudaFreeHost failed, ret[" << static_cast<int>(ret) << "], " << cudaGetErrorString(ret);
  66. }
  67. }
  68. }
  69. bool CudaDriver::CopyHostMemToDevice(const DeviceMemPtr &dst, const void *src, size_t size) {
  70. auto ret = cudaMemcpy(dst, src, size, cudaMemcpyHostToDevice);
  71. if (ret != cudaSuccess) {
  72. MS_LOG(ERROR) << "cudaMemcpy failed, ret[" << static_cast<int>(ret) << "], " << cudaGetErrorString(ret);
  73. return false;
  74. }
  75. return true;
  76. }
  77. bool CudaDriver::CopyDeviceMemToHost(const HostMemPtr &dst, const DeviceMemPtr &src, size_t size) {
  78. auto ret = cudaMemcpy(dst, src, size, cudaMemcpyDeviceToHost);
  79. if (ret != cudaSuccess) {
  80. MS_LOG(ERROR) << "cudaMemcpy failed, ret[" << static_cast<int>(ret) << "], " << cudaGetErrorString(ret);
  81. return false;
  82. }
  83. return true;
  84. }
  85. bool CudaDriver::CopyHostMemToDeviceAsync(const DeviceMemPtr &dst, const void *src, size_t size, DeviceStream stream) {
  86. auto ret = cudaMemcpyAsync(dst, src, size, cudaMemcpyHostToDevice, (cudaStream_t)stream);
  87. if (ret != cudaSuccess) {
  88. MS_LOG(ERROR) << "cudaMemcpyAsync failed, ret[" << static_cast<int>(ret) << "], " << cudaGetErrorString(ret);
  89. return false;
  90. }
  91. return true;
  92. }
  93. bool CudaDriver::CopyDeviceMemToHostAsync(const HostMemPtr &dst, const DeviceMemPtr &src, size_t size,
  94. DeviceStream stream) {
  95. auto ret = cudaMemcpyAsync(dst, src, size, cudaMemcpyDeviceToHost, (cudaStream_t)stream);
  96. if (ret != cudaSuccess) {
  97. MS_LOG(ERROR) << "cudaMemcpyAsync failed, ret[" << static_cast<int>(ret) << "], " << cudaGetErrorString(ret);
  98. return false;
  99. }
  100. return true;
  101. }
  102. size_t CudaDriver::total_mem_size() {
  103. size_t free;
  104. size_t total;
  105. auto ret = cudaMemGetInfo(&free, &total);
  106. if (ret != cudaSuccess) {
  107. MS_LOG(ERROR) << "cudaMemGetInfo failed, ret[" << static_cast<int>(ret) << "], " << cudaGetErrorString(ret);
  108. return 0;
  109. }
  110. return total;
  111. }
  112. size_t CudaDriver::free_mem_size() {
  113. size_t free;
  114. size_t total;
  115. auto ret = cudaMemGetInfo(&free, &total);
  116. if (ret != cudaSuccess) {
  117. MS_LOG(ERROR) << "cudaMemGetInfo failed, ret[" << static_cast<int>(ret) << "], " << cudaGetErrorString(ret);
  118. return 0;
  119. }
  120. return free;
  121. }
  122. bool CudaDriver::CreateStream(DeviceStream *stream) {
  123. auto ret = cudaStreamCreateWithFlags(reinterpret_cast<CUstream_st **>(stream), cudaStreamNonBlocking);
  124. if (ret != cudaSuccess) {
  125. MS_LOG(ERROR) << "cudaStreamCreate failed, ret[" << static_cast<int>(ret) << "], " << cudaGetErrorString(ret);
  126. return false;
  127. }
  128. return true;
  129. }
  130. bool CudaDriver::DestroyStream(const DeviceStream &stream) {
  131. auto ret = cudaStreamDestroy((cudaStream_t)stream);
  132. if (ret != cudaSuccess) {
  133. MS_LOG(ERROR) << "cudaStreamDestroy failed, ret[" << static_cast<int>(ret) << "], " << cudaGetErrorString(ret);
  134. return false;
  135. }
  136. return true;
  137. }
  138. bool CudaDriver::SyncStream(const DeviceStream &stream) {
  139. auto ret = cudaStreamSynchronize((cudaStream_t)stream);
  140. if (ret != cudaSuccess) {
  141. MS_LOG(ERROR) << "cudaStreamSynchronize failed, ret[" << static_cast<int>(ret) << "], " << cudaGetErrorString(ret);
  142. return false;
  143. }
  144. return true;
  145. }
  146. bool CudaDriver::CreateEvent(DeviceEvent *event, unsigned int flag) {
  147. auto ret = cudaEventCreateWithFlags(reinterpret_cast<cudaEvent_t *>(event), flag);
  148. if (ret != cudaSuccess) {
  149. MS_LOG(ERROR) << "cudaEventCreateWithFlags failed, ret[" << static_cast<int>(ret) << "], "
  150. << cudaGetErrorString(ret);
  151. return false;
  152. }
  153. return true;
  154. }
  155. bool CudaDriver::DestroyEvent(const DeviceEvent &event) {
  156. auto ret = cudaEventDestroy((cudaEvent_t)event);
  157. if (ret != cudaSuccess) {
  158. MS_LOG(ERROR) << "cudaEventDestroy failed, ret[" << static_cast<int>(ret) << "], " << cudaGetErrorString(ret);
  159. return false;
  160. }
  161. return true;
  162. }
  163. bool CudaDriver::RecordEvent(DeviceEvent event, DeviceStream stream) {
  164. auto ret = cudaEventRecord((cudaEvent_t)event, (cudaStream_t)stream);
  165. if (ret != cudaSuccess) {
  166. MS_LOG(ERROR) << "cudaEventRecord failed, ret[" << static_cast<int>(ret) << "], " << cudaGetErrorString(ret);
  167. return false;
  168. }
  169. return true;
  170. }
  171. bool CudaDriver::SyncEvent(const DeviceEvent &event) {
  172. auto ret = cudaEventSynchronize((cudaEvent_t)event);
  173. if (ret != cudaSuccess) {
  174. MS_LOG(ERROR) << "cudaEventSynchronize failed, ret[" << static_cast<int>(ret) << "], " << cudaGetErrorString(ret);
  175. return false;
  176. }
  177. return true;
  178. }
  179. bool CudaDriver::QueryEvent(const DeviceEvent &event) {
  180. auto ret = cudaEventQuery((cudaEvent_t)event);
  181. if (ret == cudaSuccess) {
  182. return true;
  183. } else if (ret == cudaErrorNotReady) {
  184. return false;
  185. } else {
  186. MS_LOG(ERROR) << "cudaEventQuery failed, ret[" << static_cast<int>(ret) << "], " << cudaGetErrorString(ret);
  187. return false;
  188. }
  189. }
  190. bool CudaDriver::ElapsedTime(float *cost_time, const DeviceEvent &start, const DeviceEvent &end) {
  191. auto ret = cudaEventElapsedTime(cost_time, (cudaEvent_t)start, (cudaEvent_t)end);
  192. if (ret == cudaSuccess) {
  193. return true;
  194. } else {
  195. MS_LOG(ERROR) << "cudaEventElapsedTime failed, ret[" << static_cast<int>(ret) << "], " << cudaGetErrorString(ret);
  196. return false;
  197. }
  198. }
  199. int CudaDriver::device_count() {
  200. int dev_count;
  201. auto ret = cudaGetDeviceCount(&dev_count);
  202. if (ret != cudaSuccess) {
  203. MS_LOG(ERROR) << "cudaGetDeviceCount failed, ret[" << static_cast<int>(ret) << "], " << cudaGetErrorString(ret);
  204. }
  205. return dev_count;
  206. }
  207. bool CudaDriver::set_current_device(int index) {
  208. auto ret = cudaSetDevice(index);
  209. if (ret != cudaSuccess) {
  210. MS_LOG(ERROR) << "cudaSetDevice failed, ret[" << static_cast<int>(ret) << "], " << cudaGetErrorString(ret);
  211. return false;
  212. }
  213. return true;
  214. }
  215. } // namespace gpu
  216. } // namespace device
  217. } // namespace mindspore