You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

arena.cc 9.8 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297
  1. /**
  2. * Copyright 2019 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "minddata/dataset/util/arena.h"
  17. #include <unistd.h>
  18. #include <utility>
  19. #include "minddata/dataset/util/log_adapter.h"
  20. #include "minddata/dataset/util/system_pool.h"
  21. namespace mindspore {
  22. namespace dataset {
  23. struct MemHdr {
  24. uint32_t sig;
  25. uint64_t addr;
  26. uint64_t blk_size;
  27. MemHdr(uint64_t a, uint64_t sz) : sig(0xDEADBEEF), addr(a), blk_size(sz) {}
  28. static void setHdr(void *p, uint64_t addr, uint64_t sz) { new (p) MemHdr(addr, sz); }
  29. static void getHdr(void *p, MemHdr *hdr) {
  30. auto *tmp = reinterpret_cast<MemHdr *>(p);
  31. *hdr = *tmp;
  32. }
  33. };
  34. ArenaImpl::ArenaImpl(void *ptr, size_t sz) : size_in_bytes_(sz), ptr_(ptr) {
  35. // Divide the memory into blocks. Ignore the last partial block.
  36. uint64_t num_blks = size_in_bytes_ / ARENA_BLK_SZ;
  37. MS_LOG(DEBUG) << "Arena memory pool is created. Number of blocks : " << num_blks << ". Block size : " << ARENA_BLK_SZ
  38. << ".";
  39. tr_.Insert(0, num_blks);
  40. }
  41. Status ArenaImpl::Allocate(size_t n, void **p) {
  42. RETURN_UNEXPECTED_IF_NULL(p);
  43. if (n == 0) {
  44. *p = nullptr;
  45. return Status::OK();
  46. }
  47. // Round up n to 1K block
  48. uint64_t req_size = static_cast<uint64_t>(n) + ARENA_WALL_OVERHEAD_SZ;
  49. if (req_size > this->get_max_size()) {
  50. return Status(StatusCode::kMDOutOfMemory);
  51. }
  52. uint64_t reqBlk = SizeToBlk(req_size);
  53. // Do a first fit search
  54. auto blk = tr_.Top();
  55. if (blk.second && reqBlk <= blk.first.priority) {
  56. uint64_t addr = blk.first.key;
  57. uint64_t size = blk.first.priority;
  58. // Trim to the required size and return the rest to the tree.
  59. tr_.Pop();
  60. if (size > reqBlk) {
  61. tr_.Insert(addr + reqBlk, size - reqBlk);
  62. }
  63. char *q = static_cast<char *>(ptr_) + addr * ARENA_BLK_SZ;
  64. MemHdr::setHdr(q, addr, reqBlk);
  65. *p = get_user_addr(q);
  66. } else {
  67. return Status(StatusCode::kMDOutOfMemory);
  68. }
  69. return Status::OK();
  70. }
  71. std::pair<std::pair<uint64_t, uint64_t>, bool> ArenaImpl::FindPrevBlk(uint64_t addr) {
  72. for (auto &it : tr_) {
  73. if (it.key + it.priority == addr) {
  74. return std::make_pair(std::make_pair(it.key, it.priority), true);
  75. } else if (it.key > addr) {
  76. break;
  77. }
  78. }
  79. return std::make_pair(std::make_pair(0, 0), false);
  80. }
  81. void ArenaImpl::Deallocate(void *p) {
  82. if (p == nullptr) {
  83. MS_LOG(ERROR) << "The pointer[p] is null.";
  84. return;
  85. }
  86. auto *q = get_base_addr(p);
  87. MemHdr hdr(0, 0);
  88. MemHdr::getHdr(q, &hdr);
  89. MS_ASSERT(hdr.sig == 0xDEADBEEF);
  90. // We are going to insert a free block back to the treap. But first, check if we can combine
  91. // with the free blocks before and after to form a bigger block.
  92. // Query if we have a free block after us.
  93. auto nextBlk = tr_.Search(hdr.addr + hdr.blk_size);
  94. if (nextBlk.second) {
  95. // Form a bigger block
  96. hdr.blk_size += nextBlk.first.priority;
  97. tr_.DeleteKey(nextBlk.first.key);
  98. }
  99. // Next find a block in front of us.
  100. auto result = FindPrevBlk(hdr.addr);
  101. if (result.second) {
  102. // We can combine with this block
  103. hdr.addr = result.first.first;
  104. hdr.blk_size += result.first.second;
  105. tr_.DeleteKey(result.first.first);
  106. }
  107. // Now we can insert the free node
  108. tr_.Insert(hdr.addr, hdr.blk_size);
  109. }
  110. bool ArenaImpl::BlockEnlarge(uint64_t *addr, uint64_t old_sz, uint64_t new_sz) {
  111. uint64_t size = old_sz;
  112. // The logic is very much identical to Deallocate. We will see if we can combine with the blocks before and after.
  113. auto next_blk = tr_.Search(*addr + old_sz);
  114. if (next_blk.second) {
  115. size += next_blk.first.priority;
  116. if (size >= new_sz) {
  117. // In this case, we can just enlarge the block without doing any moving.
  118. tr_.DeleteKey(next_blk.first.key);
  119. // Return unused back to the tree.
  120. if (size > new_sz) {
  121. tr_.Insert(*addr + new_sz, size - new_sz);
  122. }
  123. }
  124. return true;
  125. }
  126. // If we still get here, we have to look at the block before us.
  127. auto result = FindPrevBlk(*addr);
  128. if (result.second) {
  129. // We can combine with this block together with the next block (if any)
  130. size += result.first.second;
  131. *addr = result.first.first;
  132. if (size >= new_sz) {
  133. // We can combine with this block together with the next block (if any)
  134. tr_.DeleteKey(*addr);
  135. if (next_blk.second) {
  136. tr_.DeleteKey(next_blk.first.key);
  137. }
  138. // Return unused back to the tree.
  139. if (size > new_sz) {
  140. tr_.Insert(*addr + new_sz, size - new_sz);
  141. }
  142. return true;
  143. }
  144. }
  145. return false;
  146. }
  147. Status ArenaImpl::FreeAndAlloc(void **pp, size_t old_sz, size_t new_sz) {
  148. RETURN_UNEXPECTED_IF_NULL(pp);
  149. RETURN_UNEXPECTED_IF_NULL(*pp);
  150. void *p = nullptr;
  151. void *q = *pp;
  152. RETURN_IF_NOT_OK(Allocate(new_sz, &p));
  153. errno_t err = memmove_s(p, new_sz, q, old_sz);
  154. if (err) {
  155. RETURN_STATUS_UNEXPECTED("Error from memmove: " + std::to_string(err));
  156. }
  157. *pp = p;
  158. // Free the old one.
  159. Deallocate(q);
  160. return Status::OK();
  161. }
// Resize the allocation at *pp from old_sz to new_sz bytes.
// Shrinks in place; grows in place when adjacent free blocks allow it,
// otherwise falls back to allocate-copy-free (FreeAndAlloc), which may
// move the data and update *pp.
Status ArenaImpl::Reallocate(void **pp, size_t old_sz, size_t new_sz) {
  RETURN_UNEXPECTED_IF_NULL(pp);
  RETURN_UNEXPECTED_IF_NULL(*pp);
  // Requested size plus the hidden header, rounded up to blocks below.
  uint64_t actual_size = static_cast<uint64_t>(new_sz) + ARENA_WALL_OVERHEAD_SZ;
  if (actual_size > this->get_max_size()) {
    RETURN_STATUS_UNEXPECTED("Request size too big : " + std::to_string(new_sz));
  }
  uint64_t req_blk = SizeToBlk(actual_size);
  char *oldAddr = reinterpret_cast<char *>(*pp);
  auto *oldHdr = get_base_addr(oldAddr);
  MemHdr hdr(0, 0);
  MemHdr::getHdr(oldHdr, &hdr);
  MS_ASSERT(hdr.sig == 0xDEADBEEF);
  if (hdr.blk_size > req_blk) {
    // Shrinking: refresh the header with the new smaller size.
    MemHdr::setHdr(oldHdr, hdr.addr, req_blk);
    // Return the unused memory back to the tree. Unlike allocate, we need to merge with the block after us.
    auto next_blk = tr_.Search(hdr.addr + hdr.blk_size);
    if (next_blk.second) {
      hdr.blk_size += next_blk.first.priority;
      tr_.DeleteKey(next_blk.first.key);
    }
    tr_.Insert(hdr.addr + req_blk, hdr.blk_size - req_blk);
  } else if (hdr.blk_size < req_blk) {
    // Growing: attempt a block enlarge. No guarantee it is always successful.
    uint64_t addr = hdr.addr;
    bool success = BlockEnlarge(&addr, hdr.blk_size, req_blk);
    if (success) {
      auto *newHdr = static_cast<char *>(ptr_) + addr * ARENA_BLK_SZ;
      MemHdr::setHdr(newHdr, addr, req_blk);
      if (addr != hdr.addr) {
        // The block grew leftwards into the preceding free extent, so the
        // payload must be moved down to the new user address.
        errno_t err =
          memmove_s(get_user_addr(newHdr), (req_blk * ARENA_BLK_SZ) - ARENA_WALL_OVERHEAD_SZ, oldAddr, old_sz);
        if (err) {
          RETURN_STATUS_UNEXPECTED("Error from memmove: " + std::to_string(err));
        }
      }
      *pp = get_user_addr(newHdr);
      return Status::OK();
    }
    // In-place growth failed; relocate via allocate-copy-free.
    return FreeAndAlloc(pp, old_sz, new_sz);
  }
  // hdr.blk_size == req_blk: the block already fits; nothing to do.
  return Status::OK();
}
  206. int ArenaImpl::PercentFree() const {
  207. uint64_t sz = 0;
  208. for (auto &it : tr_) {
  209. sz += it.priority;
  210. }
  211. if (size_in_bytes_ == 0) {
  212. MS_LOG(ERROR) << "size_in_bytes_ can not be zero.";
  213. return 0;
  214. }
  215. double ratio = static_cast<double>(sz * ARENA_BLK_SZ) / static_cast<double>(size_in_bytes_);
  216. return static_cast<int>(ratio * 100.0);
  217. }
  218. uint64_t ArenaImpl::SizeToBlk(uint64_t sz) {
  219. uint64_t req_blk = sz / ARENA_BLK_SZ;
  220. if (sz % ARENA_BLK_SZ) {
  221. ++req_blk;
  222. }
  223. return req_blk;
  224. }
  225. std::ostream &operator<<(std::ostream &os, const ArenaImpl &s) {
  226. for (auto &it : s.tr_) {
  227. os << "Address : " << it.key << ". Size : " << it.priority << "\n";
  228. }
  229. return os;
  230. }
// Allocate the backing memory for the arena and build the ArenaImpl on it.
// With ENABLE_GPUQUE and is_cuda_malloc_, pinned host memory is obtained via
// cudaHostAlloc; otherwise ordinary host memory comes from DeMalloc.
// Returns kMDOutOfMemory when the backing allocation fails.
Status Arena::Init() {
  try {
    // size_in_MB_ megabytes -> bytes (1048576 = 1024 * 1024).
    int64_t sz = size_in_MB_ * 1048576L;
#ifdef ENABLE_GPUQUE
    if (is_cuda_malloc_) {
      // Pinned (page-locked) host memory for faster GPU transfers.
      auto ret = cudaHostAlloc(&ptr_, sz, cudaHostAllocDefault);
      if (ret != cudaSuccess) {
        MS_LOG(ERROR) << "cudaHostAlloc failed, ret[" << static_cast<int>(ret) << "], " << cudaGetErrorString(ret);
        return Status(StatusCode::kMDOutOfMemory);
      }
      impl_ = std::make_unique<ArenaImpl>(ptr_, sz);
    } else {
      RETURN_IF_NOT_OK(DeMalloc(sz, &ptr_, false));
      impl_ = std::make_unique<ArenaImpl>(ptr_, sz);
    }
#else
    RETURN_IF_NOT_OK(DeMalloc(sz, &ptr_, false));
    impl_ = std::make_unique<ArenaImpl>(ptr_, sz);
#endif
  } catch (std::bad_alloc &e) {
    return Status(StatusCode::kMDOutOfMemory);
  }
  return Status::OK();
}
#ifdef ENABLE_GPUQUE
// GPU build: remember whether the backing memory should come from cudaHostAlloc.
Arena::Arena(size_t val_in_MB, bool is_cuda_malloc)
    : ptr_(nullptr), size_in_MB_(val_in_MB), is_cuda_malloc_(is_cuda_malloc) {}
// Factory: construct an Arena of val_in_MB megabytes into *p_ba and run Init().
// Uses nothrow new so an allocation failure maps to kMDOutOfMemory rather
// than an exception.
Status Arena::CreateArena(std::shared_ptr<Arena> *p_ba, size_t val_in_MB, bool is_cuda_malloc) {
  RETURN_UNEXPECTED_IF_NULL(p_ba);
  auto ba = new (std::nothrow) Arena(val_in_MB, is_cuda_malloc);
  if (ba == nullptr) {
    return Status(StatusCode::kMDOutOfMemory);
  }
  // Hand ownership to the caller's shared_ptr before Init so the object is
  // released even if Init fails below.
  (*p_ba).reset(ba);
  RETURN_IF_NOT_OK(ba->Init());
  return Status::OK();
}
#else
// CPU build: plain host-memory arena.
Arena::Arena(size_t val_in_MB) : ptr_(nullptr), size_in_MB_(val_in_MB) {}
// Factory: construct an Arena of val_in_MB megabytes into *p_ba and run Init().
Status Arena::CreateArena(std::shared_ptr<Arena> *p_ba, size_t val_in_MB) {
  RETURN_UNEXPECTED_IF_NULL(p_ba);
  auto ba = new (std::nothrow) Arena(val_in_MB);
  if (ba == nullptr) {
    return Status(StatusCode::kMDOutOfMemory);
  }
  // Hand ownership to the caller's shared_ptr before Init so the object is
  // released even if Init fails below.
  (*p_ba).reset(ba);
  RETURN_IF_NOT_OK(ba->Init());
  return Status::OK();
}
#endif
  281. } // namespace dataset
  282. } // namespace mindspore