
tensor.cpp 9.8 kB

/**
 * \file lite-c/src/tensor.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#include "lite/tensor.h"
#include "../../src/tensor_impl_base.h"
#include "common.h"
#include "lite-c/tensor_c.h"

#include <set>
#include <string>
#include <unordered_map>

const LiteLayout default_layout = {
        .shapes = {0, 0, 0, 0, 0}, .ndim = 0, .data_type = LiteDataType::LITE_FLOAT};

const LiteTensorDesc default_desc = {
        .is_pinned_host = false,
        .layout = default_layout,
        .device_type = LiteDeviceType::LITE_CPU,
        .device_id = 0};
namespace {
std::unordered_map<void*, std::shared_ptr<lite::Tensor>>& get_global_tensor_holder() {
    static thread_local std::unordered_map<void*, std::shared_ptr<lite::Tensor>>
            global_holder;
    return global_holder;
}

std::unordered_map<std::string, lite::LiteAny>& get_global_tensor_attr_holder() {
    static thread_local std::unordered_map<std::string, lite::LiteAny>
            global_holder;
    return global_holder;
}
}  // namespace
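
/**
 * Note on the ownership model above (a sketch, not a normative statement):
 * the C API hands out raw lite::Tensor* values as opaque LiteTensor handles,
 * while the thread_local maps keep the owning shared_ptr alive until
 * LITE_destroy_tensor erases the entry. Because the holder is thread_local,
 * a handle should be destroyed on the thread that created it; erasing from
 * another thread would miss the owner and keep the tensor alive.
 */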
//! convert the lite::Layout to the C LiteLayout
LiteLayout convert_to_clayout(const lite::Layout& layout) {
    LiteLayout clayout;
    clayout.ndim = layout.ndim;
    LITE_ASSERT(layout.ndim < LAYOUT_MAX_DIM, "layout ndim is too large");
    for (size_t i = 0; i < layout.ndim; i++) {
        clayout.shapes[i] = layout.shapes[i];
    }
    clayout.data_type = layout.data_type;
    return clayout;
}

//! convert the C LiteLayout to lite::Layout
lite::Layout convert_to_layout(const LiteLayout& clayout) {
    lite::Layout layout;
    layout.ndim = clayout.ndim;
    LITE_ASSERT(layout.ndim < LAYOUT_MAX_DIM, "clayout ndim is too large");
    for (size_t i = 0; i < layout.ndim; i++) {
        layout.shapes[i] = clayout.shapes[i];
    }
    layout.data_type = clayout.data_type;
    return layout;
}
int LITE_make_tensor(const LiteTensorDesc tensor_describe, LiteTensor* tensor) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor, "The tensor passed to LITE_make_tensor is null");
    lite::Layout layout = convert_to_layout(tensor_describe.layout);
    auto lite_tensor = std::make_shared<lite::Tensor>(
            tensor_describe.device_id, tensor_describe.device_type, layout,
            tensor_describe.is_pinned_host);
    get_global_tensor_holder()[lite_tensor.get()] = lite_tensor;
    *tensor = lite_tensor.get();
    LITE_CAPI_END();
}

int LITE_destroy_tensor(LiteTensor tensor) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor, "The tensor passed to LITE c_api is null");
    get_global_tensor_holder().erase(tensor);
    LITE_CAPI_END();
}
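
/**
 * Usage sketch (illustrative only, not part of the original file): create a
 * 4x8 float tensor on the CPU and release it. A nonzero return from a LITE_*
 * call signals failure, following the LITE_CAPI_BEGIN/END convention.
 * \code
 *     LiteTensorDesc desc = default_desc;
 *     desc.layout.ndim = 2;
 *     desc.layout.shapes[0] = 4;
 *     desc.layout.shapes[1] = 8;
 *     LiteTensor tensor = nullptr;
 *     if (LITE_make_tensor(desc, &tensor) != 0) {
 *         // inspect the error reported by the C API
 *     }
 *     LITE_destroy_tensor(tensor);
 * \endcode
 */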
int LITE_set_tensor_layout(LiteTensor tensor, const LiteLayout layout) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor, "The tensor passed to LITE c_api is null");
    auto tensor_ptr = static_cast<lite::Tensor*>(tensor);
    tensor_ptr->set_layout(convert_to_layout(layout));
    LITE_CAPI_END();
}

int LITE_reset_tensor_memory(LiteTensor tensor, void* prepared_data,
                             size_t data_length_in_byte) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor, "The tensor passed to LITE c_api is null");
    LITE_ASSERT(prepared_data, "The prepared_data passed to LITE c_api is null");
    static_cast<lite::Tensor*>(tensor)->reset(prepared_data, data_length_in_byte);
    LITE_CAPI_END();
}

int LITE_reset_tensor(LiteTensor tensor, const LiteLayout layout,
                      void* prepared_data) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor, "The tensor passed to LITE c_api is null");
    LITE_ASSERT(prepared_data, "The prepared_data passed to LITE c_api is null");
    static_cast<lite::Tensor*>(tensor)->reset(prepared_data, convert_to_layout(layout));
    LITE_CAPI_END();
}
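
/**
 * Usage sketch (illustrative only): hand a caller-owned buffer to an existing
 * tensor without copying. The buffer must stay valid for the lifetime of the
 * tensor; `tensor` is a handle created elsewhere.
 * \code
 *     float data[4 * 8] = {0};
 *     LiteLayout layout = default_layout;
 *     layout.ndim = 2;
 *     layout.shapes[0] = 4;
 *     layout.shapes[1] = 8;
 *     LITE_reset_tensor(tensor, layout, data);
 * \endcode
 */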
int LITE_tensor_reshape(LiteTensor tensor, const int* shape, int size) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor && shape, "The tensor or shape passed to LITE c_api is null");
    std::vector<int> shapes;
    for (int i = 0; i < size; i++) {
        shapes.push_back(shape[i]);
    }
    static_cast<lite::Tensor*>(tensor)->reshape(shapes);
    LITE_CAPI_END();
}

int LITE_tensor_slice(const LiteTensor tensor, const size_t* start,
                      const size_t* end, const size_t* step, size_t size,
                      LiteTensor* slice_tensor) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor && start && end && slice_tensor,
                "The tensor, start, end or slice_tensor passed to LITE c_api is null");
    std::vector<size_t> starts, ends, steps;
    for (size_t i = 0; i < size; i++) {
        starts.push_back(start[i]);
        ends.push_back(end[i]);
        if (step) {
            steps.push_back(step[i]);
        }
    }
    auto ret_tensor = static_cast<lite::Tensor*>(tensor)->slice(starts, ends, steps);
    get_global_tensor_holder()[ret_tensor.get()] = ret_tensor;
    *slice_tensor = ret_tensor.get();
    LITE_CAPI_END();
}
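
/**
 * Usage sketch (illustrative only): take rows 1..2 of a 4x8 tensor. Passing a
 * null step leaves the steps vector empty, which falls through to the default
 * arguments of lite::Tensor::slice (assumed here to mean a step of 1 in every
 * dimension).
 * \code
 *     size_t start[] = {1, 0};
 *     size_t end[] = {3, 8};
 *     LiteTensor rows = nullptr;
 *     LITE_tensor_slice(tensor, start, end, nullptr, 2, &rows);
 *     LITE_destroy_tensor(rows);  // slice handles are owned like any other
 * \endcode
 */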
int LITE_tensor_fill_zero(LiteTensor tensor) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor, "The tensor passed to LITE c_api is null");
    static_cast<lite::Tensor*>(tensor)->fill_zero();
    LITE_CAPI_END();
}

int LITE_tensor_copy(LiteTensor dst_tensor, const LiteTensor src_tensor) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(dst_tensor && src_tensor, "The tensor passed to LITE c_api is null");
    static_cast<lite::Tensor*>(dst_tensor)
            ->copy_from(*static_cast<lite::Tensor*>(src_tensor));
    LITE_CAPI_END();
}

int LITE_tensor_share_memory_with(LiteTensor dst_tensor, const LiteTensor src_tensor) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(dst_tensor && src_tensor, "The tensor passed to LITE c_api is null");
    static_cast<lite::Tensor*>(dst_tensor)
            ->share_memory_with(*static_cast<lite::Tensor*>(src_tensor));
    LITE_CAPI_END();
}
int LITE_get_tensor_memory(const LiteTensor tensor, void** data) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor, "The tensor passed to LITE c_api is null");
    LITE_ASSERT(data, "The data ptr passed to LITE c_api is null");
    *data = static_cast<lite::Tensor*>(tensor)->get_memory_ptr();
    LITE_CAPI_END();
}

int LITE_get_tensor_memory_with_index(const LiteTensor tensor, const size_t* index,
                                      size_t size, void** data) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor && index && data,
                "The tensor, index or data passed to LITE c_api is null");
    std::vector<size_t> index_v;
    for (size_t i = 0; i < size; i++) {
        index_v.push_back(index[i]);
    }
    *data = static_cast<lite::Tensor*>(tensor)->get_memory_ptr(index_v);
    LITE_CAPI_END();
}

int LITE_get_tensor_total_size_in_byte(const LiteTensor tensor, size_t* size) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor, "The tensor passed to LITE c_api is null");
    LITE_ASSERT(size, "The size ptr passed to LITE c_api is null");
    *size = static_cast<lite::Tensor*>(tensor)->get_tensor_total_size_in_byte();
    LITE_CAPI_END();
}
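
/**
 * Usage sketch (illustrative only): read back the buffer of a host tensor
 * created elsewhere.
 * \code
 *     void* ptr = nullptr;
 *     size_t size_in_byte = 0;
 *     LITE_get_tensor_memory(tensor, &ptr);
 *     LITE_get_tensor_total_size_in_byte(tensor, &size_in_byte);
 *     float* values = static_cast<float*>(ptr);  // valid for LITE_FLOAT data
 * \endcode
 */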
int LITE_get_tensor_layout(const LiteTensor tensor, LiteLayout* layout) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor, "The tensor passed to LITE c_api is null");
    LITE_ASSERT(layout, "The layout ptr passed to LITE c_api is null");
    *layout = convert_to_clayout(static_cast<lite::Tensor*>(tensor)->get_layout());
    LITE_CAPI_END();
}

int LITE_get_tensor_device_type(const LiteTensor tensor, LiteDeviceType* device_type) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor, "The tensor passed to LITE c_api is null");
    LITE_ASSERT(device_type, "The device ptr passed to LITE c_api is null");
    *device_type = static_cast<lite::Tensor*>(tensor)->get_device_type();
    LITE_CAPI_END();
}

int LITE_get_tensor_device_id(const LiteTensor tensor, int* device_id) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor && device_id,
                "The tensor or device_id passed to LITE c_api is null");
    *device_id = static_cast<lite::Tensor*>(tensor)->get_device_id();
    LITE_CAPI_END();
}

int LITE_is_pinned_host(const LiteTensor tensor, int* is_pinned_host) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor, "The tensor passed to LITE c_api is null");
    LITE_ASSERT(is_pinned_host, "The is_pinned_host ptr passed to LITE c_api is null");
    *is_pinned_host = static_cast<lite::Tensor*>(tensor)->is_pinned_host();
    LITE_CAPI_END();
}

int LITE_is_memory_continue(const LiteTensor tensor, int* is_continue) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor, "The tensor passed to LITE c_api is null");
    LITE_ASSERT(is_continue, "The is_continue ptr passed to LITE c_api is null");
    *is_continue = static_cast<lite::Tensor*>(tensor)->is_continue_memory();
    LITE_CAPI_END();
}
int LITE_tensor_concat(LiteTensor* tensors, int nr_tensor, int dim,
                       LiteDeviceType dst_device, int device_id,
                       LiteTensor* result_tensor) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensors && result_tensor,
                "The tensors or result_tensor passed to LITE c_api is null");
    std::vector<lite::Tensor> v_tensors;
    for (int i = 0; i < nr_tensor; i++) {
        v_tensors.push_back(*static_cast<lite::Tensor*>(tensors[i]));
    }
    auto tensor = lite::TensorUtils::concat(v_tensors, dim, dst_device, device_id);
    get_global_tensor_holder()[tensor.get()] = tensor;
    *result_tensor = tensor.get();
    LITE_CAPI_END();
}
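
/**
 * Usage sketch (illustrative only): concatenate two handles along dimension 0
 * into a new CPU tensor; tensor_a and tensor_b are hypothetical handles
 * created elsewhere.
 * \code
 *     LiteTensor pair[2] = {tensor_a, tensor_b};
 *     LiteTensor merged = nullptr;
 *     LITE_tensor_concat(pair, 2, 0, LiteDeviceType::LITE_CPU, 0, &merged);
 *     LITE_destroy_tensor(merged);
 * \endcode
 */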
// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
