
tensor.cpp 8.0 kB

/**
 * \file imperative/python/src/tensor.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */

#include "./tensor.h"
#include "./grad.h"
#include "./common.h"
#include "./numpy_dtypes.h"

#include <pybind11/numpy.h>
#include <pybind11/operators.h>

namespace py = pybind11;

namespace mgb::imperative::python {

std::unique_ptr<interpreter::Interpreter::Channel> interpreter_for_py;

apply_result_t apply(ApplyContext& ctx) {
    // emulating scalar should be put to specific op's apply, e.g.,
    // elementwise, reduce, typecvt. Currently it's still handled at python
    // side. It could be moved to the C++ side if it has an impact on performance
    if (ctx.flags & Tensor::Flags::SCALAR) {
        // TODO: emulate scalar
    }

    if (ctx.flags & Tensor::Flags::GRAD) {
        return apply_grad(ctx);
    }

    if (ctx.flags & Tensor::Flags::TRACE) {
        // TODO: trace
    } else {
        SmallVector<interpreter::Interpreter::Handle> handles(ctx.nargs);
        for (size_t i = 0; i < ctx.nargs; ++i) {
            handles[i] = ctx.args[i]->m_handle.get();
        }

        auto output_handles = interpreter_for_py->apply_op(ctx.op, handles);

        apply_result_t outputs;
        outputs.reserve(output_handles.size());
        for (auto h : output_handles) {
            outputs.emplace_back(std::make_shared<Tensor>(h));
        }
        return outputs;
    }

    mgb_assert(0);
}

PyObject* py_apply(PyObject* self, PyObject* const* args, size_t nargs /* , PyObject* kwnames */) {
    try {
        // if (kwnames && PyTuple_GET_SIZE(kwnames)) {
        //     PyErr_SetString(PyExc_TypeError, "keyword argument not allowed");
        //     return nullptr;
        // }
        if (!nargs) {
            PyErr_SetString(PyExc_TypeError, "expect Op");
            return nullptr;
        }
        auto* op = args[0];
        if (!strcmp(op->ob_type->tp_base->tp_name, "PodOpVisitor") ||
            !strcmp(op->ob_type->tp_base->tp_name, "IndexingOpBase")) {
            op = PyObject_CallMethod(op, "to_c", "");
        }

        PyTypeObject* pytype = args[1]->ob_type;
        ++args;
        --nargs;

        ApplyContext ctx;
        ctx.flags = 0;
        ctx.op = py::handle(op).cast<std::shared_ptr<OpDef>>();
        SmallVector<Tensor*, 64> tensors(nargs);
        ctx.args = &tensors[0];
        ctx.nargs = nargs;

        for (size_t i = 0; i < nargs; ++i) {
            TensorWrapper* tw = TensorWrapper::cast_safe(args[i]);
            if (!tw) {
                PyErr_SetString(PyExc_TypeError, "expect Tensor");
                return nullptr;
            }
            auto* t = tensors[i] = tw->m_tensor.get();
            ctx.flags |= t->m_flags;
        }

        // TODO: set TRACE flag

        auto outputs = apply(ctx);
        size_t nout = outputs.size();
        auto ret = py::tuple(nout);
        for (size_t i = 0; i < nout; ++i) {
            ret[i] = TensorWrapper::make(pytype, std::move(outputs[i]));
        }
        return ret.release().ptr();
    } catch (std::exception& e) {
        PyErr_SetString(PyExc_RuntimeError, e.what());
        return nullptr;
    }
}

TensorWrapper::TensorWrapper(PyObject* args, PyObject* kwargs) {
    if (kwargs && PyDict_Size(kwargs)) {
        throw py::type_error("keyword argument not allowed");
    }
    auto nargs = PyTuple_Size(args);
    auto tup = py::reinterpret_borrow<py::tuple>(args);
    if (nargs == 0) {
        throw py::type_error("too few arguments");
    }
    if (auto* t = cast_safe(tup[0].ptr())) {
        if (nargs > 1) {
            throw py::type_error("expect 1 argument");
        }
        m_tensor = t->m_tensor;
    } else {
        if (nargs != 3) {
            throw py::type_error("expect 3 arguments");
        }
        py::detail::loader_life_support life_sup; // required to cast DType
        auto data = tup[0].cast<py::array>();
        DType dtype = tup[1].cast<DType>();
        CompNode cn = tup[2].cast<CompNode>();

        interpreter::Interpreter::Handle handle;
        constexpr auto size_threshhold = TensorShape::MAX_NDIM;
        if (data.size() > size_threshhold) {
            handle = interpreter_for_py->put(npy::np2tensor(data.ptr(), npy::Meth::borrow(cn), dtype));
        } else {
            HostTensorND ret(cn);
            handle = interpreter_for_py->put(npy::np2tensor(data.ptr(), npy::Meth::copy_into(&ret), dtype));
        }

        m_tensor = std::make_shared<Tensor>(handle);

        if (data.ndim() == 0) {
            m_tensor->m_flags |= Tensor::Flags::SCALAR;
        }
    }
}

PyObject* TensorWrapper::shape() {
    if (m_tensor->m_flags & Tensor::Flags::SCALAR) {
        return PyTuple_New(0);
    }
    auto&& shape = m_tensor->shape();
    if (!shape.ndim) {
        Py_RETURN_NONE;
    }
    py::tuple ret(shape.ndim);
    for (size_t i = 0; i < shape.ndim; ++i) {
        ret[i] = shape[i];
    }
    return ret.release().ptr();
}

PyObject* TensorWrapper::dtype() {
    return py::cast(m_tensor->dtype()).release().ptr();
}

PyObject* TensorWrapper::device() {
    return py::cast(m_tensor->comp_node()).release().ptr();
}

PyObject* TensorWrapper::numpy() {
    auto&& hv = interpreter_for_py->get_value(m_tensor->m_handle.get());
    auto arr = py::reinterpret_steal<py::array>(npy::ndarray_from_tensor(hv, npy::ShareType::TRY_SHARE));
    if (!arr) return nullptr;
    if (m_tensor->m_flags & Tensor::Flags::SCALAR) {
        mgb_assert(PyArray_Check(arr.ptr()));
        return PyArray_Squeeze(reinterpret_cast<PyArrayObject*>(arr.ptr()));
    }
    return arr.release().ptr();
}

void TensorWrapper::reset(PyObject* tensor) {
    TensorWrapper* t = TensorWrapper::cast_safe(tensor);
    if (!t) {
        throw py::type_error("expect Tensor");
    }
    m_tensor = t->m_tensor;
}

PyObject* TensorWrapper::isscalar() {
    if (m_tensor->m_flags & Tensor::Flags::SCALAR) {
        Py_RETURN_TRUE;
    } else {
        Py_RETURN_FALSE;
    }
}

void TensorWrapper::setscalar() {
    m_tensor->m_flags |= Tensor::Flags::SCALAR;
}

struct TensorWeakRef {
    std::weak_ptr<Tensor> wptr;

    TensorWeakRef(const TensorWrapper& tw) : wptr(tw.m_tensor) {}

    py::object operator()() {
        if (auto p = wptr.lock()) {
            return TensorWrapper::make(p);
        }
        return py::none();
    }
};

void init_tensor(py::module m) {
    interpreter_for_py = interpreter::Interpreter::inst().create_channel();

    auto* tensor_type = TensorWrapper::wrap_t::type()
            .def<&TensorWrapper::numpy>("numpy")
            .def_getset<&TensorWrapper::shape>("shape")
            .def_getset<&TensorWrapper::dtype>("dtype")
            .def_getset<&TensorWrapper::device>("device")
            .def<&TensorWrapper::reset>("_reset")
            .def<&TensorWrapper::isscalar>("isscalar")
            .def<&TensorWrapper::setscalar>("setscalar")
            .finalize();
    if (!tensor_type) throw py::error_already_set();
    py::setattr(m, "Tensor", tensor_type);

    py::class_<TensorWeakRef>(m, "TensorWeakRef")
            .def(py::init<const TensorWrapper&>())
            .def("__call__", &TensorWeakRef::operator());

    static PyMethodDef apply_def{"apply", (PyCFunction)py_apply, METH_FASTCALL, nullptr};
    auto* apply_func = PyCFunction_NewEx(&apply_def, nullptr, nullptr);
    if (!apply_func) throw py::error_already_set();
    py::setattr(m, "apply", apply_func);

    py::handle grad_key_type = GradKeyWrapper::wrap_t::type()
            .def<&GradKeyWrapper::attach>("attach")
            .finalize();
    if (!grad_key_type) throw py::error_already_set();
    py::setattr(m, "GradKey", grad_key_type);
    py::setattr(m, "backward", py::cpp_function(&GradKeyWrapper::backward));
}

} // namespace mgb::imperative::python
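
init_tensor() above registers Tensor, TensorWeakRef, apply, GradKey and backward on the module it receives. The following is a minimal Python sketch of how the Tensor and TensorWeakRef bindings might be exercised; the import path, the np.dtype argument form, and the "xpux" device string are assumptions for illustration, and only the attribute and method names come from the C++ source.

# Hypothetical usage sketch of the bindings registered by init_tensor();
# the module path below is an assumption, not confirmed by this file.
import numpy as np
from megengine.core._imperative_rt import Tensor, TensorWeakRef  # hypothetical path

# TensorWrapper::TensorWrapper accepts (numpy array, dtype, comp node)
x = Tensor(np.arange(6, dtype="float32").reshape(2, 3), np.dtype("float32"), "xpux")

print(x.shape, x.dtype, x.device)  # getters bound via def_getset in init_tensor
print(x.numpy())                   # copies the value back into a numpy array

ref = TensorWeakRef(x)             # weak reference; calling it returns the Tensor or None
assert ref() is not None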

The MegEngine installation package bundles the CUDA environment needed to run code on a GPU, so there is no separate CPU and GPU build. To run GPU programs, make sure the machine has GPU hardware and the driver installed. If you would like to try deep-learning development on a cloud GPU computing platform, you are welcome to visit the MegStudio platform.
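
A minimal sketch, assuming the public megengine package is installed, of picking a device before running any code; it falls back to CPU when no usable GPU is found.

import megengine as mge

# The bundled CUDA runtime is only used when a GPU and its driver are present.
if mge.is_cuda_available():
    mge.set_default_device("gpu0")  # run subsequent tensors/ops on the first GPU
else:
    mge.set_default_device("cpu0")  # fall back to CPU
print(mge.get_default_device())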