You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

ops.cpp 29 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779
  1. /**
  2. * \file imperative/python/src/ops.cpp
  3. * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  4. *
  5. * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  6. *
  7. * Unless required by applicable law or agreed to in writing,
  8. * software distributed under the License is distributed on an
  9. * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. */
  11. #include "./ops.h"
  12. #include "./helper.h"
  13. #include "./tensor.h"
  14. #include "megbrain/common.h"
  15. #include "megbrain/imperative.h"
  16. #include "megbrain/imperative/graph_builder.h"
  17. #include "megbrain/imperative/ops/autogen.h"
  18. #include "megbrain/imperative/ops/backward_graph.h"
  19. #include "megbrain/imperative/ops/opr_attr.h"
  20. #include "megbrain/imperative/ops/rng.h"
  21. #include "megbrain/imperative/ops/utility.h"
  22. #include <Python.h>
  23. #include <unordered_map>
  24. namespace py = pybind11;
  25. using namespace mgb::imperative;
  26. namespace {
  27. auto normalize_enum(const std::string& in) {
  28. std::string ret;
  29. for (auto&& c : in) {
  30. ret += toupper(c);
  31. }
  32. return ret;
  33. }
  34. } // anonymous namespace
// Exception-to-Python bridge used after a `try` around pybind11 casts inside
// CPython slot functions: converts any in-flight C++ exception into a pending
// Python exception and returns RETVAL (e.g. -1 or NULL) from the enclosing
// function.
#define CATCH_ALL(RETVAL) \
    catch (py::error_already_set & e) { \
        /* re-raise the original Python exception */ \
        e.restore(); \
        return RETVAL; \
    } \
    catch (py::builtin_exception & e) { \
        /* translate a pybind11 builtin exception (e.g. cast_error) */ \
        e.set_error(); \
        return RETVAL; \
    } \
    catch (std::exception & e) { \
        PyErr_SetString(PyExc_RuntimeError, e.what()); \
        return RETVAL; \
    }
  48. namespace {
// Naming helpers for the generated wrapper types: PyOp(Foo) is the C++
// wrapper struct for OpDef subclass Foo; PyOpType(Foo) is its PyTypeObject.
#define PyOp(name) Py##name
#define PyOpType(name) PyOp(name)::py_type
// Opens the wrapper struct for op `name`: inherits the shared_ptr<OpDef>
// member from PyOpDef and exposes typed access to the concrete op via inst().
#define PyOpDefBegin(name) \
    struct PyOp(name) : PyOpDef { \
        using Ty = name; \
        Ty& inst() { return op->cast_final_safe<Ty>(); } \
        static PyTypeObject py_type;
// Closes the wrapper struct and defines its static PyTypeObject storage.
#define PyOpDefEnd(name) \
    } \
    ; \
    PyTypeObject PyOpType(name);
// Expands to a CPython rich-compare dispatch: evaluates `val1 <op> val2` for
// the requested comparison constant and returns Py_True / Py_False (new
// references) from the enclosing function. `op` must be one of Py_EQ..Py_GE.
#define RETURN_RICHCOMPARE(val1, val2, op) \
    do { \
        switch (op) { \
            case Py_EQ: \
                if ((val1) == (val2)) \
                    Py_RETURN_TRUE; \
                Py_RETURN_FALSE; \
            case Py_NE: \
                if ((val1) != (val2)) \
                    Py_RETURN_TRUE; \
                Py_RETURN_FALSE; \
            case Py_LT: \
                if ((val1) < (val2)) \
                    Py_RETURN_TRUE; \
                Py_RETURN_FALSE; \
            case Py_GT: \
                if ((val1) > (val2)) \
                    Py_RETURN_TRUE; \
                Py_RETURN_FALSE; \
            case Py_LE: \
                if ((val1) <= (val2)) \
                    Py_RETURN_TRUE; \
                Py_RETURN_FALSE; \
            case Py_GE: \
                if ((val1) >= (val2)) \
                    Py_RETURN_TRUE; \
                Py_RETURN_FALSE; \
            default: \
                Py_FatalError("Unreachable C code path reached"); \
        } \
    } while (0)
// Generic tp_new slot for generated op wrappers: allocates the Python object
// and default-constructs the wrapped op via T::Ty::make(). Returns NULL on
// allocation failure (with the Python error already set by tp_alloc).
template <typename T>
PyObject* py_new_generic(PyTypeObject* type, PyObject*, PyObject*) {
    PyObject* obj = type->tp_alloc(type, 0);
    T* self = reinterpret_cast<T*>(obj);
    if (self != NULL) {
        self->op = T::Ty::make();
    }
    return obj;
}
// Fallback param (de)serialization: round-trip values through pybind11
// casting. Specialized below for enum types, which go through the enum
// wrappers' dump(). (NOTE(review): "SNIFAE" looks like a typo for "SFINAE";
// it is only a template-parameter name, so it is harmless.)
template <typename T, typename SNIFAE = void>
struct serialization {
    static T load(py::object obj) { return py::cast<T>(obj); }
    // dump() accepts both lvalues and rvalues of exactly T (after decay).
    template <
            typename U, typename = std::enable_if_t<std::is_same_v<T, std::decay_t<U>>>>
    static py::object dump(U&& t) {
        return py::cast(std::forward<U>(t));
    }
};
// Generic tp_dealloc slot: explicitly releases the shared_ptr member (which
// tp_free would not destruct) before freeing the Python object.
template <typename T>
void py_dealloc_generic(PyObject* obj) {
    reinterpret_cast<T*>(obj)->op.reset();
    Py_TYPE(obj)->tp_free(obj);
}
// Generic getter for a member `attr` of the concrete op type: reads the field
// and converts it to a new Python reference via pybind11.
template <typename T, typename U, U T::Ty::*attr>
PyObject* py_get_generic_impl(PyObject* obj, void* /* closure */) {
    auto& op = reinterpret_cast<T*>(obj)->inst();
    return py::cast(op.*attr).release().ptr();
}
// Instantiates the getter for op class `name`, member `attr`.
#define py_get_generic(name, attr) \
    py_get_generic_impl<PyOp(name), decltype(std::declval<name>().attr), &name::attr>
// Generic setter for a member `attr` of the concrete op type: casts `value`
// back to U and assigns it. Returns 0 on success, -1 with a Python exception
// set on failure (including attribute deletion, which is rejected).
template <typename T, typename U, U T::Ty::*attr>
int py_set_generic_impl(PyObject* obj, PyObject* value, void* /* closure */) {
    if (value == NULL) {
        PyErr_SetString(PyExc_TypeError, "Cannot delete the attribute");
        return -1;
    }
    auto& op = reinterpret_cast<T*>(obj)->inst();
    try {
        // TODO: remove this guard which is used for pybind11 implicit conversion
        py::detail::loader_life_support guard{};
        op.*attr = py::cast<U>(py::handle(value));
    }
    CATCH_ALL(-1)
    return 0;
}
// Instantiates the setter for op class `name`, member `attr`.
#define py_set_generic(name, attr) \
    py_set_generic_impl<PyOp(name), decltype(std::declval<name>().attr), &name::attr>
// Base wrapper object: every generated Py<Op> struct derives from this. Owns
// the underlying op through a shared_ptr; ctype2pytype maps a C++ op
// Typeinfo to the corresponding Python type object for downcasting in
// type_caster<OpDef>::cast below.
struct PyOpDef {
    PyObject_HEAD std::shared_ptr<OpDef> op;
    static PyTypeObject py_type;
    static std::unordered_map<mgb::Typeinfo*, PyTypeObject*> ctype2pytype;
    static PyGetSetDef py_getsetters[];
    static Py_hash_t tp_hash(PyObject* obj);
    static PyObject* tp_richcompare(PyObject* self, PyObject* other, int op);
    // repr(): delegates to the op's make_name().
    static PyObject* py_repr(PyObject* self) {
        return py::cast(reinterpret_cast<PyOpDef*>(self)->op->make_name())
                .release()
                .ptr();
    }
};
// Storage for the PyOpDef statics declared above.
PyTypeObject PyOpType(OpDef);
std::unordered_map<mgb::Typeinfo*, PyTypeObject*> PyOp(OpDef)::ctype2pytype;
// Getter for the `scope` property: returns the op's scope as a Python string.
PyObject* py_get_scope(PyObject* obj, void* /* closure */) {
    return py::cast(reinterpret_cast<PyOp(OpDef)*>(obj)->op->scope()).release().ptr();
}
// Setter for the `scope` property: expects a string; deletion is rejected.
// Returns 0 on success, -1 with a Python exception set on failure.
int py_set_scope(PyObject* obj, PyObject* value, void* /* closure */) {
    if (value == NULL) {
        PyErr_SetString(PyExc_TypeError, "Cannot delete the attribute");
        return -1;
    }
    try {
        reinterpret_cast<PyOp(OpDef)*>(obj)->op->set_scope(
                py::cast<std::string>(py::handle(value)));
    }
    CATCH_ALL(-1)
    return 0;
}
// OpDef exposes a single writable property: `scope`.
PyGetSetDef PyOp(OpDef)::py_getsetters[] = {
        {const_cast<char*>("scope"), py_get_scope, py_set_scope, "scope", NULL},
        {NULL}};
// hash(op) delegates to the OpDef's own hash().
Py_hash_t PyOp(OpDef)::tp_hash(PyObject* obj) {
    return static_cast<Py_hash_t>(reinterpret_cast<PyOp(OpDef)*>(obj)->op->hash());
}
// == / != delegate to OpDef::is_same; all other comparisons return
// NotImplemented. NOTE(review): `other` is cast without a type check, so this
// presumably relies on never being compared against a non-OpDef object —
// verify against CPython's rich-compare dispatch for this type.
PyObject* PyOp(OpDef)::tp_richcompare(PyObject* self, PyObject* other, int op) {
    bool same = reinterpret_cast<PyOp(OpDef)*>(self)->op->is_same(
            *reinterpret_cast<PyOp(OpDef)*>(other)->op);
    if (op == Py_EQ || op == Py_NE) {
        RETURN_RICHCOMPARE(same, true, op);
    }
    Py_RETURN_NOTIMPLEMENTED;
}
// Per-enum metadata (display name, member-name table, max value) provided by
// generated EnumTrait specializations in opdef.cpy.inl.
template <typename T>
struct EnumTrait;
// Common data members shared by the two enum wrapper templates below:
// the stored enum value, the Python type object, the member-name table,
// the normalized-name -> value map, and the per-member singleton objects.
#define PyEnumHead \
    static_assert(std::is_enum_v<T>); \
    PyObject_HEAD T value; \
    constexpr static const char* name = EnumTrait<T>::name; \
    static PyTypeObject* type; \
    static const char* members[]; \
    static std::unordered_map<std::string, T> mem2value; \
    static PyObject* pyobj_insts[];
// Python wrapper for a plain (non-flag) C++ enum T. One singleton Python
// object per enumerator lives in pyobj_insts; cast() hands out new references
// to those singletons, so instances compare by identity as well as value.
template <typename T>
struct EnumWrapper {
    PyEnumHead std::string to_string() const {
        return members[static_cast<size_t>(value)];
    }
    // repr(): "<EnumName>.<MEMBER>".
    static PyObject* py_repr(PyObject* self) {
        return py::cast(
                       std::string(name) + "." +
                       reinterpret_cast<EnumWrapper*>(self)->to_string())
                .release()
                .ptr();
    }
    // dump(): just the member name; consumed by serialization<enum>::dump.
    static PyObject* py_dump(PyObject* self) {
        return py::cast(reinterpret_cast<EnumWrapper*>(self)->to_string())
                .release()
                .ptr();
    }
    // Only == / != are supported; operands that fail load() compare unequal.
    static PyObject* tp_richcompare(PyObject* self, PyObject* other, int op) {
        if (op == Py_EQ || op == Py_NE) {
            T lhs, rhs;
            if (load(other, rhs) && load(self, lhs)) {
                RETURN_RICHCOMPARE(lhs, rhs, op);
            } else {
                // not convertible to T: force "unequal" (0 vs 1)
                RETURN_RICHCOMPARE(0, 1, op);
            }
        }
        Py_RETURN_NOTIMPLEMENTED;
    }
    // Accepts a wrapper instance or a case-insensitive member-name string;
    // returns false (without setting a Python error) if src is neither.
    static bool load(py::handle src, T& value) {
        PyObject* obj = src.ptr();
        if (PyObject_TypeCheck(obj, type)) {
            value = reinterpret_cast<EnumWrapper*>(obj)->value;
            return true;
        }
        if (py::isinstance<py::str>(src)) {
            auto&& iter = mem2value.find(normalize_enum(py::cast<std::string>(src)));
            if (iter != mem2value.end()) {
                value = iter->second;
                return true;
            } else {
                return false;
            }
        }
        return false;
    }
    // C++ enum -> new reference to the per-enumerator singleton.
    static PyObject* cast(const T& value) {
        auto v = static_cast<std::underlying_type_t<T>>(value);
        mgb_assert(v <= EnumTrait<T>::max);
        PyObject* obj = pyobj_insts[v];
        Py_INCREF(obj);
        return obj;
    }
};
// Python wrapper for a flag-style (bit-combinable) C++ enum T. Single-bit
// values reuse the per-bit singletons in pyobj_insts; zero or multi-bit
// combinations get a freshly allocated wrapper object. Supports | and &.
template <typename T>
struct BitCombinedEnumWrapper {
    // "None" for 0, otherwise "Name.A + Name.B + ..." over the set bits.
    PyEnumHead std::string to_string() const {
        uint32_t value_int = static_cast<uint32_t>(value);
        if (value_int == 0) {
            return "None";
        } else {
            std::string ret;
            bool first = true;
            for (uint32_t i = 0; i < 32; i++) {
                if (value_int >> i & 1) {
                    if (!first) {
                        ret += " + ";
                    } else {
                        first = false;
                    }
                    ret += (std::string(name) + "." + members[i]);
                }
            }
            return ret;
        }
    }
    // tp_new: no args -> zero value; one arg -> convert through load().
    static PyObject* py_new_combined_enum(
            PyTypeObject* type, PyObject* args, PyObject*) {
        if (!PyTuple_Size(args)) {
            PyObject* obj = type->tp_alloc(type, 0);
            reinterpret_cast<BitCombinedEnumWrapper*>(obj)->value = T();
            return obj;
        } else {
            PyObject* input;
            if (!PyArg_ParseTuple(args, "|O", &input)) {
                return nullptr;
            }
            T value;
            if (load(input, value)) {
                return cast(value);
            } else {
                PyErr_SetString(
                        PyExc_RuntimeError,
                        mgb::ssprintf(
                                "Cannot convert type %s to type %s\n",
                                input->ob_type->tp_name, name)
                                .c_str());
                return nullptr;
            }
        }
    }
    static PyObject* py_repr(PyObject* self) {
        return py::cast(reinterpret_cast<BitCombinedEnumWrapper*>(self)->to_string())
                .release()
                .ptr();
    }
    // dump(): tuple of set-bit member names; consumed by param serialization.
    static PyObject* py_dump(PyObject* self) {
        std::vector<std::string> result;
        auto value = reinterpret_cast<BitCombinedEnumWrapper*>(self)->value;
        uint32_t value_int = static_cast<uint32_t>(value);
        for (uint32_t i = 0; i < 32; i++) {
            if (value_int >> i & 1) {
                result.push_back(members[i]);
            }
        }
        return py::tuple(py::cast(result)).release().ptr();
    }
    // nb_or slot: bitwise-or of two wrappers of the same concrete type.
    static PyObject* py_or(PyObject* self, PyObject* other) {
        if (!(self->ob_type == other->ob_type)) {
            return PyErr_Format(
                    PyExc_RuntimeError,
                    "Operand in or operator must be the same type.");
        }
        T lhs = reinterpret_cast<BitCombinedEnumWrapper*>(self)->value,
          rhs = reinterpret_cast<BitCombinedEnumWrapper*>(other)->value;
        return cast(lhs | rhs);
    }
    // nb_and slot: bitwise-and of two wrappers of the same concrete type.
    static PyObject* py_and(PyObject* self, PyObject* other) {
        if (!(self->ob_type == other->ob_type)) {
            return PyErr_Format(
                    PyExc_RuntimeError,
                    "Operand in and operator must be the same type.");
        }
        T lhs = reinterpret_cast<BitCombinedEnumWrapper*>(self)->value,
          rhs = reinterpret_cast<BitCombinedEnumWrapper*>(other)->value;
        return cast(lhs & rhs);
    }
    // Only == / != are supported; operands that fail load() compare unequal.
    static PyObject* tp_richcompare(PyObject* self, PyObject* other, int op) {
        if (op == Py_EQ || op == Py_NE) {
            T lhs, rhs;
            if (load(other, rhs) && load(self, lhs)) {
                RETURN_RICHCOMPARE(lhs, rhs, op);
            } else {
                RETURN_RICHCOMPARE(0, 1, op);
            }
        }
        Py_RETURN_NOTIMPLEMENTED;
    }
    // Accepts: a wrapper instance, a member-name string, a tuple of member
    // names (or-ed together), or a raw integer <= EnumTrait<T>::max.
    static bool load(py::handle src, T& value) {
        PyObject* obj = src.ptr();
        if (PyObject_TypeCheck(obj, type)) {
            value = reinterpret_cast<BitCombinedEnumWrapper*>(obj)->value;
            return true;
        }
        if (py::isinstance<py::str>(src)) {
            auto&& iter = mem2value.find(normalize_enum(py::cast<std::string>(src)));
            if (iter != mem2value.end()) {
                value = iter->second;
                return true;
            } else {
                return false;
            }
        }
        if (py::isinstance<py::tuple>(src)) {
            auto params = py::cast<std::vector<std::string>>(src);
            bool first = true;
            for (auto s : params) {
                auto&& iter = mem2value.find(normalize_enum(s));
                if (iter != mem2value.end()) {
                    if (first) {
                        value = iter->second;
                        first = false;
                    } else {
                        value |= iter->second;
                    }
                } else {
                    return false;
                }
            }
            return true;
        }
        if (py::isinstance<py::int_>(obj)) {
            auto v = py::cast<std::underlying_type_t<T>>(src);
            if (v > EnumTrait<T>::max) {
                return false;
            }
            value = static_cast<T>(v);
            return true;
        }
        return false;
    }
    // Exactly-one-bit values return the shared singleton; zero or multi-bit
    // values (v==0, or v not a power of two) allocate a new wrapper.
    static PyObject* cast(const T& value) {
        auto v = static_cast<std::underlying_type_t<T>>(value);
        mgb_assert(v <= EnumTrait<T>::max);
        if ((!v) || (v & (v - 1))) {
            PyObject* obj = type->tp_alloc(type, 0);
            reinterpret_cast<BitCombinedEnumWrapper*>(obj)->value = value;
            return obj;
        } else {
            PyObject* obj = pyobj_insts[__builtin_ctz(v)];
            Py_INCREF(obj);
            return obj;
        }
    }
};
  396. template <typename T>
  397. struct serialization<T, std::enable_if_t<std::is_enum_v<std::decay_t<T>>>> {
  398. static T load(py::object obj) {
  399. auto caster = pybind11::detail::type_caster<T>();
  400. if (caster.load(obj, true)) {
  401. return caster;
  402. } else {
  403. PyErr_SetString(PyExc_RuntimeError, "load faild \n");
  404. return caster;
  405. }
  406. }
  407. static py::object dump(T t) { return py::cast(t).attr("dump")(); }
  408. };
// Registers the abstract OpDef Python type on module `m`. All generated op
// types and PyOpBase derive from this type (Py_TPFLAGS_BASETYPE).
void _init_py_op_def(py::module m) {
    using py_op = PyOp(OpDef);
    auto& py_type = PyOpType(OpDef);
    py_type = {PyVarObject_HEAD_INIT(NULL, 0)};
    py_type.tp_name = "megengine.core._imperative_rt.OpDef";
    py_type.tp_basicsize = sizeof(PyOp(OpDef));
    py_type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;
    py_type.tp_doc = "OpDef";
    py_type.tp_base = &PyBaseObject_Type;
    py_type.tp_hash = PyOp(OpDef)::tp_hash;
    py_type.tp_richcompare = PyOp(OpDef)::tp_richcompare;
    py_type.tp_getset = py_op::py_getsetters;
    py_type.tp_repr = py_op::py_repr;
    py_type.tp_dealloc = py_dealloc_generic<PyOp(OpDef)>;
    mgb_assert(PyType_Ready(&py_type) >= 0);
    m.add_object("OpDef", reinterpret_cast<PyObject*>(&py_type));
}
  426. /*********** begin of hand-write opdefs **************/
// Base type for opdefs implemented in Python. tp_new placement-constructs
// the inherited shared_ptr member: tp_alloc yields zeroed storage, not a
// constructed C++ object, so the shared_ptr must be built in place (it is
// left null here; type_caster<OpDef>::load wraps such objects in
// GenericPyOp).
struct PyOpBase : PyOpDef {
    static PyTypeObject py_type;
    static PyObject* tp_new(PyTypeObject* type, PyObject*, PyObject*) {
        auto* obj = type->tp_alloc(type, 0);
        if (obj) {
            auto* self = reinterpret_cast<PyOpBase*>(obj);
            new (&self->op) decltype(self->op);
        }
        return obj;
    }
};
PyTypeObject PyOpBase::py_type;
// Registers the PyOpBase Python type (subclass of OpDef) on module `m`.
void _init_py_op_base(py::module m) {
    using py_op = PyOpBase;
    auto& py_type = PyOpBase::py_type;
    py_type = {PyVarObject_HEAD_INIT(NULL, 0)};
    py_type.tp_name = "megengine.core._imperative_rt.ops.PyOpBase";
    py_type.tp_basicsize = sizeof(py_op);
    py_type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;
    py_type.tp_doc = "PyOpBase";
    py_type.tp_base = &PyOpType(OpDef);
    py_type.tp_dealloc = py_dealloc_generic<py_op>;
    py_type.tp_new = py_op::tp_new;
    mgb_assert(PyType_Ready(&py_type) >= 0);
    m.add_object("PyOpBase", reinterpret_cast<PyObject*>(&py_type));
}
  453. /*********** end of hand-write opdefs **************/
  454. // auto generated opdefs
  455. #include "opdef.cpy.inl"
  456. #undef CATCH_ALL
  457. } // anonymous namespace
  458. namespace PYBIND11_NAMESPACE {
  459. namespace detail {
// pybind11 caster: accept any PyOpDef instance. A wrapper whose op pointer is
// null (a PyOpBase constructed from Python) is wrapped in GenericPyOp so the
// C++ side always sees a valid OpDef.
bool type_caster<OpDef>::load(handle src, bool convert) {
    PyObject* obj = src.ptr();
    if (!PyObject_TypeCheck(obj, &PyOpType(OpDef))) {
        return false;
    }
    value = reinterpret_cast<PyOp(OpDef)*>(obj)->op;
    if (!value) {
        // opdef only defined in Python
        value = std::make_shared<GenericPyOp>(reinterpret_borrow<object>(src));
    }
    return true;
}
// pybind11 caster: C++ OpDef -> Python wrapper. GenericPyOp hands back the
// original Python object; otherwise the concrete wrapper type is looked up in
// ctype2pytype, falling back to the plain OpDef type for unregistered ops.
handle type_caster<OpDef>::cast(const OpDef& op, return_value_policy, handle) {
    if (auto* pyop = op.try_cast_final<GenericPyOp>()) {
        return object(pyop->obj).release();
    }
    PyTypeObject* pytype;
    auto& c2p = PyOp(OpDef)::ctype2pytype;
    auto&& iter = c2p.find(op.dyn_typeinfo());
    if (iter != c2p.end()) { // FIXME: should always meet this condition
        pytype = iter->second;
    } else { // which means unregistered op type, just make it as an opaque op type
        // currently, only OprAttr goes into this branch
        pytype = &PyOpType(OpDef);
    }
    PyObject* obj = pytype->tp_alloc(pytype, 0);
    mgb_assert(PyObject_TypeCheck(obj, &PyOpType(OpDef)));
    reinterpret_cast<PyOp(OpDef)*>(obj)->op = const_cast<OpDef&>(op).shared_from_this();
    return py::handle(obj);
}
// Glue pybind11's type_caster<T> to the wrapper structs for every generated
// plain-enum parameter type.
#define ENUM_CASTER_IMPL(T) \
    bool type_caster<T>::load(handle src, bool) { \
        return EnumWrapper<T>::load(src, value); \
    } \
    handle type_caster<T>::cast(const T& value, return_value_policy, handle) { \
        return EnumWrapper<T>::cast(value); \
    }
FOR_EACH_ENUM_PARAM(ENUM_CASTER_IMPL)
// Same glue for every generated bit-combined (flag) enum parameter type.
#define BIT_COMBINED_ENUM_CASTER_IMPL(T) \
    bool type_caster<T>::load(handle src, bool) { \
        return BitCombinedEnumWrapper<T>::load(src, value); \
    } \
    handle type_caster<T>::cast(const T& value, return_value_policy, handle) { \
        return BitCombinedEnumWrapper<T>::cast(value); \
    }
FOR_EACH_BIT_COMBINED_ENUM_PARAM(BIT_COMBINED_ENUM_CASTER_IMPL)
  506. } // namespace detail
  507. } // namespace PYBIND11_NAMESPACE
// Module init: registers OpDef, PyOpBase, all generated ops, RNG handle
// helpers, the SubgraphBuilder class, and the _custom submodule.
void init_ops(py::module m) {
    _init_py_op_def(m);
    _init_py_op_base(m);
    INIT_ALL_OP(m)
    m.def("new_rng_handle", &rng::new_handle);
    m.def(
            "delete_rng_handle",
            [](size_t handle) {
                // RNG op might execute after handle released due to async dispatch, so
                // we need sync before delete a handle to avoid memory leak or
                // use-after-free
                if (python::interpreter_for_py->check_available()) {
                    python::interpreter_for_py->sync();
                }
                mgb::CompNode::sync_all();
                py_task_q.wait_all_task_finish();
                rng::delete_handle(handle);
            },
            py::call_guard<py::gil_scoped_release>());
    m.def("set_global_rng_seed", &rng::set_global_rng_seed);
    m.def("get_global_rng_seed", &rng::get_global_rng_seed);
    m.def("get_rng_handle_compnode", &rng::get_rng_handle_compnode);
    // Builder for small subgraphs expressed as (op, inputs, outputs) exprs.
    // Vars are dense integer ids; next_var starts at 1.
    struct PySubgraphBuilder {
        explicit PySubgraphBuilder(std::string name) : name{name} {}
        std::string name;
        std::shared_ptr<Subgraph> graph_storage = std::make_shared<Subgraph>();
        std::shared_ptr<UniqueKey> graph_key = std::make_shared<UniqueKey>();
        Subgraph& graph = *graph_storage;
        mgb::SmallVector<bool> output_grad_mask;
        Subgraph::var_t next_var = 1;
        std::shared_ptr<OpDef> build() const {
            return SubgraphOp::make(name, graph_storage, output_grad_mask, graph_key);
        }
    };
    py::class_<PySubgraphBuilder>(m, "SubgraphBuilder")
            .def(py::init<std::string>())
            // allocate a fresh input var id and record it as a graph input
            .def("input",
                 [](PySubgraphBuilder& self) {
                     auto var = self.next_var++;
                     self.graph.inputs.push_back(var);
                     return var;
                 })
            // append an expr applying `op`; returns the freshly allocated
            // output var ids
            .def("apply",
                 [](PySubgraphBuilder& self, std::shared_ptr<OpDef> op,
                    Subgraph::vars_t inputs, size_t nr_outputs) {
                     Subgraph::vars_t outputs;
                     for (size_t i = 0; i < nr_outputs; ++i) {
                         outputs.push_back(self.next_var++);
                     }
                     self.graph.exprs.push_back({op, inputs, outputs});
                     return outputs;
                 })
            // bind a constant tensor (converted from a numpy-compatible
            // object) to a new var id
            .def("apply_const",
                 [](PySubgraphBuilder& self, py::object value, mgb::DType dtype,
                    mgb::CompNode cn) {
                     auto var = self.next_var++;
                     mgb::HostTensorND hvalue(cn);
                     npy::np2tensor(
                             value.cast<py::array>().ptr(),
                             npy::Meth::copy_into(&hvalue), dtype);
                     self.graph.constants.push_back({var, Tensor::make(hvalue)});
                     return var;
                 })
            // declare graph outputs; by default every output requires grad
            .def("outputs",
                 [](PySubgraphBuilder& self, Subgraph::vars_t outputs) {
                     self.graph.outputs = outputs;
                     self.output_grad_mask.resize(outputs.size(), true);
                 })
            // override the per-output grad mask (length must match outputs)
            .def("outputs_has_grad",
                 [](PySubgraphBuilder& self, mgb::SmallVector<bool> outputs_has_grad) {
                     mgb_assert(
                             self.graph.outputs.size() == self.output_grad_mask.size());
                     self.output_grad_mask = outputs_has_grad;
                 })
            // finish building and return the SubgraphOp as a plain OpDef
            .def("get",
                 [](PySubgraphBuilder& self) {
                     return (std::shared_ptr<OpDef>)self.build();
                 })
            // like get(), but wraps the subgraph in a CompiledOp with the
            // given graph-optimization level
            .def("compile", [](PySubgraphBuilder& self, int gopt_level) {
                return (std::shared_ptr<OpDef>)CompiledOp::make(
                        self.build(), gopt_level);
            });
    auto custom = submodule(m, "_custom");
    init_custom(custom);
}
// One switch-case per scalar param type: cast the Python value (kv.second,
// bound in make_custom_op's loop) directly into param_val.
#define CUSTOM_CASE_TO_PARSE_NON_LIST(dyn_type, static_type) \
    case custom::ParamDynType::dyn_type: { \
        param_val = py::handle(kv.second).cast<static_type>(); \
        break; \
    }
// One switch-case per list param type: cast element-wise from a py::list
// into the list param's element type.
#define CUSTOM_CASE_TO_PARSE_LIST(dyn_type, static_type) \
    case custom::ParamDynType::dyn_type: { \
        auto pyvals = py::handle(kv.second).cast<py::list>(); \
        static_type vals; \
        using basic_type = custom::get_vector_template_arg_type<static_type>::type; \
        for (auto& pyval : pyvals) { \
            vals.push_back(py::handle(pyval).cast<basic_type>()); \
        } \
        param_val = vals; \
        break; \
    }
// Fastcall entry for _make_custom_op(name, kwargs): builds a CustomOpDef for
// `name` and fills its params from the kwargs dict, then wraps it in an
// OpDef Python object. Unknown param names are warned about and skipped;
// a param with an invalid dynamic type asserts.
PyObject* make_custom_op(PyObject* self, PyObject** args, Py_ssize_t nargs) {
#if MGB_CUSTOM_OP
    // NOTE(review): args[0]/args[1] are read without validating nargs —
    // presumably the Python-side caller always passes exactly two arguments;
    // confirm against the caller in megengine's Python package.
    auto op_name = py::handle(args[0]).cast<std::string>();
    auto kwargs = py::handle(args[1]).cast<py::dict>();
    std::shared_ptr<OpDef> opdef = CustomOpDefFactory::inst()->create_opdef(op_name);
    auto& custom_opdef = static_cast<mgb::imperative::CustomOpDef&>(*opdef);
    auto& param = custom_opdef.param();
    for (auto&& kv : kwargs) {
        std::string param_name = py::handle(kv.first).cast<std::string>();
        // NOTE(review): type_name is computed but never used below.
        std::string type_name = py::handle(kv.second).ptr()->ob_type->tp_name;
        if (!param.exist(param_name)) {
            mgb_log_warn(
                    "op %s have no param named %s, ignore this param parsed from "
                    "python",
                    op_name.c_str(), param_name.c_str());
            continue;
        }
        auto& param_val = param[param_name];
        // dispatch on the param's dynamic type; each macro expands to a case
        // that casts kv.second into param_val
        switch (param_val.type()) {
            CUSTOM_FOR_EACH_BASIC_PARAMTYPE(CUSTOM_CASE_TO_PARSE_NON_LIST)
            CUSTOM_FOR_STRING_PARAMTYPE(CUSTOM_CASE_TO_PARSE_NON_LIST)
            CUSTOM_FOR_EACH_BASIC_LIST_PARAMTYPE(CUSTOM_CASE_TO_PARSE_LIST)
            CUSTOM_FOR_BOOL_LIST_PARAMTYPE(CUSTOM_CASE_TO_PARSE_LIST)
            CUSTOM_FOR_STRING_LIST_PARAMTYPE(CUSTOM_CASE_TO_PARSE_LIST)
            default: {
                mgb_assert(
                        false, "param dtype of %s:%s is invalid", op_name.c_str(),
                        param_name.c_str());
            }
        }
    }
    // wrap as the plain OpDef Python type (custom ops have no generated type)
    PyTypeObject* pytype;
    pytype = &PyOpType(OpDef);
    PyObject* obj = pytype->tp_alloc(pytype, 0);
    reinterpret_cast<PyOp(OpDef)*>(obj)->op = opdef;
    return obj;
#else
    mgb_assert(
            false,
            "Custom Op is disabled now, please build megengine with Custom Op open");
    return nullptr;
#endif
}
  652. #undef CUSTOM_CASE_TO_PARSE_LIST
  653. #undef CUSTOM_CASE_TO_PARSE_NON_LIST
// Loads the custom-op library at `path` under `name` and returns the list of
// op names it provides. Asserts if MegEngine was built without custom ops.
py::list install_custom(const std::string& name, const std::string& path) {
#if MGB_CUSTOM_OP
    py::list ret;
    const auto& ops_in_lib = custom::LibManager::inst()->install(name, path);
    for (const auto& op : ops_in_lib) {
        ret.append(op);
    }
    return ret;
#else
    mgb_assert(
            false,
            "Custom Op is disabled now, please build megengine with Custom Op open");
    py::list ret;
    return ret;
#endif
}
// Unloads a previously installed custom-op library; returns the manager's
// success flag. Asserts if MegEngine was built without custom ops.
bool uninstall_custom(const std::string& name) {
#if MGB_CUSTOM_OP
    return custom::LibManager::inst()->uninstall(name);
#else
    mgb_assert(
            false,
            "Custom Op is disabled now, please build megengine with Custom Op open");
    return false;
#endif
}
// Returns the names of all currently registered custom ops as a Python list.
// Asserts if MegEngine was built without custom ops.
py::list get_custom_op_list(void) {
#if MGB_CUSTOM_OP
    std::vector<std::string> all_ops = CustomOpDefFactory::inst()->op_list();
    py::list ret;
    for (auto& op : all_ops) {
        ret.append(op);
    }
    return ret;
#else
    mgb_assert(
            false,
            "Custom Op is disabled now, please build megengine with Custom Op open");
    py::list ret;
    return ret;
#endif
}
  696. #ifndef METH_FASTCALL
  697. PyObject* py35_make_custom_op(PyObject* self, PyObject* args) {
  698. auto* arr = &PyTuple_GET_ITEM(args, 0);
  699. auto size = PyTuple_GET_SIZE(args);
  700. return make_custom_op(self, arr, size);
  701. };
  702. #endif
// Registers the _custom submodule functions. _make_custom_op is registered
// manually through a raw PyMethodDef so it can use METH_FASTCALL when the
// running CPython supports it (falling back to METH_VARARGS otherwise).
void init_custom(pybind11::module m) {
    m.def("_install", &install_custom);
    m.def("_uninstall", &uninstall_custom);
    m.def("_get_custom_op_list", &get_custom_op_list);
    // static: must outlive the module since CPython keeps a pointer to it
    static PyMethodDef method_def = {
#ifdef METH_FASTCALL
        "_make_custom_op", (PyCFunction)make_custom_op, METH_FASTCALL, ""
#else
        "_make_custom_op", (PyCFunction)py35_make_custom_op, METH_VARARGS, ""
#endif
    };
    auto* func = PyCFunction_NewEx(&method_def, nullptr, nullptr);
    pybind11::setattr(m, method_def.ml_name, func);
}

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台