You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

ops.cpp 29 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778
  1. /**
  2. * \file imperative/python/src/ops.cpp
  3. * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  4. *
  5. * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  6. *
  7. * Unless required by applicable law or agreed to in writing,
  8. * software distributed under the License is distributed on an
  9. * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. */
  11. #include "./ops.h"
  12. #include "./helper.h"
  13. #include "./tensor.h"
  14. #include "megbrain/common.h"
  15. #include "megbrain/imperative.h"
  16. #include "megbrain/imperative/graph_builder.h"
  17. #include "megbrain/imperative/ops/autogen.h"
  18. #include "megbrain/imperative/ops/backward_graph.h"
  19. #include "megbrain/imperative/ops/opr_attr.h"
  20. #include "megbrain/imperative/ops/rng.h"
  21. #include "megbrain/imperative/ops/utility.h"
  22. #include <Python.h>
  23. #include <unordered_map>
  24. namespace py = pybind11;
  25. using namespace mgb::imperative;
  26. namespace {
  27. auto normalize_enum(const std::string& in) {
  28. std::string ret;
  29. for (auto&& c : in) {
  30. ret += toupper(c);
  31. }
  32. return ret;
  33. }
  34. } // anonymous namespace
// Translate any C++ exception escaping a Python C-API entry point into a
// pending Python exception, then return RETVAL (the API's error sentinel,
// e.g. -1 or nullptr). Handler order matters: pybind11's own exception
// carriers are matched first so the original Python error is preserved
// instead of being re-wrapped as a RuntimeError.
#define CATCH_ALL(RETVAL) \
    catch (py::error_already_set & e) { \
        e.restore(); /* re-raise the Python exception already in flight */ \
        return RETVAL; \
    } \
    catch (py::builtin_exception & e) { \
        e.set_error(); /* map pybind11 builtin exception to its Python type */ \
        return RETVAL; \
    } \
    catch (std::exception & e) { \
        PyErr_SetString(PyExc_RuntimeError, e.what()); \
        return RETVAL; \
    }
  48. namespace {
// Name of the Python wrapper struct for op `name`, e.g. PyOp(Elemwise) -> PyElemwise.
#define PyOp(name) Py##name
// The static PyTypeObject backing that wrapper.
#define PyOpType(name) PyOp(name)::py_type
// Open the wrapper struct for op `name`: derives from PyOpDef and exposes
// inst() to downcast the shared OpDef to the concrete C++ op type
// (cast_final_safe asserts on mismatch). Used by the generated opdef.cpy.inl.
#define PyOpDefBegin(name) \
    struct PyOp(name) : PyOpDef { \
        using Ty = name; \
        Ty& inst() { return op->cast_final_safe<Ty>(); } \
        static PyTypeObject py_type;
// Close the struct and define storage for its PyTypeObject.
#define PyOpDefEnd(name) \
    } \
    ; \
    PyTypeObject PyOpType(name);
// Evaluate `val1 <op> val2` for each of the six rich-comparison opcodes and
// return the matching Py_True/Py_False (new references). Mirrors CPython's
// internal RETURN_RICHCOMPARE helper; `op` outside the six opcodes is a
// caller bug and aborts via Py_FatalError.
#define RETURN_RICHCOMPARE(val1, val2, op) \
    do { \
        switch (op) { \
        case Py_EQ: \
            if ((val1) == (val2)) \
                Py_RETURN_TRUE; \
            Py_RETURN_FALSE; \
        case Py_NE: \
            if ((val1) != (val2)) \
                Py_RETURN_TRUE; \
            Py_RETURN_FALSE; \
        case Py_LT: \
            if ((val1) < (val2)) \
                Py_RETURN_TRUE; \
            Py_RETURN_FALSE; \
        case Py_GT: \
            if ((val1) > (val2)) \
                Py_RETURN_TRUE; \
            Py_RETURN_FALSE; \
        case Py_LE: \
            if ((val1) <= (val2)) \
                Py_RETURN_TRUE; \
            Py_RETURN_FALSE; \
        case Py_GE: \
            if ((val1) >= (val2)) \
                Py_RETURN_TRUE; \
            Py_RETURN_FALSE; \
        default: \
            Py_FatalError("Unreachable C code path reached"); \
        } \
    } while (0)
// Generic tp_new for op wrappers: allocate a T and default-construct its
// underlying OpDef via T::Ty::make(). Returns nullptr on allocation failure
// (tp_alloc has already set the Python error in that case).
template <typename T>
PyObject* py_new_generic(PyTypeObject* type, PyObject*, PyObject*) {
    PyObject* obj = type->tp_alloc(type, 0);
    T* self = reinterpret_cast<T*>(obj);
    if (self != NULL) {
        self->op = T::Ty::make();
    }
    return obj;
}
  100. template <typename T, typename SNIFAE = void>
  101. struct serialization {
  102. static T load(py::object obj) { return py::cast<T>(obj); }
  103. template <
  104. typename U, typename = std::enable_if_t<std::is_same_v<T, std::decay_t<U>>>>
  105. static py::object dump(U&& t) {
  106. return py::cast(std::forward<U>(t));
  107. }
  108. };
// Generic tp_dealloc: drop the wrapper's shared_ptr<OpDef> before freeing
// the Python object itself; tp_free must come last.
template <typename T>
void py_dealloc_generic(PyObject* obj) {
    reinterpret_cast<T*>(obj)->op.reset();
    Py_TYPE(obj)->tp_free(obj);
}
// Generic attribute getter: read member `attr` of the concrete op held by
// wrapper T and return it as a new Python object (ownership passed to the
// caller via release()).
template <typename T, typename U, U T::Ty::*attr>
PyObject* py_get_generic_impl(PyObject* obj, void* /* closure */) {
    auto& op = reinterpret_cast<T*>(obj)->inst();
    return py::cast(op.*attr).release().ptr();
}
// Instantiate py_get_generic_impl for op `name`'s member `attr`.
#define py_get_generic(name, attr) \
    py_get_generic_impl<PyOp(name), decltype(std::declval<name>().attr), &name::attr>
// Generic attribute setter: convert `value` to U and assign it to member
// `attr` of the wrapped concrete op. Returns 0 on success, -1 with a Python
// error set on failure; attribute deletion (value == NULL) is rejected.
template <typename T, typename U, U T::Ty::*attr>
int py_set_generic_impl(PyObject* obj, PyObject* value, void* /* closure */) {
    if (value == NULL) {
        PyErr_SetString(PyExc_TypeError, "Cannot delete the attribute");
        return -1;
    }
    auto& op = reinterpret_cast<T*>(obj)->inst();
    try {
        // TODO: remove this guard which is used for pybind11 implicit conversion
        py::detail::loader_life_support guard{};
        op.*attr = py::cast<U>(py::handle(value));
    }
    CATCH_ALL(-1)
    return 0;
}
// Instantiate py_set_generic_impl for op `name`'s member `attr`.
#define py_set_generic(name, attr) \
    py_set_generic_impl<PyOp(name), decltype(std::declval<name>().attr), &name::attr>
// Base Python wrapper for every OpDef. The layout starts with PyObject_HEAD
// so a PyOpDef* is also a valid PyObject*.
struct PyOpDef {
    PyObject_HEAD std::shared_ptr<OpDef> op;  // the wrapped imperative op
    static PyTypeObject py_type;
    // Maps a C++ OpDef typeinfo to the Python type wrapping it; populated by
    // the generated _init_py_* registration functions (opdef.cpy.inl).
    static std::unordered_map<mgb::Typeinfo*, PyTypeObject*> ctype2pytype;
    static PyGetSetDef py_getsetters[];
    static Py_hash_t tp_hash(PyObject* obj);
    static PyObject* tp_richcompare(PyObject* self, PyObject* other, int op);
    // repr(): delegates to OpDef::make_name().
    static PyObject* py_repr(PyObject* self) {
        return py::cast(reinterpret_cast<PyOpDef*>(self)->op->make_name())
                .release()
                .ptr();
    }
};
// Storage for the statics declared above.
PyTypeObject PyOpType(OpDef);
std::unordered_map<mgb::Typeinfo*, PyTypeObject*> PyOp(OpDef)::ctype2pytype;
// Getter for the `scope` property: returns OpDef::scope() as a Python str.
PyObject* py_get_scope(PyObject* obj, void* /* closure */) {
    return py::cast(reinterpret_cast<PyOp(OpDef)*>(obj)->op->scope()).release().ptr();
}
// Setter for `scope`: expects a str. Returns 0 on success, -1 with a Python
// error set otherwise; attribute deletion is rejected.
int py_set_scope(PyObject* obj, PyObject* value, void* /* closure */) {
    if (value == NULL) {
        PyErr_SetString(PyExc_TypeError, "Cannot delete the attribute");
        return -1;
    }
    try {
        reinterpret_cast<PyOp(OpDef)*>(obj)->op->set_scope(
                py::cast<std::string>(py::handle(value)));
    }
    CATCH_ALL(-1)
    return 0;
}
// Property table for OpDef: exposes the read/write `scope` attribute.
PyGetSetDef PyOp(OpDef)::py_getsetters[] = {
        {const_cast<char*>("scope"), py_get_scope, py_set_scope, "scope", NULL},
        {NULL}};  // sentinel entry required by the C API
// hash(op) forwards to OpDef::hash().
Py_hash_t PyOp(OpDef)::tp_hash(PyObject* obj) {
    return static_cast<Py_hash_t>(reinterpret_cast<PyOp(OpDef)*>(obj)->op->hash());
}
  174. PyObject* PyOp(OpDef)::tp_richcompare(PyObject* self, PyObject* other, int op) {
  175. bool same = reinterpret_cast<PyOp(OpDef)*>(self)->op->is_same(
  176. *reinterpret_cast<PyOp(OpDef)*>(other)->op);
  177. if (op == Py_EQ || op == Py_NE) {
  178. RETURN_RICHCOMPARE(same, true, op);
  179. }
  180. Py_RETURN_NOTIMPLEMENTED;
  181. }
// Per-enum metadata (name, max value, ...); specialized by generated code.
template <typename T>
struct EnumTrait;
// Shared data members for the enum wrapper structs below: the wrapped value,
// the enum's display name, its Python type object, member-name table,
// name->value lookup map, and the pre-built singleton instances.
#define PyEnumHead \
    static_assert(std::is_enum_v<T>); \
    PyObject_HEAD T value; \
    constexpr static const char* name = EnumTrait<T>::name; \
    static PyTypeObject* type; \
    static const char* members[]; \
    static std::unordered_map<std::string, T> mem2value; \
    static PyObject* pyobj_insts[];
// Python wrapper for a plain (non bit-combinable) C++ enum T. One immortal
// Python instance per enumerator is pre-created in pyobj_insts; cast() hands
// out references to those singletons.
template <typename T>
struct EnumWrapper {
    PyEnumHead std::string to_string() const {
        // members[] is indexed by the enumerator's underlying value.
        return members[static_cast<size_t>(value)];
    }
    // repr(): "<EnumName>.<MEMBER>".
    static PyObject* py_repr(PyObject* self) {
        return py::cast(
                       std::string(name) + "." +
                       reinterpret_cast<EnumWrapper*>(self)->to_string())
                .release()
                .ptr();
    }
    // dump(): just the member name; consumed by the serialization helpers.
    static PyObject* py_dump(PyObject* self) {
        return py::cast(reinterpret_cast<EnumWrapper*>(self)->to_string())
                .release()
                .ptr();
    }
    // Only == / != are meaningful; operands that cannot be loaded as T simply
    // compare unequal (the 0-vs-1 comparison) rather than raising.
    static PyObject* tp_richcompare(PyObject* self, PyObject* other, int op) {
        if (op == Py_EQ || op == Py_NE) {
            T lhs, rhs;
            if (load(other, rhs) && load(self, lhs)) {
                RETURN_RICHCOMPARE(lhs, rhs, op);
            } else {
                RETURN_RICHCOMPARE(0, 1, op);
            }
        }
        Py_RETURN_NOTIMPLEMENTED;
    }
    // Accept either a wrapper instance or a member-name string (matched
    // case-insensitively via normalize_enum). Returns false if src cannot be
    // interpreted as a T.
    static bool load(py::handle src, T& value) {
        PyObject* obj = src.ptr();
        if (PyObject_TypeCheck(obj, type)) {
            value = reinterpret_cast<EnumWrapper*>(obj)->value;
            return true;
        }
        if (py::isinstance<py::str>(src)) {
            auto&& iter = mem2value.find(normalize_enum(py::cast<std::string>(src)));
            if (iter != mem2value.end()) {
                value = iter->second;
                return true;
            } else {
                return false;
            }
        }
        return false;
    }
    // Return the pre-built singleton for `value` (as a new reference).
    static PyObject* cast(const T& value) {
        auto v = static_cast<std::underlying_type_t<T>>(value);
        mgb_assert(v <= EnumTrait<T>::max);
        PyObject* obj = pyobj_insts[v];
        Py_INCREF(obj);
        return obj;
    }
};
// Python wrapper for a C++ enum T used as a bit-flag set. Single-bit values
// are served from pre-built singletons (pyobj_insts, indexed by bit
// position); zero and multi-bit combinations are allocated on demand.
template <typename T>
struct BitCombinedEnumWrapper {
    PyEnumHead std::string to_string() const {
        uint32_t value_int = static_cast<uint32_t>(value);
        if (value_int == 0) {
            return "None";
        } else {
            // Join the names of all set bits, e.g. "Name.A + Name.B".
            std::string ret;
            bool first = true;
            for (uint32_t i = 0; i < 32; i++) {
                if (value_int >> i & 1) {
                    if (!first) {
                        ret += " + ";
                    } else {
                        first = false;
                    }
                    ret += (std::string(name) + "." + members[i]);
                }
            }
            return ret;
        }
    }
    // tp_new: no argument means the zero/default value; one argument is
    // converted through load() (wrapper, string, tuple or int accepted).
    static PyObject* py_new_combined_enum(
            PyTypeObject* type, PyObject* args, PyObject*) {
        if (!PyTuple_Size(args)) {
            PyObject* obj = type->tp_alloc(type, 0);
            reinterpret_cast<BitCombinedEnumWrapper*>(obj)->value = T();
            return obj;
        } else {
            PyObject* input;
            if (!PyArg_ParseTuple(args, "|O", &input)) {
                return nullptr;
            }
            T value;
            if (load(input, value)) {
                return cast(value);
            } else {
                PyErr_SetString(
                        PyExc_RuntimeError,
                        mgb::ssprintf(
                                "Cannot convert type %s to type %s\n",
                                input->ob_type->tp_name, name)
                                .c_str());
                return nullptr;
            }
        }
    }
    static PyObject* py_repr(PyObject* self) {
        return py::cast(reinterpret_cast<BitCombinedEnumWrapper*>(self)->to_string())
                .release()
                .ptr();
    }
    // dump(): tuple of the set bits' member names, for serialization.
    static PyObject* py_dump(PyObject* self) {
        std::vector<std::string> result;
        auto value = reinterpret_cast<BitCombinedEnumWrapper*>(self)->value;
        uint32_t value_int = static_cast<uint32_t>(value);
        for (uint32_t i = 0; i < 32; i++) {
            if (value_int >> i & 1) {
                result.push_back(members[i]);
            }
        }
        return py::tuple(py::cast(result)).release().ptr();
    }
    // Bitwise |: both operands must be exactly the same wrapper type.
    static PyObject* py_or(PyObject* self, PyObject* other) {
        if (!(self->ob_type == other->ob_type)) {
            return PyErr_Format(
                    PyExc_RuntimeError,
                    "Operand in or operator must be the same type.");
        }
        T lhs = reinterpret_cast<BitCombinedEnumWrapper*>(self)->value,
          rhs = reinterpret_cast<BitCombinedEnumWrapper*>(other)->value;
        return cast(lhs | rhs);
    }
    // Bitwise &: same strict type restriction as py_or.
    static PyObject* py_and(PyObject* self, PyObject* other) {
        if (!(self->ob_type == other->ob_type)) {
            return PyErr_Format(
                    PyExc_RuntimeError,
                    "Operand in and operator must be the same type.");
        }
        T lhs = reinterpret_cast<BitCombinedEnumWrapper*>(self)->value,
          rhs = reinterpret_cast<BitCombinedEnumWrapper*>(other)->value;
        return cast(lhs & rhs);
    }
    // Only == / != are supported; unloadable operands compare unequal.
    static PyObject* tp_richcompare(PyObject* self, PyObject* other, int op) {
        if (op == Py_EQ || op == Py_NE) {
            T lhs, rhs;
            if (load(other, rhs) && load(self, lhs)) {
                RETURN_RICHCOMPARE(lhs, rhs, op);
            } else {
                RETURN_RICHCOMPARE(0, 1, op);
            }
        }
        Py_RETURN_NOTIMPLEMENTED;
    }
    // Accept a wrapper instance, a member-name string, a tuple of member
    // names (or'ed together), or a raw integer <= EnumTrait<T>::max.
    static bool load(py::handle src, T& value) {
        PyObject* obj = src.ptr();
        if (PyObject_TypeCheck(obj, type)) {
            value = reinterpret_cast<BitCombinedEnumWrapper*>(obj)->value;
            return true;
        }
        if (py::isinstance<py::str>(src)) {
            auto&& iter = mem2value.find(normalize_enum(py::cast<std::string>(src)));
            if (iter != mem2value.end()) {
                value = iter->second;
                return true;
            } else {
                return false;
            }
        }
        if (py::isinstance<py::tuple>(src)) {
            auto params = py::cast<std::vector<std::string>>(src);
            bool first = true;
            for (auto s : params) {
                auto&& iter = mem2value.find(normalize_enum(s));
                if (iter != mem2value.end()) {
                    if (first) {
                        value = iter->second;
                        first = false;
                    } else {
                        value |= iter->second;
                    }
                } else {
                    return false;
                }
            }
            return true;
        }
        if (py::isinstance<py::int_>(obj)) {
            auto v = py::cast<std::underlying_type_t<T>>(src);
            if (v > EnumTrait<T>::max) {
                return false;
            }
            value = static_cast<T>(v);
            return true;
        }
        return false;
    }
    // Zero or multi-bit values (v == 0, or v has more than one bit set) get a
    // fresh object; exact single-bit values return the shared singleton,
    // recovering the bit index with count-trailing-zeros.
    static PyObject* cast(const T& value) {
        auto v = static_cast<std::underlying_type_t<T>>(value);
        mgb_assert(v <= EnumTrait<T>::max);
        if ((!v) || (v & (v - 1))) {
            PyObject* obj = type->tp_alloc(type, 0);
            reinterpret_cast<BitCombinedEnumWrapper*>(obj)->value = value;
            return obj;
        } else {
            PyObject* obj = pyobj_insts[__builtin_ctz(v)];
            Py_INCREF(obj);
            return obj;
        }
    }
};
  396. template <typename T>
  397. struct serialization<T, std::enable_if_t<std::is_enum_v<std::decay_t<T>>>> {
  398. static T load(py::object obj) {
  399. auto caster = pybind11::detail::type_caster<T>();
  400. if (caster.load(obj, true)) {
  401. return caster;
  402. } else {
  403. PyErr_SetString(PyExc_RuntimeError, "load faild \n");
  404. return caster;
  405. }
  406. }
  407. static py::object dump(T t) { return py::cast(t).attr("dump")(); }
  408. };
// Create and register the base OpDef Python type on module `m`.
void _init_py_op_def(py::module m) {
    using py_op = PyOp(OpDef);
    auto& py_type = PyOpType(OpDef);
    py_type = {PyVarObject_HEAD_INIT(NULL, 0)};
    py_type.tp_name = "megengine.core._imperative_rt.OpDef";
    py_type.tp_basicsize = sizeof(PyOp(OpDef));
    py_type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;  // subclassable
    py_type.tp_doc = "OpDef";
    py_type.tp_base = &PyBaseObject_Type;
    py_type.tp_hash = PyOp(OpDef)::tp_hash;
    py_type.tp_richcompare = PyOp(OpDef)::tp_richcompare;
    py_type.tp_getset = py_op::py_getsetters;
    py_type.tp_repr = py_op::py_repr;
    mgb_assert(PyType_Ready(&py_type) >= 0);
    m.add_object("OpDef", reinterpret_cast<PyObject*>(&py_type));
}
  425. /*********** begin of hand-write opdefs **************/
// Base type for op defs implemented in Python. tp_new placement-news the
// shared_ptr member because tp_alloc only zero-fills the storage and never
// runs C++ constructors.
struct PyOpBase : PyOpDef {
    static PyTypeObject py_type;
    static PyObject* tp_new(PyTypeObject* type, PyObject*, PyObject*) {
        auto* obj = type->tp_alloc(type, 0);
        if (obj) {
            auto* self = reinterpret_cast<PyOpBase*>(obj);
            new (&self->op) decltype(self->op);
        }
        return obj;
    }
};
PyTypeObject PyOpBase::py_type;
// Create and register the PyOpBase Python type (base for Python-defined ops)
// on module `m`; inherits from the OpDef type registered above.
void _init_py_op_base(py::module m) {
    using py_op = PyOpBase;
    auto& py_type = PyOpBase::py_type;
    py_type = {PyVarObject_HEAD_INIT(NULL, 0)};
    py_type.tp_name = "megengine.core._imperative_rt.ops.PyOpBase";
    py_type.tp_basicsize = sizeof(py_op);
    py_type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;
    py_type.tp_doc = "PyOpBase";
    py_type.tp_base = &PyOpType(OpDef);
    py_type.tp_dealloc = py_dealloc_generic<py_op>;
    py_type.tp_new = py_op::tp_new;
    mgb_assert(PyType_Ready(&py_type) >= 0);
    m.add_object("PyOpBase", reinterpret_cast<PyObject*>(&py_type));
}
  452. /*********** end of hand-write opdefs **************/
  453. // auto generated opdefs
  454. #include "opdef.cpy.inl"
  455. #undef CATCH_ALL
  456. } // anonymous namespace
  457. namespace PYBIND11_NAMESPACE {
  458. namespace detail {
  459. bool type_caster<OpDef>::load(handle src, bool convert) {
  460. PyObject* obj = src.ptr();
  461. if (!PyObject_TypeCheck(obj, &PyOpType(OpDef))) {
  462. return false;
  463. }
  464. value = reinterpret_cast<PyOp(OpDef)*>(obj)->op;
  465. if (!value) {
  466. // opdef only defined in Python
  467. value = std::make_shared<GenericPyOp>(reinterpret_borrow<object>(src));
  468. }
  469. return true;
  470. }
// pybind11 caster: wrap a C++ OpDef in its registered Python type. The op
// must already be owned by a shared_ptr (shared_from_this below).
handle type_caster<OpDef>::cast(const OpDef& op, return_value_policy, handle) {
    if (auto* pyop = op.try_cast_final<GenericPyOp>()) {
        // Python-defined op: hand back the original Python object.
        return object(pyop->obj).release();
    }
    PyTypeObject* pytype;
    auto& c2p = PyOp(OpDef)::ctype2pytype;
    auto&& iter = c2p.find(op.dyn_typeinfo());
    if (iter != c2p.end()) { // FIXME: should always meet this condition
        pytype = iter->second;
    } else { // which means unregistered op type, just make it as an opaque op type
        // currently, only OprAttr goes into this branch
        pytype = &PyOpType(OpDef);
    }
    PyObject* obj = pytype->tp_alloc(pytype, 0);
    mgb_assert(PyObject_TypeCheck(obj, &PyOpType(OpDef)));
    reinterpret_cast<PyOp(OpDef)*>(obj)->op = const_cast<OpDef&>(op).shared_from_this();
    return py::handle(obj);
}
// Define type_caster<T>::load/cast for every plain enum param type by
// delegating to the matching EnumWrapper<T>.
#define ENUM_CASTER_IMPL(T) \
    bool type_caster<T>::load(handle src, bool) { \
        return EnumWrapper<T>::load(src, value); \
    } \
    handle type_caster<T>::cast(const T& value, return_value_policy, handle) { \
        return EnumWrapper<T>::cast(value); \
    }
FOR_EACH_ENUM_PARAM(ENUM_CASTER_IMPL)
// Same, for bit-combinable enum param types.
#define BIT_COMBINED_ENUM_CASTER_IMPL(T) \
    bool type_caster<T>::load(handle src, bool) { \
        return BitCombinedEnumWrapper<T>::load(src, value); \
    } \
    handle type_caster<T>::cast(const T& value, return_value_policy, handle) { \
        return BitCombinedEnumWrapper<T>::cast(value); \
    }
FOR_EACH_BIT_COMBINED_ENUM_PARAM(BIT_COMBINED_ENUM_CASTER_IMPL)
  505. } // namespace detail
  506. } // namespace PYBIND11_NAMESPACE
// Register all op types plus RNG, subgraph-builder and custom-op utilities
// on module `m`.
void init_ops(py::module m) {
    _init_py_op_def(m);
    _init_py_op_base(m);
    INIT_ALL_OP(m)  // generated in opdef.cpy.inl: registers every autogen op
    m.def("new_rng_handle", &rng::new_handle);
    m.def(
            "delete_rng_handle",
            [](size_t handle) {
                // RNG op might execute after handle released due to async dispatch, so
                // we need sync before delete a handle to avoid memory leak or
                // use-after-free
                if (python::interpreter_for_py->check_available()) {
                    python::interpreter_for_py->sync();
                }
                mgb::CompNode::sync_all();
                py_task_q.wait_all_task_finish();
                rng::delete_handle(handle);
            },
            // released GIL: the syncs above wait on worker threads that may
            // themselves need the GIL
            py::call_guard<py::gil_scoped_release>());
    m.def("set_global_rng_seed", &rng::set_global_rng_seed);
    m.def("get_global_rng_seed", &rng::get_global_rng_seed);
    m.def("get_rng_handle_compnode", &rng::get_rng_handle_compnode);
    // Incrementally builds a Subgraph; vars are dense integer ids handed out
    // by next_var (starting at 1).
    struct PySubgraphBuilder {
        explicit PySubgraphBuilder(std::string name) : name{name} {}
        std::string name;
        std::shared_ptr<Subgraph> graph_storage = std::make_shared<Subgraph>();
        std::shared_ptr<UniqueKey> graph_key = std::make_shared<UniqueKey>();
        Subgraph& graph = *graph_storage;
        mgb::SmallVector<bool> output_grad_mask;
        Subgraph::var_t next_var = 1;
        // Freeze the current graph into a SubgraphOp.
        std::shared_ptr<OpDef> build() const {
            return SubgraphOp::make(name, graph_storage, output_grad_mask, graph_key);
        }
    };
    py::class_<PySubgraphBuilder>(m, "SubgraphBuilder")
            .def(py::init<std::string>())
            // Allocate a fresh input var id.
            .def("input",
                 [](PySubgraphBuilder& self) {
                     auto var = self.next_var++;
                     self.graph.inputs.push_back(var);
                     return var;
                 })
            // Apply `op` to `inputs`, allocating `nr_outputs` output vars.
            .def("apply",
                 [](PySubgraphBuilder& self, std::shared_ptr<OpDef> op,
                    Subgraph::vars_t inputs, size_t nr_outputs) {
                     Subgraph::vars_t outputs;
                     for (size_t i = 0; i < nr_outputs; ++i) {
                         outputs.push_back(self.next_var++);
                     }
                     self.graph.exprs.push_back({op, inputs, outputs});
                     return outputs;
                 })
            // Bind a constant tensor converted from a numpy-compatible value.
            .def("apply_const",
                 [](PySubgraphBuilder& self, py::object value, mgb::DType dtype,
                    mgb::CompNode cn) {
                     auto var = self.next_var++;
                     mgb::HostTensorND hvalue(cn);
                     npy::np2tensor(
                             value.cast<py::array>().ptr(),
                             npy::Meth::copy_into(&hvalue), dtype);
                     self.graph.constants.push_back({var, Tensor::make(hvalue)});
                     return var;
                 })
            // Declare graph outputs; by default every output requires grad.
            .def("outputs",
                 [](PySubgraphBuilder& self, Subgraph::vars_t outputs) {
                     self.graph.outputs = outputs;
                     self.output_grad_mask.resize(outputs.size(), true);
                 })
            // Override the per-output grad mask.
            // NOTE(review): the assert only verifies that outputs() was called
            // (current mask size), not outputs_has_grad.size() — confirm
            // callers always pass a mask of matching length.
            .def("outputs_has_grad",
                 [](PySubgraphBuilder& self, mgb::SmallVector<bool> outputs_has_grad) {
                     mgb_assert(
                             self.graph.outputs.size() == self.output_grad_mask.size());
                     self.output_grad_mask = outputs_has_grad;
                 })
            // Return the built SubgraphOp as a plain OpDef.
            .def("get",
                 [](PySubgraphBuilder& self) {
                     return (std::shared_ptr<OpDef>)self.build();
                 })
            // Return a CompiledOp wrapping the built subgraph.
            .def("compile", [](PySubgraphBuilder& self, int gopt_level) {
                return (std::shared_ptr<OpDef>)CompiledOp::make(
                        self.build(), gopt_level);
            });
    auto custom = submodule(m, "_custom");
    init_custom(custom);
}
// switch-case bodies used by make_custom_op below (expanded once per param
// type via the CUSTOM_FOR_* X-macros). They rely on `kv` (the dict entry)
// and `param_val` being in scope at the expansion site.
// Scalar / string param: cast the Python value directly.
#define CUSTOM_CASE_TO_PARSE_NON_LIST(dyn_type, static_type) \
    case custom::ParamDynType::dyn_type: { \
        param_val = py::handle(kv.second).cast<static_type>(); \
        break; \
    }
// List param: cast each element of the Python list to the vector's
// element type, then assign the whole vector.
#define CUSTOM_CASE_TO_PARSE_LIST(dyn_type, static_type) \
    case custom::ParamDynType::dyn_type: { \
        auto pyvals = py::handle(kv.second).cast<py::list>(); \
        static_type vals; \
        using basic_type = custom::get_vector_template_arg_type<static_type>::type; \
        for (auto& pyval : pyvals) { \
            vals.push_back(py::handle(pyval).cast<basic_type>()); \
        } \
        param_val = vals; \
        break; \
    }
  608. PyObject* make_custom_op(PyObject* self, PyObject** args, Py_ssize_t nargs) {
  609. #if MGB_CUSTOM_OP
  610. auto op_name = py::handle(args[0]).cast<std::string>();
  611. auto kwargs = py::handle(args[1]).cast<py::dict>();
  612. std::shared_ptr<OpDef> opdef = CustomOpDefFactory::inst()->create_opdef(op_name);
  613. auto& custom_opdef = static_cast<mgb::imperative::CustomOpDef&>(*opdef);
  614. auto& param = custom_opdef.param();
  615. for (auto&& kv : kwargs) {
  616. std::string param_name = py::handle(kv.first).cast<std::string>();
  617. std::string type_name = py::handle(kv.second).ptr()->ob_type->tp_name;
  618. if (!param.exist(param_name)) {
  619. mgb_log_warn(
  620. "op %s have no param named %s, ignore this param parsed from "
  621. "python",
  622. op_name.c_str(), param_name.c_str());
  623. continue;
  624. }
  625. auto& param_val = param[param_name];
  626. switch (param_val.type()) {
  627. CUSTOM_FOR_EACH_BASIC_PARAMTYPE(CUSTOM_CASE_TO_PARSE_NON_LIST)
  628. CUSTOM_FOR_STRING_PARAMTYPE(CUSTOM_CASE_TO_PARSE_NON_LIST)
  629. CUSTOM_FOR_EACH_BASIC_LIST_PARAMTYPE(CUSTOM_CASE_TO_PARSE_LIST)
  630. CUSTOM_FOR_BOOL_LIST_PARAMTYPE(CUSTOM_CASE_TO_PARSE_LIST)
  631. CUSTOM_FOR_STRING_LIST_PARAMTYPE(CUSTOM_CASE_TO_PARSE_LIST)
  632. default: {
  633. mgb_assert(
  634. false, "param dtype of %s:%s is invalid", op_name.c_str(),
  635. param_name.c_str());
  636. }
  637. }
  638. }
  639. PyTypeObject* pytype;
  640. pytype = &PyOpType(OpDef);
  641. PyObject* obj = pytype->tp_alloc(pytype, 0);
  642. reinterpret_cast<PyOp(OpDef)*>(obj)->op = opdef;
  643. return obj;
  644. #else
  645. mgb_assert(
  646. false,
  647. "Custom Op is disabled now, please build megengine with Custom Op open");
  648. return nullptr;
  649. #endif
  650. }
  651. #undef CUSTOM_CASE_TO_PARSE_LIST
  652. #undef CUSTOM_CASE_TO_PARSE_NON_LIST
// Load the custom-op shared library at `path`, registering it under `name`,
// and return the list of op names it provides. Asserts (fatally) when custom
// ops are compiled out.
py::list install_custom(const std::string& name, const std::string& path) {
#if MGB_CUSTOM_OP
    py::list ret;
    const auto& ops_in_lib = custom::LibManager::inst()->install(name, path);
    for (const auto& op : ops_in_lib) {
        ret.append(op);
    }
    return ret;
#else
    mgb_assert(
            false,
            "Custom Op is disabled now, please build megengine with Custom Op open");
    py::list ret;  // unreachable; keeps the signature well-formed
    return ret;
#endif
}
  669. bool uninstall_custom(const std::string& name) {
  670. #if MGB_CUSTOM_OP
  671. return custom::LibManager::inst()->uninstall(name);
  672. #else
  673. mgb_assert(
  674. false,
  675. "Custom Op is disabled now, please build megengine with Custom Op open");
  676. return false;
  677. #endif
  678. }
  679. py::list get_custom_op_list(void) {
  680. #if MGB_CUSTOM_OP
  681. std::vector<std::string> all_ops = CustomOpDefFactory::inst()->op_list();
  682. py::list ret;
  683. for (auto& op : all_ops) {
  684. ret.append(op);
  685. }
  686. return ret;
  687. #else
  688. mgb_assert(
  689. false,
  690. "Custom Op is disabled now, please build megengine with Custom Op open");
  691. py::list ret;
  692. return ret;
  693. #endif
  694. }
#ifndef METH_FASTCALL
// Pre-fastcall fallback (Python without METH_FASTCALL): unpack a
// METH_VARARGS tuple into the (argv, argc) form make_custom_op expects.
// &PyTuple_GET_ITEM(args, 0) points at the tuple's internal item array.
PyObject* py35_make_custom_op(PyObject* self, PyObject* args) {
    auto* arr = &PyTuple_GET_ITEM(args, 0);
    auto size = PyTuple_GET_SIZE(args);
    return make_custom_op(self, arr, size);
};
#endif
// Register the custom-op submodule API. _make_custom_op is registered via
// the raw C API rather than pybind11 so it can use METH_FASTCALL where the
// interpreter supports it.
void init_custom(pybind11::module m) {
    m.def("_install", &install_custom);
    m.def("_uninstall", &uninstall_custom);
    m.def("_get_custom_op_list", &get_custom_op_list);
    // static: the PyMethodDef must outlive the function object built from it.
    static PyMethodDef method_def = {
#ifdef METH_FASTCALL
        "_make_custom_op", (PyCFunction)make_custom_op, METH_FASTCALL, ""
#else
        "_make_custom_op", (PyCFunction)py35_make_custom_op, METH_VARARGS, ""
#endif
    };
    auto* func = PyCFunction_NewEx(&method_def, nullptr, nullptr);
    pybind11::setattr(m, method_def.ml_name, func);
}

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台