
tensor_utils.cpp

  1. #include "megbrain/common.h"
  2. #include "megbrain/dtype.h"
  3. #include "megbrain/imperative/ops/autogen.h"
  4. #include "megbrain/imperative/ops/backward_graph.h"
  5. #include "megbrain/imperative/ops/utility.h"
  6. #include "megbrain/imperative/profiler.h"
  7. #include "megbrain/imperative/transformations/eval.h"
  8. #include "megbrain/imperative/transformations/lazy.h"
  9. #include "megbrain/imperative/transformations/scalar.h"
  10. #include "megbrain/imperative/transformations/symbol.h"
  11. #include "megbrain/imperative/transformations/trace.h"
  12. #include "megbrain/imperative/utils/map.h"
  13. #include "megbrain/opr/io.h"
  14. #include "megbrain/plugin/profiler.h"
  15. #include "./common.h"
  16. #include "./grad.h"
  17. #include "./graph_rt.h"
  18. #include "./helper.h"
  19. #include "./module_trace.h"
  20. #include "./numpy_dtypes.h"
  21. #include "./tensor.h"
  22. #include "./tensor_utils.h"
  23. #include "./transformation.h"
  24. #include <object.h>
  25. #include <pybind11/numpy.h>
  26. #include <pybind11/operators.h>
  27. #include <pybind11/pytypes.h>
  28. #include <pyerrors.h>
  29. #include <range/v3/all.hpp>
  30. #include <string>
  31. #include <unordered_map>
  32. #include "../../src/impl/mgb_cg_impl.h"
  33. namespace py = pybind11;
  34. namespace views = ranges::views;
  35. namespace mgb::imperative::python {
  36. /* ============== convert inputs ============== */
  37. // map numpy.dtype.kind to priority
  38. inline uint8_t category_priority(char c) {
  39. switch (c) {
  40. case 'f':
  41. return 3; // floating-point
  42. case 'i':
  43. return 2; // signed integer
  44. case 'u':
  45. return 2; // unsigned integer
  46. case 'b':
  47. return 1; // boolean
  48. default:
  49. return 0;
  50. }
  51. }
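// Worked example: for the argument list (float32 tensor, int32 tensor, True)
// the numpy kinds are 'f', 'i' and 'b', giving priorities 3, 2 and 1, so the
// floating-point category wins in max_priority()/_dtype_promotion below.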
  52. // Returns the maximum category priority among the dtypes in `types`.
  53. uint8_t max_priority(SmallVector<PyArray_Descr*> types) {
  54. if (types.size() == 0) {
  55. return 0;
  56. } else {
  57. uint8_t max_p = 0;
  58. for (auto&& desc : types) {
  59. max_p = std::max(max_p, category_priority(desc->kind));
  60. }
  61. return max_p;
  62. }
  63. }
  64. // Returns the data type with sufficient size to hold all types of
  65. // category `cat` in the list `types`.
  66. PyArray_Descr* promote_types(SmallVector<PyArray_Descr*> types, uint8_t cat) {
  67. // Return value: New reference
  68. SmallVector<PyArray_Descr*> used_types;
  69. for (auto&& desc : types) {
  70. auto&& v = category_priority(desc->kind);
  71. if (v == cat) {
  72. used_types.emplace_back(desc);
  73. }
  74. }
  75. mgb_assert(used_types.size() > 0, "size of used_types is 0");
  76. PyArray_Descr* res = used_types[0];
  77. Py_INCREF(res);
  78. for (size_t i = 1; i < used_types.size(); ++i) {
  79. PyArray_Descr* tmp = PyArray_PromoteTypes(used_types[i], res);
  80. Py_DECREF(res);
  81. res = tmp;
  82. }
  83. return res;
  84. }
  85. PyArray_Descr* scalar2dtype(PyObject* arg) {
  86. // Return value: New reference
  87. if (PyBool_Check(arg)) {
  88. auto&& descr = PyArray_DescrFromType(NPY_BOOL);
  89. return descr;
  90. }
  91. if (PyLong_CheckExact(arg)) {
  92. auto&& descr = PyArray_DescrFromType(NPY_INT32);
  93. return descr;
  94. }
  95. if (PyFloat_CheckExact(arg)) {
  96. auto&& descr = PyArray_DescrFromType(NPY_FLOAT32);
  97. return descr;
  98. }
  99. return nullptr;
  100. }
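// Gathers numpy dtype descriptors for the arguments, keeping tensor-like
// objects (TensorWrapper, numpy arrays and numpy scalars) separate from plain
// Python scalars. Scalars only decide the result when their category priority
// strictly exceeds that of the tensors: a Python float promotes an int32
// tensor to float32, while a Python int does not widen a float32 tensor.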
  101. PyArray_Descr* _dtype_promotion(PyObject* const* args, size_t nargs) {
  102. // Return value: New reference
  103. SmallVector<PyArray_Descr*> tensors;
  104. SmallVector<PyArray_Descr*> scalars;
  105. bool is_tuple = false;
  106. PyObject* tuple = nullptr;
  107. if (nargs == 1 && (PyTuple_Check(args[0]) || PyList_Check(args[0]))) {
  108. if (PyList_Check(args[0])) {
  109. tuple = PyList_AsTuple(args[0]);
  110. } else {
  111. tuple = args[0];
  112. Py_INCREF(tuple);
  113. }
  114. nargs = PyTuple_Size(tuple);
  115. is_tuple = true;
  116. }
  117. for (size_t i = 0; i < nargs; ++i) {
  118. PyObject* handle = is_tuple ? PyTuple_GetItem(tuple, i) : args[i];
  119. if (handle == Py_None)
  120. continue;
  121. TensorWrapper* tw = TensorWrapper::try_cast(handle);
  122. if (tw) {
  123. mgb::DType type = tw->m_tensor->dtype();
  124. auto&& descr = npy::dtype_mgb2np_descr(type);
  125. Py_INCREF(descr.get());
  126. tensors.emplace_back(descr.get());
  127. } else {
  128. if (PyArray_Check(handle) || PyArray_CheckScalar(handle)) {
  129. auto&& descr = PyArray_DescrFromObject(handle, nullptr);
  130. tensors.emplace_back(descr);
  131. continue;
  132. }
  133. PyArray_Descr* descr = scalar2dtype(handle);
  134. if (descr) {
  135. scalars.emplace_back(descr);
  136. continue;
  137. }
  138. }
  139. }
  140. auto max_pri_scalars = max_priority(scalars);
  141. auto max_pri_tensors = max_priority(tensors);
  142. if (max_pri_scalars <= 0 && max_pri_tensors <= 0) {
  143. throw py::value_error("invalid input, no dtype available");
  144. }
  145. PyArray_Descr* res;
  146. if (max_pri_scalars > max_pri_tensors) {
  147. res = promote_types(scalars, max_pri_scalars);
  148. } else {
  149. res = promote_types(tensors, max_pri_tensors);
  150. }
  151. for (auto* p : tensors) {
  152. Py_DECREF(p);
  153. }
  154. for (auto* p : scalars) {
  155. Py_DECREF(p);
  156. }
  157. Py_XDECREF(tuple);
  158. return res;
  159. }
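// Returns the comp node shared by all tensor arguments, raising if two tensor
// arguments live on different devices and falling back to the default device
// when no tensor argument is present.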
  160. CompNode _get_device(PyObject* const* args, size_t nargs) {
  161. bool is_tuple = false;
  162. PyObject* tuple = nullptr;
  163. if (nargs == 1 && (PyTuple_Check(args[0]) || PyList_Check(args[0]))) {
  164. if (PyList_Check(args[0])) {
  165. tuple = PyList_AsTuple(args[0]);
  166. } else {
  167. tuple = args[0];
  168. Py_INCREF(tuple);
  169. }
  170. nargs = PyTuple_Size(tuple);
  171. is_tuple = true;
  172. }
  173. bool valid = false;
  174. CompNode cn;
  175. for (size_t i = 0; i < nargs; ++i) {
  176. PyObject* handle = is_tuple ? PyTuple_GetItem(tuple, i) : args[i];
  177. TensorWrapper* tw = TensorWrapper::try_cast(handle);
  178. if (tw) {
  179. if (!valid) {
  180. cn = tw->m_tensor->comp_node();
  181. valid = true;
  182. } else {
  183. CompNode cn1 = tw->m_tensor->comp_node();
  184. if (cn1 != cn) {
  185. throw py::value_error(ssprintf(
  186. "ambiguous device: %s (from %s) vs %s (from %s)",
  187. cn.to_string().c_str(), cn.to_string_logical().c_str(),
  188. cn1.to_string().c_str(), cn1.to_string_logical().c_str()));
  189. }
  190. }
  191. }
  192. }
  193. Py_XDECREF(tuple);
  194. if (!valid) {
  195. return CompNode::load(get_default_device());
  196. }
  197. return cn;
  198. }
  199. // Returns the dtype that would result from performing an arithmetic
  200. // operation on the provided input tensors and scalars.
  201. PyObject* dtype_promotion(PyObject* self, PyObject* const* args, size_t nargs) {
  202. if (!nargs) {
  203. PyErr_SetString(PyExc_TypeError, "empty input is not allowed");
  204. return nullptr;
  205. }
  206. try {
  207. PyArray_Descr* res = _dtype_promotion(args, nargs);
  208. return py::cast(npy::dtype_np2mgb_descr(res)).release().ptr();
  209. }
  210. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  211. }
  212. PyObject* get_device(PyObject* self, PyObject* const* args, size_t nargs) {
  213. if (!nargs) {
  214. PyErr_SetString(PyExc_TypeError, "empty input is not allowed");
  215. return nullptr;
  216. }
  217. try {
  218. CompNode cn = _get_device(args, nargs);
  219. return py::cast(cn).release().ptr();
  220. }
  221. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  222. }
  223. bool is_scalar(PyObject* tensor) {
  224. auto* tw = TensorWrapper::try_cast(tensor);
  225. if (tw) {
  226. return tw->m_tensor->is_scalar();
  227. }
  228. return PyArray_CheckAnyScalar(tensor);
  229. }
  230. bool is_bool_list(PyObject* arg) {
  231. if (!PyList_Check(arg)) {
  232. return false;
  233. }
  234. size_t sz = PyList_Size(arg);
  235. if (!sz) {
  236. return false;
  237. }
  238. for (size_t i = 0; i < sz; ++i) {
  239. PyObject* handle = PyList_GetItem(arg, i);
  240. if (!PyBool_Check(handle)) {
  241. return false;
  242. }
  243. }
  244. return true;
  245. }
  246. bool is_bool_dtype(PyObject* args) {
  247. if (!PyObject_HasAttrString(args, "dtype"))
  248. return false;
  249. PyObject* dobj = PyObject_GetAttrString(args, "dtype");
  250. PyArray_Descr* dtype;
  251. PyArray_DescrConverter(dobj, &dtype);
  252. bool ret = (dtype->kind == 'b');
  253. Py_XDECREF(dtype);
  254. Py_XDECREF(dobj);
  255. return ret;
  256. }
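// Normalizes a Python "device" argument to a CompNode object: None maps to the
// default device, a string is (optionally) rewritten through the class-level
// dmap_callback and then loaded by name, a CompNode passes through unchanged,
// and any other object is expected to expose a `_cn` attribute.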
  257. py::object device2obj(py::handle device, bool mapping = false) {
  258. if (device.ptr() == Py_None) {
  259. return py::cast(CompNode::load(get_default_device()));
  260. } else if (py::isinstance<py::str>(device)) {
  261. if (mapping) {
  262. py::object dmap = getattr(
  263. py::reinterpret_borrow<py::object>((PyObject*)py_tensor_type),
  264. "dmap_callback");
  265. if (dmap.ptr() != Py_None) {
  266. return device2obj(dmap(device), false);
  267. }
  268. }
  269. return py::cast(CompNode::load(device.cast<std::string>()));
  270. } else if (py::isinstance<CompNode>(device)) {
  271. return py::reinterpret_borrow<py::object>(device);
  272. } else {
  273. return getattr(device, "_cn");
  274. }
  275. }
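// Builds a constant tensor from a host value on the resolved device. The
// squeeze/reshape dance below appears to be a workaround that materializes
// numpy arrays containing zero strides (e.g. broadcasted views) before they
// are handed to TensorWrapper::make.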
  276. py::object _Const(py::handle value, py::handle dtype, py::handle device) {
  277. py::object val = py::reinterpret_borrow<py::object>(value);
  278. if (PyArray_Check(value.ptr())) {
  279. py::tuple strides =
  280. py::reinterpret_borrow<py::tuple>(getattr(value, "strides"));
  281. bool need_squeeze = false;
  282. for (size_t i = 0; i < strides.size(); ++i) {
  283. if (strides[i].cast<ptrdiff_t>() == 0) {
  284. need_squeeze = true;
  285. }
  286. }
  287. if (need_squeeze) {
  288. val = py::reinterpret_borrow<py::array>(value);
  289. py::object orig_shp = val.attr("shape");
  290. val = val.attr("squeeze")();
  291. val = val.attr("reshape")(orig_shp);
  292. }
  293. }
  294. py::object device_obj = device2obj(device, true);
  295. py::tuple tup =
  296. py::make_tuple(val, dtype, device_obj, true, false, py::none(), py::none());
  297. return TensorWrapper::make(py_tensor_type, tup.ptr(), nullptr);
  298. }
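// Flattens a shape-like object (tensor, numpy array, list, tuple or scalar)
// into a plain Python tuple of integers; tensor entries are read back through
// their numpy() representation.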
  299. py::tuple _make_shape_tuple(py::handle shape) {
  300. py::list orig;
  301. py::list ret(0);
  302. auto solve_one = [&](py::handle val) {
  303. if (TensorWrapper::try_cast(val.ptr())) {
  304. py::object np = getattr(val, "numpy")();
  305. PyArrayObject* arr = (PyArrayObject*)np.ptr();
  306. PyObject* maybe_list = PyArray_ToList(arr);
  307. if (PyList_Check(maybe_list)) {
  308. py::list may = py::reinterpret_steal<py::list>(maybe_list);
  309. for (size_t i = 0; i < may.size(); ++i) {
  310. ret.append(may[i]);
  311. }
  312. } else {
  313. mgb_assert(PyLong_Check(maybe_list));
  314. ret.append(PyLong_AsLong(maybe_list));
  315. Py_XDECREF(maybe_list);
  316. }
  317. } else if (PyArray_Check(val.ptr())) {
  318. ret.append(PyArray_PyIntAsInt(val.ptr()));
  319. } else {
  320. ret.append(PyLong_AsLong(val.ptr()));
  321. }
  322. };
  323. if (PyArray_Check(shape.ptr()) && !PyArray_CheckAnyScalar(shape.ptr())) {
  324. orig = py::reinterpret_steal<py::list>(
  325. PyArray_ToList((PyArrayObject*)shape.ptr()));
  326. for (size_t i = 0; i < orig.size(); ++i) {
  327. solve_one(orig[i]);
  328. }
  329. } else if (PyList_Check(shape.ptr())) {
  330. orig = py::reinterpret_borrow<py::list>(shape);
  331. for (size_t i = 0; i < orig.size(); ++i) {
  332. solve_one(orig[i]);
  333. }
  334. } else if (PyTuple_Check(shape.ptr())) {
  335. py::tuple tup = py::reinterpret_borrow<py::tuple>(shape);
  336. for (size_t i = 0; i < tup.size(); ++i) {
  337. solve_one(tup[i]);
  338. }
  339. } else {
  340. solve_one(shape);
  341. }
  342. return py::reinterpret_steal<py::tuple>(PyList_AsTuple(ret.ptr()));
  343. }
  344. bool is_tensor(py::handle arg) {
  345. return bool(TensorWrapper::try_cast(arg.ptr()));
  346. }
  347. bool is_py_sequence(py::handle arg) {
  348. if (PyArray_Check(arg.ptr()) || TensorWrapper::try_cast(arg.ptr())) {
  349. return false;
  350. }
  351. return PySequence_Check(arg.ptr());
  352. }
  353. py::object get_res_by_refhdl(
  354. py::handle value, py::handle dtype, py::handle device, py::handle ref_hdl) {
  355. py::object res = _Const(value, dtype, device);
  356. py::object ref;
  357. if (py::isinstance<py::tuple>(ref_hdl)) {
  358. py::tuple tup = py::reinterpret_borrow<py::tuple>(ref_hdl);
  359. if (tup.size()) {
  360. ref = tup[0];
  361. } else {
  362. ref = py::none();
  363. }
  364. } else {
  365. ref = py::reinterpret_borrow<py::object>(ref_hdl);
  366. }
  367. if (PyObject_TypeCheck(ref.ptr(), py_varnode_type)) {
  368. auto temp = dtype.cast<mgb::DType>();
  369. ComputingGraph* graph = getattr(ref, "graph").cast<ComputingGraph*>();
  370. cg::VarNode* node = getattr(ref, "var").cast<cg::VarNode*>();
  371. CompNode cn;
  372. if (device.ptr() == Py_None) {
  373. cn = node->comp_node();
  374. } else {
  375. cn = device2obj(device).cast<CompNode>();
  376. }
  377. OperatorNodeConfig config(cn);
  378. auto hv = npy::np2tensor(
  379. value.ptr(), npy::Meth::borrow(cn), dtype.cast<mgb::DType>());
  380. auto typeobj = ref.get_type();
  381. return typeobj(opr::ImmutableTensor::make(*graph, hv, config).node());
  382. }
  383. return res;
  384. }
  385. mgb::DType _get_dtype(py::handle tensor) {
  386. auto tw = TensorWrapper::try_cast(tensor.ptr());
  387. return tw->m_tensor->dtype();
  388. }
  389. py::object _astype_cpp(py::handle tensor, py::handle dtype_hdl) {
  390. PyArray_Descr* descr;
  391. py::object ret;
  392. if (!PyArray_DescrConverter(dtype_hdl.ptr(), &descr)) {
  393. throw py::value_error(ssprintf(
  394. "can not convert to numpy.dtype from %s",
  395. dtype_hdl.ptr()->ob_type->tp_name));
  396. }
  397. auto&& cur = npy::dtype_mgb2np_descr(_get_dtype(tensor));
  398. if (!dtype_equal(cur.get(), descr)) {
  399. std::shared_ptr<OpDef> op = TypeCvt::make(npy::dtype_np2mgb_descr(descr));
  400. py::object Op = py::cast(op);
  401. PyObject* p[2] = {Op.ptr(), tensor.ptr()};
  402. py::tuple apply_res = py::reinterpret_steal<py::object>(py_apply(NULL, p, 2));
  403. ret = apply_res[0];
  404. } else {
  405. ret = py::reinterpret_borrow<py::object>(tensor);
  406. }
  407. Py_DECREF(descr);
  408. return ret;
  409. }
  410. py::object _convert_single_value_cpp(
  411. py::handle value, py::handle dtype, py::handle device) {
  412. if (is_tensor(value)) {
  413. if (_get_dtype(value).category() != DTypeCategory::QUANTIZED) {
  414. return _astype_cpp(value, dtype);
  415. }
  416. } else {
  417. return _Const(value, dtype, device);
  418. }
  419. return py::reinterpret_borrow<py::object>(value);
  420. }
  421. py::object _convert_inputs_cpp(
  422. PyObject* const* args, size_t nargs, py::object dtype, py::object device) {
  423. ComputingGraph* graph = nullptr;
  424. py::handle typeobj;
  425. py::list lis;
  426. for (size_t i = 0; i < nargs; ++i) {
  427. py::handle h = py::handle(args[i]);
  428. lis.append(h);
  429. }
  430. auto convert = [&](py::object value) {
  431. if (value.is_none()) {
  432. return value;
  433. }
  434. return _convert_single_value_cpp(value, dtype, device);
  435. };
  436. for (size_t i = 0; i < lis.size(); ++i) {
  437. lis[i] = convert(lis[i]);
  438. }
  439. return py::reinterpret_steal<py::tuple>(PyList_AsTuple(lis.ptr()));
  440. }
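// Presumably the C++ backend of the Python-level astensor1d helper: accepts
// scalars, sequences, tensors and VarNodes, restricted to ndim 0 or 1.
// Sequences that already contain tensors are flattened and joined with Concat
// on a common device; everything else is routed through get_res_by_refhdl.
// An explicit dtype triggers a final astype.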
  441. py::object _astensor1d_cpp(
  442. py::handle value, py::handle dtype, py::handle device, py::handle ref) {
  443. py::object ret;
  444. py::object device_obj = py::none();
  445. py::object ndim_obj = py::none();
  446. if (device.ptr() != Py_None) {
  447. device_obj = device2obj(device);
  448. }
  449. if (PyObject_TypeCheck(value.ptr(), py_varnode_type)) {
  450. try {
  451. getattr(value, "ndim");
  452. } catch (py::error_already_set& err) {
  453. if (dtype.ptr() != Py_None) {
  454. ret = _astype_cpp(value, dtype);
  455. } else {
  456. ret = py::reinterpret_borrow<py::object>(value);
  457. }
  458. if (device.ptr() != Py_None) {
  459. std::shared_ptr<OpDef> op = Copy::make(device_obj.cast<CompNode>());
  460. py::object Op = py::cast(op);
  461. PyObject* p[2] = {Op.ptr(), ret.ptr()};
  462. py::tuple copy_ret =
  463. py::reinterpret_steal<py::object>(py_apply(NULL, p, 2));
  464. return copy_ret[0];
  465. }
  466. return ret;
  467. }
  468. }
  469. size_t ndim = 999;
  470. if (hasattr(value, "ndim")) {
  471. ndim = getattr(value, "ndim").cast<size_t>();
  472. if (ndim != 0 && ndim != 1) {
  473. throw py::value_error("ndim must be 0 or 1, got: " + std::to_string(ndim));
  474. }
  475. if (!is_tensor(value)) {
  476. return get_res_by_refhdl(value, dtype, device, ref);
  477. } else {
  478. return py::reinterpret_borrow<py::object>(value);
  479. }
  480. }
  481. if (!is_py_sequence(value)) {
  482. throw py::type_error();
  483. }
  484. py::list lis = py::reinterpret_steal<py::list>(PySequence_List(value.ptr()));
  485. bool need_concat = false;
  486. for (size_t i = 0; i < lis.size(); ++i) {
  487. if (is_tensor(lis[i])) {
  488. need_concat = true;
  489. break;
  490. }
  491. }
  492. if (!need_concat) {
  493. return get_res_by_refhdl(value, dtype, device, ref);
  494. }
  495. if (lis.size() > 1) {
  496. py::list flat_list;
  497. for (auto item : lis) {
  498. if (!PyList_Check(item.ptr())) {
  499. flat_list.append(item);
  500. } else {
  501. py::list sub_lis =
  502. py::reinterpret_steal<py::list>(PySequence_List(item.ptr()));
  503. for (auto sub_item : sub_lis) {
  504. flat_list.append(sub_item);
  505. }
  506. }
  507. }
  508. std::vector<PyObject*> c_args(flat_list.size() + 1);
  509. for (size_t i = 0; i < flat_list.size(); ++i) {
  510. c_args[i] = flat_list[i].ptr();
  511. }
  512. c_args[flat_list.size()] = Py_None;
  513. py::tuple inp_tup = py::reinterpret_steal<py::tuple>(
  514. convert_inputs_cpp(NULL, c_args.data(), c_args.size()));
  515. if (device_obj.is_none()) {
  516. std::vector<PyObject*> inp(inp_tup.size());
  517. for (size_t i = 0; i < inp_tup.size(); ++i) {
  518. inp[i] = inp_tup[i].ptr();
  519. }
  520. device_obj = py::cast(_get_device(inp.data(), inp.size()));
  521. }
  522. std::shared_ptr<OpDef> op = Concat::make(0, device_obj.cast<CompNode>());
  523. py::object Op = py::cast(op);
  524. std::vector<PyObject*> p;
  525. p.resize(inp_tup.size() + 1);
  526. p[0] = Op.ptr();
  527. for (size_t i = 0; i < inp_tup.size(); ++i) {
  528. p[i + 1] = inp_tup[i].ptr();
  529. }
  530. py::tuple concat_ret =
  531. py::reinterpret_steal<py::object>(py_apply(NULL, p.data(), p.size()));
  532. ret = concat_ret[0];
  533. } else {
  534. ret = lis[0];
  535. }
  536. if (dtype.ptr() != Py_None) {
  537. return _astype_cpp(ret, dtype);
  538. } else {
  539. return ret;
  540. }
  541. }
  542. py::object _get_index(py::object tensor, py::object src) {
  543. if (!TensorWrapper::try_cast(tensor.ptr())) {
  544. auto get_const = [&](mgb::DType dtype) -> py::object {
  545. return _Const(tensor, py::cast(dtype), src.attr("device"));
  546. };
  547. if (is_bool_list(tensor.ptr()) || is_bool_dtype(tensor.ptr())) {
  548. tensor = get_const(dtype::Bool());
  549. } else {
  550. tensor = get_const(dtype::Int32());
  551. }
  552. if (!is_bool_dtype(tensor.ptr())) {
  553. return tensor;
  554. }
  555. } else {
  556. if (!is_bool_dtype(tensor.ptr())) {
  557. return tensor;
  558. }
  559. }
  560. std::shared_ptr<OpDef> op = CondTake::make();
  561. py::object Op = py::cast(op);
  562. PyObject* p[3] = {Op.ptr(), tensor.ptr(), tensor.ptr()};
  563. py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 3));
  564. return ret[1];
  565. }
  566. py::tuple _try_cond_take(py::handle tensor, py::handle index) {
  567. if (!hasattr(index, "dtype") || !hasattr(index, "shape")) {
  568. return py::tuple();
  569. }
  570. if (!is_bool_dtype(index.ptr()) ||
  571. _make_shape_tuple(getattr(index, "shape"))
  572. .not_equal(_make_shape_tuple(getattr(tensor, "shape")))) {
  573. return py::tuple();
  574. }
  575. py::object iobj;
  576. if (PyArray_Check(index.ptr())) {
  577. iobj = _Const(
  578. index, py::cast((mgb::DType)dtype::Bool()), getattr(tensor, "device"));
  579. } else {
  580. iobj = py::reinterpret_borrow<py::object>(index);
  581. }
  582. std::shared_ptr<OpDef> op = CondTake::make();
  583. py::object Op = py::cast(op);
  584. PyObject* p[3] = {Op.ptr(), tensor.ptr(), iobj.ptr()};
  585. py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 3));
  586. return ret;
  587. }
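// Replaces a single Py_Ellipsis in an index tuple with the full slices needed
// to cover the remaining dimensions of `tensor`; more than one ellipsis, or a
// boolean index of unknown ndim, is rejected with an index_error.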
  588. py::tuple _remove_ellipsis(py::object tensor, py::tuple tuple_val) {
  589. size_t tuple_size = tuple_val.size();
  590. size_t ndim_sum = 0, cur_sum = 0;
  591. int pos = -1;
  592. bool has_unknown_ndim_bool_index = false;
  593. for (size_t i = 0; i < tuple_size; ++i) {
  594. py::object handle = tuple_val[i];
  595. if (handle.is_none()) {
  596. continue;
  597. } else if (handle.ptr() == Py_Ellipsis) {
  598. pos = static_cast<int>(i);
  599. for (size_t j = 0; j < i; ++j) {
  600. py::object t = tuple_val[j];
  601. if (t.ptr() == Py_Ellipsis) {
  602. throw py::index_error("only one ellipsis is allowed.");
  603. }
  604. }
  605. } else {
  606. size_t ndim_incr = 1;
  607. if (hasattr(handle, "dtype") && is_bool_dtype(handle.ptr()) &&
  608. hasattr(handle, "ndim")) {
  609. py::object ndim;
  610. try {
  611. ndim = getattr(handle, "ndim");
  612. } catch (py::error_already_set& err) {
  613. has_unknown_ndim_bool_index = true;
  614. }
  615. if (PyLong_Check(ndim.ptr())) {
  616. ndim_incr = PyLong_AsLong(ndim.ptr());
  617. } else {
  618. has_unknown_ndim_bool_index = true;
  619. }
  620. }
  621. cur_sum += ndim_incr;
  622. }
  623. }
  624. if (pos == -1) {
  625. return tuple_val;
  626. } else {
  627. if (has_unknown_ndim_bool_index) {
  628. throw py::index_error(
  629. "does not support bool index with unknown shape when using "
  630. "Ellipsis.");
  631. }
  632. try {
  633. ndim_sum = getattr(tensor, "ndim").cast<size_t>();
  634. } catch (py::error_already_set& err) {
  635. throw py::index_error(
  636. "does not support Ellipsis when tensor's ndim is unknown.");
  637. }
  638. py::tuple ret(ndim_sum - cur_sum + tuple_size - 1);
  639. size_t idx = 0;
  640. for (size_t i = 0; i < tuple_size; ++i) {
  641. if (i == pos) {
  642. for (size_t j = cur_sum; j < ndim_sum; ++j) {
  643. ret[idx++] = PySlice_New(NULL, NULL, NULL);
  644. }
  645. } else {
  646. ret[idx++] = tuple_val[i];
  647. }
  648. }
  649. return ret;
  650. }
  651. }
  652. py::object _reshape_cpp(py::handle inp_hdl, py::handle args);
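// Handles multi-dimensional boolean masks inside an index tuple: the masked
// dimensions of `tensor` are collapsed into one by a reshape so the mask can
// act as a 1-d index, and the adjusted (tensor, index tuple) pair is returned.
// Mask/tensor shape mismatches raise an index_error.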
  653. py::tuple _expand_bool_dim(py::object tensor, py::tuple tuple_val) {
  654. py::tuple cur_shape = _make_shape_tuple(py::handle(getattr(tensor, "shape")));
  655. py::list new_tuple_val(0);
  656. size_t offset = 0;
  657. size_t tdim = 0;
  658. size_t nonedim = 0;
  659. for (size_t i = 0; i < tuple_val.size(); ++i) {
  660. py::handle k = tuple_val[i];
  661. if (k.ptr() == Py_None) {
  662. nonedim++;
  663. new_tuple_val.append(k);
  664. continue;
  665. }
  666. if (is_bool_dtype(k.ptr())) {
  667. size_t ndim = getattr(k, "ndim").cast<size_t>();
  668. if (ndim > 1) {
  669. py::tuple ishape = _make_shape_tuple(py::handle(getattr(k, "shape")));
  670. for (size_t j = 0; j < ndim; ++j) {
  671. if (cur_shape[tdim + j - offset].cast<size_t>() !=
  672. ishape[j].cast<size_t>()) {
  673. std::string msg =
  674. "boolean index did not match tensor along "
  675. "dimension " +
  676. std::to_string(tdim + j) + "; dimension is " +
  677. std::to_string(
  678. cur_shape[tdim + j - offset].cast<size_t>()) +
  679. " but corresponding boolean dimension is " +
  680. std::to_string(ishape[j].cast<size_t>());
  681. throw py::index_error(msg.c_str());
  682. }
  683. }
  684. py::object new_k = getattr(k, "reshape")(-1);
  685. py::object kshape = getattr(new_k, "shape");
  686. py::list new_shape(0);
  687. PyObject* sym = PyObject_CallObject(cpp_use_symbolic_shape, nullptr);
  688. bool is_sym = (sym == Py_True);
  689. Py_XDECREF(sym);
  690. if (is_sym) {
  691. py::object tshape = getattr(tensor, "shape");
  692. for (size_t j = 0; j < i - nonedim; ++j) {
  693. new_shape.append(tshape[py::int_(j)]);
  694. }
  695. new_shape.append(kshape[py::int_(0)]);
  696. for (size_t j = tdim + ndim - offset; j < cur_shape.size(); ++j) {
  697. new_shape.append(cur_shape[j]);
  698. }
  699. py::object shape_tensor = _astensor1d_cpp(
  700. new_shape, py::none(), py::none(), py::none());
  701. tensor = _reshape_cpp(tensor, shape_tensor);
  702. cur_shape = _make_shape_tuple(shape_tensor);
  703. } else {
  704. for (size_t j = 0; j < i - nonedim; ++j) {
  705. new_shape.append(cur_shape[j]);
  706. }
  707. new_shape.append(py::reinterpret_borrow<py::tuple>(kshape)[0]);
  708. for (size_t j = tdim + ndim - offset; j < cur_shape.size(); ++j) {
  709. new_shape.append(cur_shape[j]);
  710. }
  711. cur_shape = new_shape;
  712. tensor = _reshape_cpp(tensor, cur_shape);
  713. }
  714. offset++;
  715. tdim += ndim;
  716. }
  717. new_tuple_val.append(k);
  718. } else {
  719. new_tuple_val.append(k);
  720. tdim++;
  721. }
  722. }
  723. return py::make_tuple(tensor, py::reinterpret_borrow<py::tuple>(new_tuple_val));
  724. }
  725. std::pair<size_t, bool> get_ndim_safe(py::handle tensor) {
  726. if (auto p = TensorWrapper::try_cast(tensor.ptr())) {
  727. return {p->m_tensor->shape()->ndim, true};
  728. }
  729. try {
  730. return {getattr(tensor, "ndim").cast<size_t>(), true};
  731. } catch (py::error_already_set& err) {
  732. return {0, false};
  733. }
  734. }
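// Normalizes an arbitrary indexing expression into the form consumed by
// Subtensor / IndexingMultiAxisVec: expands Ellipsis and boolean masks, adds
// axes for None entries, and returns (tensor, index tensors, per-axis item
// descriptors, use_subtensor flag, need_expand_bool_dim flag).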
  735. py::tuple _unpack_indexes(py::handle inp_hdl, py::handle idx_hdl) {
  736. py::object inp = py::reinterpret_borrow<py::object>(inp_hdl);
  737. py::tuple tuple_val;
  738. if (py::isinstance<py::tuple>(idx_hdl)) {
  739. tuple_val = py::reinterpret_borrow<py::tuple>(idx_hdl);
  740. } else {
  741. tuple_val = py::make_tuple(idx_hdl);
  742. }
  743. bool use_subtensor = true;
  744. bool need_remove_ellipsis = false;
  745. bool need_expand_bool_dim = false;
  746. size_t idx_ndim = 0;
  747. for (size_t i = 0; i < tuple_val.size(); ++i) {
  748. py::object k = tuple_val[i];
  749. if (k.is_none()) {
  750. continue;
  751. } else if (k.ptr() == Py_Ellipsis) {
  752. need_remove_ellipsis = true;
  753. } else {
  754. if (is_bool_dtype(k.ptr()) && hasattr(k, "ndim")) {
  755. size_t ndim = get_ndim_safe(k).first;
  756. idx_ndim += ndim;
  757. if (ndim > 1) {
  758. need_expand_bool_dim = true;
  759. }
  760. } else {
  761. idx_ndim++;
  762. }
  763. }
  764. }
  765. try {
  766. size_t inp_ndim = getattr(inp, "ndim").cast<size_t>();
  767. if (idx_ndim > inp_ndim) {
  768. std::string msg = "too many indices for tensor: tensor is " +
  769. std::to_string(inp_ndim) + "-dimensional, but " +
  770. std::to_string(idx_ndim) + " were indexed";
  771. throw py::index_error(msg.c_str());
  772. }
  773. } catch (py::error_already_set& err) {
  774. ; // ignore
  775. }
  776. if (need_remove_ellipsis) {
  777. tuple_val = _remove_ellipsis(inp, tuple_val);
  778. }
  779. if (need_expand_bool_dim) {
  780. py::object shape = getattr(inp, "shape");
  781. if (shape.ptr() != Py_None) {
  782. py::tuple ret = _expand_bool_dim(inp, tuple_val);
  783. inp = ret[0];
  784. tuple_val = ret[1];
  785. }
  786. }
  787. std::vector<int32_t> axis;
  788. for (size_t i = 0; i < tuple_val.size(); ++i) {
  789. if (tuple_val[i].is_none()) {
  790. axis.push_back(i);
  791. }
  792. }
  793. if (axis.size()) {
  794. std::shared_ptr<OpDef> op = AddAxis::make(axis);
  795. py::object Op = py::cast(op);
  796. PyObject* p[2] = {Op.ptr(), inp.ptr()};
  797. py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 2));
  798. inp = ret[0];
  799. }
  800. py::list items;
  801. py::list tensors;
  802. int cur_axis = -1;
  803. for (size_t i = 0; i < tuple_val.size(); ++i) {
  804. py::object handle = tuple_val[i];
  805. cur_axis++;
  806. if (handle.is_none()) {
  807. continue;
  808. }
  809. if (!is_scalar(handle.ptr()) && !PySlice_Check(handle.ptr())) {
  810. use_subtensor = false;
  811. }
  812. py::list item;
  813. item.append(cur_axis);
  814. auto push = [&](PyObject* v) {
  815. if (v == Py_None) {
  816. item.append(false);
  817. } else {
  818. item.append(true);
  819. tensors.append(_get_index(py::reinterpret_borrow<py::object>(v), inp));
  820. }
  821. };
  822. if (PySlice_Check(handle.ptr())) {
  823. PySliceObject* s = (PySliceObject*)handle.ptr();
  824. if (s->start == Py_None && s->stop == Py_None && s->step == Py_None) {
  825. continue;
  826. }
  827. push(s->start);
  828. push(s->stop);
  829. push(s->step);
  830. item.append(false);
  831. } else {
  832. for (size_t j = 0; j < 3; j++)
  833. item.append(false);
  834. push(handle.ptr());
  835. }
  836. items.append(item);
  837. }
  838. return py::make_tuple(inp, tensors, items, use_subtensor, need_expand_bool_dim);
  839. }
  840. py::object _expand_args(py::handle args) {
  841. if (!PyTuple_Check(args.ptr())) {
  842. return py::reinterpret_borrow<py::object>(args);
  843. }
  844. py::tuple args_tup = py::reinterpret_borrow<py::tuple>(args.ptr());
  845. if (args_tup.size() == 1 &&
  846. (PySequence_Check(args_tup[0].ptr()) || is_tensor(args_tup[0].ptr()))) {
  847. return py::reinterpret_borrow<py::object>(args_tup[0]);
  848. } else {
  849. return py::reinterpret_steal<py::list>(PySequence_List(args_tup.ptr()));
  850. }
  851. }
  852. std::tuple<std::vector<int32_t>, bool> tuple2vector(py::object shape) {
  853. std::vector<int32_t> shp;
  854. if (!PyTuple_Check(shape.ptr())) {
  855. return {shp, false};
  856. }
  857. py::tuple tup = py::reinterpret_borrow<py::tuple>(shape);
  858. for (size_t i = 0; i < tup.size(); ++i) {
  859. if (!PyLong_Check(tup[i].ptr())) {
  860. shp.clear();
  861. return {shp, false};
  862. } else {
  863. shp.push_back(tup[i].cast<int32_t>());
  864. }
  865. }
  866. return {shp, true};
  867. }
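// The "fastpath" bakes shapes directly into op parameters instead of passing
// them as tensor inputs; it is disabled for VarNode inputs and whenever the
// trace or module-trace transformations are active.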
  868. bool enable_fastpath(py::handle inp) {
  869. auto&& tm_tr = TransformationManager::get_instance()
  870. .segments[TransformationManager::Segment::ModuleTrace];
  871. bool is_varnode = PyObject_TypeCheck(inp.ptr(), py_varnode_type);
  872. if (is_varnode ||
  873. TransformationManager::get_instance()
  874. .segments[TransformationManager::Segment::Trace]
  875. .size() > 0 ||
  876. (tm_tr.size() > 0 &&
  877. reinterpret_cast<ModuleTraceTransformation*>(tm_tr[0].get())->enabled())) {
  878. return false;
  879. }
  880. return true;
  881. }
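// Broadcasts `input` to a target shape. `None` entries are filled in from the
// input's own shape (counted from the right); when every entry is a plain int
// and the fastpath is enabled, the shape is stored inside Broadcast itself,
// otherwise it is passed as an additional 1-d tensor input.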
  882. py::object _broadcast_cpp(py::handle input, py::handle args) {
  883. py::object shape = _expand_args(args);
  884. py::list dims;
  885. bool all_imm;
  886. if (PyList_Check(shape.ptr()) || PyTuple_Check(shape.ptr())) {
  887. dims = py::reinterpret_steal<py::list>(PySequence_List(shape.ptr()));
  888. mgb_assert(!dims.is_none());
  889. all_imm = true;
  890. py::object inp_shape = py::none();
  891. size_t inp_ndim;
  892. for (size_t i = 0; i < dims.size(); ++i) {
  893. py::object dim = dims[i];
  894. if (dim.is_none()) {
  895. ptrdiff_t right = (ptrdiff_t)i - dims.size();
  896. if (inp_shape.is_none()) {
  897. inp_shape = input.attr("shape");
  898. mgb_assert(!inp_shape.is_none());
  899. inp_ndim = py::len(inp_shape);
  900. }
  901. if ((ptrdiff_t)inp_ndim + right < 0) {
  902. throw py::value_error("size cannot be `None` for new axis");
  903. }
  904. dim = inp_shape.attr("__getitem__")(right);
  905. dims[i] = dim;
  906. }
  907. if (py::int_::check_(dim)) {
  908. if (dim.cast<long>() < 0) {
  909. throw py::value_error(ssprintf(
  910. "expect shape[%zu] >= 0 or use `None` to auto infer, got "
  911. "%s",
  912. i, py::repr(dims[i]).cast<std::string>().c_str()));
  913. }
  914. } else {
  915. all_imm = false;
  916. }
  917. }
  918. shape = dims;
  919. } else {
  920. all_imm = false;
  921. }
  922. bool fastpath = all_imm && enable_fastpath(input);
  923. if ((!fastpath) && (!is_tensor(shape))) {
  924. shape = _astensor1d_cpp(
  925. shape, py::cast((mgb::DType)dtype::Int32()), input.attr("device"),
  926. input);
  927. }
  928. std::shared_ptr<OpDef> op;
  929. SmallVector<PyObject*> p(2);
  930. if (fastpath) {
  931. std::vector<int32_t> shape_vec;
  932. for (auto&& dim : dims) {
  933. shape_vec.push_back(dim.cast<long>());
  934. }
  935. op = Broadcast::make(shape_vec);
  936. } else {
  937. op = Broadcast::make();
  938. p.push_back(shape.ptr());
  939. }
  940. py::object py_op = py::cast(op);
  941. p[0] = py_op.ptr();
  942. p[1] = input.ptr();
  943. py::tuple ret =
  944. py::reinterpret_steal<py::object>(py_apply(NULL, p.data(), p.size()));
  945. return ret[0];
  946. }
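// Reshape with support for a single -1 ("unspecified") axis. On the fastpath
// the target shape is stored in the Reshape op; otherwise it is passed as a
// 1-d Int32 tensor built by _astensor1d_cpp.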
  947. py::object _reshape_cpp(py::handle inp_hdl, py::handle args) {
  948. py::object shape_hdl = _expand_args(args);
  949. py::object shape_tuple;
  950. try {
  951. shape_tuple = _make_shape_tuple(shape_hdl);
  952. } catch (py::error_already_set& err) {
  953. shape_tuple = py::reinterpret_borrow<py::object>(shape_hdl);
  954. }
  955. int32_t unspec_axis = -1;
  956. if (PyTuple_Check(shape_tuple.ptr())) {
  957. py::tuple tup = py::reinterpret_borrow<py::tuple>(shape_tuple);
  958. for (size_t i = 0; i < tup.size(); ++i) {
  959. py::object obj = py::reinterpret_borrow<py::object>(tup[i]);
  960. if (obj < py::int_(0)) {
  961. if (obj.not_equal(py::int_(-1))) {
  962. throw py::value_error(
  963. "expect shape [" + std::to_string(i) + "] >= -1, got " +
  964. repr(obj).cast<std::string>());
  965. }
  966. if (unspec_axis >= 0) {
  967. throw py::value_error(
  968. "multiple -1 in shape: " + std::to_string(unspec_axis) +
  969. " & " + std::to_string(i));
  970. }
  971. unspec_axis = i;
  972. }
  973. }
  974. }
  975. auto [shape, fastpath] = tuple2vector(shape_tuple);
  976. fastpath &= enable_fastpath(inp_hdl);
  977. std::shared_ptr<OpDef> op;
  978. std::vector<PyObject*> p;
  979. py::object shape_tensor;
  980. if (fastpath) {
  981. if (unspec_axis >= 0) {
  982. op = Reshape::make(unspec_axis, shape);
  983. } else {
  984. op = Reshape::make(::megdnn::param::OptionalAxisV1::INVALID_AXIS, shape);
  985. }
  986. p.resize(2);
  987. } else {
  988. shape.clear();
  989. if (unspec_axis >= 0) {
  990. op = Reshape::make(unspec_axis, shape);
  991. } else {
  992. op = Reshape::make();
  993. }
  994. shape_tensor = _astensor1d_cpp(
  995. shape_hdl, py::cast((mgb::DType)dtype::Int32()),
  996. getattr(inp_hdl, "device"), inp_hdl);
  997. p.resize(3);
  998. p[2] = shape_tensor.ptr();
  999. }
  1000. py::object Op = py::cast(op);
  1001. p[0] = Op.ptr();
  1002. p[1] = inp_hdl.ptr();
  1003. py::tuple ret =
  1004. py::reinterpret_steal<py::object>(py_apply(NULL, p.data(), p.size()));
  1005. return ret[0];
  1006. }
  1007. py::object _adaptive_pool2d_cpp(
  1008. py::handle inp_hdl, py::handle shape_val_hdl, py::handle pool_mode_hdl) {
  1009. py::object shape_hdl = py::reinterpret_borrow<py::object>(shape_val_hdl);
  1010. py::list shps(0);
  1011. auto mode_string = pool_mode_hdl.cast<std::string>();
  1012. ::megdnn::param::AdaptivePooling::Mode pool_mode =
  1013. ::megdnn::param::AdaptivePooling::Mode::MAX;
  1014. if (mode_string.compare(std::string("AVERAGE")) == 0) {
  1015. pool_mode = ::megdnn::param::AdaptivePooling::Mode::AVERAGE;
  1016. }
  1017. std::shared_ptr<OpDef> op;
  1018. std::vector<PyObject*> p;
  1019. auto pool_format = ::megdnn::param::AdaptivePooling::Format::NCHW;
  1020. auto inp_format = getattr(inp_hdl, "format").cast<std::string>();
  1021. if (inp_format == "nhwc") {
  1022. pool_format = ::megdnn::param::AdaptivePooling::Format::NHWC;
  1023. }
  1024. if (TensorWrapper::try_cast(shape_val_hdl.ptr())) {
  1025. std::vector<int32_t> shp;
  1026. op = AdaptivePooling::make(pool_mode, pool_format, shp);
  1027. py::object Op = py::cast(op);
  1028. p.resize(3);
  1029. p[0] = Op.ptr();
  1030. p[1] = inp_hdl.ptr();
  1031. p[2] = shape_val_hdl.ptr();
  1032. py::tuple ret =
  1033. py::reinterpret_steal<py::object>(py_apply(NULL, p.data(), p.size()));
  1034. return ret[0];
  1035. } else if (!PyTuple_Check(shape_val_hdl.ptr())) {
  1036. shps.append(PyLong_AsLong(shape_val_hdl.ptr()));
  1037. shps.append(PyLong_AsLong(shape_val_hdl.ptr()));
  1038. shape_hdl = py::reinterpret_borrow<py::object>(shps);
  1039. }
  1040. py::object shape_tuple;
  1041. try {
  1042. shape_tuple = _make_shape_tuple(shape_hdl);
  1043. } catch (py::error_already_set& err) {
  1044. shape_tuple = py::reinterpret_borrow<py::object>(shape_hdl);
  1045. }
  1046. auto [shape, fastpath] = tuple2vector(shape_tuple);
  1047. fastpath &= enable_fastpath(inp_hdl);
  1048. py::object shape_tensor;
  1049. op = AdaptivePooling::make(pool_mode, pool_format, shape);
  1050. if (fastpath) {
  1051. p.resize(2);
  1052. } else {
  1053. p.resize(3);
  1054. shape_tensor = _astensor1d_cpp(
  1055. shape_hdl, py::cast((mgb::DType)dtype::Int32()),
  1056. getattr(inp_hdl, "device"), inp_hdl);
  1057. p[2] = shape_tensor.ptr();
  1058. }
  1059. py::object Op = py::cast(op);
  1060. p[0] = Op.ptr();
  1061. p[1] = inp_hdl.ptr();
  1062. py::tuple ret =
  1063. py::reinterpret_steal<py::object>(py_apply(NULL, p.data(), p.size()));
  1064. return ret[0];
  1065. }
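// Read-side indexing: boolean masks whose shape matches the tensor are routed
// through CondTake (_try_cond_take); every other index is unpacked and
// executed as Subtensor or IndexingMultiAxisVec.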
  1066. py::object _getitem_cpp(py::handle inp_hdl, py::handle idx_hdl) {
  1067. py::tuple try_res = _try_cond_take(inp_hdl, idx_hdl);
  1068. if (try_res.size() == 2) {
  1069. return try_res[0];
  1070. }
  1071. py::tuple up = _unpack_indexes(inp_hdl, idx_hdl);
  1072. py::object tensor = py::reinterpret_borrow<py::object>(up[0]);
  1073. py::list tensors = py::reinterpret_borrow<py::list>(up[1]);
  1074. py::list py_items = py::reinterpret_borrow<py::list>(up[2]);
  1075. std::vector<std::tuple<int8_t, bool, bool, bool, bool>> cpp_items;
  1076. for (size_t i = 0; i < py_items.size(); ++i) {
  1077. py::list item = py::reinterpret_borrow<py::list>(py_items[i]);
  1078. cpp_items.push_back(
  1079. {item[0].cast<int8_t>(), item[1].cast<bool>(), item[2].cast<bool>(),
  1080. item[3].cast<bool>(), item[4].cast<bool>()});
  1081. }
  1082. std::shared_ptr<OpDef> op;
  1083. if (up[3].cast<bool>()) {
  1084. op = Subtensor::make(cpp_items);
  1085. } else {
  1086. op = IndexingMultiAxisVec::make(cpp_items);
  1087. }
  1088. std::vector<PyObject*> p;
  1089. p.resize(tensors.size() + 2);
  1090. py::object Op = py::cast(op);
  1091. p[0] = Op.ptr();
  1092. p[1] = tensor.ptr();
  1093. for (size_t i = 0; i < tensors.size(); ++i) {
  1094. p[i + 2] = tensors[i].ptr();
  1095. }
  1096. py::tuple ret =
  1097. py::reinterpret_steal<py::object>(py_apply(NULL, p.data(), p.size()));
  1098. return ret[0];
  1099. }
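// Write-side indexing: first computes the indexed view to learn its shape,
// checks that the value broadcasts to it, broadcasts, then applies
// SetSubtensor or IndexingSetMultiAxisVec; if bool-mask expansion reshaped the
// input, the result is reshaped back to the original shape at the end.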
  1100. py::object _setitem_cpp(py::handle inp_hdl, py::handle idx_hdl, py::handle val_hdl) {
  1101. py::object org_shape = getattr(inp_hdl, "shape");
  1102. py::object val = py::reinterpret_borrow<py::object>(val_hdl);
  1103. if (!TensorWrapper::try_cast(val.ptr())) {
  1104. val = _Const(val_hdl, getattr(inp_hdl, "dtype"), getattr(inp_hdl, "device"));
  1105. }
  1106. py::tuple up = _unpack_indexes(inp_hdl, idx_hdl);
  1107. py::object tensor = py::reinterpret_borrow<py::object>(up[0]);
  1108. py::list tensors = py::reinterpret_borrow<py::list>(up[1]);
  1109. py::list py_items = py::reinterpret_borrow<py::list>(up[2]);
  1110. std::vector<std::tuple<int8_t, bool, bool, bool, bool>> cpp_items;
  1111. for (size_t i = 0; i < py_items.size(); ++i) {
  1112. py::list item = py::reinterpret_borrow<py::list>(py_items[i]);
  1113. cpp_items.push_back(
  1114. {item[0].cast<int8_t>(), item[1].cast<bool>(), item[2].cast<bool>(),
  1115. item[3].cast<bool>(), item[4].cast<bool>()});
  1116. }
  1117. std::shared_ptr<OpDef> op, set_op;
  1118. if (up[3].cast<bool>()) {
  1119. op = Subtensor::make(cpp_items);
  1120. } else {
  1121. op = IndexingMultiAxisVec::make(cpp_items);
  1122. }
  1123. std::vector<PyObject*> p;
  1124. p.resize(tensors.size() + 2);
  1125. py::object Op = py::cast(op);
  1126. p[0] = Op.ptr();
  1127. p[1] = tensor.ptr();
  1128. for (size_t i = 0; i < tensors.size(); ++i) {
  1129. p[i + 2] = tensors[i].ptr();
  1130. }
  1131. py::tuple ret =
  1132. py::reinterpret_steal<py::object>(py_apply(NULL, p.data(), p.size()));
  1133. py::object tmp_result = ret[0];
  1134. try {
  1135. py::tuple value_shape =
  1136. py::reinterpret_borrow<py::tuple>(val.attr("_tuple_shape"));
  1137. py::tuple tmp_result_shape =
  1138. py::reinterpret_borrow<py::tuple>(tmp_result.attr("_tuple_shape"));
  1139. for (size_t i = 0; i < value_shape.size() && i < tmp_result_shape.size(); ++i) {
  1140. size_t vs = value_shape[value_shape.size() - i - 1].cast<size_t>();
  1141. size_t ts =
  1142. tmp_result_shape[tmp_result_shape.size() - i - 1].cast<size_t>();
  1143. if (vs != 1 && vs != ts) {
  1144. std::string lhs = "", rhs = "";
  1145. for (size_t j = 0; j < tmp_result_shape.size(); ++j) {
  1146. if (j)
  1147. lhs += ",";
  1148. lhs += std::to_string(tmp_result_shape[j].cast<size_t>());
  1149. }
  1150. for (size_t j = 0; j < value_shape.size(); ++j) {
  1151. if (j)
  1152. rhs += ",";
  1153. rhs += std::to_string(value_shape[j].cast<size_t>());
  1154. }
  1155. throw py::value_error(
  1156. "cannot copy tensor with shape (" + rhs +
  1157. ") to subtensor with shape (" + lhs + ")");
  1158. }
  1159. }
  1160. } catch (py::error_already_set& err) {
  1161. ;
  1162. }
  1163. val = _broadcast_cpp(val, getattr(tmp_result, "shape"));
  1164. if (up[3].cast<bool>()) {
  1165. set_op = SetSubtensor::make(cpp_items);
  1166. } else {
  1167. set_op = IndexingSetMultiAxisVec::make(cpp_items);
  1168. }
  1169. std::vector<PyObject*> q;
  1170. q.resize(tensors.size() + 3);
  1171. py::object Set_Op = py::cast(set_op);
  1172. q[0] = Set_Op.ptr();
  1173. q[1] = tensor.ptr();
  1174. q[2] = val.ptr();
  1175. for (size_t i = 0; i < tensors.size(); ++i) {
  1176. q[i + 3] = tensors[i].ptr();
  1177. }
  1178. py::tuple result =
  1179. py::reinterpret_steal<py::object>(py_apply(NULL, q.data(), q.size()));
  1180. py::object res = result[0];
  1181. if (up[4].cast<bool>()) {
  1182. res = _reshape_cpp(res, org_shape);
  1183. }
  1184. return res;
  1185. }
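// Splits along `axis` either into n_sections equal parts or at the given
// section boundaries; in the latter case the partition sizes are materialized
// as Int32 tensors and passed to Split as extra inputs.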
  1186. py::object _split_cpp(
  1187. py::handle inp_hdl, py::handle nsplits_or_sections_hdl, py::handle axis_hdl) {
  1188. py::object shape_obj = getattr(inp_hdl, "shape");
  1189. py::object n_total = shape_obj[axis_hdl];
  1190. int ndim = shape_obj.attr("__len__")().cast<int>();
  1191. int axis = axis_hdl.cast<int>();
  1192. if (axis >= ndim) {
  1193. throw py::value_error("Invalid axis " + std::to_string(axis));
  1194. }
  1195. int n_sections;
  1196. bool is_array;
  1197. if (is_py_sequence(nsplits_or_sections_hdl)) {
  1198. n_sections = PySequence_Length(nsplits_or_sections_hdl.ptr()) + 1;
  1199. is_array = true;
  1200. } else {
  1201. n_sections = getattr(nsplits_or_sections_hdl, "__int__")().cast<int>();
  1202. is_array = false;
  1203. }
  1204. py::list partitions;
  1205. std::shared_ptr<OpDef> op;
  1206. std::vector<PyObject*> p;
  1207. if (is_array) {
  1208. py::list div_points;
  1209. py::list sections = py::reinterpret_borrow<py::object>(nsplits_or_sections_hdl);
  1210. div_points.append(0);
  1211. for (size_t i = 0; i < sections.size(); ++i) {
  1212. div_points.append(sections[i]);
  1213. }
  1214. div_points.append(n_total);
  1215. for (size_t i = 1; i < div_points.size(); ++i) {
  1216. if (div_points[i - 1] > div_points[i]) {
  1217. throw py::value_error(
  1218. "Invalid nsplits_or_secions: " +
  1219. repr(nsplits_or_sections_hdl).cast<std::string>());
  1220. }
  1221. py::object pos = div_points[i] - div_points[i - 1];
  1222. if (is_tensor(pos)) {
  1223. partitions.append(pos);
  1224. } else {
  1225. partitions.append(
  1226. _Const(pos, py::cast((mgb::DType)dtype::Int32()),
  1227. getattr(inp_hdl, "device")));
  1228. }
  1229. }
  1230. op = Split::make(axis, 0);
  1231. p.resize(partitions.size() + 2);
  1232. for (size_t i = 0; i < partitions.size(); ++i) {
  1233. p[i + 2] = partitions[i].ptr();
  1234. }
  1235. } else {
  1236. if (n_sections <= 0) {
  1237. throw py::value_error("Number of sections must be larger than 0");
  1238. }
  1239. if (py::int_(n_sections) > n_total) {
  1240. throw py::value_error(
  1241. "The size " + repr(n_total).cast<std::string>() + " at dim " +
  1242. std::to_string(axis) + " cannot be split into " +
  1243. std::to_string(n_sections) + " sections");
  1244. }
  1245. op = Split::make(axis, n_sections);
  1246. p.resize(2);
  1247. }
  1248. py::object Op = py::cast(op);
  1249. p[0] = Op.ptr();
  1250. p[1] = inp_hdl.ptr();
  1251. return py::reinterpret_steal<py::object>(py_apply(NULL, p.data(), p.size()));
  1252. }
  1253. std::vector<int32_t> list2vector(py::handle li) {
  1254. std::vector<int32_t> axis;
  1255. if (is_py_sequence(li)) {
  1256. py::list tmp_list = py::reinterpret_steal<py::list>(PySequence_List(li.ptr()));
  1257. for (size_t i = 0; i < tmp_list.size(); ++i) {
  1258. axis.push_back(tmp_list[i].attr("__int__")().cast<int32_t>());
  1259. }
  1260. } else {
  1261. axis.push_back(getattr(li, "__int__")().cast<int32_t>());
  1262. }
  1263. return axis;
  1264. }
  1265. py::object _expand_dims_cpp(py::handle inp_hdl, py::handle axis_hdl) {
  1266. std::vector<int32_t> axis = list2vector(axis_hdl);
  1267. bool unknown_ndim = true;
  1268. size_t ndim = axis.size();
  1269. if (auto p = TensorWrapper::try_cast(inp_hdl.ptr())) {
  1270. auto&& shape = p->m_tensor->shape();
  1271. if (shape) {
  1272. unknown_ndim = false;
  1273. ndim += shape->ndim;
  1274. }
  1275. } else {
  1276. auto&& inp_ndim = get_ndim_safe(inp_hdl);
  1277. ndim += inp_ndim.first;
  1278. unknown_ndim &= !inp_ndim.second;
  1279. }
  1280. for (size_t i = 0; i < axis.size(); ++i) {
  1281. if (axis[i] < 0) {
  1282. if (unknown_ndim) {
  1283. throw py::index_error(
  1284. "Does not support negative index when tensor's ndim is "
  1285. "unknown");
  1286. }
  1287. axis[i] += static_cast<int32_t>(ndim);
  1288. }
  1289. }
  1290. if (!axis.size()) {
  1291. throw py::index_error("axis could not be empty");
  1292. }
  1293. std::sort(axis.begin(), axis.end());
  1294. std::shared_ptr<OpDef> op = AddAxis::make(axis = axis);
  1295. py::object Op = py::cast(op);
  1296. PyObject* p[2] = {Op.ptr(), inp_hdl.ptr()};
  1297. py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 2));
  1298. return ret[0];
  1299. }
  1300. py::object _squeeze_cpp(py::handle inp_hdl, py::handle axis_hdl) {
  1301. std::vector<int32_t> axis;
  1302. size_t ndim;
  1303. if (axis_hdl.ptr() != Py_None) {
  1304. axis = list2vector(axis_hdl);
  1305. }
  1306. if (auto p = TensorWrapper::try_cast(inp_hdl.ptr())) {
  1307. auto&& shape = p->m_tensor->shape();
  1308. if (shape) {
  1309. ndim = shape->ndim;
  1310. if (axis_hdl.ptr() == Py_None) {
  1311. for (size_t i = 0; i < shape->ndim; ++i) {
  1312. if (shape->shape[i] == 1) {
  1313. axis.push_back(i);
  1314. }
  1315. }
  1316. }
  1317. }
  1318. } else {
  1319. py::tuple shape =
  1320. py::reinterpret_borrow<py::tuple>(getattr(inp_hdl, "_tuple_shape"));
  1321. ndim = shape.size();
  1322. if (axis_hdl.ptr() == Py_None) {
  1323. for (size_t i = 0; i < shape.size(); ++i) {
  1324. if (shape[i].cast<size_t>() == 1) {
  1325. axis.push_back(i);
  1326. }
  1327. }
  1328. }
  1329. }
  1330. for (size_t i = 0; i < axis.size(); ++i) {
  1331. if (axis[i] < 0) {
  1332. axis[i] += static_cast<int32_t>(ndim);
  1333. }
  1334. }
  1335. std::sort(axis.begin(), axis.end());
  1336. for (size_t i = 0; i < axis.size(); ++i) {
  1337. axis[i] -= static_cast<int32_t>(i);
  1338. }
  1339. std::shared_ptr<OpDef> op = RemoveAxis::make(axis = axis);
  1340. py::object Op = py::cast(op);
  1341. PyObject* p[2] = {Op.ptr(), inp_hdl.ptr()};
  1342. py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 2));
  1343. return ret[0];
  1344. }
  1345. py::object _transpose_cpp(py::handle inp_hdl, py::handle args) {
  1346. py::object obj = _expand_args(args);
  1347. py::list lis;
  1348. if (!is_tensor(obj.ptr()) && PySequence_Check(obj.ptr())) {
  1349. lis = py::reinterpret_steal<py::list>(PySequence_List(obj.ptr()));
  1350. } else {
  1351. py::object np = getattr(obj, "numpy")();
  1352. PyArrayObject* arr = (PyArrayObject*)np.ptr();
  1353. PyObject* maybe_list = PyArray_ToList(arr);
  1354. if (PyList_Check(maybe_list)) {
  1355. lis = py::reinterpret_steal<py::list>(maybe_list);
  1356. }
  1357. }
  1358. if (get_ndim_safe(inp_hdl).first == 0) {
  1359. if (lis.size() != 0) {
  1360. throw py::index_error(
  1361. "transpose for scalar does not accept additional args");
  1362. }
  1363. return getattr(inp_hdl, "to")(getattr(inp_hdl, "device"));
  1364. }
  1365. std::vector<int32_t> pattern;
  1366. if (!lis.size()) {
  1367. size_t ndim = getattr(inp_hdl, "ndim").cast<size_t>();
  1368. for (size_t i = 0; i < ndim; ++i) {
  1369. pattern.push_back(ndim - i - 1);
  1370. }
  1371. } else {
  1372. for (size_t i = 0; i < lis.size(); ++i) {
  1373. if (PyLong_Check(lis[i].ptr())) {
  1374. pattern.push_back(lis[i].cast<int32_t>());
  1375. } else {
  1376. if (lis[i].cast<std::string>() == "x") {
  1377. pattern.push_back(-1);
  1378. }
  1379. }
  1380. }
  1381. }
  1382. std::shared_ptr<OpDef> op = Dimshuffle::make(pattern);
  1383. py::object Op = py::cast(op);
  1384. PyObject* p[2] = {Op.ptr(), inp_hdl.ptr()};
  1385. py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 2));
  1386. return ret[0];
  1387. }
  1388. py::object _matmul_cpp(
  1389. py::handle inp1, py::handle inp2, py::handle dim1, py::handle dim2,
  1390. py::handle transpose_a, py::handle transpose_b, py::handle compute_mode,
  1391. py::handle profile, py::handle deterministic) {
  1392. ::megdnn::param::MatrixMul::ComputeMode mode =
  1393. ::megdnn::param::MatrixMul::ComputeMode::DEFAULT;
  1394. if (compute_mode.cast<std::string>().compare(std::string("float32")) == 0) {
  1395. mode = ::megdnn::param::MatrixMul::ComputeMode::FLOAT32;
  1396. }
  1397. ::megdnn::param::ExecutionPolicy::Strategy cstrategy =
  1398. static_cast<::megdnn::param::ExecutionPolicy::Strategy>(0);
  1399. if (profile.cast<bool>()) {
  1400. cstrategy |= ::megdnn::param::ExecutionPolicy::Strategy::PROFILE;
  1401. } else {
  1402. cstrategy |= ::megdnn::param::ExecutionPolicy::Strategy::HEURISTIC;
  1403. }
  1404. if (deterministic.cast<bool>()) {
  1405. cstrategy |= ::megdnn::param::ExecutionPolicy::Strategy::REPRODUCIBLE;
  1406. }
  1407. std::shared_ptr<OpDef> op = MatrixMul::make(
  1408. transpose_a.cast<bool>(), transpose_b.cast<bool>(), mode,
  1409. ::megdnn::param::MatrixMul::Format::DEFAULT, cstrategy, UINT64_MAX,
  1410. dim1.cast<uint32_t>(), dim2.cast<uint32_t>());
  1411. py::object Op = py::cast(op);
  1412. PyObject* p[3] = {Op.ptr(), inp1.ptr(), inp2.ptr()};
  1413. py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 3));
  1414. return ret[0];
  1415. }
  1416. py::object _batched_matmul_cpp(
  1417. py::handle inp1, py::handle inp2, py::handle dim1, py::handle dim2,
  1418. py::handle transpose_a, py::handle transpose_b, py::handle compute_mode,
  1419. py::handle profile, py::handle deterministic) {
  1420. ::megdnn::param::MatrixMul::ComputeMode mode =
  1421. ::megdnn::param::MatrixMul::ComputeMode::DEFAULT;
  1422. if (compute_mode.cast<std::string>().compare(std::string("float32")) == 0) {
  1423. mode = ::megdnn::param::MatrixMul::ComputeMode::FLOAT32;
  1424. }
  1425. ::megdnn::param::ExecutionPolicy::Strategy cstrategy =
  1426. static_cast<::megdnn::param::ExecutionPolicy::Strategy>(0);
  1427. if (profile.cast<bool>()) {
  1428. cstrategy |= ::megdnn::param::ExecutionPolicy::Strategy::PROFILE;
  1429. } else {
  1430. cstrategy |= ::megdnn::param::ExecutionPolicy::Strategy::HEURISTIC;
  1431. }
  1432. if (deterministic.cast<bool>()) {
  1433. cstrategy |= ::megdnn::param::ExecutionPolicy::Strategy::REPRODUCIBLE;
  1434. }
  1435. std::shared_ptr<OpDef> op = BatchedMatrixMul::make(
  1436. transpose_a.cast<bool>(), transpose_b.cast<bool>(), mode,
  1437. ::megdnn::param::MatrixMul::Format::DEFAULT, cstrategy, UINT64_MAX,
  1438. dim1.cast<uint32_t>(), dim2.cast<uint32_t>());
  1439. py::object Op = py::cast(op);
  1440. PyObject* p[3] = {Op.ptr(), inp1.ptr(), inp2.ptr()};
  1441. py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 3));
  1442. return ret[0];
  1443. }
  1444. py::object _pixel_shuffle_cpp(py::handle inp, py::handle val, py::handle func) {
  1445. if (enable_fastpath(inp) && PyLong_Check(val.ptr())) {
  1446. std::shared_ptr<OpDef> op = PixelShuffle::make(val.cast<int32_t>());
  1447. py::object Op = py::cast(op);
  1448. PyObject* p[2] = {Op.ptr(), inp.ptr()};
  1449. py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 2));
  1450. return ret[0];
  1451. } else {
  1452. // fall back to the traceable subgraph implementation
  1453. return func(inp, val);
  1454. }
  1455. }
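// The wrappers below are the entry points exposed to Python (fastcall
// signature, presumably registered with METH_FASTCALL); each forwards to its
// _xxx_cpp helper and relies on PYEXT17_TRANSLATE_EXC_RET to turn C++
// exceptions into Python errors.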
  1456. PyObject* make_shape_tuple(PyObject* self, PyObject* const* args, size_t nargs) {
  1457. try {
  1458. return _make_shape_tuple(args[0]).release().ptr();
  1459. }
  1460. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1461. }
  1462. PyObject* getitem_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1463. try {
  1464. return _getitem_cpp(args[0], args[1]).release().ptr();
  1465. }
  1466. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1467. }
  1468. PyObject* setitem_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1469. try {
  1470. return _setitem_cpp(args[0], args[1], args[2]).release().ptr();
  1471. }
  1472. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1473. }
  1474. PyObject* split_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1475. try {
  1476. return _split_cpp(args[0], args[1], args[2]).release().ptr();
  1477. }
  1478. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1479. }
  1480. PyObject* expand_dims_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1481. try {
  1482. return _expand_dims_cpp(args[0], args[1]).release().ptr();
  1483. }
  1484. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1485. }
  1486. PyObject* squeeze_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1487. try {
  1488. return _squeeze_cpp(args[0], args[1]).release().ptr();
  1489. }
  1490. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1491. }
  1492. PyObject* transpose_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1493. try {
  1494. return _transpose_cpp(args[0], args[1]).release().ptr();
  1495. }
  1496. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1497. }
  1498. PyObject* broadcast_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1499. try {
  1500. return _broadcast_cpp(args[0], args[1]).release().ptr();
  1501. }
  1502. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1503. }
  1504. PyObject* reshape_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1505. try {
  1506. return _reshape_cpp(args[0], args[1]).release().ptr();
  1507. }
  1508. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1509. }
  1510. PyObject* adaptive_pool2d_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1511. try {
  1512. return _adaptive_pool2d_cpp(args[0], args[1], args[2]).release().ptr();
  1513. }
  1514. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1515. }
  1516. PyObject* pixel_shuffle_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1517. try {
  1518. return _pixel_shuffle_cpp(args[0], args[1], args[2]).release().ptr();
  1519. }
  1520. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1521. }
  1522. PyObject* Const(PyObject* self, PyObject* const* args, size_t nargs) {
  1523. try {
  1524. return _Const(args[0], args[1], args[2]).release().ptr();
  1525. }
  1526. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1527. }
  1528. PyObject* astype_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1529. try {
  1530. return _astype_cpp(args[0], args[1]).release().ptr();
  1531. }
  1532. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1533. }
  1534. PyObject* matmul_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1535. try {
  1536. return _matmul_cpp(
  1537. args[0], args[1], args[2], args[3], args[4], args[5], args[6],
  1538. args[7], args[8])
  1539. .release()
  1540. .ptr();
  1541. }
  1542. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1543. }
  1544. PyObject* batched_matmul_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1545. try {
  1546. return _batched_matmul_cpp(
  1547. args[0], args[1], args[2], args[3], args[4], args[5], args[6],
  1548. args[7], args[8])
  1549. .release()
  1550. .ptr();
  1551. }
  1552. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1553. }
  1554. PyObject* convert_single_value_cpp(
  1555. PyObject* self, PyObject* const* args, size_t nargs) {
  1556. try {
  1557. return _convert_single_value_cpp(args[0], args[1], args[2]).release().ptr();
  1558. }
  1559. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1560. }
  1561. PyObject* convert_inputs_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1562. try {
  1563. py::object dtype = py::reinterpret_steal<py::object>(
  1564. dtype_promotion(self, args, nargs - 1));
  1565. py::object device;
  1566. if (args[nargs - 1] == Py_None) {
  1567. device = py::reinterpret_steal<py::object>(
  1568. get_device(self, args, nargs - 1));
  1569. } else {
  1570. device = py::reinterpret_borrow<py::object>(args[nargs - 1]);
  1571. }
  1572. return _convert_inputs_cpp(args, nargs - 1, dtype, device).release().ptr();
  1573. }
  1574. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1575. }
  1576. PyObject* astensor1d_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1577. try {
  1578. return _astensor1d_cpp(args[0], args[1], args[2], args[3]).release().ptr();
  1579. }
  1580. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1581. }
  1582. } // namespace mgb::imperative::python