
tensor_utils.cpp

  1. #include "megbrain/common.h"
  2. #include "megbrain/dtype.h"
  3. #include "megbrain/imperative/ops/autogen.h"
  4. #include "megbrain/imperative/ops/backward_graph.h"
  5. #include "megbrain/imperative/ops/utility.h"
  6. #include "megbrain/imperative/profiler.h"
  7. #include "megbrain/imperative/transformations/eval.h"
  8. #include "megbrain/imperative/transformations/lazy.h"
  9. #include "megbrain/imperative/transformations/scalar.h"
  10. #include "megbrain/imperative/transformations/symbol.h"
  11. #include "megbrain/imperative/transformations/trace.h"
  12. #include "megbrain/imperative/utils/map.h"
  13. #include "megbrain/opr/io.h"
  14. #include "megbrain/plugin/profiler.h"
  15. #include "./common.h"
  16. #include "./grad.h"
  17. #include "./graph_rt.h"
  18. #include "./helper.h"
  19. #include "./module_trace.h"
  20. #include "./numpy_dtypes.h"
  21. #include "./tensor.h"
  22. #include "./tensor_utils.h"
  23. #include "./transformation.h"
  24. #include <object.h>
  25. #include <pybind11/numpy.h>
  26. #include <pybind11/operators.h>
  27. #include <pybind11/pytypes.h>
  28. #include <pyerrors.h>
  29. #include <range/v3/all.hpp>
  30. #include <string>
  31. #include <unordered_map>
  32. #include "../../src/impl/mgb_cg_impl.h"
  33. namespace py = pybind11;
  34. namespace views = ranges::views;
  35. namespace mgb::imperative::python {
  36. /* ============== convert inputs ============== */
  37. // map numpy.dtype.kind to priority
  38. inline uint8_t category_priority(char c) {
  39. switch (c) {
  40. case 'f':
  41. return 3; // floating-point
  42. case 'i':
  43. return 2; // signed integer
  44. case 'u':
  45. return 2; // unsigned integer
  46. case 'b':
  47. return 1; // boolean
  48. default:
  49. return 0;
  50. }
  51. }
  52. // Returns the maximum value of the priority of each type in the list `types`.
  53. uint8_t max_priority(SmallVector<PyArray_Descr*> types) {
  54. if (types.size() == 0) {
  55. return 0;
  56. } else {
  57. uint8_t max_p = 0;
  58. for (auto&& desc : types) {
  59. max_p = std::max(max_p, category_priority(desc->kind));
  60. }
  61. return max_p;
  62. }
  63. }
  64. // Returns the data type with sufficient size to hold all types of
  65. // category `cat` in the list `types`.
  66. PyArray_Descr* promote_types(SmallVector<PyArray_Descr*> types, uint8_t cat) {
  67. // Return value: New reference
  68. SmallVector<PyArray_Descr*> used_types;
  69. for (auto&& desc : types) {
  70. auto&& v = category_priority(desc->kind);
  71. if (v == cat) {
  72. used_types.emplace_back(desc);
  73. }
  74. }
  75. mgb_assert(used_types.size() > 0, "size of used_types is 0");
  76. PyArray_Descr* res = used_types[0];
  77. Py_INCREF(res);
  78. for (size_t i = 1; i < used_types.size(); ++i) {
  79. PyArray_Descr* tmp = PyArray_PromoteTypes(used_types[i], res);
  80. Py_DECREF(res);
  81. res = tmp;
  82. }
  83. return res;
  84. }
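// Maps a builtin Python scalar (bool, int, float) to its default numpy descriptor
// (bool_, int32, float32); returns nullptr for any other type.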
  85. PyArray_Descr* scalar2dtype(PyObject* arg) {
  86. // Return value: New reference
  87. if (PyBool_Check(arg)) {
  88. auto&& descr = PyArray_DescrFromType(NPY_BOOL);
  89. return descr;
  90. }
  91. if (PyLong_CheckExact(arg)) {
  92. auto&& descr = PyArray_DescrFromType(NPY_INT32);
  93. return descr;
  94. }
  95. if (PyFloat_CheckExact(arg)) {
  96. auto&& descr = PyArray_DescrFromType(NPY_FLOAT32);
  97. return descr;
  98. }
  99. return nullptr;
  100. }
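// Gathers numpy descriptors from every tensor-like argument (TensorWrapper, ndarray,
// numpy scalar) and every builtin scalar, then returns the promoted descriptor.
// A single tuple/list argument is unpacked first; tensor dtypes take precedence
// unless the scalars belong to a strictly higher category.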
  101. PyArray_Descr* _dtype_promotion(PyObject* const* args, size_t nargs) {
  102. // Return value: New reference
  103. SmallVector<PyArray_Descr*> tensors;
  104. SmallVector<PyArray_Descr*> scalars;
  105. bool is_tuple = false;
  106. PyObject* tuple = nullptr;
  107. if (nargs == 1 && (PyTuple_Check(args[0]) || PyList_Check(args[0]))) {
  108. if (PyList_Check(args[0])) {
  109. tuple = PyList_AsTuple(args[0]);
  110. } else {
  111. tuple = args[0];
  112. Py_INCREF(tuple);
  113. }
  114. nargs = PyTuple_Size(tuple);
  115. is_tuple = true;
  116. }
  117. for (size_t i = 0; i < nargs; ++i) {
  118. PyObject* handle = is_tuple ? PyTuple_GetItem(tuple, i) : args[i];
  119. if (handle == Py_None)
  120. continue;
  121. TensorWrapper* tw = TensorWrapper::try_cast(handle);
  122. if (tw) {
  123. mgb::DType type = tw->m_tensor->dtype();
  124. auto&& descr = npy::dtype_mgb2np_descr(type);
  125. Py_INCREF(descr.get());
  126. tensors.emplace_back(descr.get());
  127. } else {
  128. if (PyArray_Check(handle) || PyArray_CheckScalar(handle)) {
  129. auto&& descr = PyArray_DescrFromObject(handle, nullptr);
  130. tensors.emplace_back(descr);
  131. continue;
  132. }
  133. PyArray_Descr* descr = scalar2dtype(handle);
  134. if (descr) {
  135. scalars.emplace_back(descr);
  136. continue;
  137. }
  138. }
  139. }
  140. auto max_pri_scalars = max_priority(scalars);
  141. auto max_pri_tensors = max_priority(tensors);
  142. if (max_pri_scalars <= 0 && max_pri_tensors <= 0) {
  143. throw py::value_error("invalid input, no dtype available");
  144. }
  145. PyArray_Descr* res;
  146. if (max_pri_scalars > max_pri_tensors) {
  147. res = promote_types(scalars, max_pri_scalars);
  148. } else {
  149. res = promote_types(tensors, max_pri_tensors);
  150. }
  151. for (auto* p : tensors) {
  152. Py_DECREF(p);
  153. }
  154. for (auto* p : scalars) {
  155. Py_DECREF(p);
  156. }
  157. Py_XDECREF(tuple);
  158. return res;
  159. }
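// Returns the comp node shared by all tensor arguments (a single tuple/list is
// unpacked first); throws on conflicting devices and falls back to the default
// device when no tensor argument is present.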
  160. CompNode _get_device(PyObject* const* args, size_t nargs) {
  161. bool is_tuple = false;
  162. PyObject* tuple = nullptr;
  163. if (nargs == 1 && (PyTuple_Check(args[0]) || PyList_Check(args[0]))) {
  164. if (PyList_Check(args[0])) {
  165. tuple = PyList_AsTuple(args[0]);
  166. } else {
  167. tuple = args[0];
  168. Py_INCREF(tuple);
  169. }
  170. nargs = PyTuple_Size(tuple);
  171. is_tuple = true;
  172. }
  173. bool valid = false;
  174. CompNode cn;
  175. for (size_t i = 0; i < nargs; ++i) {
  176. PyObject* handle = is_tuple ? PyTuple_GetItem(tuple, i) : args[i];
  177. TensorWrapper* tw = TensorWrapper::try_cast(handle);
  178. if (tw) {
  179. if (!valid) {
  180. cn = tw->m_tensor->comp_node();
  181. valid = true;
  182. } else {
  183. CompNode cn1 = tw->m_tensor->comp_node();
  184. if (cn1 != cn) {
  185. throw py::value_error(ssprintf(
  186. "ambiguous device: %s (from %s) vs %s (from %s)",
  187. cn.to_string().c_str(), cn.to_string_logical().c_str(),
  188. cn1.to_string().c_str(), cn1.to_string_logical().c_str()));
  189. }
  190. }
  191. }
  192. }
  193. if (!valid) {
  194. Py_XDECREF(tuple); return CompNode::load(get_default_device());  // avoid leaking the temporary tuple
  195. }
  196. Py_XDECREF(tuple);
  197. return cn;
  198. }
  199. // Returns the dtype that would result from performing an arithmetic
  200. // operation on the provided input tensors and scalars.
  201. PyObject* dtype_promotion(PyObject* self, PyObject* const* args, size_t nargs) {
  202. if (!nargs) {
  203. PyErr_SetString(PyExc_TypeError, "empty input is not allowed");
  204. return nullptr;
  205. }
  206. try {
  207. PyArray_Descr* res = _dtype_promotion(args, nargs);
  208. return py::cast(npy::dtype_np2mgb_descr(res)).release().ptr();
  209. }
  210. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  211. }
  212. PyObject* get_device(PyObject* self, PyObject* const* args, size_t nargs) {
  213. if (!nargs) {
  214. PyErr_SetString(PyExc_TypeError, "empty input is not allowed");
  215. return nullptr;
  216. }
  217. try {
  218. CompNode cn = _get_device(args, nargs);
  219. return py::cast(cn).release().ptr();
  220. }
  221. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  222. }
  223. bool is_scalar(PyObject* tensor) {
  224. auto* tw = TensorWrapper::try_cast(tensor);
  225. if (tw) {
  226. return tw->m_tensor->is_scalar();
  227. }
  228. return PyArray_CheckAnyScalar(tensor);
  229. }
  230. bool is_bool_list(PyObject* arg) {
  231. if (!PyList_Check(arg)) {
  232. return false;
  233. }
  234. size_t sz = PyList_Size(arg);
  235. if (!sz) {
  236. return false;
  237. }
  238. for (size_t i = 0; i < sz; ++i) {
  239. PyObject* handle = PyList_GetItem(arg, i);
  240. if (!PyBool_Check(handle)) {
  241. return false;
  242. }
  243. }
  244. return true;
  245. }
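// True if `args` exposes a `dtype` attribute whose numpy kind is boolean.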
  246. bool is_bool_dtype(PyObject* args) {
  247. if (!PyObject_HasAttrString(args, "dtype"))
  248. return false;
  249. PyObject* dobj = PyObject_GetAttrString(args, "dtype");
  250. PyArray_Descr* dtype;
  251. if (!PyArray_DescrConverter(dobj, &dtype)) { PyErr_Clear(); Py_XDECREF(dobj); return false; }  // treat unconvertible dtypes as non-bool
  252. bool ret = (dtype->kind == 'b');
  253. Py_XDECREF(dtype);
  254. Py_XDECREF(dobj);
  255. return ret;
  256. }
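// Normalizes a device specifier (None, string, CompNode, or an object with `_cn`)
// into a py::object holding a CompNode; when `mapping` is set, a dmap_callback
// configured on the tensor type is applied to string names first.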
  257. py::object device2obj(py::handle device, bool mapping = false) {
  258. if (device.ptr() == Py_None) {
  259. return py::cast(CompNode::load(get_default_device()));
  260. } else if (py::isinstance<py::str>(device)) {
  261. if (mapping) {
  262. py::object dmap = getattr(
  263. py::reinterpret_borrow<py::object>((PyObject*)py_tensor_type),
  264. "dmap_callback");
  265. if (dmap.ptr() != Py_None) {
  266. return device2obj(dmap(device), false);
  267. }
  268. }
  269. return py::cast(CompNode::load(device.cast<std::string>()));
  270. } else if (py::isinstance<CompNode>(device)) {
  271. return py::reinterpret_borrow<py::object>(device);
  272. } else {
  273. return getattr(device, "_cn");
  274. }
  275. }
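// Builds a constant Tensor from `value` with the given dtype and device; numpy
// inputs with zero-stride (broadcast) axes are squeezed and reshaped back to their
// original shape before being wrapped.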
  276. py::object _Const(py::handle value, py::handle dtype, py::handle device) {
  277. py::object val = py::reinterpret_borrow<py::object>(value);
  278. if (PyArray_Check(value.ptr())) {
  279. py::tuple strides =
  280. py::reinterpret_borrow<py::tuple>(getattr(value, "strides"));
  281. bool need_squeeze = false;
  282. for (size_t i = 0; i < strides.size(); ++i) {
  283. if (strides[i].cast<ptrdiff_t>() == 0) {
  284. need_squeeze = true;
  285. }
  286. }
  287. if (need_squeeze) {
  288. val = py::reinterpret_borrow<py::array>(value);
  289. py::object orig_shp = val.attr("shape");
  290. val = val.attr("squeeze")();
  291. val = val.attr("reshape")(orig_shp);
  292. }
  293. }
  294. py::object device_obj = device2obj(device, true);
  295. py::tuple tup =
  296. py::make_tuple(val, dtype, device_obj, true, false, py::none(), py::none());
  297. return TensorWrapper::make(py_tensor_type, tup.ptr(), nullptr);
  298. }
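// Flattens a shape-like value (Tensor, ndarray, list, tuple or scalar) into a plain
// Python tuple of ints.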
  299. py::tuple _make_shape_tuple(py::handle shape) {
  300. py::list orig;
  301. py::list ret(0);
  302. auto solve_one = [&](py::handle val) {
  303. if (TensorWrapper::try_cast(val.ptr())) {
  304. py::object np = getattr(val, "numpy")();
  305. PyArrayObject* arr = (PyArrayObject*)np.ptr();
  306. PyObject* maybe_list = PyArray_ToList(arr);
  307. if (PyList_Check(maybe_list)) {
  308. py::list may = py::reinterpret_steal<py::list>(maybe_list);
  309. for (size_t i = 0; i < may.size(); ++i) {
  310. ret.append(may[i]);
  311. }
  312. } else {
  313. mgb_assert(PyLong_Check(maybe_list));
  314. ret.append(PyLong_AsLong(maybe_list));
  315. Py_XDECREF(maybe_list);
  316. }
  317. } else if (PyArray_Check(val.ptr())) {
  318. ret.append(PyArray_PyIntAsInt(val.ptr()));
  319. } else {
  320. ret.append(PyLong_AsLong(val.ptr()));
  321. }
  322. };
  323. if (PyArray_Check(shape.ptr()) && !PyArray_CheckAnyScalar(shape.ptr())) {
  324. orig = py::reinterpret_steal<py::list>(
  325. PyArray_ToList((PyArrayObject*)shape.ptr()));
  326. for (size_t i = 0; i < orig.size(); ++i) {
  327. solve_one(orig[i]);
  328. }
  329. } else if (PyList_Check(shape.ptr())) {
  330. orig = py::reinterpret_borrow<py::list>(shape);
  331. for (size_t i = 0; i < orig.size(); ++i) {
  332. solve_one(orig[i]);
  333. }
  334. } else if (PyTuple_Check(shape.ptr())) {
  335. py::tuple tup = py::reinterpret_borrow<py::tuple>(shape);
  336. for (size_t i = 0; i < tup.size(); ++i) {
  337. solve_one(tup[i]);
  338. }
  339. } else {
  340. solve_one(shape);
  341. }
  342. return py::reinterpret_steal<py::tuple>(PyList_AsTuple(ret.ptr()));
  343. }
  344. bool is_tensor(py::handle arg) {
  345. return bool(TensorWrapper::try_cast(arg.ptr()));
  346. }
  347. bool is_py_sequence(py::handle arg) {
  348. if (PyArray_Check(arg.ptr()) || TensorWrapper::try_cast(arg.ptr())) {
  349. return false;
  350. }
  351. return PySequence_Check(arg.ptr());
  352. }
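// Like _Const, but when the reference handle (or its first element) is a VarNode,
// the constant is created as an ImmutableTensor in the reference's computing graph
// and wrapped in the reference's type instead.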
  353. py::object get_res_by_refhdl(
  354. py::handle value, py::handle dtype, py::handle device, py::handle ref_hdl) {
  355. py::object res = _Const(value, dtype, device);
  356. py::object ref;
  357. if (py::isinstance<py::tuple>(ref_hdl)) {
  358. py::tuple tup = py::reinterpret_borrow<py::tuple>(ref_hdl);
  359. if (tup.size()) {
  360. ref = tup[0];
  361. } else {
  362. ref = py::none();
  363. }
  364. } else {
  365. ref = py::reinterpret_borrow<py::object>(ref_hdl);
  366. }
  367. if (PyObject_TypeCheck(ref.ptr(), py_varnode_type)) {
  368. auto temp = dtype.cast<mgb::DType>();
  369. ComputingGraph* graph = getattr(ref, "graph").cast<ComputingGraph*>();
  370. cg::VarNode* node = getattr(ref, "var").cast<cg::VarNode*>();
  371. CompNode cn;
  372. if (device.ptr() == Py_None) {
  373. cn = node->comp_node();
  374. } else {
  375. cn = device2obj(device).cast<CompNode>();
  376. }
  377. OperatorNodeConfig config(cn);
  378. auto hv = npy::np2tensor(
  379. value.ptr(), npy::Meth::borrow(cn), dtype.cast<mgb::DType>());
  380. auto typeobj = ref.get_type();
  381. return typeobj(opr::ImmutableTensor::make(*graph, hv, config).node());
  382. }
  383. return res;
  384. }
  385. mgb::DType _get_dtype(py::handle tensor) {
  386. auto tw = TensorWrapper::try_cast(tensor.ptr());
  387. return tw->m_tensor->dtype();
  388. }
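// Casts `tensor` to the dtype described by `dtype_hdl` via a TypeCvt op, returning
// the input unchanged when the dtype already matches.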
  389. py::object _astype_cpp(py::handle tensor, py::handle dtype_hdl) {
  390. PyArray_Descr* descr;
  391. if (!PyArray_DescrConverter(dtype_hdl.ptr(), &descr)) {
  392. throw py::value_error(ssprintf(
  393. "can not convert to numpy.dtype from %s",
  394. dtype_hdl.ptr()->ob_type->tp_name));
  395. }
  396. PyArray_Descr* cur = npy::dtype_mgb2np_descr(_get_dtype(tensor)).get();
  397. if (!dtype_equal(cur, descr)) {
  398. std::shared_ptr<OpDef> op = TypeCvt::make(npy::dtype_np2mgb_descr(descr));
  399. py::object Op = py::cast(op);
  400. PyObject* p[2] = {Op.ptr(), tensor.ptr()};
  401. py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 2));
  402. return ret[0];
  403. } else {
  404. return py::reinterpret_borrow<py::object>(tensor);
  405. }
  406. }
  407. py::object _convert_single_value_cpp(
  408. py::handle value, py::handle dtype, py::handle device) {
  409. if (is_tensor(value)) {
  410. if (_get_dtype(value).category() != DTypeCategory::QUANTIZED) {
  411. return _astype_cpp(value, dtype);
  412. }
  413. } else {
  414. return _Const(value, dtype, device);
  415. }
  416. return py::reinterpret_borrow<py::object>(value);
  417. }
  418. py::object _convert_inputs_cpp(
  419. PyObject* const* args, size_t nargs, py::object dtype, py::object device) {
  420. ComputingGraph* graph = nullptr;
  421. py::handle typeobj;
  422. py::list lis;
  423. for (size_t i = 0; i < nargs; ++i) {
  424. py::handle h = py::handle(args[i]);
  425. lis.append(h);
  426. }
  427. auto convert = [&](py::object value) {
  428. if (value.is_none()) {
  429. return value;
  430. }
  431. return _convert_single_value_cpp(value, dtype, device);
  432. };
  433. for (size_t i = 0; i < lis.size(); ++i) {
  434. lis[i] = convert(lis[i]);
  435. }
  436. return py::reinterpret_steal<py::tuple>(PyList_AsTuple(lis.ptr()));
  437. }
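// Converts `value` into a 0-d or 1-d tensor with optional dtype/device. VarNodes
// without a known ndim are cast/copied directly; scalars, 0/1-d tensors and plain
// sequences go through get_res_by_refhdl; sequences containing tensors are
// converted element-wise and joined with Concat along axis 0.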
  438. py::object _astensor1d_cpp(
  439. py::handle value, py::handle dtype, py::handle device, py::handle ref) {
  440. py::object ret;
  441. py::object device_obj = py::none();
  442. py::object ndim_obj = py::none();
  443. if (device.ptr() != Py_None) {
  444. device_obj = device2obj(device);
  445. }
  446. if (PyObject_TypeCheck(value.ptr(), py_varnode_type)) {
  447. try {
  448. getattr(value, "ndim");
  449. } catch (py::error_already_set& err) {
  450. if (dtype.ptr() != Py_None) {
  451. ret = _astype_cpp(value, dtype);
  452. } else {
  453. ret = py::reinterpret_borrow<py::object>(value);
  454. }
  455. if (device.ptr() != Py_None) {
  456. std::shared_ptr<OpDef> op = Copy::make(device_obj.cast<CompNode>());
  457. py::object Op = py::cast(op);
  458. PyObject* p[2] = {Op.ptr(), ret.ptr()};
  459. py::tuple copy_ret =
  460. py::reinterpret_steal<py::object>(py_apply(NULL, p, 2));
  461. return copy_ret[0];
  462. }
  463. return ret;
  464. }
  465. }
  466. size_t ndim = 999;
  467. if (hasattr(value, "ndim")) {
  468. ndim = getattr(value, "ndim").cast<size_t>();
  469. if (ndim != 0 && ndim != 1) {
  470. throw py::value_error("expected ndim to be 0 or 1, got: " + std::to_string(ndim));
  471. }
  472. if (!is_tensor(value)) {
  473. return get_res_by_refhdl(value, dtype, device, ref);
  474. } else {
  475. return py::reinterpret_borrow<py::object>(value);
  476. }
  477. }
  478. if (!is_py_sequence(value)) {
  479. throw py::type_error();
  480. }
  481. py::list lis = py::reinterpret_steal<py::list>(PySequence_List(value.ptr()));
  482. bool need_concat = false;
  483. for (size_t i = 0; i < lis.size(); ++i) {
  484. if (is_tensor(lis[i])) {
  485. need_concat = true;
  486. break;
  487. }
  488. }
  489. if (!need_concat) {
  490. return get_res_by_refhdl(value, dtype, device, ref);
  491. }
  492. if (lis.size() > 1) {
  493. std::vector<PyObject*> c_args(lis.size() + 1);
  494. for (size_t i = 0; i < lis.size(); ++i) {
  495. c_args[i] = lis[i].ptr();
  496. }
  497. c_args[lis.size()] = Py_None;
  498. py::tuple inp_tup = py::reinterpret_steal<py::tuple>(
  499. convert_inputs_cpp(NULL, c_args.data(), c_args.size()));
  500. if (device_obj.is_none()) {
  501. std::vector<PyObject*> inp(inp_tup.size());
  502. for (size_t i = 0; i < inp_tup.size(); ++i) {
  503. inp[i] = inp_tup[i].ptr();
  504. }
  505. device_obj = py::cast(_get_device(inp.data(), inp.size()));
  506. }
  507. std::shared_ptr<OpDef> op = Concat::make(0, device_obj.cast<CompNode>());
  508. py::object Op = py::cast(op);
  509. std::vector<PyObject*> p;
  510. p.resize(inp_tup.size() + 1);
  511. p[0] = Op.ptr();
  512. for (size_t i = 0; i < inp_tup.size(); ++i) {
  513. p[i + 1] = inp_tup[i].ptr();
  514. }
  515. py::tuple concat_ret =
  516. py::reinterpret_steal<py::object>(py_apply(NULL, p.data(), p.size()));
  517. ret = concat_ret[0];
  518. } else {
  519. ret = lis[0];
  520. }
  521. if (dtype.ptr() != Py_None) {
  522. return _astype_cpp(ret, dtype);
  523. } else {
  524. return ret;
  525. }
  526. }
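// Turns an index object into a tensor of indices usable by the indexing ops: plain
// Python indices are constant-folded (bool lists as Bool, everything else as Int32),
// and boolean masks are converted to element indices with CondTake.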
  527. py::object _get_index(py::object tensor, py::object src) {
  528. if (!TensorWrapper::try_cast(tensor.ptr())) {
  529. auto get_const = [&](mgb::DType dtype) -> py::object {
  530. return _Const(tensor, py::cast(dtype), src.attr("device"));
  531. };
  532. if (is_bool_list(tensor.ptr()) || is_bool_dtype(tensor.ptr())) {
  533. tensor = get_const(dtype::Bool());
  534. } else {
  535. tensor = get_const(dtype::Int32());
  536. }
  537. if (!is_bool_dtype(tensor.ptr())) {
  538. return tensor;
  539. }
  540. } else {
  541. if (!is_bool_dtype(tensor.ptr())) {
  542. return tensor;
  543. }
  544. }
  545. std::shared_ptr<OpDef> op = CondTake::make();
  546. py::object Op = py::cast(op);
  547. PyObject* p[3] = {Op.ptr(), tensor.ptr(), tensor.ptr()};
  548. py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 3));
  549. return ret[1];
  550. }
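// Fast path for boolean-mask indexing: when `index` is a bool mask whose shape
// matches `tensor`, returns the (values, indices) pair produced by CondTake;
// otherwise returns an empty tuple.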
  551. py::tuple _try_cond_take(py::handle tensor, py::handle index) {
  552. if (!hasattr(index, "dtype") || !hasattr(index, "shape")) {
  553. return py::tuple();
  554. }
  555. if (!is_bool_dtype(index.ptr()) ||
  556. _make_shape_tuple(getattr(index, "shape"))
  557. .not_equal(_make_shape_tuple(getattr(tensor, "shape")))) {
  558. return py::tuple();
  559. }
  560. py::object iobj;
  561. if (PyArray_Check(index.ptr())) {
  562. iobj = _Const(
  563. index, py::cast((mgb::DType)dtype::Bool()), getattr(tensor, "device"));
  564. } else {
  565. iobj = py::reinterpret_borrow<py::object>(index);
  566. }
  567. std::shared_ptr<OpDef> op = CondTake::make();
  568. py::object Op = py::cast(op);
  569. PyObject* p[3] = {Op.ptr(), tensor.ptr(), iobj.ptr()};
  570. py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 3));
  571. return ret;
  572. }
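// Replaces a single Ellipsis in the index tuple with enough full slices to cover the
// remaining dimensions; rejects multiple ellipses, bool indices of unknown ndim, and
// tensors whose ndim is unknown.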
  573. py::tuple _remove_ellipsis(py::object tensor, py::tuple tuple_val) {
  574. size_t tuple_size = tuple_val.size();
  575. size_t ndim_sum = 0, cur_sum = 0;
  576. int pos = -1;
  577. bool has_unknown_ndim_bool_index = false;
  578. for (size_t i = 0; i < tuple_size; ++i) {
  579. py::object handle = tuple_val[i];
  580. if (handle.is_none()) {
  581. continue;
  582. } else if (handle.ptr() == Py_Ellipsis) {
  583. pos = static_cast<int>(i);
  584. for (size_t j = 0; j < i; ++j) {
  585. py::object t = tuple_val[j];
  586. if (t.ptr() == Py_Ellipsis) {
  587. throw py::index_error("only one ellipsis is allowed.");
  588. }
  589. }
  590. } else {
  591. size_t ndim_incr = 1;
  592. if (hasattr(handle, "dtype") && is_bool_dtype(handle.ptr()) &&
  593. hasattr(handle, "ndim")) {
  594. py::object ndim;
  595. try {
  596. ndim = getattr(handle, "ndim");
  597. } catch (py::error_already_set& err) {
  598. has_unknown_ndim_bool_index = true;
  599. }
  600. if (ndim.ptr() && PyLong_Check(ndim.ptr())) {  // ndim may be null if getattr raised
  601. ndim_incr = PyLong_AsLong(ndim.ptr());
  602. } else {
  603. has_unknown_ndim_bool_index = true;
  604. }
  605. }
  606. cur_sum += ndim_incr;
  607. }
  608. }
  609. if (pos == -1) {
  610. return tuple_val;
  611. } else {
  612. if (has_unknown_ndim_bool_index) {
  613. throw py::index_error(
  614. "does not support bool index with unknown shape when using "
  615. "Ellipsis.");
  616. }
  617. try {
  618. ndim_sum = getattr(tensor, "ndim").cast<size_t>();
  619. } catch (py::error_already_set& err) {
  620. throw py::index_error(
  621. "does not support Ellipsis when tensor's ndim is unknown.");
  622. }
  623. py::tuple ret(ndim_sum - cur_sum + tuple_size - 1);
  624. size_t idx = 0;
  625. for (size_t i = 0; i < tuple_size; ++i) {
  626. if (i == pos) {
  627. for (size_t j = cur_sum; j < ndim_sum; ++j) {
  628. ret[idx++] = PySlice_New(NULL, NULL, NULL);
  629. }
  630. } else {
  631. ret[idx++] = tuple_val[i];
  632. }
  633. }
  634. return ret;
  635. }
  636. }
  637. py::object _reshape_cpp(py::handle inp_hdl, py::handle args);
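// Handles multi-dimensional boolean masks in the index tuple: checks each mask
// against the tensor shape, reshapes the tensor so the masked axes are collapsed
// into one, and flattens the mask accordingly; returns the (possibly reshaped)
// tensor together with the rewritten index tuple.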
  638. py::tuple _expand_bool_dim(py::object tensor, py::tuple tuple_val) {
  639. py::tuple cur_shape = _make_shape_tuple(py::handle(getattr(tensor, "shape")));
  640. py::list new_tuple_val(0);
  641. size_t offset = 0;
  642. size_t tdim = 0;
  643. size_t nonedim = 0;
  644. for (size_t i = 0; i < tuple_val.size(); ++i) {
  645. py::handle k = tuple_val[i];
  646. if (k.ptr() == Py_None) {
  647. nonedim++;
  648. new_tuple_val.append(k);
  649. continue;
  650. }
  651. if (is_bool_dtype(k.ptr())) {
  652. size_t ndim = getattr(k, "ndim").cast<size_t>();
  653. if (ndim > 1) {
  654. py::tuple ishape = _make_shape_tuple(py::handle(getattr(k, "shape")));
  655. for (size_t j = 0; j < ndim; ++j) {
  656. if (cur_shape[tdim + j - offset].cast<size_t>() !=
  657. ishape[j].cast<size_t>()) {
  658. std::string msg =
  659. "boolean index did not match tensor along "
  660. "dimension " +
  661. std::to_string(tdim + j) + "; dimension is " +
  662. std::to_string(
  663. cur_shape[tdim + j - offset].cast<size_t>()) +
  664. " but corresponding boolean dimension is " +
  665. std::to_string(ishape[j].cast<size_t>());
  666. throw py::index_error(msg.c_str());
  667. }
  668. }
  669. py::object new_k = getattr(k, "reshape")(-1);
  670. py::object kshape = getattr(new_k, "shape");
  671. py::list new_shape(0);
  672. PyObject* sym = PyObject_CallObject(cpp_use_symbolic_shape, nullptr);
  673. bool is_sym = (sym == Py_True);
  674. Py_XDECREF(sym);
  675. if (is_sym) {
  676. py::object tshape = getattr(tensor, "shape");
  677. for (size_t j = 0; j < i - nonedim; ++j) {
  678. new_shape.append(tshape[py::int_(j)]);
  679. }
  680. new_shape.append(kshape[py::int_(0)]);
  681. for (size_t j = tdim + ndim - offset; j < cur_shape.size(); ++j) {
  682. new_shape.append(cur_shape[j]);
  683. }
  684. py::object shape_tensor = _astensor1d_cpp(
  685. new_shape, py::none(), py::none(), py::none());
  686. tensor = _reshape_cpp(tensor, shape_tensor);
  687. cur_shape = _make_shape_tuple(shape_tensor);
  688. } else {
  689. for (size_t j = 0; j < i - nonedim; ++j) {
  690. new_shape.append(cur_shape[j]);
  691. }
  692. new_shape.append(py::reinterpret_borrow<py::tuple>(kshape)[0]);
  693. for (size_t j = tdim + ndim - offset; j < cur_shape.size(); ++j) {
  694. new_shape.append(cur_shape[j]);
  695. }
  696. cur_shape = new_shape;
  697. tensor = _reshape_cpp(tensor, cur_shape);
  698. }
  699. offset++;
  700. tdim += ndim;
  701. }
  702. new_tuple_val.append(k);
  703. } else {
  704. new_tuple_val.append(k);
  705. tdim++;
  706. }
  707. }
  708. return py::make_tuple(tensor, py::reinterpret_borrow<py::tuple>(new_tuple_val));
  709. }
  710. std::pair<size_t, bool> get_ndim_safe(py::handle tensor) {
  711. if (auto p = TensorWrapper::try_cast(tensor.ptr())) {
  712. return {p->m_tensor->shape()->ndim, true};
  713. }
  714. try {
  715. return {getattr(tensor, "ndim").cast<size_t>(), true};
  716. } catch (py::error_already_set& err) {
  717. return {0, false};
  718. }
  719. }
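// Normalizes an indexing expression: expands Ellipsis and multi-dim bool masks,
// inserts new axes for None entries via AddAxis, and encodes each remaining index as
// (axis, has_begin, has_end, has_step, has_index) plus the corresponding index
// tensors. Returns (tensor, index tensors, items, use_subtensor, bool_dim_expanded).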
  720. py::tuple _unpack_indexes(py::handle inp_hdl, py::handle idx_hdl) {
  721. py::object inp = py::reinterpret_borrow<py::object>(inp_hdl);
  722. py::tuple tuple_val;
  723. if (py::isinstance<py::tuple>(idx_hdl)) {
  724. tuple_val = py::reinterpret_borrow<py::tuple>(idx_hdl);
  725. } else {
  726. tuple_val = py::make_tuple(idx_hdl);
  727. }
  728. bool use_subtensor = true;
  729. bool need_remove_ellipsis = false;
  730. bool need_expand_bool_dim = false;
  731. size_t idx_ndim = 0;
  732. for (size_t i = 0; i < tuple_val.size(); ++i) {
  733. py::object k = tuple_val[i];
  734. if (k.is_none()) {
  735. continue;
  736. } else if (k.ptr() == Py_Ellipsis) {
  737. need_remove_ellipsis = true;
  738. } else {
  739. if (is_bool_dtype(k.ptr()) && hasattr(k, "ndim")) {
  740. size_t ndim = get_ndim_safe(k).first;
  741. idx_ndim += ndim;
  742. if (ndim > 1) {
  743. need_expand_bool_dim = true;
  744. }
  745. } else {
  746. idx_ndim++;
  747. }
  748. }
  749. }
  750. try {
  751. size_t inp_ndim = getattr(inp, "ndim").cast<size_t>();
  752. if (idx_ndim > inp_ndim) {
  753. std::string msg = "too many indices for tensor: tensor is " +
  754. std::to_string(inp_ndim) + "-dimensional, but " +
  755. std::to_string(idx_ndim) + " were indexed";
  756. throw py::index_error(msg.c_str());
  757. }
  758. } catch (py::error_already_set& err) {
  759. ; // ignore
  760. }
  761. if (need_remove_ellipsis) {
  762. tuple_val = _remove_ellipsis(inp, tuple_val);
  763. }
  764. if (need_expand_bool_dim) {
  765. py::object shape = getattr(inp, "shape");
  766. if (shape.ptr() != Py_None) {
  767. py::tuple ret = _expand_bool_dim(inp, tuple_val);
  768. inp = ret[0];
  769. tuple_val = ret[1];
  770. }
  771. }
  772. std::vector<int32_t> axis;
  773. for (size_t i = 0; i < tuple_val.size(); ++i) {
  774. if (tuple_val[i].is_none()) {
  775. axis.push_back(i);
  776. }
  777. }
  778. if (axis.size()) {
  779. std::shared_ptr<OpDef> op = AddAxis::make(axis);
  780. py::object Op = py::cast(op);
  781. PyObject* p[2] = {Op.ptr(), inp.ptr()};
  782. py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 2));
  783. inp = ret[0];
  784. }
  785. py::list items;
  786. py::list tensors;
  787. int cur_axis = -1;
  788. for (size_t i = 0; i < tuple_val.size(); ++i) {
  789. py::object handle = tuple_val[i];
  790. cur_axis++;
  791. if (handle.is_none()) {
  792. continue;
  793. }
  794. if (!is_scalar(handle.ptr()) && !PySlice_Check(handle.ptr())) {
  795. use_subtensor = false;
  796. }
  797. py::list item;
  798. item.append(cur_axis);
  799. auto push = [&](PyObject* v) {
  800. if (v == Py_None) {
  801. item.append(false);
  802. } else {
  803. item.append(true);
  804. tensors.append(_get_index(py::reinterpret_borrow<py::object>(v), inp));
  805. }
  806. };
  807. if (PySlice_Check(handle.ptr())) {
  808. PySliceObject* s = (PySliceObject*)handle.ptr();
  809. if (s->start == Py_None && s->stop == Py_None && s->step == Py_None) {
  810. continue;
  811. }
  812. push(s->start);
  813. push(s->stop);
  814. push(s->step);
  815. item.append(false);
  816. } else {
  817. for (size_t j = 0; j < 3; j++)
  818. item.append(false);
  819. push(handle.ptr());
  820. }
  821. items.append(item);
  822. }
  823. return py::make_tuple(inp, tensors, items, use_subtensor, need_expand_bool_dim);
  824. }
  825. py::object _expand_args(py::handle args) {
  826. if (!PyTuple_Check(args.ptr())) {
  827. return py::reinterpret_borrow<py::object>(args);
  828. }
  829. py::tuple args_tup = py::reinterpret_borrow<py::tuple>(args.ptr());
  830. if (args_tup.size() == 1 &&
  831. (PySequence_Check(args_tup[0].ptr()) || is_tensor(args_tup[0].ptr()))) {
  832. return py::reinterpret_borrow<py::object>(args_tup[0]);
  833. } else {
  834. return py::reinterpret_steal<py::list>(PySequence_List(args_tup.ptr()));
  835. }
  836. }
  837. std::tuple<std::vector<int32_t>, bool> tuple2vector(py::object shape) {
  838. std::vector<int32_t> shp;
  839. if (!PyTuple_Check(shape.ptr())) {
  840. return {shp, false};
  841. }
  842. py::tuple tup = py::reinterpret_borrow<py::tuple>(shape);
  843. for (size_t i = 0; i < tup.size(); ++i) {
  844. if (!PyLong_Check(tup[i].ptr())) {
  845. shp.clear();
  846. return {shp, false};
  847. } else {
  848. shp.push_back(tup[i].cast<int32_t>());
  849. }
  850. }
  851. return {shp, true};
  852. }
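// The immediate-shape fast path is disabled for VarNode inputs and whenever tracing
// or an active module-trace transformation is installed.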
  853. bool enable_fastpath(py::handle inp) {
  854. auto&& tm_tr = TransformationManager::get_instance()
  855. .segments[TransformationManager::Segment::ModuleTrace];
  856. bool is_varnode = PyObject_TypeCheck(inp.ptr(), py_varnode_type);
  857. if (is_varnode ||
  858. TransformationManager::get_instance()
  859. .segments[TransformationManager::Segment::Trace]
  860. .size() > 0 ||
  861. (tm_tr.size() > 0 &&
  862. reinterpret_cast<ModuleTraceTransformation*>(tm_tr[0].get())->enabled())) {
  863. return false;
  864. }
  865. return true;
  866. }
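// broadcast_to: `None` entries in the target shape are filled in from the input's
// own shape; with an all-immediate shape and the fast path enabled, Broadcast takes
// the shape as a parameter, otherwise the shape is passed as a 1-d Int32 tensor.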
  867. py::object _broadcast_cpp(py::handle input, py::handle args) {
  868. py::object shape = _expand_args(args);
  869. py::list dims;
  870. bool all_imm;
  871. if (PyList_Check(shape.ptr()) || PyTuple_Check(shape.ptr())) {
  872. dims = py::reinterpret_steal<py::list>(PySequence_List(shape.ptr()));
  873. mgb_assert(!dims.is_none());
  874. all_imm = true;
  875. py::object inp_shape = py::none();
  876. size_t inp_ndim;
  877. for (size_t i = 0; i < dims.size(); ++i) {
  878. py::object dim = dims[i];
  879. if (dim.is_none()) {
  880. ptrdiff_t right = (ptrdiff_t)i - (ptrdiff_t)dims.size();
  881. if (inp_shape.is_none()) {
  882. inp_shape = input.attr("shape");
  883. mgb_assert(!inp_shape.is_none());
  884. inp_ndim = py::len(inp_shape);
  885. }
  886. if ((ptrdiff_t)inp_ndim + right < 0) {
  887. throw py::value_error("size cannot be `None` for new axis");
  888. }
  889. dim = inp_shape.attr("__getitem__")(right);
  890. dims[i] = dim;
  891. }
  892. if (py::int_::check_(dim)) {
  893. if (dim.cast<long>() < 0) {
  894. throw py::value_error(ssprintf(
  895. "expect shape[%zu] >= 0 or use `None` to auto infer, got "
  896. "%s",
  897. i, py::repr(dims[i]).cast<std::string>().c_str()));
  898. }
  899. } else {
  900. all_imm = false;
  901. }
  902. }
  903. shape = dims;
  904. } else {
  905. all_imm = false;
  906. }
  907. bool fastpath = all_imm && enable_fastpath(input);
  908. if ((!fastpath) && (!is_tensor(shape))) {
  909. shape = _astensor1d_cpp(
  910. shape, py::cast((mgb::DType)dtype::Int32()), input.attr("device"),
  911. input);
  912. }
  913. std::shared_ptr<OpDef> op;
  914. SmallVector<PyObject*> p(2);
  915. if (fastpath) {
  916. std::vector<int32_t> shape_vec;
  917. for (auto&& dim : dims) {
  918. shape_vec.push_back(dim.cast<long>());
  919. }
  920. op = Broadcast::make(shape_vec);
  921. } else {
  922. op = Broadcast::make();
  923. p.push_back(shape.ptr());
  924. }
  925. py::object py_op = py::cast(op);
  926. p[0] = py_op.ptr();
  927. p[1] = input.ptr();
  928. py::tuple ret =
  929. py::reinterpret_steal<py::object>(py_apply(NULL, p.data(), p.size()));
  930. return ret[0];
  931. }
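// reshape: validates the target shape (entries >= -1, at most one -1 used as the
// unspecified axis) and applies Reshape, either with an immediate shape on the fast
// path or with a 1-d Int32 shape tensor.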
  932. py::object _reshape_cpp(py::handle inp_hdl, py::handle args) {
  933. py::object shape_hdl = _expand_args(args);
  934. py::object shape_tuple;
  935. try {
  936. shape_tuple = _make_shape_tuple(shape_hdl);
  937. } catch (py::error_already_set& err) {
  938. shape_tuple = py::reinterpret_borrow<py::object>(shape_hdl);
  939. }
  940. int32_t unspec_axis = -1;
  941. if (PyTuple_Check(shape_tuple.ptr())) {
  942. py::tuple tup = py::reinterpret_borrow<py::tuple>(shape_tuple);
  943. for (size_t i = 0; i < tup.size(); ++i) {
  944. py::object obj = py::reinterpret_borrow<py::object>(tup[i]);
  945. if (obj < py::int_(0)) {
  946. if (obj.not_equal(py::int_(-1))) {
  947. throw py::value_error(
  948. "expect shape [" + std::to_string(i) + "] >= -1, got " +
  949. repr(obj).cast<std::string>());
  950. }
  951. if (unspec_axis >= 0) {
  952. throw py::value_error(
  953. "multiple -1 in shape: " + std::to_string(unspec_axis) +
  954. " & " + std::to_string(i));
  955. }
  956. unspec_axis = i;
  957. }
  958. }
  959. }
  960. auto [shape, fastpath] = tuple2vector(shape_tuple);
  961. fastpath &= enable_fastpath(inp_hdl);
  962. std::shared_ptr<OpDef> op;
  963. std::vector<PyObject*> p;
  964. py::object shape_tensor;
  965. if (fastpath) {
  966. if (unspec_axis >= 0) {
  967. op = Reshape::make(unspec_axis, shape);
  968. } else {
  969. op = Reshape::make(::megdnn::param::OptionalAxisV1::INVALID_AXIS, shape);
  970. }
  971. p.resize(2);
  972. } else {
  973. shape.clear();
  974. if (unspec_axis >= 0) {
  975. op = Reshape::make(unspec_axis, shape);
  976. } else {
  977. op = Reshape::make();
  978. }
  979. shape_tensor = _astensor1d_cpp(
  980. shape_hdl, py::cast((mgb::DType)dtype::Int32()),
  981. getattr(inp_hdl, "device"), inp_hdl);
  982. p.resize(3);
  983. p[2] = shape_tensor.ptr();
  984. }
  985. py::object Op = py::cast(op);
  986. p[0] = Op.ptr();
  987. p[1] = inp_hdl.ptr();
  988. py::tuple ret =
  989. py::reinterpret_steal<py::object>(py_apply(NULL, p.data(), p.size()));
  990. return ret[0];
  991. }
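// adaptive_pool2d: selects MAX or AVERAGE mode and NCHW/NHWC format from the input,
// accepts the target size as a tensor, a single integer (used for both output
// dimensions) or a tuple, and applies AdaptivePooling with an immediate or
// tensor-valued shape.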
  992. py::object _adaptive_pool2d_cpp(
  993. py::handle inp_hdl, py::handle shape_val_hdl, py::handle pool_mode_hdl) {
  994. py::object shape_hdl = py::reinterpret_borrow<py::object>(shape_val_hdl);
  995. py::list shps(0);
  996. auto mode_string = pool_mode_hdl.cast<std::string>();
  997. ::megdnn::param::AdaptivePooling::Mode pool_mode =
  998. ::megdnn::param::AdaptivePooling::Mode::MAX;
  999. if (mode_string.compare(std::string("AVERAGE")) == 0) {
  1000. pool_mode = ::megdnn::param::AdaptivePooling::Mode::AVERAGE;
  1001. }
  1002. std::shared_ptr<OpDef> op;
  1003. std::vector<PyObject*> p;
  1004. auto pool_format = ::megdnn::param::AdaptivePooling::Format::NCHW;
  1005. auto inp_format = getattr(inp_hdl, "format").cast<std::string>();
  1006. if (inp_format == "nhwc") {
  1007. pool_format = ::megdnn::param::AdaptivePooling::Format::NHWC;
  1008. }
  1009. if (TensorWrapper::try_cast(shape_val_hdl.ptr())) {
  1010. std::vector<int32_t> shp;
  1011. op = AdaptivePooling::make(pool_mode, pool_format, shp);
  1012. py::object Op = py::cast(op);
  1013. p.resize(3);
  1014. p[0] = Op.ptr();
  1015. p[1] = inp_hdl.ptr();
  1016. p[2] = shape_val_hdl.ptr();
  1017. py::tuple ret =
  1018. py::reinterpret_steal<py::object>(py_apply(NULL, p.data(), p.size()));
  1019. return ret[0];
  1020. } else if (!PyTuple_Check(shape_val_hdl.ptr())) {
  1021. shps.append(PyLong_AsLong(shape_val_hdl.ptr()));
  1022. shps.append(PyLong_AsLong(shape_val_hdl.ptr()));
  1023. shape_hdl = py::reinterpret_borrow<py::object>(shps);
  1024. }
  1025. py::object shape_tuple;
  1026. try {
  1027. shape_tuple = _make_shape_tuple(shape_hdl);
  1028. } catch (py::error_already_set& err) {
  1029. shape_tuple = py::reinterpret_borrow<py::object>(shape_hdl);
  1030. }
  1031. auto [shape, fastpath] = tuple2vector(shape_tuple);
  1032. fastpath &= enable_fastpath(inp_hdl);
  1033. py::object shape_tensor;
  1034. op = AdaptivePooling::make(pool_mode, pool_format, shape);
  1035. if (fastpath) {
  1036. p.resize(2);
  1037. } else {
  1038. p.resize(3);
  1039. shape_tensor = _astensor1d_cpp(
  1040. shape_hdl, py::cast((mgb::DType)dtype::Int32()),
  1041. getattr(inp_hdl, "device"), inp_hdl);
  1042. p[2] = shape_tensor.ptr();
  1043. }
  1044. py::object Op = py::cast(op);
  1045. p[0] = Op.ptr();
  1046. p[1] = inp_hdl.ptr();
  1047. py::tuple ret =
  1048. py::reinterpret_steal<py::object>(py_apply(NULL, p.data(), p.size()));
  1049. return ret[0];
  1050. }
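// __getitem__: first tries the boolean-mask fast path (_try_cond_take), then unpacks
// the indices and applies Subtensor or IndexingMultiAxisVec.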
  1051. py::object _getitem_cpp(py::handle inp_hdl, py::handle idx_hdl) {
  1052. py::tuple try_res = _try_cond_take(inp_hdl, idx_hdl);
  1053. if (try_res.size() == 2) {
  1054. return try_res[0];
  1055. }
  1056. py::tuple up = _unpack_indexes(inp_hdl, idx_hdl);
  1057. py::object tensor = py::reinterpret_borrow<py::object>(up[0]);
  1058. py::list tensors = py::reinterpret_borrow<py::list>(up[1]);
  1059. py::list py_items = py::reinterpret_borrow<py::list>(up[2]);
  1060. std::vector<std::tuple<int8_t, bool, bool, bool, bool>> cpp_items;
  1061. for (size_t i = 0; i < py_items.size(); ++i) {
  1062. py::list item = py::reinterpret_borrow<py::list>(py_items[i]);
  1063. cpp_items.push_back(
  1064. {item[0].cast<int8_t>(), item[1].cast<bool>(), item[2].cast<bool>(),
  1065. item[3].cast<bool>(), item[4].cast<bool>()});
  1066. }
  1067. std::shared_ptr<OpDef> op;
  1068. if (up[3].cast<bool>()) {
  1069. op = Subtensor::make(cpp_items);
  1070. } else {
  1071. op = IndexingMultiAxisVec::make(cpp_items);
  1072. }
  1073. std::vector<PyObject*> p;
  1074. p.resize(tensors.size() + 2);
  1075. py::object Op = py::cast(op);
  1076. p[0] = Op.ptr();
  1077. p[1] = tensor.ptr();
  1078. for (size_t i = 0; i < tensors.size(); ++i) {
  1079. p[i + 2] = tensors[i].ptr();
  1080. }
  1081. py::tuple ret =
  1082. py::reinterpret_steal<py::object>(py_apply(NULL, p.data(), p.size()));
  1083. return ret[0];
  1084. }
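// __setitem__: reads the indexed region, checks that the value shape is
// broadcastable to it, broadcasts the value and writes it back with SetSubtensor or
// IndexingSetMultiAxisVec, reshaping the result back when bool masks were expanded.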
  1085. py::object _setitem_cpp(py::handle inp_hdl, py::handle idx_hdl, py::handle val_hdl) {
  1086. py::object org_shape = getattr(inp_hdl, "shape");
  1087. py::object val = py::reinterpret_borrow<py::object>(val_hdl);
  1088. if (!TensorWrapper::try_cast(val.ptr())) {
  1089. val = _Const(val_hdl, getattr(inp_hdl, "dtype"), getattr(inp_hdl, "device"));
  1090. }
  1091. py::tuple up = _unpack_indexes(inp_hdl, idx_hdl);
  1092. py::object tensor = py::reinterpret_borrow<py::object>(up[0]);
  1093. py::list tensors = py::reinterpret_borrow<py::list>(up[1]);
  1094. py::list py_items = py::reinterpret_borrow<py::list>(up[2]);
  1095. std::vector<std::tuple<int8_t, bool, bool, bool, bool>> cpp_items;
  1096. for (size_t i = 0; i < py_items.size(); ++i) {
  1097. py::list item = py::reinterpret_borrow<py::list>(py_items[i]);
  1098. cpp_items.push_back(
  1099. {item[0].cast<int8_t>(), item[1].cast<bool>(), item[2].cast<bool>(),
  1100. item[3].cast<bool>(), item[4].cast<bool>()});
  1101. }
  1102. std::shared_ptr<OpDef> op, set_op;
  1103. if (up[3].cast<bool>()) {
  1104. op = Subtensor::make(cpp_items);
  1105. } else {
  1106. op = IndexingMultiAxisVec::make(cpp_items);
  1107. }
  1108. std::vector<PyObject*> p;
  1109. p.resize(tensors.size() + 2);
  1110. py::object Op = py::cast(op);
  1111. p[0] = Op.ptr();
  1112. p[1] = tensor.ptr();
  1113. for (size_t i = 0; i < tensors.size(); ++i) {
  1114. p[i + 2] = tensors[i].ptr();
  1115. }
  1116. py::tuple ret =
  1117. py::reinterpret_steal<py::object>(py_apply(NULL, p.data(), p.size()));
  1118. py::object tmp_result = ret[0];
  1119. try {
  1120. py::tuple value_shape =
  1121. py::reinterpret_borrow<py::tuple>(val.attr("_tuple_shape"));
  1122. py::tuple tmp_result_shape =
  1123. py::reinterpret_borrow<py::tuple>(tmp_result.attr("_tuple_shape"));
  1124. for (size_t i = 0; i < value_shape.size() && i < tmp_result_shape.size(); ++i) {
  1125. size_t vs = value_shape[value_shape.size() - i - 1].cast<size_t>();
  1126. size_t ts =
  1127. tmp_result_shape[tmp_result_shape.size() - i - 1].cast<size_t>();
  1128. if (vs != 1 && vs != ts) {
  1129. std::string lhs = "", rhs = "";
  1130. for (size_t j = 0; j < tmp_result_shape.size(); ++j) {
  1131. lhs += std::to_string(tmp_result_shape[j].cast<size_t>());
  1132. if (j)
  1133. lhs += ",";
  1134. }
  1135. for (size_t j = 0; j < value_shape.size(); ++j) {
  1136. rhs += std::to_string(value_shape[j].cast<size_t>());
  1137. if (j)
  1138. rhs += ",";
  1139. }
  1140. throw py::value_error(
  1141. "cannot copy tensor with shape (" + rhs +
  1142. ") to subtensor with shape (" + lhs + ")");
  1143. }
  1144. }
  1145. } catch (py::error_already_set& err) {
  1146. ;
  1147. }
  1148. val = _broadcast_cpp(val, getattr(tmp_result, "shape"));
  1149. if (up[3].cast<bool>()) {
  1150. set_op = SetSubtensor::make(cpp_items);
  1151. } else {
  1152. set_op = IndexingSetMultiAxisVec::make(cpp_items);
  1153. }
  1154. std::vector<PyObject*> q;
  1155. q.resize(tensors.size() + 3);
  1156. py::object Set_Op = py::cast(set_op);
  1157. q[0] = Set_Op.ptr();
  1158. q[1] = tensor.ptr();
  1159. q[2] = val.ptr();
  1160. for (size_t i = 0; i < tensors.size(); ++i) {
  1161. q[i + 3] = tensors[i].ptr();
  1162. }
  1163. py::tuple result =
  1164. py::reinterpret_steal<py::object>(py_apply(NULL, q.data(), q.size()));
  1165. py::object res = result[0];
  1166. if (up[4].cast<bool>()) {
  1167. res = _reshape_cpp(res, org_shape);
  1168. }
  1169. return res;
  1170. }
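// split: with a sequence argument, converts the section boundaries into
// per-partition sizes and calls Split with nsections=0; with an integer, validates
// it and calls Split with that number of equal sections.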
  1171. py::object _split_cpp(
  1172. py::handle inp_hdl, py::handle nsplits_or_sections_hdl, py::handle axis_hdl) {
  1173. py::object shape_obj = getattr(inp_hdl, "shape");
  1174. py::object n_total = shape_obj[axis_hdl];
  1175. int ndim = shape_obj.attr("__len__")().cast<int>();
  1176. int axis = axis_hdl.cast<int>();
  1177. if (axis >= ndim) {
  1178. throw py::value_error("Invalid axis " + std::to_string(axis));
  1179. }
  1180. int n_sections;
  1181. bool is_array;
  1182. if (is_py_sequence(nsplits_or_sections_hdl)) {
  1183. n_sections = PySequence_Length(nsplits_or_sections_hdl.ptr()) + 1;
  1184. is_array = true;
  1185. } else {
  1186. n_sections = getattr(nsplits_or_sections_hdl, "__int__")().cast<int>();
  1187. is_array = false;
  1188. }
  1189. py::list partitions;
  1190. std::shared_ptr<OpDef> op;
  1191. std::vector<PyObject*> p;
  1192. if (is_array) {
  1193. py::list div_points;
  1194. py::list sections = py::reinterpret_borrow<py::object>(nsplits_or_sections_hdl);
  1195. div_points.append(0);
  1196. for (size_t i = 0; i < sections.size(); ++i) {
  1197. div_points.append(sections[i]);
  1198. }
  1199. div_points.append(n_total);
  1200. for (size_t i = 1; i < div_points.size(); ++i) {
  1201. if (div_points[i - 1] > div_points[i]) {
  1202. throw py::value_error(
  1203. "Invalid nsplits_or_secions: " +
  1204. repr(nsplits_or_sections_hdl).cast<std::string>());
  1205. }
  1206. py::object pos = div_points[i] - div_points[i - 1];
  1207. if (is_tensor(pos)) {
  1208. partitions.append(pos);
  1209. } else {
  1210. partitions.append(
  1211. _Const(pos, py::cast((mgb::DType)dtype::Int32()),
  1212. getattr(inp_hdl, "device")));
  1213. }
  1214. }
  1215. op = Split::make(axis, 0);
  1216. p.resize(partitions.size() + 2);
  1217. for (size_t i = 0; i < partitions.size(); ++i) {
  1218. p[i + 2] = partitions[i].ptr();
  1219. }
  1220. } else {
  1221. if (n_sections <= 0) {
  1222. throw py::value_error("Number of sections must be larger than 0");
  1223. }
  1224. if (py::int_(n_sections) > n_total) {
  1225. throw py::value_error(
  1226. "The size " + repr(n_total).cast<std::string>() + " at dim " +
  1227. std::to_string(axis) + " cannot be split into " +
  1228. std::to_string(n_sections) + " sections");
  1229. }
  1230. op = Split::make(axis, n_sections);
  1231. p.resize(2);
  1232. }
  1233. py::object Op = py::cast(op);
  1234. p[0] = Op.ptr();
  1235. p[1] = inp_hdl.ptr();
  1236. return py::reinterpret_steal<py::object>(py_apply(NULL, p.data(), p.size()));
  1237. }
  1238. std::vector<int32_t> list2vector(py::handle li) {
  1239. std::vector<int32_t> axis;
  1240. if (is_py_sequence(li)) {
  1241. py::list tmp_list = py::reinterpret_steal<py::list>(PySequence_List(li.ptr()));
  1242. for (size_t i = 0; i < tmp_list.size(); ++i) {
  1243. axis.push_back(tmp_list[i].attr("__int__")().cast<int32_t>());
  1244. }
  1245. } else {
  1246. axis.push_back(getattr(li, "__int__")().cast<int32_t>());
  1247. }
  1248. return axis;
  1249. }
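// expand_dims: resolves negative axes against the output ndim (input ndim plus the
// number of new axes) and applies AddAxis; negative axes require a known input ndim.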
  1250. py::object _expand_dims_cpp(py::handle inp_hdl, py::handle axis_hdl) {
  1251. std::vector<int32_t> axis = list2vector(axis_hdl);
  1252. bool unknown_ndim = true;
  1253. size_t ndim = axis.size();
  1254. if (auto p = TensorWrapper::try_cast(inp_hdl.ptr())) {
  1255. auto&& shape = p->m_tensor->shape();
  1256. if (shape) {
  1257. unknown_ndim = false;
  1258. ndim += shape->ndim;
  1259. }
  1260. } else {
  1261. auto&& inp_ndim = get_ndim_safe(inp_hdl);
  1262. ndim += inp_ndim.first;
  1263. unknown_ndim &= !inp_ndim.second;
  1264. }
  1265. for (size_t i = 0; i < axis.size(); ++i) {
  1266. if (axis[i] < 0) {
  1267. if (unknown_ndim) {
  1268. throw py::index_error(
  1269. "Does not support negative index when tensor's ndim is "
  1270. "unknown");
  1271. }
  1272. axis[i] += static_cast<int32_t>(ndim);
  1273. }
  1274. }
  1275. if (!axis.size()) {
  1276. throw py::index_error("axis could not be empty");
  1277. }
  1278. std::sort(axis.begin(), axis.end());
  1279. std::shared_ptr<OpDef> op = AddAxis::make(axis);
  1280. py::object Op = py::cast(op);
  1281. PyObject* p[2] = {Op.ptr(), inp_hdl.ptr()};
  1282. py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 2));
  1283. return ret[0];
  1284. }
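// squeeze: collects the axes to drop (all axes of size 1 when axis is None),
// normalizes negative values, and applies RemoveAxis with each axis adjusted for the
// dimensions removed before it.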
  1285. py::object _squeeze_cpp(py::handle inp_hdl, py::handle axis_hdl) {
  1286. std::vector<int32_t> axis;
  1287. size_t ndim;
  1288. if (axis_hdl.ptr() != Py_None) {
  1289. axis = list2vector(axis_hdl);
  1290. }
  1291. if (auto p = TensorWrapper::try_cast(inp_hdl.ptr())) {
  1292. auto&& shape = p->m_tensor->shape();
  1293. if (shape) {
  1294. ndim = shape->ndim;
  1295. if (axis_hdl.ptr() == Py_None) {
  1296. for (size_t i = 0; i < shape->ndim; ++i) {
  1297. if (shape->shape[i] == 1) {
  1298. axis.push_back(i);
  1299. }
  1300. }
  1301. }
  1302. }
  1303. } else {
  1304. py::tuple shape =
  1305. py::reinterpret_borrow<py::tuple>(getattr(inp_hdl, "_tuple_shape"));
  1306. ndim = shape.size();
  1307. if (axis_hdl.ptr() == Py_None) {
  1308. for (size_t i = 0; i < shape.size(); ++i) {
  1309. if (shape[i].cast<size_t>() == 1) {
  1310. axis.push_back(i);
  1311. }
  1312. }
  1313. }
  1314. }
  1315. for (size_t i = 0; i < axis.size(); ++i) {
  1316. if (axis[i] < 0) {
  1317. axis[i] += static_cast<int32_t>(ndim);
  1318. }
  1319. }
  1320. std::sort(axis.begin(), axis.end());
  1321. for (size_t i = 0; i < axis.size(); ++i) {
  1322. axis[i] -= static_cast<int32_t>(i);
  1323. }
  1324. std::shared_ptr<OpDef> op = RemoveAxis::make(axis);
  1325. py::object Op = py::cast(op);
  1326. PyObject* p[2] = {Op.ptr(), inp_hdl.ptr()};
  1327. py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 2));
  1328. return ret[0];
  1329. }
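// transpose / dimshuffle: an empty pattern reverses all axes; otherwise integer
// entries select source axes and the string "x" inserts a broadcast axis (-1 in the
// Dimshuffle pattern). Scalar inputs (ndim 0) are simply sent through to(device).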
  1330. py::object _transpose_cpp(py::handle inp_hdl, py::handle args) {
  1331. py::object obj = _expand_args(args);
  1332. py::list lis;
  1333. if (!is_tensor(obj.ptr()) && PySequence_Check(obj.ptr())) {
  1334. lis = py::reinterpret_steal<py::list>(PySequence_List(obj.ptr()));
  1335. } else {
  1336. py::object np = getattr(obj, "numpy")();
  1337. PyArrayObject* arr = (PyArrayObject*)np.ptr();
  1338. PyObject* maybe_list = PyArray_ToList(arr);
  1339. if (PyList_Check(maybe_list)) {
  1340. lis = py::reinterpret_steal<py::list>(maybe_list);
  1341. }
  1342. }
  1343. if (get_ndim_safe(inp_hdl).first == 0) {
  1344. if (lis.size() != 0) {
  1345. throw py::index_error(
  1346. "transpose for scalar does not accept additional args");
  1347. }
  1348. return getattr(inp_hdl, "to")(getattr(inp_hdl, "device"));
  1349. }
  1350. std::vector<int32_t> pattern;
  1351. if (!lis.size()) {
  1352. size_t ndim = getattr(inp_hdl, "ndim").cast<size_t>();
  1353. for (size_t i = 0; i < ndim; ++i) {
  1354. pattern.push_back(ndim - i - 1);
  1355. }
  1356. } else {
  1357. for (size_t i = 0; i < lis.size(); ++i) {
  1358. if (PyLong_Check(lis[i].ptr())) {
  1359. pattern.push_back(lis[i].cast<int32_t>());
  1360. } else {
  1361. if (lis[i].cast<std::string>() == "x") {
  1362. pattern.push_back(-1);
  1363. }
  1364. }
  1365. }
  1366. }
  1367. std::shared_ptr<OpDef> op = Dimshuffle::make(pattern);
  1368. py::object Op = py::cast(op);
  1369. PyObject* p[2] = {Op.ptr(), inp_hdl.ptr()};
  1370. py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 2));
  1371. return ret[0];
  1372. }
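// matmul: builds a MatrixMul op with the given transpose flags, compute mode
// ("float32" selects FLOAT32 accumulation) and an execution strategy of PROFILE or
// HEURISTIC, optionally REPRODUCIBLE; _batched_matmul_cpp below does the same with
// BatchedMatrixMul.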
  1373. py::object _matmul_cpp(
  1374. py::handle inp1, py::handle inp2, py::handle dim1, py::handle dim2,
  1375. py::handle transpose_a, py::handle transpose_b, py::handle compute_mode,
  1376. py::handle profile, py::handle deterministic) {
  1377. ::megdnn::param::MatrixMul::ComputeMode mode =
  1378. ::megdnn::param::MatrixMul::ComputeMode::DEFAULT;
  1379. if (compute_mode.cast<std::string>().compare(std::string("float32")) == 0) {
  1380. mode = ::megdnn::param::MatrixMul::ComputeMode::FLOAT32;
  1381. }
  1382. ::megdnn::param::ExecutionPolicy::Strategy cstrategy =
  1383. static_cast<::megdnn::param::ExecutionPolicy::Strategy>(0);
  1384. if (profile.cast<bool>()) {
  1385. cstrategy |= ::megdnn::param::ExecutionPolicy::Strategy::PROFILE;
  1386. } else {
  1387. cstrategy |= ::megdnn::param::ExecutionPolicy::Strategy::HEURISTIC;
  1388. }
  1389. if (deterministic.cast<bool>()) {
  1390. cstrategy |= ::megdnn::param::ExecutionPolicy::Strategy::REPRODUCIBLE;
  1391. }
  1392. std::shared_ptr<OpDef> op = MatrixMul::make(
  1393. transpose_a.cast<bool>(), transpose_b.cast<bool>(), mode,
  1394. ::megdnn::param::MatrixMul::Format::DEFAULT, cstrategy, UINT64_MAX,
  1395. dim1.cast<uint32_t>(), dim2.cast<uint32_t>());
  1396. py::object Op = py::cast(op);
  1397. PyObject* p[3] = {Op.ptr(), inp1.ptr(), inp2.ptr()};
  1398. py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 3));
  1399. return ret[0];
  1400. }
  1401. py::object _batched_matmul_cpp(
  1402. py::handle inp1, py::handle inp2, py::handle dim1, py::handle dim2,
  1403. py::handle transpose_a, py::handle transpose_b, py::handle compute_mode,
  1404. py::handle profile, py::handle deterministic) {
  1405. ::megdnn::param::MatrixMul::ComputeMode mode =
  1406. ::megdnn::param::MatrixMul::ComputeMode::DEFAULT;
  1407. if (compute_mode.cast<std::string>().compare(std::string("float32")) == 0) {
  1408. mode = ::megdnn::param::MatrixMul::ComputeMode::FLOAT32;
  1409. }
  1410. ::megdnn::param::ExecutionPolicy::Strategy cstrategy =
  1411. static_cast<::megdnn::param::ExecutionPolicy::Strategy>(0);
  1412. if (profile.cast<bool>()) {
  1413. cstrategy |= ::megdnn::param::ExecutionPolicy::Strategy::PROFILE;
  1414. } else {
  1415. cstrategy |= ::megdnn::param::ExecutionPolicy::Strategy::HEURISTIC;
  1416. }
  1417. if (deterministic.cast<bool>()) {
  1418. cstrategy |= ::megdnn::param::ExecutionPolicy::Strategy::REPRODUCIBLE;
  1419. }
  1420. std::shared_ptr<OpDef> op = BatchedMatrixMul::make(
  1421. transpose_a.cast<bool>(), transpose_b.cast<bool>(), mode,
  1422. ::megdnn::param::MatrixMul::Format::DEFAULT, cstrategy, UINT64_MAX,
  1423. dim1.cast<uint32_t>(), dim2.cast<uint32_t>());
  1424. py::object Op = py::cast(op);
  1425. PyObject* p[3] = {Op.ptr(), inp1.ptr(), inp2.ptr()};
  1426. py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 3));
  1427. return ret[0];
  1428. }
  1429. py::object _pixel_shuffle_cpp(py::handle inp, py::handle val, py::handle func) {
  1430. if (enable_fastpath(inp) && PyLong_Check(val.ptr())) {
  1431. std::shared_ptr<OpDef> op = PixelShuffle::make(val.cast<int32_t>());
  1432. py::object Op = py::cast(op);
  1433. PyObject* p[2] = {Op.ptr(), inp.ptr()};
  1434. py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 2));
  1435. return ret[0];
  1436. } else {
  1437. // fallback to traceable subgraph implement
  1438. return func(inp, val);
  1439. }
  1440. }
  1441. PyObject* make_shape_tuple(PyObject* self, PyObject* const* args, size_t nargs) {
  1442. try {
  1443. return _make_shape_tuple(args[0]).release().ptr();
  1444. }
  1445. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1446. }
  1447. PyObject* getitem_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1448. try {
  1449. return _getitem_cpp(args[0], args[1]).release().ptr();
  1450. }
  1451. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1452. }
  1453. PyObject* setitem_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1454. try {
  1455. return _setitem_cpp(args[0], args[1], args[2]).release().ptr();
  1456. }
  1457. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1458. }
  1459. PyObject* split_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1460. try {
  1461. return _split_cpp(args[0], args[1], args[2]).release().ptr();
  1462. }
  1463. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1464. }
  1465. PyObject* expand_dims_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1466. try {
  1467. return _expand_dims_cpp(args[0], args[1]).release().ptr();
  1468. }
  1469. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1470. }
  1471. PyObject* squeeze_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1472. try {
  1473. return _squeeze_cpp(args[0], args[1]).release().ptr();
  1474. }
  1475. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1476. }
  1477. PyObject* transpose_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1478. try {
  1479. return _transpose_cpp(args[0], args[1]).release().ptr();
  1480. }
  1481. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1482. }
  1483. PyObject* broadcast_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1484. try {
  1485. return _broadcast_cpp(args[0], args[1]).release().ptr();
  1486. }
  1487. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1488. }
  1489. PyObject* reshape_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1490. try {
  1491. return _reshape_cpp(args[0], args[1]).release().ptr();
  1492. }
  1493. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1494. }
  1495. PyObject* adaptive_pool2d_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1496. try {
  1497. return _adaptive_pool2d_cpp(args[0], args[1], args[2]).release().ptr();
  1498. }
  1499. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1500. }
  1501. PyObject* pixel_shuffle_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1502. try {
  1503. return _pixel_shuffle_cpp(args[0], args[1], args[2]).release().ptr();
  1504. }
  1505. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1506. }
  1507. PyObject* Const(PyObject* self, PyObject* const* args, size_t nargs) {
  1508. try {
  1509. return _Const(args[0], args[1], args[2]).release().ptr();
  1510. }
  1511. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1512. }
  1513. PyObject* astype_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1514. try {
  1515. return _astype_cpp(args[0], args[1]).release().ptr();
  1516. }
  1517. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1518. }
  1519. PyObject* matmul_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1520. try {
  1521. return _matmul_cpp(
  1522. args[0], args[1], args[2], args[3], args[4], args[5], args[6],
  1523. args[7], args[8])
  1524. .release()
  1525. .ptr();
  1526. }
  1527. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1528. }
  1529. PyObject* batched_matmul_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1530. try {
  1531. return _batched_matmul_cpp(
  1532. args[0], args[1], args[2], args[3], args[4], args[5], args[6],
  1533. args[7], args[8])
  1534. .release()
  1535. .ptr();
  1536. }
  1537. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1538. }
  1539. PyObject* convert_single_value_cpp(
  1540. PyObject* self, PyObject* const* args, size_t nargs) {
  1541. try {
  1542. return _convert_single_value_cpp(args[0], args[1], args[2]).release().ptr();
  1543. }
  1544. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1545. }
  1546. PyObject* convert_inputs_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1547. try {
  1548. py::object dtype = py::reinterpret_steal<py::object>(
  1549. dtype_promotion(self, args, nargs - 1));
  1550. py::object device;
  1551. if (args[nargs - 1] == Py_None) {
  1552. device = py::reinterpret_steal<py::object>(
  1553. get_device(self, args, nargs - 1));
  1554. } else {
  1555. device = py::reinterpret_borrow<py::object>(args[nargs - 1]);
  1556. }
  1557. return _convert_inputs_cpp(args, nargs - 1, dtype, device).release().ptr();
  1558. }
  1559. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1560. }
  1561. PyObject* astensor1d_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
  1562. try {
  1563. return _astensor1d_cpp(args[0], args[1], args[2], args[3]).release().ptr();
  1564. }
  1565. PYEXT17_TRANSLATE_EXC_RET(nullptr)
  1566. }
  1567. } // namespace mgb::imperative::python