You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

convert_utils_py.cc 18 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "utils/convert_utils_py.h"
  17. #include <vector>
  18. #include <string>
  19. #include <memory>
  20. #include <algorithm>
  21. #include <list>
  22. #include <utility>
  23. #include <cfloat>
  24. #include "abstract/abstract_value.h"
  25. #include "pipeline/jit/parse/parse.h"
  26. #include "pipeline/jit/parse/parse_base.h"
  27. #include "ir/value.h"
  28. #include "ir/tensor.h"
  29. #include "ir/param_info.h"
  30. #include "pybind_api/ir/base_ref_py.h"
  31. #include "utils/ms_context.h"
  32. namespace mindspore {
  33. py::object BuiltinsToPyData(const Any &value);
  34. py::object BuiltinsToPyData(const BaseRef &value);
  35. py::object VectorToPyData(const Any &value);
  36. py::object VectorRefToPyData(const VectorRef &value);
  37. py::object TensorToPyData(const tensor::TensorPtr &tensor) {
  38. MS_EXCEPTION_IF_NULL(tensor);
  39. if (tensor->NeedWait()) {
  40. py::gil_scoped_release release;
  41. tensor->Wait();
  42. }
  43. py::tuple v(1);
  44. v[0] = tensor;
  45. return v[0];
  46. }
  47. py::object ScalarPtrToPyData(const ScalarPtr &value) {
  48. py::int_ int_v;
  49. py::float_ float_v;
  50. py::bool_ bool_v;
  51. TypeId scalar_type = value->type()->type_id();
  52. switch (scalar_type) {
  53. case kNumberTypeUInt8:
  54. MS_LOG(DEBUG) << "uint8";
  55. int_v = value->cast<UInt8ImmPtr>()->value();
  56. return std::move(int_v);
  57. case kNumberTypeUInt16:
  58. MS_LOG(DEBUG) << "uint16";
  59. int_v = value->cast<UInt16ImmPtr>()->value();
  60. return std::move(int_v);
  61. case kNumberTypeUInt32:
  62. MS_LOG(DEBUG) << "uint32";
  63. int_v = value->cast<UInt32ImmPtr>()->value();
  64. return std::move(int_v);
  65. case kNumberTypeUInt64:
  66. MS_LOG(DEBUG) << "uint64";
  67. int_v = value->cast<UInt64ImmPtr>()->value();
  68. return std::move(int_v);
  69. case kNumberTypeInt8:
  70. MS_LOG(DEBUG) << "int8";
  71. int_v = value->cast<Int8ImmPtr>()->value();
  72. return std::move(int_v);
  73. case kNumberTypeInt16:
  74. MS_LOG(DEBUG) << "int16";
  75. int_v = value->cast<Int16ImmPtr>()->value();
  76. return std::move(int_v);
  77. case kNumberTypeInt32:
  78. MS_LOG(DEBUG) << "int32";
  79. int_v = value->cast<Int32ImmPtr>()->value();
  80. return std::move(int_v);
  81. case kNumberTypeInt64:
  82. MS_LOG(DEBUG) << "int64";
  83. int_v = value->cast<Int64ImmPtr>()->value();
  84. return std::move(int_v);
  85. case kNumberTypeFloat32:
  86. MS_LOG(DEBUG) << "float";
  87. float_v = value->cast<FP32ImmPtr>()->value();
  88. return std::move(float_v);
  89. case kNumberTypeFloat64:
  90. MS_LOG(DEBUG) << "double";
  91. float_v = value->cast<FP64ImmPtr>()->value();
  92. return std::move(float_v);
  93. case kNumberTypeBool:
  94. MS_LOG(DEBUG) << "bool";
  95. bool_v = value->cast<BoolImmPtr>()->value();
  96. return std::move(bool_v);
  97. default:
  98. MS_EXCEPTION(TypeError) << "Unsupported scalar converted to py data: " << value->ToString();
  99. }
  100. }
// Convert a ValuePtr (IR value) into the corresponding Python object.
// The isa-dispatch order is significant: more specific types are tested
// before related ones (e.g. Tensor before MetaTensor).  Raises for null
// input and for unsupported value kinds.
py::object ValuePtrToPyData(const ValuePtr &value) {
  if (value == nullptr) {
    MS_LOG(EXCEPTION) << "value is null";
  }
  py::object ret;
  if (value->isa<Scalar>()) {
    ret = ScalarPtrToPyData(value->cast<ScalarPtr>());
  } else if (value->isa<StringImm>()) {
    MS_LOG(DEBUG) << "String";
    py::str v = value->cast<StringImmPtr>()->value();
    ret = v;
  } else if (value->isa<tensor::Tensor>()) {
    MS_LOG(DEBUG) << "tensor";
    auto tensor_ptr = value->cast<tensor::TensorPtr>();
    ret = TensorToPyData(tensor_ptr);
  } else if (value->isa<tensor::MetaTensor>()) {
    // NOTE(review): checked after Tensor — presumably Tensor derives from
    // MetaTensor, so this ordering matters; confirm against ir/tensor.h.
    MS_LOG(DEBUG) << "MetaTensor";
    // A one-slot tuple routes conversion through pybind11's registered
    // type caster for the pointer type.
    py::tuple v(1);
    v[0] = value->cast<tensor::MetaTensorPtr>();
    ret = v[0];
  } else if (value->isa<RefKey>()) {
    MS_LOG(DEBUG) << "RefKey";
    py::tuple v(1);
    v[0] = value->cast<RefKeyPtr>();
    ret = v[0];
  } else if (value->isa<ValueSequeue>()) {
    MS_LOG(DEBUG) << "tuple or list";
    // Convert elements recursively into a tuple, then downcast the tuple
    // to a py::list when the source was a ValueList rather than ValueTuple.
    auto value_sequeue = value->cast<ValueSequeuePtr>()->value();
    py::tuple ret_sequeue(value_sequeue.size());
    for (size_t i = 0; i < value_sequeue.size(); i++) {
      ret_sequeue[i] = ValuePtrToPyData(value_sequeue[i]);
    }
    if (value->isa<ValueTuple>()) {
      ret = ret_sequeue;
    } else {
      ret = ret_sequeue.cast<py::list>();
    }
  } else if (value->isa<ValueDictionary>()) {
    MS_LOG(DEBUG) << "dict";
    auto value_list = value->cast<ValueDictionaryPtr>()->value();
    py::dict ret_dict;
    for (const auto &v : value_list) {
      ret_dict[py::str(v.first)] = ValuePtrToPyData(v.second);
    }
    ret = ret_dict;
  } else if (value->isa<Ellipsis>()) {
    ret = py::ellipsis();
  } else if (value->isa<ValueSlice>()) {
    // Rebuild a Python slice object via the Python-side parse helper,
    // converting each bound recursively first.
    auto slice = value->cast<ValueSlicePtr>();
    auto start = ValuePtrToPyData(slice->start());
    auto end = ValuePtrToPyData(slice->stop());
    auto step = ValuePtrToPyData(slice->step());
    ret = parse::python_adapter::CallPyFn(parse::PYTHON_MOD_PARSE_MODULE, parse::PYTHON_PARSE_CLASS_SLICE, start, end,
                                          step);
  } else if (value->isa<Type>()) {
    py::tuple v(1);
    v[0] = value->cast<TypePtr>();
    ret = v[0];
  } else if (value->isa<AnyValue>() || value->isa<None>() || value->isa<Monad>() || value->isa<FuncGraph>()) {
    // FuncGraph is not used in the backend, return None
    ret = py::none();
  } else {
    MS_LOG(EXCEPTION) << "Unsupported convert value: " << value->ToString() << " to a PyData.";
  }
  return ret;
}
  167. py::object AnyToPyData(const Any &value) {
  168. py::object ret;
  169. MS_LOG(DEBUG) << "AnyToPyData " << value.GetString();
  170. if (value.is<int>() || value.is<float>() || value.is<double>() || value.is<bool>()) {
  171. ret = BuiltinsToPyData(value);
  172. } else if (value.is<ValuePtr>()) {
  173. MS_LOG(DEBUG) << "ValuePtr";
  174. ValuePtr v = value.cast<ValuePtr>();
  175. ret = ValuePtrToPyData(v);
  176. } else if (value.is<tensor::TensorPtr>()) {
  177. MS_LOG(DEBUG) << "tensor";
  178. auto tensor_ptr = value.cast<tensor::TensorPtr>();
  179. ret = TensorToPyData(tensor_ptr);
  180. } else if (value.is<py::object>()) {
  181. MS_LOG(DEBUG) << "py obj";
  182. ret = value.cast<py::object>();
  183. } else if (value.is<std::vector<tensor::TensorPtr>>() || value.is<std::vector<Any>>()) {
  184. ret = VectorToPyData(value);
  185. } else if (value.is<std::list<Any>>()) {
  186. MS_LOG(DEBUG) << "list_any";
  187. auto value_list = value.cast<std::list<Any>>();
  188. py::list rets = py::list();
  189. for (auto &v : value_list) {
  190. rets.append(AnyToPyData(v));
  191. }
  192. ret = rets;
  193. } else if (value.is<std::vector<Any>>()) {
  194. auto value_list = value.cast<std::vector<Any>>();
  195. py::tuple rets(value_list.size());
  196. for (size_t i = 0; i < value_list.size(); i++) {
  197. rets[i] = AnyToPyData(value_list[i]);
  198. }
  199. ret = rets;
  200. } else if (value.is<TypePtr>()) {
  201. py::tuple v(1);
  202. v[0] = value.cast<TypePtr>();
  203. ret = v[0];
  204. } else {
  205. MS_LOG(EXCEPTION) << "value is not support type";
  206. }
  207. return ret;
  208. }
// Convert a BaseRef (generic backend reference) into a Python object.
// Dispatch mirrors AnyToPyData but uses utils::isa/utils::cast; VectorRef
// results are converted element-wise into a Python tuple.  Raises for
// unsupported types.
py::object BaseRefToPyData(const BaseRef &value) {
  py::object ret;
  MS_LOG(DEBUG) << "BaseRefToPyData " << value.ToString();
  if (utils::isa<int>(value) || utils::isa<float>(value) || utils::isa<double>(value) || utils::isa<bool>(value)) {
    ret = BuiltinsToPyData(value);
  } else if (utils::isa<ValuePtr>(value)) {
    MS_LOG(DEBUG) << "ValuePtr";
    ValuePtr v = utils::cast<ValuePtr>(value);
    ret = ValuePtrToPyData(v);
  } else if (utils::isa<tensor::TensorPtr>(value)) {
    MS_LOG(DEBUG) << "tensor";
    auto tensor_ptr = utils::cast<tensor::TensorPtr>(value);
    ret = TensorToPyData(tensor_ptr);
  } else if (utils::isa<PyObjectRef>(value)) {
    // Already a Python object wrapped for the backend; unwrap it.
    MS_LOG(DEBUG) << "py obj";
    PyObjectRef py_ref = utils::cast<PyObjectRef>(value);
    ret = py_ref.object_;
  } else if (utils::isa<VectorRef>(value)) {
    auto vec_ref = utils::cast<VectorRef>(value);
    ret = VectorRefToPyData(vec_ref);
  } else if (utils::isa<TypePtr>(value)) {
    // One-slot tuple routes through pybind11's type caster for TypePtr.
    py::tuple v(1);
    v[0] = utils::cast<TypePtr>(value);
    ret = v[0];
  } else {
    MS_LOG(EXCEPTION) << "value is not support type";
  }
  return ret;
}
  238. py::object BuiltinsToPyData(const Any &value) {
  239. if (value.is<int>()) {
  240. MS_LOG(DEBUG) << "int";
  241. py::int_ ret = value.cast<int>();
  242. return std::move(ret);
  243. } else if (value.is<float>()) {
  244. MS_LOG(DEBUG) << "float";
  245. py::float_ ret = value.cast<float>();
  246. return std::move(ret);
  247. } else if (value.is<double>()) {
  248. MS_LOG(DEBUG) << "double";
  249. py::float_ ret = value.cast<double>();
  250. return std::move(ret);
  251. } else {
  252. MS_LOG(DEBUG) << "bool";
  253. py::bool_ ret = value.cast<bool>();
  254. return std::move(ret);
  255. }
  256. }
  257. py::object BuiltinsToPyData(const BaseRef &value) {
  258. if (utils::isa<int>(value)) {
  259. MS_LOG(DEBUG) << "int";
  260. py::int_ ret = utils::cast<int>(value);
  261. return std::move(ret);
  262. } else if (utils::isa<float>(value)) {
  263. MS_LOG(DEBUG) << "float";
  264. py::float_ ret = utils::cast<float>(value);
  265. return std::move(ret);
  266. } else if (utils::isa<double>(value)) {
  267. MS_LOG(DEBUG) << "double";
  268. py::float_ ret = utils::cast<double>(value);
  269. return std::move(ret);
  270. } else {
  271. MS_LOG(DEBUG) << "bool";
  272. py::bool_ ret = utils::cast<bool>(value);
  273. return std::move(ret);
  274. }
  275. }
  276. py::object VectorToPyData(const Any &value) {
  277. py::object ret;
  278. if (value.is<std::vector<tensor::TensorPtr>>()) {
  279. MS_LOG(DEBUG) << "vector_tensor";
  280. std::vector<tensor::TensorPtr> outputs;
  281. outputs = value.cast<std::vector<tensor::TensorPtr>>();
  282. py::tuple tensor_tuple(outputs.size());
  283. for (std::size_t i = 0; i < outputs.size(); ++i) {
  284. tensor_tuple[i] = *outputs[i];
  285. }
  286. ret = tensor_tuple;
  287. } else {
  288. MS_LOG(DEBUG) << "vector_any";
  289. auto value_list = value.cast<std::vector<Any>>();
  290. py::tuple any_tuple = py::tuple(value_list.size());
  291. size_t i = 0;
  292. for (auto &v : value_list) {
  293. any_tuple[i] = AnyToPyData(v);
  294. i++;
  295. }
  296. ret = any_tuple;
  297. }
  298. return ret;
  299. }
  300. py::object VectorRefToPyData(const VectorRef &value_list) {
  301. py::object ret;
  302. MS_LOG(DEBUG) << "vector_ref";
  303. size_t value_size = value_list.size();
  304. auto ref_tuple = py::tuple(value_size);
  305. for (size_t i = 0; i < value_size; i++) {
  306. ref_tuple[i] = BaseRefToPyData(value_list[i]);
  307. }
  308. ret = ref_tuple;
  309. return ret;
  310. }
  311. void SetValueRange(const AbstractBasePtr &tensor, const py::object &output) {
  312. if (output.is_none()) {
  313. return;
  314. }
  315. py::object obj_min =
  316. output.contains(py::str(ATTR_MIN_VALUE)) ? (py::object)output[ATTR_MIN_VALUE] : (py::object)py::none();
  317. py::object obj_max =
  318. output.contains(py::str(ATTR_MAX_VALUE)) ? (py::object)output[ATTR_MAX_VALUE] : (py::object)py::none();
  319. if (!obj_min.is_none() && !obj_max.is_none()) {
  320. bool converted = true;
  321. ValuePtr min_value = nullptr;
  322. ValuePtr max_value = nullptr;
  323. converted = parse::ConvertData(obj_min, &min_value);
  324. if (!converted) {
  325. MS_LOG(EXCEPTION) << "Convert shape min value data failed";
  326. }
  327. converted = parse::ConvertData(obj_max, &max_value);
  328. if (!converted) {
  329. MS_LOG(EXCEPTION) << "Convert shape max value data failed";
  330. }
  331. auto abs_tensor = dyn_cast<abstract::AbstractTensor>(tensor);
  332. abs_tensor->set_value_range(min_value, max_value);
  333. }
  334. }
  335. AbstractBasePtr PyList2DynamicShapeTensor(const py::object &shape_obj, const py::object &type_obj,
  336. const py::object &output) {
  337. AbstractBasePtr tensor = nullptr;
  338. auto ret_vec = shape_obj.cast<ShapeVector>();
  339. auto ret_dtype = type_obj.cast<TypePtr>();
  340. ShapeVector min_shape_vec;
  341. ShapeVector max_shape_vec;
  342. if (!output.is_none()) {
  343. py::object min_shape =
  344. output.contains(py::str(ATTR_MIN_SHAPE)) ? (py::object)output[ATTR_MIN_SHAPE] : (py::object)py::none();
  345. py::object max_shape =
  346. output.contains(py::str(ATTR_MAX_SHAPE)) ? (py::object)output[ATTR_MAX_SHAPE] : (py::object)py::none();
  347. if (!min_shape.is_none()) {
  348. min_shape_vec = min_shape.cast<ShapeVector>();
  349. }
  350. if (!max_shape.is_none()) {
  351. max_shape_vec = max_shape.cast<ShapeVector>();
  352. }
  353. }
  354. auto ret_shape = std::make_shared<abstract::Shape>(ret_vec, min_shape_vec, max_shape_vec);
  355. if (ret_dtype->isa<TensorType>()) {
  356. auto tensor_type = type_obj.cast<TensorTypePtr>();
  357. MS_EXCEPTION_IF_NULL(tensor_type);
  358. auto element = std::make_shared<abstract::AbstractScalar>(kAnyValue, tensor_type->element());
  359. tensor = std::make_shared<abstract::AbstractTensor>(element, ret_shape);
  360. } else {
  361. auto element = std::make_shared<abstract::AbstractScalar>(kAnyValue, ret_dtype);
  362. tensor = std::make_shared<abstract::AbstractTensor>(element, ret_shape);
  363. }
  364. SetValueRange(tensor, output);
  365. return tensor;
  366. }
  367. static bool IsMonadType(const py::object &type_obj) {
  368. if (py::isinstance<Type>(type_obj)) {
  369. auto type = type_obj.cast<Type *>();
  370. return type->isa<MonadType>();
  371. }
  372. return false;
  373. }
  374. static AbstractBasePtr ToMonadAbstract(const py::object &type_obj) {
  375. if (py::isinstance<Type>(type_obj)) {
  376. auto type = type_obj.cast<Type *>();
  377. if (type->isa<UMonadType>()) {
  378. return kUMonad->ToAbstract();
  379. }
  380. if (type->isa<IOMonadType>()) {
  381. return kIOMonad->ToAbstract();
  382. }
  383. }
  384. MS_LOG(EXCEPTION) << "Not a monad type object: " << py::str(type_obj);
  385. }
  386. AbstractBasePtr PyListDtype2AbstractTensor(const py::object &shape_obj, const py::object &type_obj,
  387. const py::object &output) {
  388. if ((py::isinstance<py::list>(shape_obj) || py::isinstance<py::tuple>(shape_obj)) && py::isinstance<Type>(type_obj)) {
  389. auto ret_vec = shape_obj.cast<ShapeVector>();
  390. auto ret_dtype = type_obj.cast<TypePtr>();
  391. MS_EXCEPTION_IF_NULL(ret_dtype);
  392. // if the size of shape list is empty, return an scalar abstract
  393. if (ret_vec.empty() && (!ret_dtype->isa<TensorType>())) {
  394. abstract::AbstractScalarPtr abs_scalar = std::make_shared<abstract::AbstractScalar>(kAnyValue, ret_dtype);
  395. return abs_scalar;
  396. }
  397. return PyList2DynamicShapeTensor(shape_obj, type_obj, output);
  398. } else if (py::isinstance<py::tuple>(shape_obj) && py::isinstance<py::tuple>(type_obj)) {
  399. auto shape_tuple = shape_obj.cast<py::tuple>();
  400. auto typeid_tuple = type_obj.cast<py::tuple>();
  401. AbstractBasePtrList ptr_list;
  402. for (size_t it = 0; it < shape_tuple.size(); ++it) {
  403. auto tensor_it = PyListDtype2AbstractTensor(shape_tuple[it], typeid_tuple[it]);
  404. ptr_list.push_back(tensor_it);
  405. }
  406. auto tuple = std::make_shared<abstract::AbstractTuple>(ptr_list);
  407. return tuple;
  408. } else if (py::isinstance<py::list>(shape_obj) && py::isinstance<py::list>(type_obj)) {
  409. auto shape_list = shape_obj.cast<py::list>();
  410. auto typeid_list = type_obj.cast<py::list>();
  411. AbstractBasePtrList ptr_list;
  412. for (size_t it = 0; it < shape_list.size(); ++it) {
  413. auto tensor_it = PyListDtype2AbstractTensor(shape_list[it], typeid_list[it]);
  414. ptr_list.push_back(tensor_it);
  415. }
  416. auto list = std::make_shared<abstract::AbstractList>(ptr_list);
  417. return list;
  418. } else if (shape_obj.is_none() && type_obj.is_none()) {
  419. // AbstractNone indicates there is no output for this CNode node.
  420. auto abstract_none = std::make_shared<abstract::AbstractNone>();
  421. return abstract_none;
  422. } else if (IsMonadType(type_obj)) {
  423. // Return monad abstract if it is monad type.
  424. return ToMonadAbstract(type_obj);
  425. } else {
  426. // When sparse enabled, the undetermined might be raised and eliminated in opt passes
  427. auto context = MsContext::GetInstance();
  428. MS_EXCEPTION_IF_NULL(context);
  429. bool enable_sparse = context->get_param<bool>(MS_CTX_ENABLE_SPARSE);
  430. if (enable_sparse) {
  431. return std::make_shared<abstract::AbstractUndetermined>();
  432. }
  433. MS_LOG(EXCEPTION) << "Python evaluator return invalid shape or type. " << (std::string)py::str(type_obj);
  434. }
  435. }
// If the graph output is a constant ValueNode or a Parameter, resolve the
// return value directly into *ret_val without executing the graph.
// Returns true when *ret_val was filled, false when the graph must run.
bool IsGraphOutputValueNodeOrParameter(const AnfNodePtr &output, const py::tuple &args,
                                       const std::shared_ptr<py::object> &ret_val) {
  if (output->isa<ValueNode>()) {
    MS_LOG(INFO) << "Graph's output is a constant. No need to execute.";
    ValuePtr value = GetValueNode(output);
    *ret_val = ValuePtrToPyData(value);
    return true;
  }
  // Adapter will transform values in __init__() and construct() to parameters, this could cause
  // inputs (a.k.a args in current function) size less than parameters'.
  if (output->isa<Parameter>()) {
    MS_LOG(INFO) << "Graph's output is a parameter. If all params are inputs, no need to execute.";
    // Find the right parameter as ret_val.
    auto func_graph = output->func_graph();
    MS_EXCEPTION_IF_NULL(func_graph);
    auto params = func_graph->parameters();
    // Graph parameters = user inputs followed by hyper-parameters; their
    // counts must add up exactly or the mapping below would be wrong.
    if ((args.size() + func_graph->hyper_param_count()) != params.size()) {
      MS_LOG(EXCEPTION) << "Input size " << args.size() << " add Parameter count " << func_graph->hyper_param_count()
                        << " not equal to graph input size " << params.size() << ", let graph to be executed.";
    }
    auto it = std::find(params.begin(), params.end(), output);
    if (it == params.end()) {
      MS_EXCEPTION(UnknownError) << "When graph output is Parameter, it should be found in graph parameters";
    }
    size_t index = it - params.cbegin();
    if (index >= args.size() + func_graph->hyper_param_count()) {
      MS_EXCEPTION(UnknownError) << "Index " << index << " equal or larger than args size " << args.size()
                                 << " add Parameter count " << func_graph->hyper_param_count() << ".";
    }
    if (index < args.size()) {
      // The output parameter maps to one of the call arguments: echo it back.
      *ret_val = args[index];
    } else {
      // The output parameter is a hyper-parameter: return its default value.
      auto param = dyn_cast<Parameter>(params[index]);
      MS_EXCEPTION_IF_NULL(param);
      if (!param->has_default()) {
        MS_LOG(EXCEPTION) << "Can not determine value of Parameter " << index << " (" << param->name() << ")";
      }
      auto tensor = param->default_param();
      *ret_val = py::cast(tensor);
    }
    return true;
  }
  return false;
}
  480. } // namespace mindspore