You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

convert_utils_py.cc 18 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "utils/convert_utils_py.h"
  17. #include <vector>
  18. #include <string>
  19. #include <memory>
  20. #include <algorithm>
  21. #include <list>
  22. #include <utility>
  23. #include <cfloat>
  24. #include "abstract/abstract_value.h"
  25. #include "abstract/utils.h"
  26. #include "pipeline/jit/parse/parse.h"
  27. #include "pipeline/jit/parse/parse_base.h"
  28. #include "ir/value.h"
  29. #include "ir/tensor.h"
  30. #include "ir/param_info.h"
  31. #include "pybind_api/ir/base_ref_py.h"
  32. #include "utils/ms_context.h"
  33. namespace mindspore {
// Forward declarations for the mutually recursive conversion helpers defined below.
py::object BuiltinsToPyData(const Any &value);
py::object BuiltinsToPyData(const BaseRef &value);
py::object VectorToPyData(const Any &value);
py::object VectorRefToPyData(const VectorRef &value);
  38. py::object TensorToPyData(const tensor::TensorPtr &tensor) {
  39. MS_EXCEPTION_IF_NULL(tensor);
  40. if (tensor->NeedWait()) {
  41. py::gil_scoped_release release;
  42. tensor->Wait();
  43. }
  44. py::tuple v(1);
  45. v[0] = tensor;
  46. return v[0];
  47. }
  48. py::object ScalarPtrToPyData(const ScalarPtr &value) {
  49. py::int_ int_v;
  50. py::float_ float_v;
  51. py::bool_ bool_v;
  52. TypeId scalar_type = value->type()->type_id();
  53. switch (scalar_type) {
  54. case kNumberTypeUInt8:
  55. MS_LOG(DEBUG) << "uint8";
  56. int_v = value->cast<UInt8ImmPtr>()->value();
  57. return std::move(int_v);
  58. case kNumberTypeUInt16:
  59. MS_LOG(DEBUG) << "uint16";
  60. int_v = value->cast<UInt16ImmPtr>()->value();
  61. return std::move(int_v);
  62. case kNumberTypeUInt32:
  63. MS_LOG(DEBUG) << "uint32";
  64. int_v = value->cast<UInt32ImmPtr>()->value();
  65. return std::move(int_v);
  66. case kNumberTypeUInt64:
  67. MS_LOG(DEBUG) << "uint64";
  68. int_v = value->cast<UInt64ImmPtr>()->value();
  69. return std::move(int_v);
  70. case kNumberTypeInt8:
  71. MS_LOG(DEBUG) << "int8";
  72. int_v = value->cast<Int8ImmPtr>()->value();
  73. return std::move(int_v);
  74. case kNumberTypeInt16:
  75. MS_LOG(DEBUG) << "int16";
  76. int_v = value->cast<Int16ImmPtr>()->value();
  77. return std::move(int_v);
  78. case kNumberTypeInt32:
  79. MS_LOG(DEBUG) << "int32";
  80. int_v = value->cast<Int32ImmPtr>()->value();
  81. return std::move(int_v);
  82. case kNumberTypeInt64:
  83. MS_LOG(DEBUG) << "int64";
  84. int_v = value->cast<Int64ImmPtr>()->value();
  85. return std::move(int_v);
  86. case kNumberTypeFloat32:
  87. MS_LOG(DEBUG) << "float";
  88. float_v = value->cast<FP32ImmPtr>()->value();
  89. return std::move(float_v);
  90. case kNumberTypeFloat64:
  91. MS_LOG(DEBUG) << "double";
  92. float_v = value->cast<FP64ImmPtr>()->value();
  93. return std::move(float_v);
  94. case kNumberTypeBool:
  95. MS_LOG(DEBUG) << "bool";
  96. bool_v = value->cast<BoolImmPtr>()->value();
  97. return std::move(bool_v);
  98. default:
  99. MS_EXCEPTION(TypeError) << "Unsupported scalar converted to py data: " << value->ToString();
  100. }
  101. }
// Convert a backend ValuePtr into the equivalent Python object.
// Dispatches on the concrete Value subclass: scalars, strings, tensors,
// meta tensors, ref keys, sequences, dictionaries, ellipsis, slices, types,
// keyword arguments, and the "no value" cases that map to None.
// Throws on a null input or an unsupported Value kind.
py::object ValuePtrToPyData(const ValuePtr &value) {
  if (value == nullptr) {
    MS_LOG(EXCEPTION) << "value is null";
  }
  py::object ret;
  if (value->isa<Scalar>()) {
    ret = ScalarPtrToPyData(value->cast<ScalarPtr>());
  } else if (value->isa<StringImm>()) {
    MS_LOG(DEBUG) << "String";
    py::str v = value->cast<StringImmPtr>()->value();
    ret = v;
  } else if (value->isa<tensor::Tensor>()) {
    MS_LOG(DEBUG) << "tensor";
    auto tensor_ptr = value->cast<tensor::TensorPtr>();
    ret = TensorToPyData(tensor_ptr);
  } else if (value->isa<tensor::MetaTensor>()) {
    MS_LOG(DEBUG) << "MetaTensor";
    // A one-element tuple is used so pybind11's registered caster converts
    // the pointer to the Python-side object.
    py::tuple v(1);
    v[0] = value->cast<tensor::MetaTensorPtr>();
    ret = v[0];
  } else if (value->isa<RefKey>()) {
    MS_LOG(DEBUG) << "RefKey";
    py::tuple v(1);
    v[0] = value->cast<RefKeyPtr>();
    ret = v[0];
  } else if (value->isa<ValueSequeue>()) {
    MS_LOG(DEBUG) << "tuple or list";
    // Convert each element recursively; emit a py::tuple for ValueTuple and
    // a py::list for every other sequence kind.
    auto value_sequeue = value->cast<ValueSequeuePtr>()->value();
    py::tuple ret_sequeue(value_sequeue.size());
    for (size_t i = 0; i < value_sequeue.size(); i++) {
      ret_sequeue[i] = ValuePtrToPyData(value_sequeue[i]);
    }
    if (value->isa<ValueTuple>()) {
      ret = ret_sequeue;
    } else {
      ret = ret_sequeue.cast<py::list>();
    }
  } else if (value->isa<ValueDictionary>()) {
    MS_LOG(DEBUG) << "dict";
    auto value_list = value->cast<ValueDictionaryPtr>()->value();
    py::dict ret_dict;
    for (const auto &v : value_list) {
      ret_dict[py::str(v.first)] = ValuePtrToPyData(v.second);
    }
    ret = ret_dict;
  } else if (value->isa<Ellipsis>()) {
    ret = py::ellipsis();
  } else if (value->isa<ValueSlice>()) {
    // Rebuild a Python slice object through the parse module helper, after
    // converting start/stop/step recursively.
    auto slice = value->cast<ValueSlicePtr>();
    auto start = ValuePtrToPyData(slice->start());
    auto end = ValuePtrToPyData(slice->stop());
    auto step = ValuePtrToPyData(slice->step());
    ret = parse::python_adapter::CallPyFn(parse::PYTHON_MOD_PARSE_MODULE, parse::PYTHON_PARSE_CLASS_SLICE, start, end,
                                          step);
  } else if (value->isa<Type>()) {
    py::tuple v(1);
    v[0] = value->cast<TypePtr>();
    ret = v[0];
  } else if (value->isa<AnyValue>() || value->isa<None>() || value->isa<Monad>() || value->isa<FuncGraph>()) {
    // FuncGraph is not used in the backend, return None
    ret = py::none();
  } else if (value->isa<KeywordArg>()) {
    // Surface a KeywordArg as a Python kwargs mapping {key: value}.
    auto abs_keyword_arg = value->ToAbstract()->cast<abstract::AbstractKeywordArgPtr>();
    auto key = abs_keyword_arg->get_key();
    auto val = abs_keyword_arg->get_arg()->BuildValue();
    auto py_value = ValuePtrToPyData(val);
    auto kwargs = py::kwargs();
    kwargs[key.c_str()] = py_value;
    ret = kwargs;
  } else {
    MS_LOG(EXCEPTION) << "Unsupported convert value: " << value->ToString() << " to a PyData.";
  }
  return ret;
}
  176. py::object AnyToPyData(const Any &value) {
  177. py::object ret;
  178. MS_LOG(DEBUG) << "AnyToPyData " << value.GetString();
  179. if (value.is<int>() || value.is<float>() || value.is<double>() || value.is<bool>()) {
  180. ret = BuiltinsToPyData(value);
  181. } else if (value.is<ValuePtr>()) {
  182. MS_LOG(DEBUG) << "ValuePtr";
  183. ValuePtr v = value.cast<ValuePtr>();
  184. ret = ValuePtrToPyData(v);
  185. } else if (value.is<tensor::TensorPtr>()) {
  186. MS_LOG(DEBUG) << "tensor";
  187. auto tensor_ptr = value.cast<tensor::TensorPtr>();
  188. ret = TensorToPyData(tensor_ptr);
  189. } else if (value.is<py::object>()) {
  190. MS_LOG(DEBUG) << "py obj";
  191. ret = value.cast<py::object>();
  192. } else if (value.is<std::vector<tensor::TensorPtr>>() || value.is<std::vector<Any>>()) {
  193. ret = VectorToPyData(value);
  194. } else if (value.is<std::list<Any>>()) {
  195. MS_LOG(DEBUG) << "list_any";
  196. auto value_list = value.cast<std::list<Any>>();
  197. py::list rets = py::list();
  198. for (auto &v : value_list) {
  199. rets.append(AnyToPyData(v));
  200. }
  201. ret = rets;
  202. } else if (value.is<std::vector<Any>>()) {
  203. auto value_list = value.cast<std::vector<Any>>();
  204. py::tuple rets(value_list.size());
  205. for (size_t i = 0; i < value_list.size(); i++) {
  206. rets[i] = AnyToPyData(value_list[i]);
  207. }
  208. ret = rets;
  209. } else if (value.is<TypePtr>()) {
  210. py::tuple v(1);
  211. v[0] = value.cast<TypePtr>();
  212. ret = v[0];
  213. } else {
  214. MS_LOG(EXCEPTION) << "value is not support type";
  215. }
  216. return ret;
  217. }
  218. py::object BaseRefToPyData(const BaseRef &value) {
  219. py::object ret;
  220. MS_LOG(DEBUG) << "BaseRefToPyData " << value.ToString();
  221. if (utils::isa<int>(value) || utils::isa<float>(value) || utils::isa<double>(value) || utils::isa<bool>(value)) {
  222. ret = BuiltinsToPyData(value);
  223. } else if (utils::isa<ValuePtr>(value)) {
  224. MS_LOG(DEBUG) << "ValuePtr";
  225. ValuePtr v = utils::cast<ValuePtr>(value);
  226. ret = ValuePtrToPyData(v);
  227. } else if (utils::isa<tensor::TensorPtr>(value)) {
  228. MS_LOG(DEBUG) << "tensor";
  229. auto tensor_ptr = utils::cast<tensor::TensorPtr>(value);
  230. ret = TensorToPyData(tensor_ptr);
  231. } else if (utils::isa<PyObjectRef>(value)) {
  232. MS_LOG(DEBUG) << "py obj";
  233. PyObjectRef py_ref = utils::cast<PyObjectRef>(value);
  234. ret = py_ref.object_;
  235. } else if (utils::isa<VectorRef>(value)) {
  236. auto vec_ref = utils::cast<VectorRef>(value);
  237. ret = VectorRefToPyData(vec_ref);
  238. } else if (utils::isa<TypePtr>(value)) {
  239. py::tuple v(1);
  240. v[0] = utils::cast<TypePtr>(value);
  241. ret = v[0];
  242. } else {
  243. MS_LOG(EXCEPTION) << "value is not support type";
  244. }
  245. return ret;
  246. }
  247. py::object BuiltinsToPyData(const Any &value) {
  248. if (value.is<int>()) {
  249. MS_LOG(DEBUG) << "int";
  250. py::int_ ret = value.cast<int>();
  251. return std::move(ret);
  252. } else if (value.is<float>()) {
  253. MS_LOG(DEBUG) << "float";
  254. py::float_ ret = value.cast<float>();
  255. return std::move(ret);
  256. } else if (value.is<double>()) {
  257. MS_LOG(DEBUG) << "double";
  258. py::float_ ret = value.cast<double>();
  259. return std::move(ret);
  260. } else {
  261. MS_LOG(DEBUG) << "bool";
  262. py::bool_ ret = value.cast<bool>();
  263. return std::move(ret);
  264. }
  265. }
  266. py::object BuiltinsToPyData(const BaseRef &value) {
  267. if (utils::isa<int>(value)) {
  268. MS_LOG(DEBUG) << "int";
  269. py::int_ ret = utils::cast<int>(value);
  270. return std::move(ret);
  271. } else if (utils::isa<float>(value)) {
  272. MS_LOG(DEBUG) << "float";
  273. py::float_ ret = utils::cast<float>(value);
  274. return std::move(ret);
  275. } else if (utils::isa<double>(value)) {
  276. MS_LOG(DEBUG) << "double";
  277. py::float_ ret = utils::cast<double>(value);
  278. return std::move(ret);
  279. } else {
  280. MS_LOG(DEBUG) << "bool";
  281. py::bool_ ret = utils::cast<bool>(value);
  282. return std::move(ret);
  283. }
  284. }
  285. py::object VectorToPyData(const Any &value) {
  286. py::object ret;
  287. if (value.is<std::vector<tensor::TensorPtr>>()) {
  288. MS_LOG(DEBUG) << "vector_tensor";
  289. std::vector<tensor::TensorPtr> outputs;
  290. outputs = value.cast<std::vector<tensor::TensorPtr>>();
  291. py::tuple tensor_tuple(outputs.size());
  292. for (std::size_t i = 0; i < outputs.size(); ++i) {
  293. tensor_tuple[i] = *outputs[i];
  294. }
  295. ret = tensor_tuple;
  296. } else {
  297. MS_LOG(DEBUG) << "vector_any";
  298. auto value_list = value.cast<std::vector<Any>>();
  299. py::tuple any_tuple = py::tuple(value_list.size());
  300. size_t i = 0;
  301. for (auto &v : value_list) {
  302. any_tuple[i] = AnyToPyData(v);
  303. i++;
  304. }
  305. ret = any_tuple;
  306. }
  307. return ret;
  308. }
  309. py::object VectorRefToPyData(const VectorRef &value_list) {
  310. py::object ret;
  311. MS_LOG(DEBUG) << "vector_ref";
  312. size_t value_size = value_list.size();
  313. auto ref_tuple = py::tuple(value_size);
  314. for (size_t i = 0; i < value_size; i++) {
  315. ref_tuple[i] = BaseRefToPyData(value_list[i]);
  316. }
  317. ret = ref_tuple;
  318. return ret;
  319. }
  320. void SetValueRange(const AbstractBasePtr &tensor, const py::object &output) {
  321. if (output.is_none()) {
  322. return;
  323. }
  324. py::object obj_min =
  325. output.contains(py::str(ATTR_MIN_VALUE)) ? (py::object)output[ATTR_MIN_VALUE] : (py::object)py::none();
  326. py::object obj_max =
  327. output.contains(py::str(ATTR_MAX_VALUE)) ? (py::object)output[ATTR_MAX_VALUE] : (py::object)py::none();
  328. if (!obj_min.is_none() && !obj_max.is_none()) {
  329. bool converted = true;
  330. ValuePtr min_value = nullptr;
  331. ValuePtr max_value = nullptr;
  332. converted = parse::ConvertData(obj_min, &min_value);
  333. if (!converted) {
  334. MS_LOG(EXCEPTION) << "Convert shape min value data failed";
  335. }
  336. converted = parse::ConvertData(obj_max, &max_value);
  337. if (!converted) {
  338. MS_LOG(EXCEPTION) << "Convert shape max value data failed";
  339. }
  340. auto abs_tensor = dyn_cast<abstract::AbstractTensor>(tensor);
  341. abs_tensor->set_value_range(min_value, max_value);
  342. }
  343. }
// Build an AbstractTensor from a Python shape sequence and a TypePtr,
// optionally picking up dynamic-shape bounds (ATTR_MIN_SHAPE/ATTR_MAX_SHAPE)
// and a value range from the `output` object.
AbstractBasePtr MakePyInferRes2AbstractTensor(const py::object &shape_obj, const py::object &type_obj,
                                              const py::object &output) {
  auto ret_vec = shape_obj.cast<ShapeVector>();
  auto ret_dtype = type_obj.cast<TypePtr>();
  ShapeVector min_shape_vec;
  ShapeVector max_shape_vec;
  if (!output.is_none()) {
    // Min/max shapes are optional; missing keys leave the vectors empty.
    py::object min_shape =
      output.contains(py::str(ATTR_MIN_SHAPE)) ? (py::object)output[ATTR_MIN_SHAPE] : (py::object)py::none();
    py::object max_shape =
      output.contains(py::str(ATTR_MAX_SHAPE)) ? (py::object)output[ATTR_MAX_SHAPE] : (py::object)py::none();
    if (!min_shape.is_none()) {
      min_shape_vec = min_shape.cast<ShapeVector>();
    }
    if (!max_shape.is_none()) {
      max_shape_vec = max_shape.cast<ShapeVector>();
    }
  }
  auto ret_shape = std::make_shared<abstract::Shape>(ret_vec, min_shape_vec, max_shape_vec);
  AbstractBasePtr tensor = MakeAbstractTensor(ret_shape, ret_dtype);
  // Attach min/max value attributes from `output` when present.
  SetValueRange(tensor, output);
  return tensor;
}
  367. static bool IsMonadType(const py::object &type_obj) {
  368. if (py::isinstance<Type>(type_obj)) {
  369. auto type = type_obj.cast<Type *>();
  370. return type->isa<MonadType>();
  371. }
  372. return false;
  373. }
  374. static AbstractBasePtr ToMonadAbstract(const py::object &type_obj) {
  375. if (py::isinstance<Type>(type_obj)) {
  376. auto type = type_obj.cast<Type *>();
  377. if (!type->isa<MonadType>()) {
  378. MS_LOG(EXCEPTION) << "Not a monad type object: " << py::str(type_obj);
  379. }
  380. return abstract::MakeMonadAbstract(type->cast<MonadTypePtr>());
  381. }
  382. MS_LOG(EXCEPTION) << "Not a type object: " << py::str(type_obj);
  383. }
  384. AbstractBasePtr MakePyInferRes2Abstract(const py::object &shape_obj, const py::object &type_obj,
  385. const py::object &output) {
  386. if ((py::isinstance<py::list>(shape_obj) || py::isinstance<py::tuple>(shape_obj)) && py::isinstance<Type>(type_obj)) {
  387. auto ret_vec = shape_obj.cast<ShapeVector>();
  388. auto ret_dtype = type_obj.cast<TypePtr>();
  389. MS_EXCEPTION_IF_NULL(ret_dtype);
  390. // if the size of shape list is empty, return an scalar abstract
  391. if (ret_vec.empty() && (!ret_dtype->isa<TensorType>())) {
  392. abstract::AbstractScalarPtr abs_scalar = std::make_shared<abstract::AbstractScalar>(kAnyValue, ret_dtype);
  393. return abs_scalar;
  394. }
  395. return MakePyInferRes2AbstractTensor(shape_obj, type_obj, output);
  396. } else if (py::isinstance<py::tuple>(shape_obj) && py::isinstance<py::tuple>(type_obj)) {
  397. auto shape_tuple = shape_obj.cast<py::tuple>();
  398. auto typeid_tuple = type_obj.cast<py::tuple>();
  399. AbstractBasePtrList ptr_list;
  400. for (size_t it = 0; it < shape_tuple.size(); ++it) {
  401. auto tensor_it = MakePyInferRes2Abstract(shape_tuple[it], typeid_tuple[it]);
  402. ptr_list.push_back(tensor_it);
  403. }
  404. auto tuple = std::make_shared<abstract::AbstractTuple>(ptr_list);
  405. return tuple;
  406. } else if (py::isinstance<py::list>(shape_obj) && py::isinstance<py::list>(type_obj)) {
  407. auto shape_list = shape_obj.cast<py::list>();
  408. auto typeid_list = type_obj.cast<py::list>();
  409. AbstractBasePtrList ptr_list;
  410. for (size_t it = 0; it < shape_list.size(); ++it) {
  411. auto tensor_it = MakePyInferRes2Abstract(shape_list[it], typeid_list[it]);
  412. ptr_list.push_back(tensor_it);
  413. }
  414. auto list = std::make_shared<abstract::AbstractList>(ptr_list);
  415. return list;
  416. } else if (shape_obj.is_none() && type_obj.is_none()) {
  417. // AbstractNone indicates there is no output for this CNode node.
  418. auto abstract_none = std::make_shared<abstract::AbstractNone>();
  419. return abstract_none;
  420. } else if (IsMonadType(type_obj)) {
  421. // Return monad abstract if it is monad type.
  422. return ToMonadAbstract(type_obj);
  423. } else {
  424. // When sparse enabled, the undetermined might be raised and eliminated in opt passes
  425. auto context = MsContext::GetInstance();
  426. MS_EXCEPTION_IF_NULL(context);
  427. bool enable_sparse = context->get_param<bool>(MS_CTX_ENABLE_SPARSE);
  428. if (enable_sparse) {
  429. return std::make_shared<abstract::AbstractUndetermined>();
  430. }
  431. MS_LOG(EXCEPTION) << "Python evaluator return invalid shape or type. " << (std::string)py::str(type_obj);
  432. }
  433. }
// If the graph's output node is a constant (ValueNode) or a Parameter, fill
// *ret_val with the corresponding Python value and return true so the caller
// can skip executing the graph. Returns false when the graph must actually run.
// `args` are the user-supplied positional inputs of the current call.
bool IsGraphOutputValueNodeOrParameter(const AnfNodePtr &output, const py::tuple &args,
                                       const std::shared_ptr<py::object> &ret_val) {
  if (output->isa<ValueNode>()) {
    MS_LOG(INFO) << "Graph's output is a constant. No need to execute.";
    ValuePtr value = GetValueNode(output);
    *ret_val = ValuePtrToPyData(value);
    return true;
  }
  // Adapter will transform values in __init__() and construct() to parameters, this could cause
  // inputs (a.k.a args in current function) size less than parameters'.
  if (output->isa<Parameter>()) {
    MS_LOG(INFO) << "Graph's output is a parameter. If all params are inputs, no need to execute.";
    // Find the right parameter as ret_val.
    auto func_graph = output->func_graph();
    MS_EXCEPTION_IF_NULL(func_graph);
    auto params = func_graph->parameters();
    // Layout assumed here: the parameter list is the user args followed by
    // hyper params, so the two counts must sum to the parameter count.
    if ((args.size() + func_graph->hyper_param_count()) != params.size()) {
      MS_LOG(EXCEPTION) << "Input size " << args.size() << " add Parameter count " << func_graph->hyper_param_count()
                        << " not equal to graph input size " << params.size() << ", let graph to be executed.";
    }
    auto it = std::find(params.begin(), params.end(), output);
    if (it == params.end()) {
      MS_EXCEPTION(UnknownError) << "When graph output is Parameter, it should be found in graph parameters";
    }
    size_t index = it - params.cbegin();
    // Defensive re-check; with the size equality above this should never fire.
    if (index >= args.size() + func_graph->hyper_param_count()) {
      MS_EXCEPTION(UnknownError) << "Index " << index << " equal or larger than args size " << args.size()
                                 << " add Parameter count " << func_graph->hyper_param_count() << ".";
    }
    if (index < args.size()) {
      // The output parameter maps directly to one of the caller's arguments.
      *ret_val = args[index];
    } else {
      // The output parameter is a hyper param: return its default value,
      // which must exist.
      auto param = dyn_cast<Parameter>(params[index]);
      MS_EXCEPTION_IF_NULL(param);
      if (!param->has_default()) {
        MS_LOG(EXCEPTION) << "Can not determine value of Parameter " << index << " (" << param->name() << ")";
      }
      auto tensor = param->default_param();
      *ret_val = py::cast(tensor);
    }
    return true;
  }
  return false;
}
  478. } // namespace mindspore