You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

convert_utils_py.cc 19 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "utils/convert_utils_py.h"
  17. #include <vector>
  18. #include <string>
  19. #include <memory>
  20. #include <algorithm>
  21. #include <list>
  22. #include <utility>
  23. #include <cfloat>
  24. #include "abstract/abstract_value.h"
  25. #include "pipeline/jit/parse/parse.h"
  26. #include "pipeline/jit/parse/parse_base.h"
  27. #include "ir/value.h"
  28. #include "ir/tensor.h"
  29. #include "ir/param_info.h"
  30. #include "pybind_api/ir/base_ref_py.h"
  31. #include "utils/ms_context.h"
  32. namespace mindspore {
  33. py::object BuiltinsToPyData(const Any &value);
  34. py::object BuiltinsToPyData(const BaseRef &value);
  35. py::object VectorToPyData(const Any &value);
  36. py::object VectorRefToPyData(const VectorRef &value);
  37. py::object TensorToPyData(const tensor::TensorPtr &tensor) {
  38. MS_EXCEPTION_IF_NULL(tensor);
  39. if (tensor->NeedWait()) {
  40. py::gil_scoped_release release;
  41. tensor->Wait();
  42. }
  43. py::tuple v(1);
  44. v[0] = tensor;
  45. return v[0];
  46. }
  47. py::object ScalarPtrToPyData(const ScalarPtr &value) {
  48. py::int_ int_v;
  49. py::float_ float_v;
  50. py::bool_ bool_v;
  51. TypeId scalar_type = value->type()->type_id();
  52. switch (scalar_type) {
  53. case kNumberTypeUInt8:
  54. MS_LOG(DEBUG) << "uint8";
  55. int_v = value->cast<UInt8ImmPtr>()->value();
  56. return std::move(int_v);
  57. case kNumberTypeUInt16:
  58. MS_LOG(DEBUG) << "uint16";
  59. int_v = value->cast<UInt16ImmPtr>()->value();
  60. return std::move(int_v);
  61. case kNumberTypeUInt32:
  62. MS_LOG(DEBUG) << "uint32";
  63. int_v = value->cast<UInt32ImmPtr>()->value();
  64. return std::move(int_v);
  65. case kNumberTypeUInt64:
  66. MS_LOG(DEBUG) << "uint64";
  67. int_v = value->cast<UInt64ImmPtr>()->value();
  68. return std::move(int_v);
  69. case kNumberTypeInt8:
  70. MS_LOG(DEBUG) << "int8";
  71. int_v = value->cast<Int8ImmPtr>()->value();
  72. return std::move(int_v);
  73. case kNumberTypeInt16:
  74. MS_LOG(DEBUG) << "int16";
  75. int_v = value->cast<Int16ImmPtr>()->value();
  76. return std::move(int_v);
  77. case kNumberTypeInt32:
  78. MS_LOG(DEBUG) << "int32";
  79. int_v = value->cast<Int32ImmPtr>()->value();
  80. return std::move(int_v);
  81. case kNumberTypeInt64:
  82. MS_LOG(DEBUG) << "int64";
  83. int_v = value->cast<Int64ImmPtr>()->value();
  84. return std::move(int_v);
  85. case kNumberTypeFloat32:
  86. MS_LOG(DEBUG) << "float";
  87. float_v = value->cast<FP32ImmPtr>()->value();
  88. return std::move(float_v);
  89. case kNumberTypeFloat64:
  90. MS_LOG(DEBUG) << "double";
  91. float_v = value->cast<FP64ImmPtr>()->value();
  92. return std::move(float_v);
  93. case kNumberTypeBool:
  94. MS_LOG(DEBUG) << "bool";
  95. bool_v = value->cast<BoolImmPtr>()->value();
  96. return std::move(bool_v);
  97. default:
  98. MS_EXCEPTION(TypeError) << "Unsupported scalar converted to py data: " << value->ToString();
  99. }
  100. }
// Convert a ValuePtr into the corresponding Python object.
// Dispatches on the concrete Value subclass. NOTE: the isa<> checks are
// order-sensitive — tensor::Tensor is tested before tensor::MetaTensor so the
// more specific type wins. Raises when value is null or of an unsupported type.
py::object ValuePtrToPyData(const ValuePtr &value) {
  if (value == nullptr) {
    MS_LOG(EXCEPTION) << "value is null";
  }
  py::object ret;
  if (value->isa<Scalar>()) {
    ret = ScalarPtrToPyData(value->cast<ScalarPtr>());
  } else if (value->isa<StringImm>()) {
    MS_LOG(DEBUG) << "String";
    py::str v = value->cast<StringImmPtr>()->value();
    ret = v;
  } else if (value->isa<tensor::Tensor>()) {
    MS_LOG(DEBUG) << "tensor";
    auto tensor_ptr = value->cast<tensor::TensorPtr>();
    ret = TensorToPyData(tensor_ptr);
  } else if (value->isa<tensor::MetaTensor>()) {
    MS_LOG(DEBUG) << "MetaTensor";
    // One-element tuple is used so the registered pybind11 caster converts the pointer.
    py::tuple v(1);
    v[0] = value->cast<tensor::MetaTensorPtr>();
    ret = v[0];
  } else if (value->isa<RefKey>()) {
    MS_LOG(DEBUG) << "RefKey";
    py::tuple v(1);
    v[0] = value->cast<RefKeyPtr>();
    ret = v[0];
  } else if (value->isa<ValueSequeue>()) {
    MS_LOG(DEBUG) << "tuple or list";
    // Convert each element recursively into a Python tuple first.
    auto value_sequeue = value->cast<ValueSequeuePtr>()->value();
    py::tuple ret_sequeue(value_sequeue.size());
    for (size_t i = 0; i < value_sequeue.size(); i++) {
      ret_sequeue[i] = ValuePtrToPyData(value_sequeue[i]);
    }
    if (value->isa<ValueTuple>()) {
      ret = ret_sequeue;
    } else {
      // ValueList: convert the intermediate tuple into a Python list.
      ret = ret_sequeue.cast<py::list>();
    }
  } else if (value->isa<ValueDictionary>()) {
    MS_LOG(DEBUG) << "dict";
    auto value_list = value->cast<ValueDictionaryPtr>()->value();
    py::dict ret_dict;
    for (const auto &v : value_list) {
      ret_dict[py::str(v.first)] = ValuePtrToPyData(v.second);
    }
    ret = ret_dict;
  } else if (value->isa<Ellipsis>()) {
    ret = py::ellipsis();
  } else if (value->isa<ValueSlice>()) {
    // Rebuild a Python slice object via the Python-side parse module helper.
    auto slice = value->cast<ValueSlicePtr>();
    auto start = ValuePtrToPyData(slice->start());
    auto end = ValuePtrToPyData(slice->stop());
    auto step = ValuePtrToPyData(slice->step());
    ret = parse::python_adapter::CallPyFn(parse::PYTHON_MOD_PARSE_MODULE, parse::PYTHON_PARSE_CLASS_SLICE, start, end,
                                          step);
  } else if (value->isa<Type>()) {
    py::tuple v(1);
    v[0] = value->cast<TypePtr>();
    ret = v[0];
  } else if (value->isa<AnyValue>() || value->isa<None>() || value->isa<Monad>() || value->isa<FuncGraph>()) {
    // FuncGraph is not used in the backend, return None
    ret = py::none();
  } else if (value->isa<KeywordArg>()) {
    // A keyword argument becomes a Python kwargs dict with a single entry.
    auto abs_keyword_arg = value->ToAbstract()->cast<abstract::AbstractKeywordArgPtr>();
    auto key = abs_keyword_arg->get_key();
    auto val = abs_keyword_arg->get_arg()->BuildValue();
    auto py_value = ValuePtrToPyData(val);
    auto kwargs = py::kwargs();
    kwargs[key.c_str()] = py_value;
    ret = kwargs;
  } else {
    MS_LOG(EXCEPTION) << "Unsupported convert value: " << value->ToString() << " to a PyData.";
  }
  return ret;
}
  175. py::object AnyToPyData(const Any &value) {
  176. py::object ret;
  177. MS_LOG(DEBUG) << "AnyToPyData " << value.GetString();
  178. if (value.is<int>() || value.is<float>() || value.is<double>() || value.is<bool>()) {
  179. ret = BuiltinsToPyData(value);
  180. } else if (value.is<ValuePtr>()) {
  181. MS_LOG(DEBUG) << "ValuePtr";
  182. ValuePtr v = value.cast<ValuePtr>();
  183. ret = ValuePtrToPyData(v);
  184. } else if (value.is<tensor::TensorPtr>()) {
  185. MS_LOG(DEBUG) << "tensor";
  186. auto tensor_ptr = value.cast<tensor::TensorPtr>();
  187. ret = TensorToPyData(tensor_ptr);
  188. } else if (value.is<py::object>()) {
  189. MS_LOG(DEBUG) << "py obj";
  190. ret = value.cast<py::object>();
  191. } else if (value.is<std::vector<tensor::TensorPtr>>() || value.is<std::vector<Any>>()) {
  192. ret = VectorToPyData(value);
  193. } else if (value.is<std::list<Any>>()) {
  194. MS_LOG(DEBUG) << "list_any";
  195. auto value_list = value.cast<std::list<Any>>();
  196. py::list rets = py::list();
  197. for (auto &v : value_list) {
  198. rets.append(AnyToPyData(v));
  199. }
  200. ret = rets;
  201. } else if (value.is<std::vector<Any>>()) {
  202. auto value_list = value.cast<std::vector<Any>>();
  203. py::tuple rets(value_list.size());
  204. for (size_t i = 0; i < value_list.size(); i++) {
  205. rets[i] = AnyToPyData(value_list[i]);
  206. }
  207. ret = rets;
  208. } else if (value.is<TypePtr>()) {
  209. py::tuple v(1);
  210. v[0] = value.cast<TypePtr>();
  211. ret = v[0];
  212. } else {
  213. MS_LOG(EXCEPTION) << "value is not support type";
  214. }
  215. return ret;
  216. }
  217. py::object BaseRefToPyData(const BaseRef &value) {
  218. py::object ret;
  219. MS_LOG(DEBUG) << "BaseRefToPyData " << value.ToString();
  220. if (utils::isa<int>(value) || utils::isa<float>(value) || utils::isa<double>(value) || utils::isa<bool>(value)) {
  221. ret = BuiltinsToPyData(value);
  222. } else if (utils::isa<ValuePtr>(value)) {
  223. MS_LOG(DEBUG) << "ValuePtr";
  224. ValuePtr v = utils::cast<ValuePtr>(value);
  225. ret = ValuePtrToPyData(v);
  226. } else if (utils::isa<tensor::TensorPtr>(value)) {
  227. MS_LOG(DEBUG) << "tensor";
  228. auto tensor_ptr = utils::cast<tensor::TensorPtr>(value);
  229. ret = TensorToPyData(tensor_ptr);
  230. } else if (utils::isa<PyObjectRef>(value)) {
  231. MS_LOG(DEBUG) << "py obj";
  232. PyObjectRef py_ref = utils::cast<PyObjectRef>(value);
  233. ret = py_ref.object_;
  234. } else if (utils::isa<VectorRef>(value)) {
  235. auto vec_ref = utils::cast<VectorRef>(value);
  236. ret = VectorRefToPyData(vec_ref);
  237. } else if (utils::isa<TypePtr>(value)) {
  238. py::tuple v(1);
  239. v[0] = utils::cast<TypePtr>(value);
  240. ret = v[0];
  241. } else {
  242. MS_LOG(EXCEPTION) << "value is not support type";
  243. }
  244. return ret;
  245. }
  246. py::object BuiltinsToPyData(const Any &value) {
  247. if (value.is<int>()) {
  248. MS_LOG(DEBUG) << "int";
  249. py::int_ ret = value.cast<int>();
  250. return std::move(ret);
  251. } else if (value.is<float>()) {
  252. MS_LOG(DEBUG) << "float";
  253. py::float_ ret = value.cast<float>();
  254. return std::move(ret);
  255. } else if (value.is<double>()) {
  256. MS_LOG(DEBUG) << "double";
  257. py::float_ ret = value.cast<double>();
  258. return std::move(ret);
  259. } else {
  260. MS_LOG(DEBUG) << "bool";
  261. py::bool_ ret = value.cast<bool>();
  262. return std::move(ret);
  263. }
  264. }
  265. py::object BuiltinsToPyData(const BaseRef &value) {
  266. if (utils::isa<int>(value)) {
  267. MS_LOG(DEBUG) << "int";
  268. py::int_ ret = utils::cast<int>(value);
  269. return std::move(ret);
  270. } else if (utils::isa<float>(value)) {
  271. MS_LOG(DEBUG) << "float";
  272. py::float_ ret = utils::cast<float>(value);
  273. return std::move(ret);
  274. } else if (utils::isa<double>(value)) {
  275. MS_LOG(DEBUG) << "double";
  276. py::float_ ret = utils::cast<double>(value);
  277. return std::move(ret);
  278. } else {
  279. MS_LOG(DEBUG) << "bool";
  280. py::bool_ ret = utils::cast<bool>(value);
  281. return std::move(ret);
  282. }
  283. }
  284. py::object VectorToPyData(const Any &value) {
  285. py::object ret;
  286. if (value.is<std::vector<tensor::TensorPtr>>()) {
  287. MS_LOG(DEBUG) << "vector_tensor";
  288. std::vector<tensor::TensorPtr> outputs;
  289. outputs = value.cast<std::vector<tensor::TensorPtr>>();
  290. py::tuple tensor_tuple(outputs.size());
  291. for (std::size_t i = 0; i < outputs.size(); ++i) {
  292. tensor_tuple[i] = *outputs[i];
  293. }
  294. ret = tensor_tuple;
  295. } else {
  296. MS_LOG(DEBUG) << "vector_any";
  297. auto value_list = value.cast<std::vector<Any>>();
  298. py::tuple any_tuple = py::tuple(value_list.size());
  299. size_t i = 0;
  300. for (auto &v : value_list) {
  301. any_tuple[i] = AnyToPyData(v);
  302. i++;
  303. }
  304. ret = any_tuple;
  305. }
  306. return ret;
  307. }
  308. py::object VectorRefToPyData(const VectorRef &value_list) {
  309. py::object ret;
  310. MS_LOG(DEBUG) << "vector_ref";
  311. size_t value_size = value_list.size();
  312. auto ref_tuple = py::tuple(value_size);
  313. for (size_t i = 0; i < value_size; i++) {
  314. ref_tuple[i] = BaseRefToPyData(value_list[i]);
  315. }
  316. ret = ref_tuple;
  317. return ret;
  318. }
  319. void SetValueRange(const AbstractBasePtr &tensor, const py::object &output) {
  320. if (output.is_none()) {
  321. return;
  322. }
  323. py::object obj_min =
  324. output.contains(py::str(ATTR_MIN_VALUE)) ? (py::object)output[ATTR_MIN_VALUE] : (py::object)py::none();
  325. py::object obj_max =
  326. output.contains(py::str(ATTR_MAX_VALUE)) ? (py::object)output[ATTR_MAX_VALUE] : (py::object)py::none();
  327. if (!obj_min.is_none() && !obj_max.is_none()) {
  328. bool converted = true;
  329. ValuePtr min_value = nullptr;
  330. ValuePtr max_value = nullptr;
  331. converted = parse::ConvertData(obj_min, &min_value);
  332. if (!converted) {
  333. MS_LOG(EXCEPTION) << "Convert shape min value data failed";
  334. }
  335. converted = parse::ConvertData(obj_max, &max_value);
  336. if (!converted) {
  337. MS_LOG(EXCEPTION) << "Convert shape max value data failed";
  338. }
  339. auto abs_tensor = dyn_cast<abstract::AbstractTensor>(tensor);
  340. abs_tensor->set_value_range(min_value, max_value);
  341. }
  342. }
  343. AbstractBasePtr PyList2DynamicShapeTensor(const py::object &shape_obj, const py::object &type_obj,
  344. const py::object &output) {
  345. AbstractBasePtr tensor = nullptr;
  346. auto ret_vec = shape_obj.cast<ShapeVector>();
  347. auto ret_dtype = type_obj.cast<TypePtr>();
  348. ShapeVector min_shape_vec;
  349. ShapeVector max_shape_vec;
  350. if (!output.is_none()) {
  351. py::object min_shape =
  352. output.contains(py::str(ATTR_MIN_SHAPE)) ? (py::object)output[ATTR_MIN_SHAPE] : (py::object)py::none();
  353. py::object max_shape =
  354. output.contains(py::str(ATTR_MAX_SHAPE)) ? (py::object)output[ATTR_MAX_SHAPE] : (py::object)py::none();
  355. if (!min_shape.is_none()) {
  356. min_shape_vec = min_shape.cast<ShapeVector>();
  357. }
  358. if (!max_shape.is_none()) {
  359. max_shape_vec = max_shape.cast<ShapeVector>();
  360. }
  361. }
  362. auto ret_shape = std::make_shared<abstract::Shape>(ret_vec, min_shape_vec, max_shape_vec);
  363. if (ret_dtype->isa<TensorType>()) {
  364. auto tensor_type = type_obj.cast<TensorTypePtr>();
  365. MS_EXCEPTION_IF_NULL(tensor_type);
  366. auto element = std::make_shared<abstract::AbstractScalar>(kAnyValue, tensor_type->element());
  367. tensor = std::make_shared<abstract::AbstractTensor>(element, ret_shape);
  368. } else {
  369. auto element = std::make_shared<abstract::AbstractScalar>(kAnyValue, ret_dtype);
  370. tensor = std::make_shared<abstract::AbstractTensor>(element, ret_shape);
  371. }
  372. SetValueRange(tensor, output);
  373. return tensor;
  374. }
  375. static bool IsMonadType(const py::object &type_obj) {
  376. if (py::isinstance<Type>(type_obj)) {
  377. auto type = type_obj.cast<Type *>();
  378. return type->isa<MonadType>();
  379. }
  380. return false;
  381. }
  382. static AbstractBasePtr ToMonadAbstract(const py::object &type_obj) {
  383. if (py::isinstance<Type>(type_obj)) {
  384. auto type = type_obj.cast<Type *>();
  385. if (type->isa<UMonadType>()) {
  386. return kUMonad->ToAbstract();
  387. }
  388. if (type->isa<IOMonadType>()) {
  389. return kIOMonad->ToAbstract();
  390. }
  391. }
  392. MS_LOG(EXCEPTION) << "Not a monad type object: " << py::str(type_obj);
  393. }
  394. AbstractBasePtr PyListDtype2AbstractTensor(const py::object &shape_obj, const py::object &type_obj,
  395. const py::object &output) {
  396. if ((py::isinstance<py::list>(shape_obj) || py::isinstance<py::tuple>(shape_obj)) && py::isinstance<Type>(type_obj)) {
  397. auto ret_vec = shape_obj.cast<ShapeVector>();
  398. auto ret_dtype = type_obj.cast<TypePtr>();
  399. MS_EXCEPTION_IF_NULL(ret_dtype);
  400. // if the size of shape list is empty, return an scalar abstract
  401. if (ret_vec.empty() && (!ret_dtype->isa<TensorType>())) {
  402. abstract::AbstractScalarPtr abs_scalar = std::make_shared<abstract::AbstractScalar>(kAnyValue, ret_dtype);
  403. return abs_scalar;
  404. }
  405. return PyList2DynamicShapeTensor(shape_obj, type_obj, output);
  406. } else if (py::isinstance<py::tuple>(shape_obj) && py::isinstance<py::tuple>(type_obj)) {
  407. auto shape_tuple = shape_obj.cast<py::tuple>();
  408. auto typeid_tuple = type_obj.cast<py::tuple>();
  409. AbstractBasePtrList ptr_list;
  410. for (size_t it = 0; it < shape_tuple.size(); ++it) {
  411. auto tensor_it = PyListDtype2AbstractTensor(shape_tuple[it], typeid_tuple[it]);
  412. ptr_list.push_back(tensor_it);
  413. }
  414. auto tuple = std::make_shared<abstract::AbstractTuple>(ptr_list);
  415. return tuple;
  416. } else if (py::isinstance<py::list>(shape_obj) && py::isinstance<py::list>(type_obj)) {
  417. auto shape_list = shape_obj.cast<py::list>();
  418. auto typeid_list = type_obj.cast<py::list>();
  419. AbstractBasePtrList ptr_list;
  420. for (size_t it = 0; it < shape_list.size(); ++it) {
  421. auto tensor_it = PyListDtype2AbstractTensor(shape_list[it], typeid_list[it]);
  422. ptr_list.push_back(tensor_it);
  423. }
  424. auto list = std::make_shared<abstract::AbstractList>(ptr_list);
  425. return list;
  426. } else if (shape_obj.is_none() && type_obj.is_none()) {
  427. // AbstractNone indicates there is no output for this CNode node.
  428. auto abstract_none = std::make_shared<abstract::AbstractNone>();
  429. return abstract_none;
  430. } else if (IsMonadType(type_obj)) {
  431. // Return monad abstract if it is monad type.
  432. return ToMonadAbstract(type_obj);
  433. } else {
  434. // When sparse enabled, the undetermined might be raised and eliminated in opt passes
  435. auto context = MsContext::GetInstance();
  436. MS_EXCEPTION_IF_NULL(context);
  437. bool enable_sparse = context->get_param<bool>(MS_CTX_ENABLE_SPARSE);
  438. if (enable_sparse) {
  439. return std::make_shared<abstract::AbstractUndetermined>();
  440. }
  441. MS_LOG(EXCEPTION) << "Python evaluator return invalid shape or type. " << (std::string)py::str(type_obj);
  442. }
  443. }
// If the graph output is a constant ValueNode or a Parameter, resolve the
// Python result directly so the graph does not need to be executed.
// output  : the graph's output node.
// args    : actual input arguments passed from Python.
// ret_val : receives the resulting Python object when true is returned.
// Returns true when the result was resolved here; false if execution is needed.
bool IsGraphOutputValueNodeOrParameter(const AnfNodePtr &output, const py::tuple &args,
                                       const std::shared_ptr<py::object> &ret_val) {
  if (output->isa<ValueNode>()) {
    MS_LOG(INFO) << "Graph's output is a constant. No need to execute.";
    ValuePtr value = GetValueNode(output);
    *ret_val = ValuePtrToPyData(value);
    return true;
  }
  // Adapter will transform values in __init__() and construct() to parameters, this could cause
  // inputs (a.k.a args in current function) size less than parameters'.
  if (output->isa<Parameter>()) {
    MS_LOG(INFO) << "Graph's output is a parameter. If all params are inputs, no need to execute.";
    // Find the right parameter as ret_val.
    auto func_graph = output->func_graph();
    MS_EXCEPTION_IF_NULL(func_graph);
    auto params = func_graph->parameters();
    // Total parameter count must equal caller args plus hyper (weight) params.
    if ((args.size() + func_graph->hyper_param_count()) != params.size()) {
      MS_LOG(EXCEPTION) << "Input size " << args.size() << " add Parameter count " << func_graph->hyper_param_count()
                        << " not equal to graph input size " << params.size() << ", let graph to be executed.";
    }
    auto it = std::find(params.begin(), params.end(), output);
    if (it == params.end()) {
      MS_EXCEPTION(UnknownError) << "When graph output is Parameter, it should be found in graph parameters";
    }
    size_t index = it - params.cbegin();
    if (index >= args.size() + func_graph->hyper_param_count()) {
      MS_EXCEPTION(UnknownError) << "Index " << index << " equal or larger than args size " << args.size()
                                 << " add Parameter count " << func_graph->hyper_param_count() << ".";
    }
    if (index < args.size()) {
      // The parameter maps directly onto one of the caller-supplied arguments.
      *ret_val = args[index];
    } else {
      // The parameter is a hyper parameter; use its default (weight) value.
      auto param = dyn_cast<Parameter>(params[index]);
      MS_EXCEPTION_IF_NULL(param);
      if (!param->has_default()) {
        MS_LOG(EXCEPTION) << "Can not determine value of Parameter " << index << " (" << param->name() << ")";
      }
      auto tensor = param->default_param();
      *ret_val = py::cast(tensor);
    }
    return true;
  }
  return false;
}
  488. } // namespace mindspore