
base.py
# This is the Python adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
#
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Basic composite operations."""
from functools import partial
from types import FunctionType

from mindspore import context
from ..._c_expression import EnvInstance_, GradOperation_, HyperMap_, Map_, MultitypeFuncGraph_, Tail_, \
    TupleAdd_, TupleSlice_, UnpackCall_, ZipOperation_, ListAppend_, TupleGetItemTensor_
from ...common import dtype as mstype
from ...common.api import ms_function, _pynative_exec, _wrap_func
from .. import functional as F
from ...common.tensor import Tensor
from .. import signature as sig

__all__ = [EnvInstance_, TupleAdd_, TupleSlice_, UnpackCall_, TupleGetItemTensor_]

def add_flags(fn=None, **flags):
    """
    A decorator that adds a flag to the function.

    Note:
        Only supports bool values.

    Args:
        fn (Function): Function or cell to add the flag to. Default: None.
        flags (dict): Flags specified as keyword arguments. Default: None.

    Returns:
        Function, the function with the added flags.

    Examples:
        >>> add_flags(net, predit=True)
    """
    def deco(fn):
        # Set the attribute on the function so it can be accessed from the C++ side.
        if not hasattr(fn, "_mindspore_flags"):
            fn._mindspore_flags = {}
        fn._mindspore_flags.update({**flags})
        return fn
    ret = deco
    if fn is not None:
        ret = deco(fn)
    return ret
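
# A minimal usage sketch for `add_flags` (illustrative only; `net` is any
# function or cell, and the flag names are arbitrary booleans):
#
#     net = add_flags(net, predit=True)        # called directly on a cell
#
#     @add_flags(has_effect=True)              # or used as a decorator factory
#     def my_fn(x):
#         return x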


def core(fn=None, **flags):
    """
    A decorator that adds a flag to the function.

    By default the 'core' flag is set to True, so this decorator can be used to
    mark a function as a core graph.

    Args:
        fn (Function): Function to add the flag to. Default: None.
        flags (dict): Flags specified as keyword arguments; 'core' indicates that this is a core
            function, and other flags may be set as well. Default: None.
    """
    # Set the attribute on the function so it can be accessed from the C++ side.
    def deco(fn):
        fn._mindspore_flags = {
            'core': True,
            **flags,
        }
        return fn
    if fn is not None:
        ret = deco(fn)
    else:
        ret = deco
    return ret
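
# A minimal usage sketch for `core` (the extra flag name is hypothetical):
#
#     @core(composite=True)
#     def graph_fn(x, y):
#         return x + y
#
#     # graph_fn._mindspore_flags == {'core': True, 'composite': True}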


class GradOperation(GradOperation_):
    """
    A higher-order function which is used to generate the gradient function for the input function.

    The gradient function generated by the `GradOperation` higher-order function can be customized
    by construction arguments.

    Given an input function `net = Net()` that takes `x` and `y` as inputs and has a parameter `z`,
    see `Net` in Examples.

    To generate a gradient function that returns gradients with respect to the first input
    (see `GradNetWrtX` in Examples):

    1. Construct a `GradOperation` higher-order function with default arguments:
       `grad_op = GradOperation()`.
    2. Call it with the input function as argument to get the gradient function:
       `gradient_function = grad_op(net)`.
    3. Call the gradient function with the input function's inputs to get the gradients with
       respect to the first input: `gradient_function(x, y)`.

    To generate a gradient function that returns gradients with respect to all inputs
    (see `GradNetWrtXY` in Examples):

    1. Construct a `GradOperation` higher-order function with `get_all=True`, which indicates
       getting gradients with respect to all inputs; they are `x` and `y` in the example
       function `Net()`: `grad_op = GradOperation(get_all=True)`.
    2. Call it with the input function as argument to get the gradient function:
       `gradient_function = grad_op(net)`.
    3. Call the gradient function with the input function's inputs to get the gradients with
       respect to all inputs: `gradient_function(x, y)`.

    To generate a gradient function that returns gradients with respect to given parameters
    (see `GradNetWithWrtParams` in Examples):

    1. Construct a `GradOperation` higher-order function with `get_by_list=True`:
       `grad_op = GradOperation(get_by_list=True)`.
    2. Construct a `ParameterTuple` that will be passed to the input function when constructing
       the `GradOperation` higher-order function; it will be used as a parameter filter that
       determines which gradients to return: `params = ParameterTuple(net.trainable_params())`.
    3. Call it with the input function and `params` as arguments to get the gradient function:
       `gradient_function = grad_op(net, params)`.
    4. Call the gradient function with the input function's inputs to get the gradients with
       respect to the given parameters: `gradient_function(x, y)`.

    To generate a gradient function that returns gradients with respect to all inputs and given
    parameters, in the format of ((dx, dy), (dz)) (see `GradNetWrtInputsAndParams` in Examples):

    1. Construct a `GradOperation` higher-order function with `get_all=True` and `get_by_list=True`:
       `grad_op = GradOperation(get_all=True, get_by_list=True)`.
    2. Construct a `ParameterTuple` that will be passed along with the input function when
       constructing the `GradOperation` higher-order function:
       `params = ParameterTuple(net.trainable_params())`.
    3. Call it with the input function and `params` as arguments to get the gradient function:
       `gradient_function = grad_op(net, params)`.
    4. Call the gradient function with the input function's inputs to get the gradients with
       respect to all inputs and the given parameters: `gradient_function(x, y)`.

    We can configure the sensitivity (gradient with respect to output) by setting `sens_param`
    to True and passing an extra sensitivity input to the gradient function. The sensitivity
    input should have the same shape and type as the input function's output
    (see `GradNetWrtXYWithSensParam` in Examples):

    1. Construct a `GradOperation` higher-order function with `get_all=True` and `sens_param=True`:
       `grad_op = GradOperation(get_all=True, sens_param=True)`.
    2. Define `grad_wrt_output` as the `sens_param`, which works as the gradient with respect to
       output: `grad_wrt_output = Tensor(np.ones([2, 2]).astype(np.float32))`.
    3. Call it with the input function as argument to get the gradient function:
       `gradient_function = grad_op(net)`.
    4. Call the gradient function with the input function's inputs and `sens_param` to get the
       gradients with respect to all inputs: `gradient_function(x, y, grad_wrt_output)`.

    Args:
        get_all (bool): If True, get all the gradients with respect to inputs. Default: False.
        get_by_list (bool): If True, get all the gradients with respect to Parameter variables.
            If `get_all` and `get_by_list` are both False, get the gradient with respect to the
            first input. If `get_all` and `get_by_list` are both True, get the gradients with
            respect to inputs and Parameter variables at the same time, in the form of
            ((gradients with respect to inputs), (gradients with respect to parameters)).
            Default: False.
        sens_param (bool): Whether to append sensitivity (gradient with respect to output) as input.
            If `sens_param` is False, a 'ones_like(outputs)' sensitivity will be attached
            automatically. Default: False.

    Returns:
        The higher-order function, which takes a function as argument and returns a gradient
        function for it.
    Examples:
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.matmul = P.MatMul()
        >>>         self.z = Parameter(Tensor(np.array([1.0], np.float32)), name='z')
        >>>     def construct(self, x, y):
        >>>         x = x * self.z
        >>>         out = self.matmul(x, y)
        >>>         return out
        >>>
        >>> class GradNetWrtX(nn.Cell):
        >>>     def __init__(self, net):
        >>>         super(GradNetWrtX, self).__init__()
        >>>         self.net = net
        >>>         self.grad_op = GradOperation()
        >>>     def construct(self, x, y):
        >>>         gradient_function = self.grad_op(self.net)
        >>>         return gradient_function(x, y)
        >>>
        >>> x = Tensor([[0.5, 0.6, 0.4], [1.2, 1.3, 1.1]], dtype=mstype.float32)
        >>> y = Tensor([[0.01, 0.3, 1.1], [0.1, 0.2, 1.3], [2.1, 1.2, 3.3]], dtype=mstype.float32)
        >>> GradNetWrtX(Net())(x, y)
        Tensor(shape=[2, 3], dtype=Float32,
        [[1.4100001 1.5999999 6.6 ]
         [1.4100001 1.5999999 6.6 ]])
        >>>
        >>> class GradNetWrtXY(nn.Cell):
        >>>     def __init__(self, net):
        >>>         super(GradNetWrtXY, self).__init__()
        >>>         self.net = net
        >>>         self.grad_op = GradOperation(get_all=True)
        >>>     def construct(self, x, y):
        >>>         gradient_function = self.grad_op(self.net)
        >>>         return gradient_function(x, y)
        >>>
        >>> x = Tensor([[0.8, 0.6, 0.2], [1.8, 1.3, 1.1]], dtype=mstype.float32)
        >>> y = Tensor([[0.11, 3.3, 1.1], [1.1, 0.2, 1.4], [1.1, 2.2, 0.3]], dtype=mstype.float32)
        >>> GradNetWrtXY(Net())(x, y)
        (Tensor(shape=[2, 3], dtype=Float32,
        [[4.5099998 2.7 3.6000001]
         [4.5099998 2.7 3.6000001]]), Tensor(shape=[3, 3], dtype=Float32,
        [[2.6 2.6 2.6 ]
         [1.9 1.9 1.9 ]
         [1.3000001 1.3000001 1.3000001]]))
        >>>
        >>> class GradNetWrtXYWithSensParam(nn.Cell):
        >>>     def __init__(self, net):
        >>>         super(GradNetWrtXYWithSensParam, self).__init__()
        >>>         self.net = net
        >>>         self.grad_op = GradOperation(get_all=True, sens_param=True)
        >>>         self.grad_wrt_output = Tensor([[0.1, 0.6, 0.2], [0.8, 1.3, 1.1]], dtype=mstype.float32)
        >>>     def construct(self, x, y):
        >>>         gradient_function = self.grad_op(self.net)
        >>>         return gradient_function(x, y, self.grad_wrt_output)
        >>>
        >>> x = Tensor([[0.8, 0.6, 0.2], [1.8, 1.3, 1.1]], dtype=mstype.float32)
        >>> y = Tensor([[0.11, 3.3, 1.1], [1.1, 0.2, 1.4], [1.1, 2.2, 0.3]], dtype=mstype.float32)
        >>> GradNetWrtXYWithSensParam(Net())(x, y)
        (Tensor(shape=[2, 3], dtype=Float32,
        [[2.211 0.51 1.4900001]
         [5.588 2.68 4.07 ]]), Tensor(shape=[3, 3], dtype=Float32,
        [[1.52 2.82 2.14 ]
         [1.1 2.05 1.55 ]
         [0.90000004 1.55 1.25 ]]))
        >>>
        >>> class GradNetWithWrtParams(nn.Cell):
        >>>     def __init__(self, net):
        >>>         super(GradNetWithWrtParams, self).__init__()
        >>>         self.net = net
        >>>         self.params = ParameterTuple(net.trainable_params())
        >>>         self.grad_op = GradOperation(get_by_list=True)
        >>>     def construct(self, x, y):
        >>>         gradient_function = self.grad_op(self.net, self.params)
        >>>         return gradient_function(x, y)
        >>>
        >>> x = Tensor([[0.8, 0.6, 0.2], [1.8, 1.3, 1.1]], dtype=mstype.float32)
        >>> y = Tensor([[0.11, 3.3, 1.1], [1.1, 0.2, 1.4], [1.1, 2.2, 0.3]], dtype=mstype.float32)
        >>> GradNetWithWrtParams(Net())(x, y)
        (Tensor(shape=[1], dtype=Float32, [21.536]),)
        >>>
        >>> class GradNetWrtInputsAndParams(nn.Cell):
        >>>     def __init__(self, net):
        >>>         super(GradNetWrtInputsAndParams, self).__init__()
        >>>         self.net = net
        >>>         self.params = ParameterTuple(net.trainable_params())
        >>>         self.grad_op = GradOperation(get_all=True, get_by_list=True)
        >>>     def construct(self, x, y):
        >>>         gradient_function = self.grad_op(self.net, self.params)
        >>>         return gradient_function(x, y)
        >>>
        >>> x = Tensor([[0.1, 0.6, 1.2], [0.5, 1.3, 0.1]], dtype=mstype.float32)
        >>> y = Tensor([[0.12, 2.3, 1.1], [1.3, 0.2, 2.4], [0.1, 2.2, 0.3]], dtype=mstype.float32)
        >>> GradNetWrtInputsAndParams(Net())(x, y)
        ((Tensor(shape=[2, 3], dtype=Float32,
        [[3.52 3.9 2.6 ]
         [3.52 3.9 2.6 ]]), Tensor(shape=[3, 3], dtype=Float32,
        [[0.6 0.6 0.6 ]
         [1.9 1.9 1.9 ]
         [1.3000001 1.3000001 1.3000001]])), (Tensor(shape=[1], dtype=Float32, [12.902]),))
    """
    def __init__(self, get_all=False, get_by_list=False, sens_param=False):
        if not isinstance(get_all, bool):
            raise TypeError(f'get_all should be bool, but got {type(get_all)}')
        if not isinstance(get_by_list, bool):
            raise TypeError(f'get_by_list should be bool, but got {type(get_by_list)}')
        if not isinstance(sens_param, bool):
            raise TypeError(f'sens_param should be bool, but got {type(sens_param)}')
        self.get_all = get_all
        self.get_by_list = get_by_list
        self.sens_param = sens_param
        GradOperation_.__init__(self, 'grad', get_all, get_by_list, sens_param)
        self.grad_fn = None
        self.fn = None
        self.need_forward = False

    def _pynative_forward_run(self, args, kwargs, fn):
        """ Pynative forward run to build grad graph. """
        if self.sens_param:
            args = args[:-1]
        for arg in args:
            if not isinstance(arg, Tensor):
                raise TypeError("grad inputs should be tensor in pynative mode")
        if isinstance(fn, FunctionType):
            # fn is a plain Python function: run it once with grad recording enabled.
            _pynative_exec.set_grad_flag(True)
            _pynative_exec.new_graph(fn, *args, **kwargs)
            output = fn(*args, **kwargs)
            _pynative_exec.end_graph(fn, output, *args, **kwargs)
        else:
            # fn is a Cell: make sure a forward pass has been run with grad enabled
            # before the backward graph is built.
            if fn.already_run and not fn.requires_grad:
                raise ValueError("obj must set_grad.")
            if not fn.already_run:
                self.need_forward = True
            if self.need_forward:
                fn.set_grad()
                fn(*args, **kwargs)
                fn.already_run = False

    def __call__(self, fn, weights=None):
        grad_ = GradOperation(self.get_all, self.get_by_list, self.sens_param)
        # Cache the generated gradient function; rebuild it only when a different fn is passed.
        if self.grad_fn is None or self.fn != fn:
            if context.get_context("mode") == context.GRAPH_MODE:
                if self.get_by_list:
                    @ms_function(obj=fn)
                    def after_grad(*args):
                        return grad_(fn, weights)(*args)
                else:
                    @ms_function(obj=fn)
                    def after_grad(*args):
                        return grad_(fn)(*args)
            else:
                @_wrap_func
                def after_grad(*args, **kwargs):
                    self._pynative_forward_run(args, kwargs, fn)
                    _pynative_exec.grad(grad_, fn, weights, *args, **kwargs)
                    out = _pynative_exec(*args, **kwargs)
                    _pynative_exec.clear()
                    return out
            self.grad_fn = after_grad
            self.fn = fn
        return self.grad_fn
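
# A minimal sketch of applying GradOperation to a plain Python function in
# PyNative mode (illustrative; per _pynative_forward_run, the inputs must be tensors):
#
#     def mul(x, y):
#         return x * y
#
#     grad_all = GradOperation(get_all=True)
#     dx, dy = grad_all(mul)(Tensor(2.0, mstype.float32), Tensor(3.0, mstype.float32))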


class MultitypeFuncGraph(MultitypeFuncGraph_):
    """
    Generate overloaded functions.

    MultitypeFuncGraph is a class used to generate overloaded functions, considering different
    types as inputs. Initialize a `MultitypeFuncGraph` object with a name, and use `register`
    with input types as the decorator for the function to be registered. The object can then be
    called with different types of inputs, and works with `HyperMap` and `Map`.

    Args:
        name (str): Operator name.
        read_value (bool): If the registered function does not need to set values on Parameters,
            and all inputs will be passed by value, set `read_value` to True. Default: False.

    Raises:
        ValueError: If it fails to find a matching function for the given arguments.

    Examples:
        >>> # `add` is a metagraph object which will add two objects according to
        >>> # input type using the ".register" decorator.
        >>> from mindspore import Tensor
        >>> from mindspore.ops import Primitive, operations as P
        >>> from mindspore import dtype as mstype
        >>>
        >>> scala_add = Primitive('scala_add')
        >>> tensor_add = P.TensorAdd()
        >>>
        >>> add = MultitypeFuncGraph('add')
        >>> @add.register("Number", "Number")
        ... def add_scala(x, y):
        ...     return scala_add(x, y)
        >>> @add.register("Tensor", "Tensor")
        ... def add_tensor(x, y):
        ...     return tensor_add(x, y)
        >>> add(1, 2)
        3
        >>> add(Tensor(1, mstype.float32), Tensor(2, mstype.float32))
        Tensor(shape=[], dtype=Float32, 3)
    """
    def __init__(self, name, read_value=False):
        MultitypeFuncGraph_.__init__(self, name)
        self.entries = list()
        if read_value:
            self.set_signatures((
                sig.make_sig('args', sig.sig_rw.RW_READ, sig.sig_kind.KIND_VAR_POSITIONAL),))

    def __call__(self, *args):
        if len(self.entries) == 1:
            output = self.entries[0][1](*args)
            return output
        # Dispatch on the inferred types of the arguments: pick the first registered
        # entry whose signature matches.
        types = tuple(map(mstype.get_py_obj_dtype, args))
        for sigs, fn in self.entries:
            if len(sigs) != len(types):
                continue
            if any(not mstype.issubclass_(type_, sig) for sig, type_ in zip(sigs, types)):
                continue
            output = fn(*args)
            return output
        raise ValueError("Cannot find fn match given args.")

    def register(self, *type_names):
        """
        Register a function for the given type strings.

        Args:
            type_names (Union[str, :class:`mindspore.dtype`]): Input type names or types.

        Returns:
            decorator, a decorator that registers the function to be run when called with the
            types described in `type_names`.
        """
        def deco(fn):
            def convert_type(type_input):
                if isinstance(type_input, str):
                    return mstype.typing.str_to_type(type_input)
                if not isinstance(type_input, mstype.Type):
                    raise TypeError(f"MultitypeFuncGraph register only support str or {mstype.Type}")
                return type_input
            types = tuple(map(convert_type, type_names))
            self.register_fn(type_names, fn)
            self.entries.append((types, fn))
            return fn
        return deco
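
# A sketch of registering with `mindspore.dtype` objects instead of type-name
# strings, which `convert_type` above also accepts (illustrative only):
#
#     cast_like = MultitypeFuncGraph('cast_like')
#
#     @cast_like.register(mstype.tensor, mstype.tensor)
#     def _cast_like_tensor(x, y):
#         return F.cast(x, F.dtype(y))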


class HyperMap(HyperMap_):
    """
    HyperMap will apply the set operation to input sequences.

    Applies the operation to every element of the sequence or nested sequence. Different
    from `Map`, `HyperMap` supports applying to nested structures.

    Args:
        ops (Union[MultitypeFuncGraph, None]): `ops` is the operation to apply. If `ops` is `None`,
            the operation should be put in the first input of the instance.

    Inputs:
        - **args** (Tuple[sequence]) - If `ops` is not `None`, all the inputs should be sequences
          with the same length, and each row of the sequences will be the inputs of the operation.
          If `ops` is `None`, the first input is the operation, and the others are inputs.

    Outputs:
        Sequence or nested sequence, the sequence of outputs after applying the function,
        e.g. `operation(args[0][i], args[1][i])`.

    Examples:
        >>> from mindspore import dtype as mstype
        >>> nest_tensor_list = ((Tensor(1, mstype.float32), Tensor(2, mstype.float32)),
        ...                     (Tensor(3, mstype.float32), Tensor(4, mstype.float32)))
        >>> # square all the tensors in the nested list
        >>>
        >>> square = MultitypeFuncGraph('square')
        >>> @square.register("Tensor")
        ... def square_tensor(x):
        ...     return F.square(x)
        >>>
        >>> common_map = HyperMap()
        >>> common_map(square, nest_tensor_list)
        ((Tensor(shape=[], dtype=Float32, 1), Tensor(shape=[], dtype=Float32, 4)),
        (Tensor(shape=[], dtype=Float32, 9), Tensor(shape=[], dtype=Float32, 16)))
        >>> square_map = HyperMap(square)
        >>> square_map(nest_tensor_list)
        ((Tensor(shape=[], dtype=Float32, 1), Tensor(shape=[], dtype=Float32, 4)),
        (Tensor(shape=[], dtype=Float32, 9), Tensor(shape=[], dtype=Float32, 16)))
    """
    def __init__(self, ops=None):
        self.ops = ops
        if ops:
            HyperMap_.__init__(self, ops)
        else:
            HyperMap_.__init__(self)

    def __call__(self, *args):
        func = self.ops
        args_list = args
        hypermap = self
        if self.ops is None:
            # No operation was bound at construction time, so the first argument is the operation.
            func = args[0]
            args_list = args[1:]
            hypermap = partial(self, func)
        # A leaf node: apply the function directly instead of recursing.
        if not isinstance(args_list[0], (tuple, list)):
            return func(*args_list)
        # Recurse into the nested structure, mapping over corresponding elements.
        return tuple(map(hypermap, *args_list))
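
# A Python-level illustration of HyperMap's recursion (a plain function stands in
# for a MultitypeFuncGraph here; illustrative only):
#
#     hm = HyperMap()
#     hm(lambda x: x * x, ((1, 2), (3, 4)))   # -> ((1, 4), (9, 16))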


class Map(Map_):
    """
    Map will apply the set operation on input sequences.

    Applies the operation to every element of the sequence.

    Args:
        ops (Union[MultitypeFuncGraph, None]): `ops` is the operation to apply. If `ops` is `None`,
            the operation should be put in the first input of the instance. Default: None.

    Inputs:
        - **args** (Tuple[sequence]) - If `ops` is not `None`, all the inputs should be sequences
          with the same length, and each row of the sequences is an input of the operation; e.g.
          if the length of args is 2, then for each `i` over the length of each sequence,
          `(args[0][i], args[1][i])` will be the input of the operation.
          If `ops` is `None`, the first input is the operation, and the others are inputs.

    Outputs:
        Sequence, the sequence of outputs after applying the function,
        e.g. `operation(args[0][i], args[1][i])`.

    Examples:
        >>> from mindspore import dtype as mstype
        >>> tensor_list = (Tensor(1, mstype.float32), Tensor(2, mstype.float32), Tensor(3, mstype.float32))
        >>> # square all the tensors in the list
        >>>
        >>> square = MultitypeFuncGraph('square')
        >>> @square.register("Tensor")
        ... def square_tensor(x):
        ...     return F.square(x)
        >>>
        >>> common_map = Map()
        >>> common_map(square, tensor_list)
        (Tensor(shape=[], dtype=Float32, 1), Tensor(shape=[], dtype=Float32, 4), Tensor(shape=[], dtype=Float32, 9))
        >>> square_map = Map(square)
        >>> square_map(tensor_list)
        (Tensor(shape=[], dtype=Float32, 1), Tensor(shape=[], dtype=Float32, 4), Tensor(shape=[], dtype=Float32, 9))
    """
    def __init__(self, ops=None):
        self.ops = ops
        if ops:
            Map_.__init__(self, ops)
        else:
            Map_.__init__(self)

    def __call__(self, *args):
        func = self.ops
        args_list = args
        if self.ops is None:
            func = args[0]
            args_list = args[1:]
        return tuple(map(func, *args_list))
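
# A Python-level illustration of Map's element-wise application (a plain function
# stands in for a MultitypeFuncGraph here; illustrative only):
#
#     m = Map()
#     m(lambda x, y: x + y, (1, 2, 3), (10, 20, 30))   # -> (11, 22, 33)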


class _ListAppend(ListAppend_):
    """
    A metafuncgraph class that appends one element to a list.

    Args:
        name (str): The name of the metafuncgraph object.
    """
    def __init__(self, name):
        ListAppend_.__init__(self, name)

    def __call__(self, *args):
        pass


_append = _ListAppend("append")


class _Tail(Tail_):
    """
    A metafuncgraph class that generates the tail elements of a tuple.

    Args:
        name (str): The name of the metafuncgraph object.
    """
    def __init__(self, name):
        Tail_.__init__(self, name)

    def __call__(self, *args):
        pass


tail = _Tail('tail')


class _ZipOperation(ZipOperation_):
    """Generates a tuple of zip iterations for inputs."""
    def __init__(self, name):
        ZipOperation_.__init__(self, name)

    def __call__(self, *args):
        pass


zip_operation = _ZipOperation('zip_operation')
"""`zip_operation` will generate a tuple of zip iterations of inputs."""


env_get = MultitypeFuncGraph("env_get")


@env_get.register("EnvType", "Tensor")
def _tensor_env_get(env, parameter):
    """Used to get env."""
    return F.env_getitem(env, F.ref_to_embed(parameter), F.zeros_like(parameter))