
math_ops.py 72 kB

  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """Operators for math."""
  16. import numpy as np
  17. from ..._c_expression import signature_rw as sig_rw
  18. from ..._c_expression import signature_kind as sig_kind
  19. from ..._c_expression import signature_dtype as sig_dtype
  20. from ..._checkparam import Validator as validator
  21. from ..._checkparam import Rel
  22. from ...common import dtype as mstype
  23. from ...common.tensor import Tensor
  24. from .._utils import _get_broadcast_shape
  25. from ..primitive import PrimitiveWithInfer, prim_attr_register, _run_op
  26. def _infer_shape_reduce(x, axis, keep_dims, prim_name):
  27.     """Common infer for reduce operator"""
  28.     def reduce_one_axis(one_axis):
  29.         validator.check_int_range('axis', one_axis, -dim, dim, Rel.INC_LEFT, prim_name)
  30.         if one_axis < 0:
  31.             one_axis += dim
  32.         axis_reduce.add(one_axis)
  33.     validator.check_value_type('axis', axis, [int, tuple, list], prim_name)
  34.     dim = len(x)
  35.     axis_reduce = set()
  36.     if isinstance(axis, int):
  37.         reduce_one_axis(axis)
  38.     else:
  39.         if not axis:
  40.             if keep_dims:
  41.                 return [1] * dim
  42.             return []
  43.         for index, one_axis in enumerate(axis):
  44.             validator.check_value_type('axis[%d]' % index, one_axis, [int], prim_name)
  45.             reduce_one_axis(one_axis)
  46.     out_shape = []
  47.     for i in range(dim):
  48.         if i in axis_reduce:
  49.             if keep_dims:
  50.                 out_shape.append(1)
  51.         else:
  52.             out_shape.append(x[i])
  53.     return out_shape
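# Worked example (added for illustration, not part of the original source):
# for x = [3, 4, 5, 6] and axis = (2, 3), _infer_shape_reduce returns
# [3, 4, 1, 1] when keep_dims is True and [3, 4] when keep_dims is False;
# a negative axis such as -1 is first wrapped to dim - 1 by reduce_one_axis.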
  54. class _BinaryOp(PrimitiveWithInfer):
  55. """
  56. Define binary operators.
  57. """
  58. __mindspore_signature__ = (sig_dtype.T, sig_dtype.T)
  59. @prim_attr_register
  60. def __init__(self):
  61. """init _BinaryOp"""
  62. self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
  63. def infer_shape(self, x_shape, y_shape):
  64. return _get_broadcast_shape(x_shape, y_shape, self.name)
  65. class _MathBinaryOp(_BinaryOp):
  66. """
  67. Define math binary operators.
  68. """
  69. @staticmethod
  70. def do_infer_dtype(x_dtype, y_dtype, valid_dtype=mstype.number_type, prim_name=None):
  71. args_type = {"x": x_dtype, "y": y_dtype}
  72. validator.check_tensor_type_same(args_type, valid_dtype, prim_name)
  73. return x_dtype
  74. def infer_dtype(self, x_dtype, y_dtype):
  75. return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type, self.name)
  76. class TensorAdd(_MathBinaryOp):
  77. """
  78. Adds two input tensors element-wise.
  79. The inputs must be two tensors or one tensor and one scalar.
  80. When the inputs are two tensors, the shapes of them could be broadcast,
  81. and the data types of them should be same.
  82. When the inputs are one tensor and one scalar, the scalar cannot be a parameter, only can be a constant,
  83. and the type of the scalar is the same as the data type of the tensor.
  84. Inputs:
  85. - **input_x** (Union[Tensor, Number]) - The first input is a tensor whose data type is number or a number.
  86. - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as 'input_x' or
  87. a number.
  88. Outputs:
  89. Tensor, the shape is same as the shape after broadcasting, and the data type is same as 'input_x'.
  90. Examples:
  91. >>> add = P.TensorAdd()
  92. >>> input_x = Tensor(np.array([1,2,3]).astype(np.float32))
  93. >>> input_y = Tensor(np.array([4,5,6]).astype(np.float32))
  94. >>> add(input_x, input_y)
  95. [5,7,9]
  96. """
  97. class AssignAdd(PrimitiveWithInfer):
  98. """
  99. Updates a `Parameter` by adding a value to it.
  100. Inputs:
  101. - **variable** (Parameter) - The `Parameter`.
  102. - **value** (Union[numbers.Number, Tensor]) - The value to be added to the `variable`.
  103. It should have the same shape as `variable` if it is a Tensor.
  104. Examples:
  105. >>> class Net(Cell):
  106. >>> def __init__(self):
  107. >>> super(Net, self).__init__()
  108. >>> self.AssignAdd = P.AssignAdd()
  109. >>> self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int64), name="global_step")
  110. >>>
  111. >>> def construct(self, x):
  112. >>> self.AssignAdd(self.variable, x)
  113. >>> return self.variable
  114. >>>
  115. >>> net = Net()
  116. >>> value = Tensor(np.ones([1]).astype(np.int64)*100)
  117. >>> net(value)
  118. """
  119. __mindspore_signature__ = (
  120. ('variable', sig_rw.RW_WRITE, sig_kind.KIND_POSITIONAL_KEYWORD),
  121. ('value', sig_rw.RW_READ, sig_kind.KIND_POSITIONAL_KEYWORD)
  122. )
  123. @prim_attr_register
  124. def __init__(self):
  125. """init AssignAdd"""
  126. self.init_prim_io_names(inputs=['ref', 'value'], outputs=['output'])
  127. def infer_shape(self, variable, value):
  128. return value
  129. def infer_dtype(self, variable, value):
  130. args = {"value": value}
  131. validator.check_scalar_or_tensor_type_same(args, mstype.number_type, self.name)
  132. return value
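# Note (assumption, not stated in the source): the RW_WRITE tag on 'variable' in
# __mindspore_signature__ marks the first input as updated in place, which is why
# infer_shape and infer_dtype above simply mirror the shape and dtype of 'value'.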
  133. class AssignSub(PrimitiveWithInfer):
  134. """
  135. Updates a `Parameter` by subtracting a value from it.
  136. Inputs:
  137. - **variable** (Parameter) - The `Parameter`.
  138. - **value** (Union[numbers.Number, Tensor]) - The value to be subtracted from the `variable`.
  139. It should have the same shape as `variable` if it is a Tensor.
  140. Examples:
  141. >>> class Net(Cell):
  142. >>> def __init__(self):
  143. >>> super(Net, self).__init__()
  144. >>> self.AssignSub = P.AssignSub()
  145. >>> self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int64), name="global_step")
  146. >>>
  147. >>> def construct(self, x):
  148. >>> self.AssignSub(self.variable, x)
  149. >>> return self.variable
  150. >>>
  151. >>> net = Net()
  152. >>> value = Tensor(np.ones([1]).astype(np.int64)*100)
  153. >>> net(value)
  154. """
  155. __mindspore_signature__ = (
  156. ('variable', sig_rw.RW_WRITE, sig_kind.KIND_POSITIONAL_KEYWORD),
  157. ('value', sig_rw.RW_READ, sig_kind.KIND_POSITIONAL_KEYWORD)
  158. )
  159. @prim_attr_register
  160. def __init__(self):
  161. """init AssignSub"""
  162. def infer_shape(self, variable, value):
  163. return value
  164. def infer_dtype(self, variable, value):
  165. args = {"value": value}
  166. validator.check_scalar_or_tensor_type_same(args, mstype.number_type, self.name)
  167. return value
  168. class _Reduce(PrimitiveWithInfer):
  169. """
  170. Definition of base class of reduction class operators.
  171. Args:
  172. keep_dims (bool): If True, keep these reduced dimensions and the length is 1.
  173. If False, don't keep these dimensions.
  174. """
  175. __mindspore_signature__ = (
  176. ('input_x', sig_rw.RW_READ, sig_kind.KIND_POSITIONAL_KEYWORD),
  177. ('axis', sig_rw.RW_READ, sig_kind.KIND_POSITIONAL_KEYWORD, ()),
  178. )
  179. @prim_attr_register
  180. def __init__(self, keep_dims=False):
  181. """init Reduce"""
  182. validator.check_value_type('keep_dims', keep_dims, [bool], self.name)
  183. self.init_prim_io_names(inputs=['input_x', 'axis'], outputs=['y'])
  184. def __call__(self, x, axis=()):
  185. args = [x, axis]
  186. output = _run_op(self, self.name, args)
  187. return output
  188. def do_infer(self, input_x, axis, valid_dtype=mstype.number_type):
  189. axis_v = axis['value']
  190. input_shp = input_x['shape']
  191. args = {'input_x': input_x['dtype']}
  192. validator.check_tensor_type_same(args, valid_dtype, self.name)
  193. input_shp = _infer_shape_reduce(input_shp, axis_v, self.keep_dims, self.name)
  194. return {'shape': input_shp,
  195. 'dtype': input_x['dtype'],
  196. 'value': None}
  197. def __infer__(self, input_x, axis):
  198. return self.do_infer(input_x, axis)
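# Usage sketch (assumed, not from the original file): a reduction primitive is
# called with a tensor and a constant axis, for example
#   op = P.ReduceSum(keep_dims=False)
#   out = op(Tensor(np.ones((2, 3), np.float32)), 1)  # inferred shape: (2,)
# do_infer() validates the input dtype against valid_dtype and reuses
# _infer_shape_reduce() to compute the output shape.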
  199. class ReduceMean(_Reduce):
  200. """
  201. Reduce a dimension of a tensor by averaging all elements in the dimension.
  202. The dtype of the tensor to be reduced is number.
  203. Args:
  204. keep_dims (bool): If True, keep these reduced dimensions and the length is 1.
  205. If False, don't keep these dimensions. Default : False.
  206. Inputs:
  207. - **input_x** (Tensor[Number]) - The input tensor.
  208. - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
  209. Only constant value is allowed.
  210. Outputs:
  211. Tensor, has the same dtype as the 'input_x'.
  212. - If axis is (), and keep_dims is false,
  213. the output is a 0-D tensor representing the mean of all elements in the input tensor.
  214. - If axis is int, set as 2, and keep_dims is false,
  215. the shape of output is :math:`(x_1, x_3, ..., x_R)`.
  216. - If axis is tuple(int), set as (2, 3), and keep_dims is false,
  217. the shape of output is :math:`(x_1, x_4, ..., x_R)`.
  218. Examples:
  219. >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
  220. >>> op = P.ReduceMean(keep_dims=True)
  221. >>> output = op(input_x, 1)
  222. """
  223. class ReduceSum(_Reduce):
  224. """
  225. Reduce a dimension of a tensor by summing all elements in the dimension.
  226. The dtype of the tensor to be reduced is number.
  227. Args:
  228. keep_dims (bool): If True, keep these reduced dimensions and the length is 1.
  229. If False, don't keep these dimensions. Default : False.
  230. Inputs:
  231. - **input_x** (Tensor[Number]) - The input tensor.
  232. - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
  233. Only constant value is allowed.
  234. Outputs:
  235. Tensor, has the same dtype as the 'input_x'.
  236. - If axis is (), and keep_dims is false,
  237. the output is a 0-D tensor representing the sum of all elements in the input tensor.
  238. - If axis is int, set as 2, and keep_dims is false,
  239. the shape of output is :math:`(x_1, x_3, ..., x_R)`.
  240. - If axis is tuple(int), set as (2, 3), and keep_dims is false,
  241. the shape of output is :math:`(x_1, x_4, ..., x_R)`.
  242. Examples:
  243. >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
  244. >>> op = P.ReduceSum(keep_dims=True)
  245. >>> output = op(input_x, 1)
  246. """
  247. class ReduceAll(_Reduce):
  248. """
  249. Reduce a dimension of a tensor by the "logical and" of all elements in the dimension.
  250. The dtype of the tensor to be reduced is bool.
  251. Args:
  252. keep_dims (bool): If True, keep these reduced dimensions and the length is 1.
  253. If False, don't keep these dimensions.
  254. Default : False, don't keep these reduced dimensions.
  255. Inputs:
  256. - **input_x** (Tensor[bool]) - The input tensor.
  257. - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
  258. Only constant value is allowed.
  259. Outputs:
  260. Tensor, the dtype is bool.
  261. - If axis is (), and keep_dims is false,
  262. the output is a 0-D tensor representing the "logical and" of all elements in the input tensor.
  263. - If axis is int, set as 2, and keep_dims is false,
  264. the shape of output is :math:`(x_1, x_3, ..., x_R)`.
  265. - If axis is tuple(int), set as (2, 3), and keep_dims is false,
  266. the shape of output is :math:`(x_1, x_4, ..., x_R)`.
  267. Examples:
  268. >>> input_x = Tensor(np.array([[True, False], [True, True]]))
  269. >>> op = P.ReduceAll(keep_dims=True)
  270. >>> output = op(input_x, 1)
  271. """
  272. def __infer__(self, input_x, axis):
  273. return self.do_infer(input_x, axis, (mstype.bool_,))
  274. class ReduceMax(_Reduce):
  275. """
  276. Reduce a dimension of a tensor by the maximum value in this dimension.
  277. The dtype of the tensor to be reduced is number.
  278. Args:
  279. keep_dims (bool): If True, keep these reduced dimensions and the length is 1.
  280. If False, don't keep these dimensions.
  281. Default : False, don't keep these reduced dimensions.
  282. Inputs:
  283. - **input_x** (Tensor[Number]) - The input tensor.
  284. - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
  285. Only constant value is allowed.
  286. Outputs:
  287. Tensor, has the same dtype as the 'input_x'.
  288. - If axis is (), and keep_dims is false,
  289. the output is a 0-D tensor representing the maximum of all elements in the input tensor.
  290. - If axis is int, set as 2, and keep_dims is false,
  291. the shape of output is :math:`(x_1, x_3, ..., x_R)`.
  292. - If axis is tuple(int), set as (2, 3), and keep_dims is false,
  293. the shape of output is :math:`(x_1, x_4, ..., x_R)`.
  294. Examples:
  295. >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
  296. >>> op = P.ReduceMax(keep_dims=True)
  297. >>> output = op(input_x, 1)
  298. """
  299. class ReduceMin(_Reduce):
  300. """
  301. Reduce a dimension of a tensor by the minimum value in the dimension.
  302. The dtype of the tensor to be reduced is number.
  303. Args:
  304. keep_dims (bool): If True, keep these reduced dimensions and the length is 1.
  305. If False, don't keep these dimensions.
  306. Default : False, don't keep these reduced dimensions.
  307. Inputs:
  308. - **input_x** (Tensor[Number]) - The input tensor.
  309. - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
  310. Only constant value is allowed.
  311. Outputs:
  312. Tensor, has the same dtype as the 'input_x'.
  313. - If axis is (), and keep_dims is false,
  314. the output is a 0-D tensor representing the minimum of all elements in the input tensor.
  315. - If axis is int, set as 2, and keep_dims is false,
  316. the shape of output is :math:`(x_1, x_3, ..., x_R)`.
  317. - If axis is tuple(int), set as (2, 3), and keep_dims is false,
  318. the shape of output is :math:`(x_1, x_4, ..., x_R)`.
  319. Examples:
  320. >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
  321. >>> op = P.ReduceMin(keep_dims=True)
  322. >>> output = op(input_x, 1)
  323. """
  324. class ReduceProd(_Reduce):
  325. """
  326. Reduce a dimension of a tensor by multiplying all elements in the dimension.
  327. The dtype of the tensor to be reduced is number.
  328. Args:
  329. keep_dims (bool): If True, keep these reduced dimensions and the length is 1.
  330. If False, don't keep these dimensions.
  331. Default : False, don't keep these reduced dimensions.
  332. Inputs:
  333. - **input_x** (Tensor[Number]) - The input tensor.
  334. - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
  335. Outputs:
  336. Tensor, has the same dtype as the 'input_x'.
  337. - If axis is (), and keep_dims is false,
  338. the output is a 0-D tensor representing the product of all elements in the input tensor.
  339. - If axis is int, set as 2, and keep_dims is false,
  340. the shape of output is :math:`(x_1, x_3, ..., x_R)`.
  341. - If axis is tuple(int), set as (2, 3), and keep_dims is false,
  342. the shape of output is :math:`(x_1, x_4, ..., x_R)`.
  343. Examples:
  344. >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
  345. >>> op = P.ReduceProd(keep_dims=True)
  346. >>> output = op(input_x, 1)
  347. """
  348. class CumProd(PrimitiveWithInfer):
  349. """
  350. Computes the cumulative product of the tensor `x` along the given axis.
  351. Args:
  352. exclusive (bool): If True, perform exclusive cumulative product. Default: False.
  353. reverse (bool): If True, reverse the result along axis. Default: False.
  354. Inputs:
  355. - **input_x** (Tensor[Number]) - The input tensor.
  356. - **axis** (int) - The dimension along which to compute the cumulative product.
  357. Outputs:
  358. Tensor, has the same shape and dtype as the 'input_x'.
  359. Examples:
  360. >>> input_x = Tensor(np.array([a, b, c]).astype(np.float32))
  361. >>> op0 = P.CumProd()
  362. >>> output = op0(input_x, 0) # output=[a, a * b, a * b * c]
  363. >>> op1 = P.CumProd(exclusive=True)
  364. >>> output = op1(input_x, 0) # output=[1, a, a * b]
  365. >>> op2 = P.CumProd(reverse=True)
  366. >>> output = op2(input_x, 0) # output=[a * b * c, b * c, c]
  367. >>> op3 = P.CumProd(exclusive=True, reverse=True)
  368. >>> output = op3(input_x, 0) # output=[b * c, c, 1]
  369. """
  370. @prim_attr_register
  371. def __init__(self, exclusive=False, reverse=False):
  372. cls_name = self.name
  373. self.exclusive = validator.check_value_type("exclusive", exclusive, [bool], cls_name)
  374. self.reverse = validator.check_value_type("reverse", reverse, [bool], cls_name)
  375. def infer_shape(self, x_shape, axis_shape):
  376. return x_shape
  377. def infer_dtype(self, x_type, axis_type):
  378. cls_name = self.name
  379. validator.check_tensor_type_same({'x': x_type}, mstype.number_type, cls_name)
  380. validator.check_subclass("axis", axis_type, mstype.int_, cls_name)
  381. return x_type
  382. class MatMul(PrimitiveWithInfer):
  383. """
  384. Multiplies matrix `a` by matrix `b`.
  385. The rank of input tensors must be `2`.
  386. Args:
  387. transpose_a (bool): If True, `a` is transposed before multiplication. Default: False.
  388. transpose_b (bool): If True, `b` is transposed before multiplication. Default: False.
  389. Inputs:
  390. - **input_x** (Tensor) - The first tensor to be multiplied. The shape of the tensor is :math:`(N, C)`. If
  391. `transpose_a` is True, its shape should be :math:`(N, C)` after transposing.
  392. - **input_y** (Tensor) - The second tensor to be multiplied. The shape of the tensor is :math:`(C, M)`. If
  393. `transpose_b` is True, its shape should be :math:`(C, M)` after transpose.
  394. Outputs:
  395. Tensor, the shape of the output tensor is :math:`(N, M)`.
  396. Examples:
  397. >>> input_x = Tensor(np.ones(shape=[1, 3]), mindspore.float32)
  398. >>> input_y = Tensor(np.ones(shape=[3, 4]), mindspore.float32)
  399. >>> matmul = P.MatMul()
  400. >>> output = matmul(input_x, input_y)
  401. """
  402. @prim_attr_register
  403. def __init__(self, transpose_a=False, transpose_b=False):
  404. self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['output'])
  405. self.__setattr_flag__ = True
  406. cls_name = self.name
  407. validator.check_value_type("transpose_a", transpose_a, [bool], cls_name)
  408. validator.check_value_type("transpose_b", transpose_b, [bool], cls_name)
  409. def check_shape_size(self, x, y):
  410. if len(x) != 2 or len(y) != 2:
  411. raise ValueError('MatMul input x, y should be the same dimension size and should be '
  412. + f'equal to 2, while x size = {len(x)}, y size= {len(y)}')
  413. def infer_shape(self, x, y):
  414. self.check_shape_size(x, y)
  415. cls_name = self.name
  416. # expected dimension of x, y, x:[...,a,b] y:[..., c,d], the dim size should be the same except the last two
  417. for i in range(len(x) - 2):
  418. if x[i] != y[i]:
  419. raise ValueError(f'For \'{cls_name}\' shape in dim[{i}] not the same, while x is {x[i]}, y is {y[i]}')
  420. # validate whether the last two dims satisfy matrix multiplication
  421. x_last = x[-2:]
  422. y_last = y[-2:]
  423. x_col = x_last[not self.transpose_a] # x_col = x_last[1] if (not transpose_a) else x_last[0]
  424. y_row = y_last[self.transpose_b] # y_row = y_last[0] if (not transpose_b) else y_last[1]
  425. if x_col != y_row:
  426. raise ValueError(f'For \'{cls_name}\' evaluator shapes of inputs can not do this operator,'
  427. + f' got {x_col} and {y_row}, with x shape {x}(transpose_a={self.transpose_a})'
  428. + f', y shape {y}(transpose_b={self.transpose_b}).')
  429. # set attribute
  430. self.add_prim_attr('transpose_x1', self.transpose_a)
  431. self.add_prim_attr('transpose_x2', self.transpose_b)
  432. ret_dims = x[: -2] + [x_last[self.transpose_a], y_last[not self.transpose_b]]
  433. return ret_dims
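# Shape-inference sketch (illustrative assumption): for x of shape [1, 3] and
# y of shape [3, 4] with both transpose flags False, x_col = 3 matches
# y_row = 3 and ret_dims = [1, 4]; with transpose_a=True the raw x is expected
# as [3, 1], since x_last[self.transpose_a] then picks its second element as N.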
  434. def infer_dtype(self, x, y):
  435. args = {"x": x, "y": y}
  436. validator.check_tensor_type_same(args, mstype.float_type + mstype.int_type, self.name)
  437. return x
  438. class BatchMatMul(MatMul):
  439. """
  440. Computes matrix multiplication between two tensors by batch
  441. `result[..., :, :] = tensor(a[..., :, :]) * tensor(b[..., :, :])`.
  442. The two input tensors must have the same rank, and the rank must be at least `3`.
  443. Args:
  444. transpose_a (bool): If True, `a` is transposed on the last two dimensions before multiplication.
  445. Default: False.
  446. transpose_b (bool): If True, `b` is transposed on the last two dimensions before multiplication.
  447. Default: False.
  448. Inputs:
  449. - **input_x** (Tensor) - The first tensor to be multiplied. The shape of the tensor is :math:`(*B, N, C)`,
  450. where :math:`*B` represents the batch size which can be multidimensional, :math:`N` and :math:`C` are the
  451. size of the last two dimensions. If `transpose_a` is True, its shape should be :math:`(*B, C, N)`.
  452. - **input_y** (Tensor) - The second tensor to be multiplied. The shape of the tensor is :math:`(*B, C, M)`. If
  453. `transpose_b` is True, its shape should be :math:`(*B, M, C)`.
  454. Outputs:
  455. Tensor, the shape of the output tensor is :math:`(*B, N, M)`.
  456. Examples:
  457. >>> input_x = Tensor(np.ones(shape=[2, 4, 1, 3]), mindspore.float32)
  458. >>> input_y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
  459. >>> batmatmul = P.BatchMatMul()
  460. >>> output = batmatmul(input_x, input_y)
  461. >>>
  462. >>> input_x = Tensor(np.ones(shape=[2, 4, 3, 1]), mindspore.float32)
  463. >>> input_y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
  464. >>> batmatmul = P.BatchMatMul(transpose_a=True)
  465. >>> output = batmatmul(input_x, input_y)
  466. """
  467. @prim_attr_register
  468. def __init__(self, transpose_a=False, transpose_b=False):
  469. self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['output'])
  470. self.__setattr_flag__ = True
  471. cls_name = self.name
  472. validator.check_value_type("transpose_a", transpose_a, [bool], cls_name)
  473. validator.check_value_type("transpose_b", transpose_b, [bool], cls_name)
  474. def check_shape_size(self, x, y):
  475. if len(x) != len(y) or len(x) < 3:
  476. raise ValueError('For \'BatchMatMul\' input x, y should be the same dimension size and should be '
  477. 'greater or equal to 3,' + f' while x size = {len(x)}, y size= {len(y)}')
  478. class CumSum(PrimitiveWithInfer):
  479. """
  480. Computes the cumulative sum of input tensor along axis.
  481. Args:
  482. exclusive (bool): If True, perform exclusive mode. Default: False.
  483. reverse (bool): If True, perform inverse cumulative sum. Default: False.
  484. Inputs:
  485. - **input** (Tensor) - The input tensor to accumulate.
  486. - **axis** (int) - The axis to accumulate the tensor's value.
  487. Outputs:
  488. Tensor, the shape of the output tensor is consistent with the input tensor's.
  489. Examples:
  490. >>> input = Tensor(np.array([[3, 4, 6, 10],[1, 6, 7, 9],[4, 3, 8, 7],[1, 3, 7, 9]]).astype(np.float32))
  491. >>> cumsum = P.CumSum()
  492. >>> output = cumsum(input, 1)
  493. [[ 3. 7. 13. 23.]
  494. [ 1. 7. 14. 23.]
  495. [ 4. 7. 15. 22.]
  496. [ 1. 4. 11. 20.]]
  497. """
  498. @prim_attr_register
  499. def __init__(self, exclusive=False, reverse=False):
  500. """init cumsum"""
  501. cls_name = self.name
  502. validator.check_value_type('exclusive', exclusive, [bool], cls_name)
  503. validator.check_value_type('reverse', reverse, [bool], cls_name)
  504. self.init_prim_io_names(inputs=['x', 'axis'], outputs=['y'])
  505. def __infer__(self, x, axis):
  506. cls_name = self.name
  507. x_shp = x['shape']
  508. validator.check_value_type('axis', axis['value'], [int], cls_name)
  509. valid_types = [mstype.uint8, mstype.int8, mstype.int32, mstype.float16, mstype.float32]
  510. validator.check_tensor_type_same({'x': x['dtype']}, valid_types, cls_name)
  511. return {'shape': x_shp,
  512. 'dtype': x['dtype'],
  513. 'value': None}
  514. class AddN(PrimitiveWithInfer):
  515. """
  516. Computes addition of all input tensors element-wise.
  517. All input tensors should have the same shape.
  518. Inputs:
  519. - **input_x** (Union(tuple[Tensor], list[Tensor])) - The input tuple or list
  520. is made up of multiple tensors whose dtype is number or bool to be added together.
  521. Outputs:
  522. Tensor, has the same shape and dtype as each entry of the `input_x`.
  523. Examples:
  524. >>> class NetAddN(nn.Cell):
  525. >>> def __init__(self):
  526. >>> super(NetAddN, self).__init__()
  527. >>> self.addN = P.AddN()
  528. >>>
  529. >>> def construct(self, *z):
  530. >>> return self.addN(z)
  531. >>>
  532. >>> net = NetAddN()
  533. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32)
  534. >>> input_y = Tensor(np.array([4, 5, 6]), mindspore.float32)
  535. >>> net(input_x, input_y, input_x, input_y)
  536. Tensor([10.0, 14.0, 18.0], shape=(3,), dtype=mindspore.float32)
  537. """
  538. @prim_attr_register
  539. def __init__(self):
  540. self.__setattr_flag__ = True
  541. self.init_prim_io_names(inputs=["inputs"], outputs=["sum"])
  542. def infer_shape(self, inputs):
  543. cls_name = self.name
  544. validator.check_integer("inputs", len(inputs), 1, Rel.GE, cls_name)
  545. self.add_prim_attr('n', len(inputs))
  546. shp0 = inputs[0]
  547. for i, shp in enumerate(inputs):
  548. validator.check(f"shape of inputs[{i}]", shp, 'shape of inputs[0]', shp0, Rel.EQ, cls_name)
  549. return shp0
  550. def infer_dtype(self, inputs):
  551. cls_name = self.name
  552. validator.check_value_type("inputs", inputs, [tuple, list], cls_name)
  553. validator.check_integer("inputs", len(inputs), 1, Rel.GE, cls_name)
  554. args = {}
  555. for i, dtype in enumerate(inputs):
  556. args[f"inputs[{i}]"] = dtype
  557. validator.check_tensor_type_same(args, mstype.number_type + (mstype.bool_,), cls_name)
  558. return inputs[0]
  559. class Neg(PrimitiveWithInfer):
  560. """
  561. Returns a tensor with negative values of the input tensor element-wise.
  562. Inputs:
  563. - **input_x** (Tensor) - The input tensor whose dtype is number.
  564. Outputs:
  565. Tensor, has the same shape and dtype as input.
  566. """
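# Example usage (added for illustration; the original docstring has no Examples
# section): P.Neg() applied to Tensor(np.array([1.0, -2.0, 3.0]), mindspore.float32)
# would return [-1.0, 2.0, -3.0].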
  567. @prim_attr_register
  568. def __init__(self):
  569. """init Neg"""
  570. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  571. def infer_shape(self, input_x):
  572. return input_x
  573. def infer_dtype(self, input_x):
  574. validator.check_tensor_type_same({"input_x": input_x}, mstype.number_type, self.name)
  575. return input_x
  576. class Sub(_MathBinaryOp):
  577. """
  578. Subtracts the second input tensor from the first input tensor element-wise.
  579. The inputs must be two tensors or one tensor and one scalar.
  580. When the inputs are two tensors, the shapes of them could be broadcast,
  581. and the data types of them should be same.
  582. When the inputs are one tensor and one scalar, the scalar cannot be a parameter, only can be a constant,
  583. and the type of the scalar is the same as the data type of the tensor.
  584. Inputs:
  585. - **input_x** (Union[Tensor, Number]) - The first input is a tensor whose data type is number or a number.
  586. - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as 'input_x' or
  587. a number.
  588. Outputs:
  589. Tensor, the shape is same as the shape after broadcasting, and the data type is same as 'input_x'.
  590. Examples:
  591. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  592. >>> input_y = Tensor(np.array([4, 5, 6]), mindspore.int32)
  593. >>> sub = P.Sub()
  594. >>> sub(input_x, input_y)
  595. [-3, -3, -3]
  596. """
  597. class Mul(_MathBinaryOp):
  598. """
  599. Multiplies two tensors element-wise.
  600. The inputs must be two tensors or one tensor and one scalar.
  601. When the inputs are two tensors, the shapes of them could be broadcast,
  602. and the data types of them should be same.
  603. When the inputs are one tensor and one scalar, the scalar cannot be a parameter, only can be a constant,
  604. and the type of the scalar is the same as the data type of the tensor.
  605. Inputs:
  606. - **input_x** (Union[Tensor, Number]) - The first input is a tensor whose data type is number or a number.
  607. - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as 'input_x' or
  608. a number.
  609. Outputs:
  610. Tensor, the shape is same as the shape after broadcasting, and the data type is same as 'input_x'.
  611. Examples:
  612. >>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
  613. >>> input_y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
  614. >>> mul = P.Mul()
  615. >>> mul(input_x, input_y)
  616. [4, 10, 18]
  617. """
  618. def infer_value(self, x, y):
  619. if x is not None and y is not None:
  620. x = x.asnumpy()
  621. y = y.asnumpy()
  622. out = x * y
  623. out = np.array(out, x.dtype)
  624. return Tensor(out)
  625. return None
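# Note (assumption, not stated in the source): infer_value lets the frontend fold
# Mul at compile time when both operands are known constants, e.g.
# Mul().infer_value(Tensor(np.array([2.0])), Tensor(np.array([3.0]))) would
# return Tensor([6.0]); with any unknown input it returns None.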
  626. class Square(PrimitiveWithInfer):
  627. """
  628. Returns square of a tensor element-wise.
  629. Inputs:
  630. - **input_x** (Tensor) - The input tensor whose dtype is number.
  631. Outputs:
  632. Tensor, has the same shape and dtype as the `input_x`.
  633. Examples:
  634. >>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
  635. >>> square = P.Square()
  636. >>> square(input_x)
  637. [1.0, 4.0, 9.0]
  638. """
  639. @prim_attr_register
  640. def __init__(self):
  641. """init Square"""
  642. def infer_shape(self, x_shape):
  643. return x_shape
  644. def infer_dtype(self, x_type):
  645. validator.check_tensor_type_same({"x": x_type}, mstype.number_type, self.name)
  646. return x_type
  647. class Rsqrt(PrimitiveWithInfer):
  648. """
  649. Computes reciprocal of square root of input tensor element-wise.
  650. Inputs:
  651. - **input_x** (Tensor) - The input of Rsqrt. Each element should be a non-negative number.
  652. Outputs:
  653. Tensor, has the same type and shape as `input_x`.
  654. Examples:
  655. >>> input_tensor = Tensor([[4, 4], [9, 9]], mindspore.float32)
  656. >>> rsqrt = P.Rsqrt()
  657. >>> rsqrt(input_tensor)
  658. [[0.5, 0.5], [0.333333, 0.333333]]
  659. """
  660. @prim_attr_register
  661. def __init__(self):
  662. """init Rsqrt"""
  663. def infer_shape(self, x_shape):
  664. return x_shape
  665. def infer_dtype(self, x_type):
  666. validator.check_tensor_type_same({"x": x_type}, mstype.number_type, self.name)
  667. return x_type
  668. class Sqrt(PrimitiveWithInfer):
  669. """
  670. Returns square root of a tensor element-wise.
  671. Inputs:
  672. - **input_x** (Tensor) - The input tensor whose dtype is number.
  673. Outputs:
  674. Tensor, has the same shape as the `input_x`.
  675. Examples:
  676. >>> input_x = Tensor(np.array([1.0, 4.0, 9.0]), mindspore.float32)
  677. >>> sqrt = P.Sqrt()
  678. >>> sqrt(input_x)
  679. [1.0, 2.0, 3.0]
  680. """
  681. @prim_attr_register
  682. def __init__(self):
  683. """init Sqrt"""
  684. def infer_shape(self, x_shape):
  685. return x_shape
  686. def infer_dtype(self, x_type):
  687. validator.check_tensor_type_same({"x": x_type}, mstype.number_type, self.name)
  688. return x_type
  689. class Reciprocal(PrimitiveWithInfer):
  690. """
  691. Returns reciprocal of a tensor element-wise.
  692. Inputs:
  693. - **input_x** (Tensor) - The input tensor.
  694. Outputs:
  695. Tensor, has the same shape as the `input_x`.
  696. Examples:
  697. >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  698. >>> reciprocal = P.Reciprocal()
  699. >>> reciprocal(input_x)
  700. [1.0, 0.5, 0.25]
  701. """
  702. @prim_attr_register
  703. def __init__(self):
  704. """init Reciprocal"""
  705. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  706. def infer_shape(self, x):
  707. return x
  708. def infer_dtype(self, x):
  709. validator.check_subclass("x", x, mstype.tensor, self.name)
  710. return x
  711. class Pow(_MathBinaryOp):
  712. """
  713. Computes a tensor to the power of the second input.
  714. The first input must be a tensor, and the second input should be a tensor or a number.
  715. When the inputs are two tensors, the shapes of them could be broadcast,
  716. and the data types of them should be the same.
  717. When the inputs are one tensor and one scalar, the scalar could not be a parameter,
  718. only could be a constant, and the type of the scalar is the same as the data type of the tensor.
  719. Inputs:
  720. - **input_x** (Tensor) - The first input is a tensor whose data type is number.
  721. - **input_y** (Union[Tensor, Number]) - The second input is the exponent, a tensor whose data type is same as
  722. 'input_x' or a number. If the exponent is a tensor, its shape must be able to broadcast to the shape of
  723. the `input_x`.
  724. Outputs:
  725. Tensor, the shape is same as the shape after broadcasting, and the data type is same as 'input_x'.
  731. Examples:
  732. >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  733. >>> input_y = 3.0
  734. >>> pow = P.Pow()
  735. >>> pow(input_x, input_y)
  736. [1.0, 8.0, 64.0]
  737. >>>
  738. >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  739. >>> input_y = Tensor(np.array([2.0, 4.0, 3.0]), mindspore.float32)
  740. >>> pow = P.Pow()
  741. >>> pow(input_x, input_y)
  742. [1.0, 16.0, 64.0]
  743. """
  744. class Exp(PrimitiveWithInfer):
  745. """
  746. Returns exponential of a tensor element-wise.
  747. Inputs:
  748. - **input_x** (Tensor) - The input tensor.
  749. Outputs:
  750. Tensor, has the same shape as the `input_x`.
  751. Examples:
  752. >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  753. >>> exp = P.Exp()
  754. >>> exp(input_x)
  755. [ 2.71828183, 7.3890561 , 54.59815003]
  756. """
  757. @prim_attr_register
  758. def __init__(self):
  759. """init Exp"""
  760. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  761. def infer_shape(self, x_shape):
  762. return x_shape
  763. def infer_dtype(self, x_type):
  764. validator.check_subclass("x", x_type, mstype.tensor, self.name)
  765. return x_type
  766. class Log(PrimitiveWithInfer):
  767. """
  768. Returns the natural logarithm of a tensor element-wise.
  769. Inputs:
  770. - **input_x** (Tensor) - The input tensor.
  771. Outputs:
  772. Tensor, has the same shape as the `input_x`.
  773. Examples:
  774. >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  775. >>> log = P.Log()
  776. >>> log(input_x)
  777. [0.0, 0.69314718, 1.38629436]
  778. """
  779. @prim_attr_register
  780. def __init__(self):
  781. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  782. def infer_shape(self, x):
  783. return x
  784. def infer_dtype(self, x):
  785. validator.check_subclass("x", x, mstype.tensor, self.name)
  786. return x
  787. class Erf(PrimitiveWithInfer):
  788. r"""
  789. Computes the Gauss error function of `input_x` element-wise.
  790. Inputs:
  791. - **input_x** (Tensor) - The input tensor.
  792. Outputs:
  793. Tensor, has the same shape and dtype as the `input_x`.
  794. Examples:
  795. >>> input_x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
  796. >>> erf = P.Erf()
  797. >>> erf(input_x)
  798. [-0.8427168, 0., 0.8427168, 0.99530876, 0.99997765]
  799. """
  800. @prim_attr_register
  801. def __init__(self):
  802. """init Erf"""
  803. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  804. def infer_shape(self, x_shape):
  805. return x_shape
  806. def infer_dtype(self, x_type):
  807. validator.check_tensor_type_same({"x": x_type}, [mstype.float16, mstype.float32], self.name)
  808. return x_type
  809. class Minimum(_MathBinaryOp):
  810. """
  811. Computes the element-wise minimum of input tensors.
  812. The inputs must be two tensors or one tensor and one scalar.
  813. When the inputs are two tensors, the shapes of them could be broadcast,
  814. and the data types of them should be same.
  815. When the inputs are one tensor and one scalar, the scalar cannot be a parameter, only can be a constant,
  816. and the type of the scalar is the same as the data type of the tensor.
  817. Inputs:
  818. - **input_x** (Union[Tensor, Number]) - The first input is a tensor whose data type is number or a number.
  819. - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as 'input_x' or
  820. a number.
  821. Outputs:
  822. Tensor, the shape is same as the shape after broadcasting, and the data type is same as 'input_x'.
  823. Examples:
  824. >>> input_x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
  825. >>> input_y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
  826. >>> minimum = P.Minimum()
  827. >>> minimum(input_x, input_y)
  828. [1.0, 2.0, 3.0]
  829. """
  830. class Maximum(_MathBinaryOp):
  831. """
  832. Computes the element-wise maximum of input tensors.
  833. The inputs must be two tensors or one tensor and one scalar.
  834. When the inputs are two tensors, the shapes of them could be broadcast,
  835. and the data types of them should be same.
  836. When the inputs are one tensor and one scalar, the scalar cannot be a parameter, only can be a constant,
  837. and the type of the scalar is the same as the data type of the tensor.
  838. Inputs:
  839. - **input_x** (Union[Tensor, Number]) - The first input is a tensor whose data type is number or a number.
  840. - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as 'input_x' or
  841. a number.
  842. Outputs:
  843. Tensor, the shape is same as the shape after broadcasting, and the data type is same as 'input_x'.
  844. Examples:
  845. >>> input_x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
  846. >>> input_y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
  847. >>> maximum = P.Maximum()
  848. >>> maximum(input_x, input_y)
  849. [4.0, 5.0, 6.0]
  850. """
  851. class RealDiv(_MathBinaryOp):
  852. """
  853. Divides the first input tensor by the second input tensor in floating-point type element-wise.
  854. The inputs must be two tensors or one tensor and one scalar.
  855. When the inputs are two tensors, the shapes of them could be broadcast,
  856. and the data types of them should be same.
  857. When the inputs are one tensor and one scalar, the scalar cannot be a parameter, only can be a constant,
  858. and the type of the scalar is the same as the data type of the tensor.
  859. Inputs:
  860. - **input_x** (Union[Tensor, Number]) - The first input is a tensor whose data type is number or a number.
  861. - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as 'input_x' or
  862. a number.
  863. Outputs:
  864. Tensor, the shape is same as the shape after broadcasting, and the data type is same as 'input_x'.
  865. Examples:
  866. >>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
  867. >>> input_y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
  868. >>> realdiv = P.RealDiv()
  869. >>> realdiv(input_x, input_y)
  870. [0.25, 0.4, 0.5]
  871. """
  872. def infer_value(self, x, y):
  873. if x is not None and y is not None:
  874. x = x.asnumpy()
  875. y = y.asnumpy()
  876. out = x / y
  877. out = np.array(out, x.dtype)
  878. return Tensor(out)
  879. return None
  880. class Div(_MathBinaryOp):
  881. """
  882. Computes the quotient of dividing the first input tensor by the second input tensor element-wise.
  883. The inputs must be two tensors or one tensor and one scalar.
  884. When the inputs are two tensors, the shapes of them could be broadcast,
  885. and the data types of them should be same.
  886. When the inputs are one tensor and one scalar, the scalar cannot be a parameter, only can be a constant,
  887. and the type of the scalar is the same as the data type of the tensor.
  888. Inputs:
  889. - **input_x** (Union[Tensor, Number]) - The first input is a tensor whose data type is number or a number.
  890. - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as 'input_x' or
  891. a number.
  892. Outputs:
  893. Tensor, the shape is same as the shape after broadcasting, and the data type is same as 'input_x'.
  894. Raises:
  895. ValueError: When `input_x` and `input_y` are not the same dtype.
  896. Examples:
  897. >>> input_x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
  898. >>> input_y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
  899. >>> div = P.Div()
  900. >>> div(input_x, input_y)
  901. """
  902. def infer_value(self, x, y):
  903. if x is not None and y is not None:
  904. x = x.asnumpy()
  905. y = y.asnumpy()
  906. return Tensor(x / y)
  907. return None
  908. class FloorDiv(_MathBinaryOp):
  909. """
  910. Divides the first input tensor by the second input tensor element-wise and rounds down to the closest integer.
  911. The inputs must be two tensors or one tensor and one scalar.
  912. When the inputs are two tensors, the shapes of them could be broadcast,
  913. and the data types of them should be same.
  914. When the inputs are one tensor and one scalar, the scalar cannot be a parameter, only can be a constant,
  915. and the type of the scalar is the same as the data type of the tensor.
  916. Inputs:
  917. - **input_x** (Union[Tensor, Number]) - The first input is a tensor whose data type is number or a number.
  918. - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as 'input_x' or
  919. a number.
  920. Outputs:
  921. Tensor, the shape is same as the shape after broadcasting, and the data type is same as 'input_x'.
  922. Examples:
  923. >>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
  924. >>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
  925. >>> floor_div = P.FloorDiv()
  926. >>> floor_div(input_x, input_y)
  927. [0, 1, -1]
  928. """
  929. class Floor(PrimitiveWithInfer):
  930. """
  931. Rounds a tensor down to the closest integer element-wise.
  932. Inputs:
  933. - **input_x** (Tensor) - The input tensor. Its element data type must be float.
  934. Outputs:
  935. Tensor, has the same shape as `input_x`.
  936. Examples:
  937. >>> input_x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
  938. >>> floor = P.Floor()
  939. >>> floor(input_x)
  940. [1.0, 2.0, -2.0]
  941. """
  942. @prim_attr_register
  943. def __init__(self):
  944. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  945. def infer_shape(self, x_shape):
  946. return x_shape
  947. def infer_dtype(self, x_dtype):
  948. validator.check_tensor_type_same({"x": x_dtype}, mstype.float_type, self.name)
  949. return x_dtype
  950. class FloorMod(_MathBinaryOp):
  951. """
  952. Computes the element-wise remainder of division.
  953. The inputs must be two tensors or one tensor and one scalar.
  954. When the inputs are two tensors, the shapes of them could be broadcast,
  955. and the data types of them should be same.
  956. When the inputs are one tensor and one scalar, the scalar cannot be a parameter, only can be a constant,
  957. and the type of the scalar is the same as the data type of the tensor.
  958. Inputs:
  959. - **input_x** (Union[Tensor, Number]) - The first input is a tensor whose data type is number or a number.
  960. - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as 'input_x' or
  961. a number.
  962. Outputs:
  963. Tensor, the shape is same as the shape after broadcasting, and the data type is same as 'input_x'.
  964. Examples:
  965. >>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
  966. >>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
  967. >>> floor_mod = P.FloorMod()
  968. >>> floor_mod(input_x, input_y)
  969. [2, 1, 2]
  970. """
  971. class Acosh(PrimitiveWithInfer):
  972. """
  973. Computes the inverse hyperbolic cosine of the input element-wise.
  974. Inputs:
  975. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`,
  976. and the data type of 'input_x' is number. Each element in 'input_x' should be greater than or equal to 1.
  977. Outputs:
  978. Tensor, has the same shape as `input_x`.
  979. Examples:
  980. >>> acosh = P.Acosh()
  981. >>> input_x = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32)
  982. >>> output = acosh(input_x)
  983. """
  984. @prim_attr_register
  985. def __init__(self):
  986. """init Acosh"""
  987. def infer_shape(self, x):
  988. return x
  989. def infer_dtype(self, x):
  990. validator.check_tensor_type_same({'x': x}, mstype.number_type, self.name)
  991. return x
  992. class _LogicBinaryOp(_BinaryOp):
  993. """
  994. Define logic binary operators.
  995. """
  996. @staticmethod
  997. def do_infer_dtype(x_dtype, y_dtype, valid_type=mstype.number_type, prim_name=None):
  998. args_dtype = {"x": x_dtype, "y": y_dtype}
  999. validator.check_tensor_type_same(args_dtype, valid_type, prim_name)
  1000. return mstype.tensor_type(mstype.bool_)
  1001. def infer_dtype(self, x_dtype, y_dtype):
  1002. return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, prim_name=self.name)
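# Note (illustrative, based on the code above): logic comparisons reuse the broadcast
# shape from _BinaryOp.infer_shape but always report a bool tensor dtype, e.g. Equal
# on two float32 tensors of shapes (2, 1) and (1, 3) infers a (2, 3) bool output.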
  1003. class Equal(_LogicBinaryOp):
  1004. """
  1005. Computes the equivalence between two tensors element-wise.
  1006. The inputs must be two tensors or one tensor and one scalar.
  1007. When the inputs are two tensors, the shapes of them could be broadcast,
  1008. and the data types of them should be same.
  1009. When the inputs are one tensor and one scalar, the scalar cannot be a parameter, only can be a constant,
  1010. and the type of the scalar is the same as the data type of the tensor.
  1011. Inputs:
  1012. - **input_x** (Union[Tensor, Number, bool]) - The first input is a tensor whose data type is number or bool, or
  1013. a number or a bool object.
  1014. - **input_y** (Union[Tensor, Number, bool]) - The second input tensor whose data type is same as 'input_x' or
  1015. a number or a bool object.
  1016. Outputs:
  1017. Tensor, the shape is same as the shape after broadcasting, and the data type is bool.
  1018. Examples:
  1019. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32)
  1020. >>> equal = P.Equal()
  1021. >>> equal(input_x, 2.0)
  1022. [False, True, False]
  1023. >>>
  1024. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  1025. >>> input_y = Tensor(np.array([1, 2, 4]), mindspore.int32)
  1026. >>> equal = P.Equal()
  1027. >>> equal(input_x, input_y)
  1028. [True, True, False]
  1029. """
  1030. def infer_dtype(self, x_dtype, y_dtype):
  1031. return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type + (mstype.bool_,), self.name)
  1032. class EqualCount(PrimitiveWithInfer):
  1033. """
  1034. Computes the number of the same elements of two tensors.
  1035. The two input tensors should have same shape.
  1036. Inputs:
  1037. - **input_x** (Tensor) - The first input tensor.
  1038. - **input_y** (Tensor) - The second input tensor.
  1039. Outputs:
  1040. Tensor, with the type as `mindspore.int32` and size as (1,).
  1041. Examples:
  1042. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  1043. >>> input_y = Tensor(np.array([1, 2, 4]), mindspore.int32)
  1044. >>> equal_count = P.EqualCount()
  1045. >>> equal_count(input_x, input_y)
  1046. [2]
  1047. """
  1048. @prim_attr_register
  1049. def __init__(self):
  1050. """init EqualCount"""
  1051. self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
  1052. def infer_shape(self, x_shape, y_shape):
  1053. output_shape = (1,)
  1054. return output_shape
  1055. def infer_dtype(self, x_dtype, y_dtype):
  1056. args = {'x': x_dtype, 'y': y_dtype}
  1057. validator.check_tensor_type_same(args, mstype.number_type + (mstype.bool_,), self.name)
  1058. return x_dtype
class NotEqual(_LogicBinaryOp):
    """
    Computes the non-equivalence of two tensors element-wise.

    The inputs must be two tensors or one tensor and one scalar.
    When the inputs are two tensors, their shapes can be broadcast and their data types must be the same.
    When the inputs are one tensor and one scalar, the scalar can only be a constant, not a parameter,
    and its type must match the data type of the tensor.

    Inputs:
        - **input_x** (Union[Tensor, Number, bool]) - The first input is a tensor whose data type is number or
          bool, or a number or a bool object.
        - **input_y** (Union[Tensor, Number, bool]) - The second input is a tensor whose data type is the same as
          `input_x`, or a number or a bool object.

    Outputs:
        Tensor, the shape is the same as the shape after broadcasting, and the data type is bool.

    Examples:
        >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32)
        >>> not_equal = P.NotEqual()
        >>> not_equal(input_x, 2.0)
        [True, False, True]
        >>>
        >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
        >>> input_y = Tensor(np.array([1, 2, 4]), mindspore.int32)
        >>> not_equal = P.NotEqual()
        >>> not_equal(input_x, input_y)
        [False, False, True]
    """

    def infer_dtype(self, x_dtype, y_dtype):
        return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type + (mstype.bool_,), self.name)
class Greater(_LogicBinaryOp):
    """
    Computes the boolean value of :math:`x > y` element-wise.

    The inputs must be two tensors or one tensor and one scalar.
    When the inputs are two tensors, their shapes can be broadcast and their data types must be the same.
    When the inputs are one tensor and one scalar, the scalar can only be a constant, not a parameter,
    and its type must match the data type of the tensor.

    Inputs:
        - **input_x** (Union[Tensor, Number]) - The first input is a tensor whose data type is number, or a number.
        - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is the same as
          `input_x`, or a number.

    Outputs:
        Tensor, the shape is the same as the shape after broadcasting, and the data type is bool.

    Examples:
        >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
        >>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
        >>> greater = P.Greater()
        >>> greater(input_x, input_y)
        [False, True, False]
    """


class GreaterEqual(_LogicBinaryOp):
    """
    Computes the boolean value of :math:`x >= y` element-wise.

    The inputs must be two tensors or one tensor and one scalar.
    When the inputs are two tensors, their shapes can be broadcast and their data types must be the same.
    When the inputs are one tensor and one scalar, the scalar can only be a constant, not a parameter,
    and its type must match the data type of the tensor.

    Inputs:
        - **input_x** (Union[Tensor, Number]) - The first input is a tensor whose data type is number, or a number.
        - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is the same as
          `input_x`, or a number.

    Outputs:
        Tensor, the shape is the same as the shape after broadcasting, and the data type is bool.

    Examples:
        >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
        >>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
        >>> greater_equal = P.GreaterEqual()
        >>> greater_equal(input_x, input_y)
        [True, True, False]
    """


class Less(_LogicBinaryOp):
    """
    Computes the boolean value of :math:`x < y` element-wise.

    The inputs must be two tensors or one tensor and one scalar.
    When the inputs are two tensors, their shapes can be broadcast and their data types must be the same.
    When the inputs are one tensor and one scalar, the scalar can only be a constant, not a parameter,
    and its type must match the data type of the tensor.

    Inputs:
        - **input_x** (Union[Tensor, Number]) - The first input is a tensor whose data type is number, or a number.
        - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is the same as
          `input_x`, or a number.

    Outputs:
        Tensor, the shape is the same as the shape after broadcasting, and the data type is bool.

    Examples:
        >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
        >>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
        >>> less = P.Less()
        >>> less(input_x, input_y)
        [False, False, True]
    """


class LessEqual(_LogicBinaryOp):
    """
    Computes the boolean value of :math:`x <= y` element-wise.

    The inputs must be two tensors or one tensor and one scalar.
    When the inputs are two tensors, their shapes can be broadcast and their data types must be the same.
    When the inputs are one tensor and one scalar, the scalar can only be a constant, not a parameter,
    and its type must match the data type of the tensor.

    Inputs:
        - **input_x** (Union[Tensor, Number]) - The first input is a tensor whose data type is number, or a number.
        - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is the same as
          `input_x`, or a number.

    Outputs:
        Tensor, the shape is the same as the shape after broadcasting, and the data type is bool.

    Examples:
        >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
        >>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
        >>> less_equal = P.LessEqual()
        >>> less_equal(input_x, input_y)
        [True, False, True]
    """
class LogicalNot(PrimitiveWithInfer):
    """
    Computes the "logical NOT" of a tensor element-wise.

    Inputs:
        - **input_x** (Tensor) - The input tensor whose dtype is bool.

    Outputs:
        Tensor, the shape is the same as `input_x`, and the dtype is bool.

    Examples:
        >>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_)
        >>> logical_not = P.LogicalNot()
        >>> logical_not(input_x)
        [False, True, False]
    """

    @prim_attr_register
    def __init__(self):
        """init LogicalNot"""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        validator.check_tensor_type_same({"x": x_dtype}, [mstype.bool_], self.name)
        return mstype.tensor_type(mstype.bool_)


class LogicalAnd(_LogicBinaryOp):
    """
    Computes the "logical AND" of two tensors element-wise.

    The inputs must be two tensors or one tensor and one bool object.
    When the inputs are two tensors, their shapes can be broadcast and their data types must both be bool.
    When the inputs are one tensor and one bool object, the bool object can only be a constant, not a parameter,
    and the data type of the tensor must be bool.

    Inputs:
        - **input_x** (Union[Tensor, bool]) - The first input is a tensor whose data type is bool, or a bool object.
        - **input_y** (Union[Tensor, bool]) - The second input is a tensor whose data type is bool, or a bool object.

    Outputs:
        Tensor, the shape is the same as the shape after broadcasting, and the data type is bool.

    Examples:
        >>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_)
        >>> input_y = Tensor(np.array([True, True, False]), mindspore.bool_)
        >>> logical_and = P.LogicalAnd()
        >>> logical_and(input_x, input_y)
        [True, False, False]
    """

    def infer_dtype(self, x_dtype, y_dtype):
        return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, (mstype.bool_,), self.name)


class LogicalOr(_LogicBinaryOp):
    """
    Computes the "logical OR" of two tensors element-wise.

    The inputs must be two tensors or one tensor and one bool object.
    When the inputs are two tensors, their shapes can be broadcast and their data types must both be bool.
    When the inputs are one tensor and one bool object, the bool object can only be a constant, not a parameter,
    and the data type of the tensor must be bool.

    Inputs:
        - **input_x** (Union[Tensor, bool]) - The first input is a tensor whose data type is bool, or a bool object.
        - **input_y** (Union[Tensor, bool]) - The second input is a tensor whose data type is bool, or a bool object.

    Outputs:
        Tensor, the shape is the same as the shape after broadcasting, and the data type is bool.

    Examples:
        >>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_)
        >>> input_y = Tensor(np.array([True, True, False]), mindspore.bool_)
        >>> logical_or = P.LogicalOr()
        >>> logical_or(input_x, input_y)
        [True, True, True]
    """

    def infer_dtype(self, x_dtype, y_dtype):
        return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, (mstype.bool_,), self.name)
class IsNan(PrimitiveWithInfer):
    """
    Determines which elements are NaN for each position.

    Inputs:
        - **input_x** (Tensor) - The input tensor.

    Outputs:
        Tensor, has the same shape as the input, and the dtype is bool.
    """

    @prim_attr_register
    def __init__(self):
        """init IsNan"""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        return mstype.bool_
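# The IsNan docstring has no Examples section; a minimal usage sketch in the same doctest
# style as the other operators in this file (same assumed Tensor/np/mindspore/P names):
#
#     >>> is_nan = P.IsNan()
#     >>> input_x = Tensor(np.array([np.nan, 1.0, np.inf]), mindspore.float32)
#     >>> is_nan(input_x)
#     [True, False, False]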
class IsInf(PrimitiveWithInfer):
    """
    Determines which elements are inf or -inf for each position.

    Inputs:
        - **input_x** (Tensor) - The input tensor.

    Outputs:
        Tensor, has the same shape as the input, and the dtype is bool.
    """

    @prim_attr_register
    def __init__(self):
        """init IsInf"""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        return mstype.bool_


class IsFinite(PrimitiveWithInfer):
    """
    Determines which elements are finite for each position.

    Inputs:
        - **input_x** (Tensor) - The input tensor.

    Outputs:
        Tensor, has the same shape as the input, and the dtype is bool.
    """

    @prim_attr_register
    def __init__(self):
        """init IsFinite"""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        return mstype.bool_
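# Likewise for IsInf and IsFinite, a minimal usage sketch under the same assumptions as the
# other examples in this file:
#
#     >>> input_x = Tensor(np.array([np.inf, -np.inf, 1.0, np.nan]), mindspore.float32)
#     >>> P.IsInf()(input_x)
#     [True, True, False, False]
#     >>> P.IsFinite()(input_x)
#     [False, False, True, False]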
class FloatStatus(PrimitiveWithInfer):
    """
    Determines whether the elements contain NaN, inf or -inf. `0` for normal, `1` for overflow.

    Inputs:
        - **input_x** (Tensor) - The input tensor.

    Outputs:
        Tensor, has the shape of `(1,)`, and the same dtype as the input, which is
        `mindspore.dtype.float32` or `mindspore.dtype.float16`.
    """

    @prim_attr_register
    def __init__(self):
        """init FloatStatus"""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, x_shape):
        return [1]

    def infer_dtype(self, x_dtype):
        return x_dtype
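# A minimal usage sketch for FloatStatus (same assumed names as the other examples in this
# file); only the zero/non-zero meaning stated in the docstring above is relied on here:
#
#     >>> float_status = P.FloatStatus()
#     >>> input_x = Tensor(np.array([np.nan, 1.0, np.inf]), mindspore.float32)
#     >>> status = float_status(input_x)
#     >>> # `status` has shape (1,); a non-zero value means NaN, inf or -inf was found.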
class NPUAllocFloatStatus(PrimitiveWithInfer):
    """
    Allocates a flag to store the overflow status.

    The flag is a tensor whose shape is `(8,)` and data type is `mindspore.dtype.float32`.

    Note:
        Examples: see `NPUGetFloatStatus`.

    Outputs:
        Tensor, has the shape of `(8,)`.

    Examples:
        >>> alloc_status = P.NPUAllocFloatStatus()
        >>> init = alloc_status()
        Tensor([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=(8,), dtype=mindspore.float32)
    """

    @prim_attr_register
    def __init__(self):
        """init NPUAllocFloatStatus"""
        self.add_prim_attr("_side_effect_flag", True)

    def infer_shape(self):
        return [8]

    def infer_dtype(self):
        return mstype.float32


class NPUGetFloatStatus(PrimitiveWithInfer):
    """
    Updates the flag which is the output tensor of `NPUAllocFloatStatus` with the latest overflow status.

    The flag is a tensor whose shape is `(8,)` and data type is `mindspore.dtype.float32`.
    If the sum of the flag equals 0, no overflow has happened. If the sum of the flag is greater than 0,
    an overflow has happened.

    Inputs:
        - **input_x** (Tensor) - The output tensor of `NPUAllocFloatStatus`.

    Outputs:
        Tensor, has the same shape as `input_x`. All the elements in the tensor will be zero.

    Examples:
        >>> alloc_status = P.NPUAllocFloatStatus()
        >>> get_status = P.NPUGetFloatStatus()
        >>> init = alloc_status()
        >>> flag = get_status(init)
        Tensor([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=(8,), dtype=mindspore.float32)
    """

    @prim_attr_register
    def __init__(self):
        """init NPUGetFloatStatus"""
        self.add_prim_attr("_side_effect_flag", True)

    def infer_shape(self, x_shape):
        cls_name = self.name
        validator.check_integer("len(x_shape)", len(x_shape), 1, Rel.EQ, cls_name)
        validator.check_integer("x_shape[0]", x_shape[0], 8, Rel.EQ, cls_name)
        return [8]

    def infer_dtype(self, x_dtype):
        validator.check_tensor_type_same({'x': x_dtype}, [mstype.float32], self.name)
        return mstype.float32


class NPUClearFloatStatus(PrimitiveWithInfer):
    """
    Clears the flag which stores the overflow status.

    Note:
        The flag is in a register on the `Ascend` device. It will be reset and cannot be reused again after
        `NPUClearFloatStatus` is called.

        Examples: see `NPUGetFloatStatus`.

    Inputs:
        - **input_x** (Tensor) - The output tensor of `NPUAllocFloatStatus`.

    Outputs:
        Tensor, has the same shape as `input_x`. All the elements in the tensor will be zero.

    Examples:
        >>> alloc_status = P.NPUAllocFloatStatus()
        >>> get_status = P.NPUGetFloatStatus()
        >>> clear_status = P.NPUClearFloatStatus()
        >>> init = alloc_status()
        >>> flag = get_status(init)
        >>> clear = clear_status(init)
        Tensor([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=(8,), dtype=mindspore.float32)
    """

    @prim_attr_register
    def __init__(self):
        """init NPUClearFloatStatus"""
        self.add_prim_attr("_side_effect_flag", True)

    def infer_shape(self, x_shape):
        cls_name = self.name
        validator.check_integer("len(x_shape)", len(x_shape), 1, Rel.EQ, cls_name)
        validator.check_integer("x_shape[0]", x_shape[0], 8, Rel.EQ, cls_name)
        return [8]

    def infer_dtype(self, x_dtype):
        validator.check_tensor_type_same({'x': x_dtype}, [mstype.float32], self.name)
        return mstype.float32
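# A sketch of how the three NPU status primitives above are typically combined to check a
# computation for overflow on Ascend; the `...` line marks the computation being monitored,
# and the flag is consumed following the rule stated in the NPUGetFloatStatus docstring
# (sum of the flag greater than 0 means an overflow happened):
#
#     >>> alloc_status = P.NPUAllocFloatStatus()
#     >>> get_status = P.NPUGetFloatStatus()
#     >>> clear_status = P.NPUClearFloatStatus()
#     >>> init = alloc_status()        # allocate the (8,) status flag
#     >>> clear = clear_status(init)   # reset it before the monitored computation
#     >>> # ... run the computation whose overflow status should be checked ...
#     >>> flag = get_status(init)      # refresh `init` with the latest status
#     >>> # if the sum of the elements of `init` is greater than 0, an overflow happened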
class Cos(PrimitiveWithInfer):
    """
    Computes cosine of input element-wise.

    Inputs:
        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.

    Outputs:
        Tensor, has the same shape as `input_x`.

    Examples:
        >>> cos = P.Cos()
        >>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
        >>> output = cos(input_x)
    """

    @prim_attr_register
    def __init__(self):
        """init Cos"""

    def infer_shape(self, x):
        return x

    def infer_dtype(self, x):
        validator.check_tensor_type_same({'x': x}, mstype.number_type, self.name)
        return x


class ACos(PrimitiveWithInfer):
    """
    Computes arccosine of input element-wise.

    Inputs:
        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.

    Outputs:
        Tensor, has the same shape as `input_x`.

    Examples:
        >>> acos = P.ACos()
        >>> input_x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
        >>> output = acos(input_x)
    """

    @prim_attr_register
    def __init__(self):
        """init ACos"""

    def infer_shape(self, x):
        return x

    def infer_dtype(self, x):
        validator.check_tensor_type_same({'x': x}, mstype.number_type, self.name)
        return x


class Sin(PrimitiveWithInfer):
    """
    Computes sine of input element-wise.

    Inputs:
        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.

    Outputs:
        Tensor, has the same shape as `input_x`.

    Examples:
        >>> sin = P.Sin()
        >>> input_x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
        >>> output = sin(input_x)
    """

    @prim_attr_register
    def __init__(self):
        """Init Sin."""

    def infer_shape(self, x):
        return x

    def infer_dtype(self, x):
        validator.check_tensor_type_same({'x': x}, mstype.number_type, self.name)
        return x
class NMSWithMask(PrimitiveWithInfer):
    """
    Selects some bounding boxes in descending order of score.

    Args:
        iou_threshold (float): Specifies the threshold of overlap boxes with respect to
            IOU. Default: 0.5.

    Raises:
        ValueError: If the iou_threshold is not a float number, or if the first dimension
            of the input Tensor is less than or equal to 0, or if the data type of the input
            Tensor is not float16 or float32.

    Inputs:
        - **bboxes** (Tensor) - The shape of tensor is :math:`(N, 5)`. Input bounding boxes.
          `N` is the number of input bounding boxes. Every bounding box
          contains 5 values: the first 4 values are the coordinates of the bounding
          box, and the last value is the score of this bounding box.

    Outputs:
        tuple[Tensor], tuple of three tensors, they are selected_boxes, selected_idx and selected_mask.

        - **selected_boxes** (Tensor) - The shape of tensor is :math:`(N, 5)`. The list of bounding
          boxes after the non-max suppression calculation.
        - **selected_idx** (Tensor) - The shape of tensor is :math:`(N,)`. The index list of
          valid input bounding boxes.
        - **selected_mask** (Tensor) - The shape of tensor is :math:`(N,)`. A mask list of
          valid output bounding boxes.

    Examples:
        >>> bbox = np.random.rand(128, 5)
        >>> bbox[:, 2] += bbox[:, 0]
        >>> bbox[:, 3] += bbox[:, 1]
        >>> inputs = Tensor(bbox, mindspore.float32)
        >>> nms = P.NMSWithMask(0.5)
        >>> output_boxes, indices, mask = nms(inputs)
    """

    @prim_attr_register
    def __init__(self, iou_threshold=0.5):
        """Init NMSWithMask"""
        validator.check_value_type("iou_threshold", iou_threshold, [float], self.name)
        self.init_prim_io_names(inputs=['bboxes'], outputs=['selected_boxes', 'selected_idx', 'selected_mask'])

    def infer_shape(self, bboxes_shape):
        cls_name = self.name
        validator.check_integer("bboxes rank", len(bboxes_shape), 2, Rel.EQ, cls_name)
        validator.check_integer("bboxes.shape()[0]", bboxes_shape[0], 0, Rel.GT, cls_name)
        validator.check_integer("bboxes.shape()[1]", bboxes_shape[1], 5, Rel.EQ, cls_name)
        num = bboxes_shape[0]
        return (bboxes_shape, (num,), (num,))

    def infer_dtype(self, bboxes_dtype):
        validator.check_tensor_type_same({"bboxes": bboxes_dtype}, [mstype.float16, mstype.float32], self.name)
        return (bboxes_dtype, mstype.int32, mstype.bool_)
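# A short follow-up to the NMSWithMask example above (a sketch under the same assumptions
# as that example): `mask` flags the surviving boxes, so after converting the outputs back
# to NumPy the kept boxes can be gathered with ordinary boolean indexing.
#
#     >>> keep = mask.asnumpy()
#     >>> kept_boxes = output_boxes.asnumpy()[keep]   # boxes that survived suppression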
class Abs(PrimitiveWithInfer):
    """
    Returns absolute value of a tensor element-wise.

    Inputs:
        - **input_x** (Tensor) - The input tensor. The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.

    Outputs:
        Tensor, has the same shape as the `input_x`.

    Examples:
        >>> input_x = Tensor(np.array([-1.0, 1.0, 0.0]), mindspore.float32)
        >>> abs = P.Abs()
        >>> abs(input_x)
        [1.0, 1.0, 0.0]
    """

    @prim_attr_register
    def __init__(self):
        """init Abs"""

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_type):
        validator.check_tensor_type_same({'x': x_type}, mstype.number_type, self.name)
        return x_type

    def infer_value(self, x):
        if x is not None:
            x = x.asnumpy()
            out = np.abs(x, dtype=x.dtype)
            return Tensor(out)
        return None
class Sign(PrimitiveWithInfer):
    r"""
    Performs :math:`sign` on the tensor element-wise.

    Note:
        .. math::
            sign(x) = \begin{cases} -1, &if\ x < 0 \cr
            0, &if\ x == 0 \cr
            1, &if\ x > 0\end{cases}

    Inputs:
        - **input_x** (Tensor) - The input tensor.

    Outputs:
        Tensor, has the same shape and type as the `input_x`.

    Examples:
        >>> input_x = Tensor(np.array([[2.0, 0.0, -1.0]]), mindspore.float32)
        >>> sign = P.Sign()
        >>> output = sign(input_x)
        [[1.0, 0.0, -1.0]]
    """

    @prim_attr_register
    def __init__(self):
        pass

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        validator.check_tensor_type_same({'x': x_dtype}, mstype.number_type, self.name)
        return x_dtype
class Round(PrimitiveWithInfer):
    """
    Rounds a tensor element-wise to the nearest integer, with ties rounded to the nearest even value
    (half to even).

    Inputs:
        - **input_x** (Tensor) - The input tensor.

    Outputs:
        Tensor, has the same shape and type as the `input_x`.

    Examples:
        >>> input_x = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]), mindspore.float32)
        >>> round = P.Round()
        >>> round(input_x)
        [1.0, 2.0, 2.0, 2.0, -4.0]
    """

    @prim_attr_register
    def __init__(self):
        pass

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_type):
        validator.check_tensor_type_same({'x': x_type}, mstype.number_type, self.name)
        return x_type
class Atan2(_MathBinaryOp):
    r"""
    Returns the arctangent of input_x/input_y element-wise.

    It returns :math:`\theta\ \in\ [-\pi, \pi]`
    such that :math:`x = r*\sin(\theta), y = r*\cos(\theta)`, where :math:`r = \sqrt{x^2 + y^2}`.

    Inputs:
        - **input_x** (Tensor) - The input tensor.
        - **input_y** (Tensor) - The input tensor.

    Outputs:
        Tensor, the shape is the same as the shape after broadcasting, and the data type is the same as `input_x`.

    Examples:
        >>> input_x = Tensor(np.array([[0, 1]]), mindspore.float32)
        >>> input_y = Tensor(np.array([[1, 1]]), mindspore.float32)
        >>> atan2 = P.Atan2()
        >>> atan2(input_x, input_y)
        [[0. 0.7853982]]
    """