You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

math_ops.py 125 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """Operators for math."""
  16. import copy
  17. import numpy as np
  18. from ... import context
  19. from .. import signature as sig
  20. from ..._checkparam import Validator as validator
  21. from ..._checkparam import Rel
  22. from ...common import dtype as mstype
  23. from ...common.tensor import Tensor
  24. from .._utils import get_broadcast_shape
  25. from ..primitive import PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_register, _run_op
  26. def _infer_shape_reduce(x, axis, keep_dims, prim_name):
  27. """Common infer for reduce operator"""
  28. def reduce_one_axis(one_axis):
  29. validator.check_int_range('axis', one_axis, -dim, dim, Rel.INC_LEFT, prim_name)
  30. if one_axis < 0:
  31. one_axis += dim
  32. axis_reduce.add(one_axis)
  33. validator.check_value_type('axis', axis, [int, tuple, list], prim_name)
  34. dim = len(x)
  35. axis_reduce = set()
  36. if isinstance(axis, int):
  37. reduce_one_axis(axis)
  38. else:
  39. if not axis:
  40. if keep_dims:
  41. return [1] * dim
  42. return []
  43. for index, one_axis in enumerate(axis):
  44. validator.check_value_type('axis[%d]' % index, one_axis, [int], prim_name)
  45. reduce_one_axis(one_axis)
  46. out_shape = []
  47. for i in range(dim):
  48. if i in axis_reduce:
  49. if keep_dims:
  50. out_shape.append(1)
  51. else:
  52. out_shape.append(x[i])
  53. return out_shape
  54. class _BinaryOp(PrimitiveWithInfer):
  55. """
  56. Define binary operators.
  57. """
  58. __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
  59. @prim_attr_register
  60. def __init__(self):
  61. """init _BinaryOp"""
  62. self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
  63. def infer_shape(self, x_shape, y_shape):
  64. return get_broadcast_shape(x_shape, y_shape, self.name)
  65. class _MathBinaryOp(_BinaryOp):
  66. """
  67. Define math binary operators.
  68. """
  69. @staticmethod
  70. def do_infer_dtype(x_dtype, y_dtype, valid_dtype=mstype.number_type, prim_name=None):
  71. args_type = {"x": x_dtype, "y": y_dtype}
  72. validator.check_tensor_type_same(args_type, valid_dtype, prim_name)
  73. return x_dtype
  74. def infer_dtype(self, x_dtype, y_dtype):
  75. return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type, self.name)
  76. class _BitwiseBinaryOp(_MathBinaryOp):
  77. """
  78. Define bitwise binary operators.
  79. """
  80. @prim_attr_register
  81. def __init__(self):
  82. """init _BitwiseBinaryOp"""
  83. self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])
  84. @staticmethod
  85. def _check_bitwise_op_input_type(x1_type, x2_type, prim):
  86. args = {'x1': x1_type, 'x2': x2_type}
  87. valid_types = mstype.int_type + mstype.uint_type
  88. validator.check_tensor_type_same(args, valid_types, prim)
  89. return x1_type
  90. def infer_dtype(self, x1_type, x2_type):
  91. return _BitwiseBinaryOp._check_bitwise_op_input_type(x1_type, x2_type, self.name)
  92. class TensorAdd(_MathBinaryOp):
  93. """
  94. Adds two input tensors element-wise.
  95. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  96. The inputs must be two tensors or one tensor and one scalar.
  97. When the inputs are two tensors,
  98. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  99. When the inputs are one tensor and one scalar,
  100. the scalar could only be a constant.
  101. Inputs:
  102. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  103. a bool or a tensor whose data type is number or bool.
  104. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  105. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  106. Outputs:
  107. Tensor, the shape is the same as the one after broadcasting,
  108. and the data type is the one with high precision or high digits among the two inputs.
  109. Examples:
  110. >>> add = P.TensorAdd()
  111. >>> input_x = Tensor(np.array([1,2,3]).astype(np.float32))
  112. >>> input_y = Tensor(np.array([4,5,6]).astype(np.float32))
  113. >>> add(input_x, input_y)
  114. [5,7,9]
  115. """
  116. def infer_value(self, x, y):
  117. if x is not None and y is not None:
  118. x = x.asnumpy()
  119. y = y.asnumpy()
  120. out = x + y
  121. out = np.array(out, x.dtype)
  122. return Tensor(out)
  123. return None
  124. class AssignAdd(PrimitiveWithInfer):
  125. """
  126. Updates a `Parameter` by adding a value to it.
  127. Inputs of `variable` and `value` comply with the implicit type conversion rules to make the data types consistent.
  128. If they have different data types, lower priority data type will be converted to
  129. relatively highest priority data type.
  130. If `value` is a number, the number is automatically converted to Tensor,
  131. and the data type is consistent with the Tensor data type involved in the operation.
  132. RuntimeError exception will be thrown when the data type conversion of Parameter is required.
  133. Inputs:
  134. - **variable** (Parameter) - The `Parameter`.
  135. - **value** (Union[numbers.Number, Tensor]) - The value to be added to the `variable`.
  136. It should have the same shape as `variable` if it is a Tensor.
  137. Examples:
  138. >>> class Net(Cell):
  139. >>> def __init__(self):
  140. >>> super(Net, self).__init__()
  141. >>> self.AssignAdd = P.AssignAdd()
  142. >>> self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int64), name="global_step")
  143. >>>
  144. >>> def construct(self, x):
  145. >>> self.AssignAdd(self.variable, x)
  146. >>> return self.variable
  147. >>>
  148. >>> net = Net()
  149. >>> value = Tensor(np.ones([1]).astype(np.int64)*100)
  150. >>> net(value)
  151. """
  152. __mindspore_signature__ = (
  153. sig.make_sig('x', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
  154. sig.make_sig('value', dtype=sig.sig_dtype.T)
  155. )
  156. @prim_attr_register
  157. def __init__(self):
  158. """init AssignAdd"""
  159. self.init_prim_io_names(inputs=['ref', 'value'], outputs=['output'])
  160. def infer_shape(self, variable, value):
  161. return value
  162. def infer_dtype(self, variable, value):
  163. args = {"variable": variable, "value": value}
  164. validator.check_scalar_or_tensor_type_same(args, mstype.number_type, self.name)
  165. return value
  166. class AssignSub(PrimitiveWithInfer):
  167. """
  168. Updates a `Parameter` by subtracting a value from it.
  169. Inputs of `variable` and `value` comply with the implicit type conversion rules to make the data types consistent.
  170. If they have different data types, lower priority data type will be converted to
  171. relatively highest priority data type.
  172. If `value` is a number, the number is automatically converted to Tensor,
  173. and the data type is consistent with the Tensor data type involved in the operation.
  174. RuntimeError exception will be thrown when the data type conversion of Parameter is required.
  175. Inputs:
  176. - **variable** (Parameter) - The `Parameter`.
  177. - **value** (Union[numbers.Number, Tensor]) - The value to be subtracted from the `variable`.
  178. It should have the same shape as `variable` if it is a Tensor.
  179. Examples:
  180. >>> class Net(Cell):
  181. >>> def __init__(self):
  182. >>> super(Net, self).__init__()
  183. >>> self.AssignSub = P.AssignSub()
  184. >>> self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int32), name="global_step")
  185. >>>
  186. >>> def construct(self, x):
  187. >>> self.AssignSub(self.variable, x)
  188. >>> return self.variable
  189. >>>
  190. >>> net = Net()
  191. >>> value = Tensor(np.ones([1]).astype(np.int32)*100)
  192. >>> net(value)
  193. """
  194. __mindspore_signature__ = (
  195. sig.make_sig('variable', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
  196. sig.make_sig('value', dtype=sig.sig_dtype.T)
  197. )
  198. @prim_attr_register
  199. def __init__(self):
  200. """init AssignSub"""
  201. def infer_shape(self, variable, value):
  202. return value
  203. def infer_dtype(self, variable, value):
  204. args = {"variable": variable, "value": value}
  205. validator.check_scalar_or_tensor_type_same(args, mstype.number_type, self.name)
  206. return value
class _Reduce(PrimitiveWithInfer):
    """
    Definition of base class of reduction class operators.

    Args:
        keep_dims (bool): If True, keep these reduced dimensions and the length is 1.
                          If False, don't keep these dimensions.
    """
    __mindspore_signature__ = (
        sig.make_sig('input_x'),
        sig.make_sig('axis', default=())
    )

    @prim_attr_register
    def __init__(self, keep_dims=False):
        """init Reduce"""
        validator.check_value_type('keep_dims', keep_dims, [bool], self.name)
        self.init_prim_io_names(inputs=['input_x', 'axis'], outputs=['y'])
        self.add_prim_attr("io_format", "ND")

    def __call__(self, x, axis=()):
        # Pynative-mode execution: dispatch directly through _run_op.
        args = [x, axis]
        output = _run_op(self, self.name, args)
        return output

    def do_infer(self, input_x, axis, valid_dtype=mstype.number_type):
        """ return meta infos of input parameters """
        axis_v = axis['value']
        input_shp = input_x['shape']
        args = {'input_x': input_x['dtype']}
        validator.check_tensor_type_same(args, valid_dtype, self.name)
        # Shape inference needs a concrete axis; a non-constant axis is rejected.
        if axis_v is None:
            raise ValueError(f"For {self.name}, axis must be const.")
        input_shp = _infer_shape_reduce(input_shp, axis_v, self.keep_dims, self.name)
        value = None
        # Constant folding: when the input value is known at compile time,
        # evaluate the reduction with the matching numpy function.
        if input_x['value'] is not None:
            prim_map = {
                'ReduceSum': np.sum,
                'ReduceMax': np.max,
                'ReduceMin': np.min,
            }
            np_reduce_func = prim_map.get(self.name, None)
            if np_reduce_func is not None:
                value = input_x['value'].asnumpy()
                # Empty axis means reduce over every dimension.
                if not axis_v:
                    axis_v = [i for i in range(len(input_x['shape']))]
                axis_v = tuple(axis_v)
                value = np_reduce_func(value, axis_v, keepdims=self.keep_dims)
                value = np.array(value)
                value = Tensor(value)
        return {'shape': input_shp,
                'dtype': input_x['dtype'],
                'value': value}

    def __infer__(self, input_x, axis):
        return self.do_infer(input_x, axis)
class ReduceMean(_Reduce):
    """
    Reduce a dimension of a tensor by averaging all elements in the dimension.

    The dtype of the tensor to be reduced is number.

    Args:
        keep_dims (bool): If True, keep these reduced dimensions and the length is 1.
                          If False, don't keep these dimensions. Default : False.

    Inputs:
        - **input_x** (Tensor[Number]) - The input tensor.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed.

    Outputs:
        Tensor, has the same dtype as the 'input_x'.

        - If axis is (), and keep_dims is false,
          the output is a 0-D tensor representing the mean of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is false,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is false,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Examples:
        >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
        >>> op = P.ReduceMean(keep_dims=True)
        >>> output = op(input_x, 1)
    """
  282. class ReduceSum(_Reduce):
  283. """
  284. Reduce a dimension of a tensor by summing all elements in the dimension.
  285. The dtype of the tensor to be reduced is number.
  286. Args:
  287. keep_dims (bool): If True, keep these reduced dimensions and the length is 1.
  288. If False, don't keep these dimensions. Default : False.
  289. Inputs:
  290. - **input_x** (Tensor[Number]) - The input tensor.
  291. - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
  292. Only constant value is allowed.
  293. Outputs:
  294. Tensor, has the same dtype as the 'input_x'.
  295. - If axis is (), and keep_dims is false,
  296. the output is a 0-D tensor representing the sum of all elements in the input tensor.
  297. - If axis is int, set as 2, and keep_dims is false,
  298. the shape of output is :math:`(x_1, x_3, ..., x_R)`.
  299. - If axis is tuple(int), set as (2, 3), and keep_dims is false,
  300. the shape of output is :math:`(x_1, x_4, ..., x_R)`.
  301. Examples:
  302. >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
  303. >>> op = P.ReduceSum(keep_dims=True)
  304. >>> output = op(input_x, 1)
  305. """
  306. @prim_attr_register
  307. def __init__(self, keep_dims=False):
  308. """init ReduceSum"""
  309. super(ReduceSum, self).__init__(keep_dims)
  310. self.__setattr_flag__ = True
class ReduceAll(_Reduce):
    """
    Reduce a dimension of a tensor by the "logical and" of all elements in the dimension.

    The dtype of the tensor to be reduced is bool.

    Args:
        keep_dims (bool): If True, keep these reduced dimensions and the length is 1.
                          If False, don't keep these dimensions.
                          Default : False, don't keep these reduced dimensions.

    Inputs:
        - **input_x** (Tensor[bool]) - The input tensor.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed.

    Outputs:
        Tensor, the dtype is bool.

        - If axis is (), and keep_dims is false,
          the output is a 0-D tensor representing the "logical and" of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is false,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is false,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Examples:
        >>> input_x = Tensor(np.array([[True, False], [True, True]]))
        >>> op = P.ReduceAll(keep_dims=True)
        >>> output = op(input_x, 1)
    """

    def __infer__(self, input_x, axis):
        # Restrict the valid input dtype to bool for this logical reduction.
        return self.do_infer(input_x, axis, (mstype.bool_,))
class ReduceAny(_Reduce):
    """
    Reduce a dimension of a tensor by the "logical or" of all elements in the dimension.

    The dtype of the tensor to be reduced is bool.

    Args:
        keep_dims (bool): If True, keep these reduced dimensions and the length is 1.
                          If False, don't keep these dimensions.
                          Default : False, don't keep these reduced dimensions.

    Inputs:
        - **input_x** (Tensor[bool]) - The input tensor.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed.

    Outputs:
        Tensor, the dtype is bool.

        - If axis is (), and keep_dims is false,
          the output is a 0-D tensor representing the "logical or" of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is false,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is false,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Examples:
        >>> input_x = Tensor(np.array([[True, False], [True, True]]))
        >>> op = P.ReduceAny(keep_dims=True)
        >>> output = op(input_x, 1)
    """

    def __infer__(self, input_x, axis):
        # Restrict the valid input dtype to bool for this logical reduction.
        return self.do_infer(input_x, axis, (mstype.bool_,))
  365. class ReduceMax(_Reduce):
  366. """
  367. Reduce a dimension of a tensor by the maximum value in this dimension.
  368. The dtype of the tensor to be reduced is number.
  369. Args:
  370. keep_dims (bool): If True, keep these reduced dimensions and the length is 1.
  371. If False, don't keep these dimensions.
  372. Default : False, don't keep these reduced dimensions.
  373. Inputs:
  374. - **input_x** (Tensor[Number]) - The input tensor.
  375. - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
  376. Only constant value is allowed.
  377. Outputs:
  378. Tensor, has the same dtype as the 'input_x'.
  379. - If axis is (), and keep_dims is false,
  380. the output is a 0-D tensor representing the maximum of all elements in the input tensor.
  381. - If axis is int, set as 2, and keep_dims is false,
  382. the shape of output is :math:`(x_1, x_3, ..., x_R)`.
  383. - If axis is tuple(int), set as (2, 3), and keep_dims is false,
  384. the shape of output is :math:`(x_1, x_4, ..., x_R)`.
  385. Examples:
  386. >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
  387. >>> op = P.ReduceMax(keep_dims=True)
  388. >>> output = op(input_x, 1)
  389. """
  390. @prim_attr_register
  391. def __init__(self, keep_dims=False):
  392. """ReduceMax"""
  393. super(ReduceMax, self).__init__(keep_dims)
  394. self.__setattr_flag__ = True
class ReduceMin(_Reduce):
    """
    Reduce a dimension of a tensor by the minimum value in the dimension.

    The dtype of the tensor to be reduced is number.

    Args:
        keep_dims (bool): If True, keep these reduced dimensions and the length is 1.
                          If False, don't keep these dimensions.
                          Default : False, don't keep these reduced dimensions.

    Inputs:
        - **input_x** (Tensor[Number]) - The input tensor.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed.

    Outputs:
        Tensor, has the same dtype as the 'input_x'.

        - If axis is (), and keep_dims is false,
          the output is a 0-D tensor representing the minimum of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is false,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is false,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Examples:
        >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
        >>> op = P.ReduceMin(keep_dims=True)
        >>> output = op(input_x, 1)
    """
class ReduceProd(_Reduce):
    """
    Reduce a dimension of a tensor by multiplying all elements in the dimension.

    The dtype of the tensor to be reduced is number.

    Args:
        keep_dims (bool): If True, keep these reduced dimensions and the length is 1.
            If False, don't keep these dimensions.
            Default : False, don't keep these reduced dimensions.

    Inputs:
        - **input_x** (Tensor[Number]) - The input tensor.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed.

    Outputs:
        Tensor, has the same dtype as the 'input_x'.

        - If axis is (), and keep_dims is false,
          the output is a 0-D tensor representing the product of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is false,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is false,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Examples:
        >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
        >>> op = P.ReduceProd(keep_dims=True)
        >>> output = op(input_x, 1)
    """
  445. class CumProd(PrimitiveWithInfer):
  446. """
  447. Compute the cumulative product of the tensor x along axis.
  448. Args:
  449. exclusive (bool): If True, perform exclusive cumulative product. Default: False.
  450. reverse (bool): If True, reverse the result along axis. Default: False
  451. Inputs:
  452. - **input_x** (Tensor[Number]) - The input tensor.
  453. - **axis** (int) - The dimensions to compute the cumulative product.
  454. Only constant value is allowed.
  455. Outputs:
  456. Tensor, has the same shape and dtype as the 'input_x'.
  457. Examples:
  458. >>> input_x = Tensor(np.array([a, b, c]).astype(np.float32))
  459. >>> op0 = P.CumProd()
  460. >>> output = op0(input_x, 0) # output=[a, a * b, a * b * c]
  461. >>> op1 = P.CumProd(exclusive=True)
  462. >>> output = op1(input_x, 0) # output=[1, a, a * b]
  463. >>> op2 = P.CumProd(reverse=True)
  464. >>> output = op2(input_x, 0) # output=[a * b * c, b * c, c]
  465. >>> op3 = P.CumProd(exclusive=True, reverse=True)
  466. >>> output = op3(input_x, 0) # output=[b * c, c, 1]
  467. """
  468. @prim_attr_register
  469. def __init__(self, exclusive=False, reverse=False):
  470. cls_name = self.name
  471. self.exclusive = validator.check_value_type("exclusive", exclusive, [bool], cls_name)
  472. self.reverse = validator.check_value_type("reverse", reverse, [bool], cls_name)
  473. self.init_prim_io_names(inputs=['x', 'axis'], outputs=['y'])
  474. def infer_shape(self, x_shape, axis_shape):
  475. return x_shape
  476. def infer_dtype(self, x_type, axis_type):
  477. cls_name = self.name
  478. validator.check_tensor_type_same({'x': x_type}, mstype.number_type, cls_name)
  479. validator.check_subclass("axis", axis_type, mstype.int_, cls_name)
  480. return x_type
  481. def infer_value(self, x, axis):
  482. if axis is None:
  483. raise ValueError(f"For {self.name}, axis must be const.")
class MatMul(PrimitiveWithInfer):
    """
    Multiplies matrix `a` by matrix `b`.

    The rank of input tensors must be `2`.

    Args:
        transpose_a (bool): If True, `a` is transposed before multiplication. Default: False.
        transpose_b (bool): If True, `b` is transposed before multiplication. Default: False.

    Inputs:
        - **input_x** (Tensor) - The first tensor to be multiplied. The shape of the tensor is :math:`(N, C)`. If
          `transpose_a` is True, its shape should be :math:`(N, C)` after transposing.
        - **input_y** (Tensor) - The second tensor to be multiplied. The shape of the tensor is :math:`(C, M)`. If
          `transpose_b` is True, its shape should be :math:`(C, M)` after transpose.

    Outputs:
        Tensor, the shape of the output tensor is :math:`(N, M)`.

    Examples:
        >>> input_x = Tensor(np.ones(shape=[1, 3]), mindspore.float32)
        >>> input_y = Tensor(np.ones(shape=[3, 4]), mindspore.float32)
        >>> matmul = P.MatMul()
        >>> output = matmul(input_x, input_y)
    """

    @prim_attr_register
    def __init__(self, transpose_a=False, transpose_b=False):
        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['output'])
        cls_name = self.name
        validator.check_value_type("transpose_a", transpose_a, [bool], cls_name)
        validator.check_value_type("transpose_b", transpose_b, [bool], cls_name)
        self.add_prim_attr("io_format", "ND")

    def check_shape_size(self, x, y):
        # Plain MatMul only accepts rank-2 inputs; BatchMatMul overrides this check.
        if len(x) != 2 or len(y) != 2:
            raise ValueError('MatMul input x, y should be the same dimension size and should be '
                             + f'equal to 2, while x size = {len(x)}, y size= {len(y)}')

    def infer_shape(self, x, y, bias=None):
        self.check_shape_size(x, y)
        cls_name = self.name
        # expected dimension of x, y, x:[...,a,b] y:[..., c,d], the dim size should be the same except the last two
        for i in range(len(x) - 2):
            if x[i] != y[i]:
                raise ValueError(f'For \'{cls_name}\' shape in dim[{i}] not the same, while x is {x[i]}, y is {y[i]}')
        # validate whether last two dims satifing matrix multiply
        x_last = x[-2:]
        y_last = y[-2:]
        # Bool indexing selects the contracted dimension of each operand.
        x_col = x_last[not self.transpose_a]  # x_col = x_last[1] if (not transpose_a) else x_last[0]
        y_row = y_last[self.transpose_b]  # y_row = y_last[0] if (not transpose_b) else y_last[1]
        if x_col != y_row:
            raise ValueError(f'For \'{cls_name}\' evaluator shapes of inputs can not do this operator,'
                             + f' got {x_col} and {y_row}, with x shape {x}(transpose_a={self.transpose_a})'
                             + f', y shape {y}(transpose_b={self.transpose_b}).')
        # set attribute
        self.add_prim_attr('transpose_x1', self.transpose_a)
        self.add_prim_attr('transpose_x2', self.transpose_b)
        # The non-contracted dims of x and y form the result's last two dims.
        ret_dims = x[: -2] + [x_last[self.transpose_a], y_last[not self.transpose_b]]
        return ret_dims

    def infer_dtype(self, x, y, bias=None):
        args = {"x": x, "y": y}
        validator.check_tensor_type_same(args, mstype.float_type + mstype.int_type, self.name)
        # int8 inputs yield an int32 result tensor.
        if x.element_type() == mstype.int8:
            return mstype.tensor_type(mstype.int32)
        return x
  542. class BatchMatMul(MatMul):
  543. """
  544. Computes matrix multiplication between two tensors by batch
  545. `result[..., :, :] = tensor(a[..., :, :]) * tensor(b[..., :, :])`.
  546. The two input tensors must have the same rank and the rank must be not less than `3`.
  547. Args:
  548. transpose_a (bool): If True, the last two dimensions of `a` is transposed before multiplication.
  549. Default: False.
  550. transpose_b (bool): If True, the last two dimensions of `b` is transposed before multiplication.
  551. Default: False.
  552. Inputs:
  553. - **input_x** (Tensor) - The first tensor to be multiplied. The shape of the tensor is :math:`(*B, N, C)`,
  554. where :math:`*B` represents the batch size which can be multidimensional, :math:`N` and :math:`C` are the
  555. size of the last two dimensions. If `transpose_a` is True, its shape should be :math:`(*B, C, N)`.
  556. - **input_y** (Tensor) - The second tensor to be multiplied. The shape of the tensor is :math:`(*B, C, M)`. If
  557. `transpose_b` is True, its shape should be :math:`(*B, M, C)`.
  558. Outputs:
  559. Tensor, the shape of the output tensor is :math:`(*B, N, M)`.
  560. Examples:
  561. >>> input_x = Tensor(np.ones(shape=[2, 4, 1, 3]), mindspore.float32)
  562. >>> input_y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
  563. >>> batmatmul = P.BatchMatMul()
  564. >>> output = batmatmul(input_x, input_y)
  565. >>>
  566. >>> input_x = Tensor(np.ones(shape=[2, 4, 3, 1]), mindspore.float32)
  567. >>> input_y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
  568. >>> batmatmul = P.BatchMatMul(transpose_a=True)
  569. >>> output = batmatmul(input_x, input_y)
  570. """
  571. @prim_attr_register
  572. def __init__(self, transpose_a=False, transpose_b=False):
  573. self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['output'])
  574. cls_name = self.name
  575. validator.check_value_type("transpose_a", transpose_a, [bool], cls_name)
  576. validator.check_value_type("transpose_b", transpose_b, [bool], cls_name)
  577. def check_shape_size(self, x, y):
  578. if len(x) != len(y) or len(x) < 3:
  579. raise ValueError('For \'BatchMatMul\' input x, y should be the same dimension size and should be '
  580. 'greater or equal to 3,' + f' while x size = {len(x)}, y size= {len(y)}')
class CumSum(PrimitiveWithInfer):
    """
    Computes the cumulative sum of input tensor along axis.

    Args:
        exclusive (bool): If True, perform exclusive mode. Default: False.
        reverse (bool): If True, perform inverse cumulative sum. Default: False.

    Inputs:
        - **input** (Tensor) - The input tensor to accumulate.
        - **axis** (int) - The axis to accumulate the tensor's value. Only constant value is allowed.
          Must be in the range [-rank(input), rank(input)).

    Outputs:
        Tensor, the shape of the output tensor is consistent with the input tensor's.

    Examples:
        >>> input = Tensor(np.array([[3, 4, 6, 10],[1, 6, 7, 9],[4, 3, 8, 7],[1, 3, 7, 9]]).astype(np.float32))
        >>> cumsum = P.CumSum()
        >>> output = cumsum(input, 1)
        [[ 3. 7. 13. 23.]
         [ 1. 7. 14. 23.]
         [ 4. 7. 15. 22.]
         [ 1. 4. 11. 20.]]
    """

    @prim_attr_register
    def __init__(self, exclusive=False, reverse=False):
        """init cumsum"""
        cls_name = self.name
        validator.check_value_type('exclusive', exclusive, [bool], cls_name)
        validator.check_value_type('reverse', reverse, [bool], cls_name)
        self.init_prim_io_names(inputs=['x', 'axis'], outputs=['y'])

    def __infer__(self, x, axis):
        cls_name = self.name
        x_shp = x['shape']
        # The accumulation axis must be a compile-time constant int.
        if axis['value'] is None:
            raise ValueError(f"For {self.name}, axis must be const.")
        validator.check_value_type('axis', axis['value'], [int], cls_name)
        valid_types = [mstype.uint8, mstype.int8, mstype.int32, mstype.float16, mstype.float32]
        validator.check_tensor_type_same({'x': x['dtype']}, valid_types, cls_name)
        # Shape and dtype pass through unchanged; the value is never const-folded.
        return {'shape': x_shp,
                'dtype': x['dtype'],
                'value': None}
class AddN(PrimitiveWithInfer):
    """
    Computes addition of all input tensors element-wise.

    All input tensors should have the same shape.

    Inputs:
        - **input_x** (Union(tuple[Tensor], list[Tensor])) - The input tuple or list
          is made up of multiple tensors whose dtype is number or bool to be added together.

    Outputs:
        Tensor, has the same shape and dtype as each entry of the `input_x`.

    Examples:
        >>> class NetAddN(nn.Cell):
        >>>     def __init__(self):
        >>>         super(NetAddN, self).__init__()
        >>>         self.addN = P.AddN()
        >>>
        >>>     def construct(self, *z):
        >>>         return self.addN(z)
        >>>
        >>> net = NetAddN()
        >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32)
        >>> input_y = Tensor(np.array([4, 5, 6]), mindspore.float32)
        >>> net(input_x, input_y, input_x, input_y)
        [10.0, 14.0, 18.0]
    """

    @prim_attr_register
    def __init__(self):
        self.init_prim_io_names(inputs=["inputs"], outputs=["sum"])

    def check_elim(self, inputs):
        # A single-Tensor input lets the op be eliminated: adding one
        # tensor is the identity, so (True, tensor) is returned.
        if len(inputs) != 1:
            return (False, None)
        if isinstance(inputs[0], Tensor):
            return (True, inputs[0])
        raise TypeError("Expecting Tensor, got : {}".format(type(inputs[0])))

    def infer_shape(self, inputs):
        cls_name = self.name
        validator.check_integer("inputs", len(inputs), 1, Rel.GE, cls_name)
        self.add_prim_attr('n', len(inputs))
        shp0 = inputs[0]
        # Every entry must match the shape of the first entry.
        for i, shp in enumerate(inputs):
            validator.check(f"shape of inputs[{i}]", shp, 'shape of inputs[0]', shp0, Rel.EQ, cls_name)
        return shp0

    def infer_dtype(self, inputs):
        cls_name = self.name
        validator.check_value_type("inputs", inputs, [tuple, list], cls_name)
        validator.check_integer("inputs", len(inputs), 1, Rel.GE, cls_name)
        args = {}
        contains_undetermined = False
        for i, dtype in enumerate(inputs):
            args[f"inputs[{i}]"] = dtype
            if dtype == mstype.undetermined:
                contains_undetermined = True
        # Skip the same-dtype check while any entry's dtype is still undetermined.
        if not contains_undetermined:
            validator.check_tensor_type_same(args, mstype.number_type + (mstype.bool_,), cls_name)
        return inputs[0]

    def infer_value(self, inputs):
        # Constant-fold only when every entry has a known value.
        if inputs is None:
            return None
        for x in inputs:
            if x is None:
                return None
        # deepcopy so the accumulation does not mutate the first input's buffer.
        added = copy.deepcopy(inputs[0].asnumpy())
        for x in inputs[1:]:
            added += x.asnumpy()
        out = np.array(added, inputs[0].asnumpy().dtype)
        return Tensor(out)
  685. class AccumulateNV2(PrimitiveWithInfer):
  686. """
  687. Computes accumulation of all input tensors element-wise.
  688. AccumulateNV2 is similar to AddN, but there is a significant difference
  689. among them: AccumulateNV2 will not wait for all of its inputs to be ready
  690. before summing. That is to say, AccumulateNV2 is able to save
  691. memory when inputs are ready at different time since the minimum temporary
  692. storage is proportional to the output size rather than the input size.
  693. Inputs:
  694. - **input_x** (Union(tuple[Tensor], list[Tensor])) - The input tuple or list
  695. is made up of multiple tensors whose dtype is number to be added together.
  696. Outputs:
  697. Tensor, has the same shape and dtype as each entry of the `input_x`.
  698. Examples:
  699. >>> class NetAccumulateNV2(nn.Cell):
  700. >>> def __init__(self):
  701. >>> super(NetAccumulateNV2, self).__init__()
  702. >>> self.accumulateNV2 = P.AccumulateNV2()
  703. >>>
  704. >>> def construct(self, *z):
  705. >>> return self.accumulateNV2(z)
  706. >>>
  707. >>> net = NetAccumulateNV2()
  708. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32)
  709. >>> input_y = Tensor(np.array([4, 5, 6]), mindspore.float32)
  710. >>> net(input_x, input_y, input_x, input_y)
  711. Tensor([10., 14., 18.], shape=(3,), dtype=mindspore.float32)
  712. """
  713. @prim_attr_register
  714. def __init__(self):
  715. self.__setattr_flag__ = True
  716. self.init_prim_io_names(inputs=["inputs"], outputs=["sum"])
  717. def infer_shape(self, inputs):
  718. cls_name = self.name
  719. validator.check_integer("inputs", len(inputs), 1, Rel.GE, cls_name)
  720. self.add_prim_attr('n', len(inputs))
  721. shp0 = inputs[0]
  722. for i, shp in enumerate(inputs):
  723. validator.check(f"shape of inputs[{i}]", shp, 'shape of inputs[0]', shp0, Rel.EQ, cls_name)
  724. return shp0
  725. def infer_dtype(self, inputs):
  726. cls_name = self.name
  727. validator.check_value_type("inputs", inputs, [tuple, list], cls_name)
  728. validator.check_integer("inputs", len(inputs), 1, Rel.GE, cls_name)
  729. args = {}
  730. for i, dtype in enumerate(inputs):
  731. args[f"inputs[{i}]"] = dtype
  732. validator.check_tensor_type_same(args, mstype.number_type + (mstype.bool_,), cls_name)
  733. return inputs[0]
  734. class Neg(PrimitiveWithInfer):
  735. """
  736. Returns a tensor with negative values of the input tensor element-wise.
  737. Inputs:
  738. - **input_x** (Tensor) - The input tensor whose dtype is number.
  739. Outputs:
  740. Tensor, has the same shape and dtype as input.
  741. Examples:
  742. >>> neg = P.Neg()
  743. >>> input_x = Tensor(np.array([1, 2, -1, 2, 0, -3.5]), mindspore.float32)
  744. >>> result = neg(input_x)
  745. [-1. -2. 1. -2. 0. 3.5]
  746. """
  747. @prim_attr_register
  748. def __init__(self):
  749. """init Neg"""
  750. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  751. def infer_shape(self, input_x):
  752. return input_x
  753. def infer_dtype(self, input_x):
  754. validator.check_tensor_type_same({"input_x": input_x}, mstype.number_type, self.name)
  755. return input_x
  756. def infer_value(self, input_x):
  757. if input_x is not None:
  758. input_x = input_x.asnumpy()
  759. out = np.array(-input_x, input_x.dtype)
  760. return Tensor(out)
  761. return None
class InplaceAdd(PrimitiveWithInfer):
    """
    Adds v into specified rows of x. Computes y = x; y[i,] += v.

    Args:
        indices (Union[int, tuple]): Indices into the left-most dimension of x, and determines which rows of x
            to add with v. It is an integer or a tuple, whose value is in [0, the first dimension size of x).

    Inputs:
        - **input_x** (Tensor) - The first input is a tensor whose data type is float16, float32 or int32.
        - **input_v** (Tensor) - The second input is a tensor that has the same dimension sizes as x except
          the first dimension, which must be the same as indices's size. It has the same data type with `input_x`.

    Outputs:
        Tensor, has the same shape and dtype as input.

    Examples:
        >>> indices = (0, 1)
        >>> input_x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
        >>> input_v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
        >>> inplaceAdd = P.InplaceAdd(indices)
        >>> inplaceAdd(input_x, input_v)
        [[1.5 3.]
         [4. 5.5]
         [5. 6.]]
    """

    @prim_attr_register
    def __init__(self, indices):
        """init InplaceAdd"""
        self.init_prim_io_names(inputs=['x', 'v'], outputs=['y'])
        self.indices = indices
        validator.check_value_type('indices', indices, [tuple, int], self.name)
        # Normalize a bare int into a one-element tuple.
        if isinstance(indices, int):
            self.indices = (indices,)
        for item in self.indices:
            validator.check_value_type("item of indices", item, [int], self.name)

    def infer_dtype(self, x_dtype, v_dtype):
        args = {'x': x_dtype, 'v': v_dtype}
        valid_type = [mstype.int32, mstype.float16, mstype.float32]
        validator.check_tensor_type_same(args, valid_type, self.name)
        return x_dtype

    def infer_shape(self, x_shape, v_shape):
        validator.check("x", len(x_shape), "v", len(v_shape), Rel.EQ, self.name)
        # One row of v is consumed per index.
        validator.check("size of indices", len(self.indices), "v's first dimension", v_shape[0],
                        Rel.EQ, self.name)
        for i in self.indices:
            if i < 0 or i >= x_shape[0]:
                raise ValueError(f'The value of indices must be in [0, {x_shape[0]}), but got {i}.')
        x_rank = len(x_shape)
        # Every trailing dimension of v must match the corresponding dim of x.
        for idx in range(x_rank)[1:]:
            validator.check('v dim %d' % idx, v_shape[idx], "x dim %d" % idx, x_shape[idx], Rel.EQ, self.name)
        return x_shape
  810. class InplaceSub(PrimitiveWithInfer):
  811. """
  812. Subtracts v into specified rows of x. Computes y = x; y[i, :] -= v; return y.
  813. Args:
  814. indices (Union[int, tuple]): Indices into the left-most dimension of x, and determines which rows of x
  815. to subtract with v. It is a int or tuple, whose value is in [0, the first dimension size of x).
  816. Inputs:
  817. - **input_x** (Tensor) - The first input is a tensor whose data type is float16, float32 or int32.
  818. - **input_v** (Tensor) - The second input is a tensor who has the same dimension sizes as x except
  819. the first dimension, which must be the same as indices's size. It has the same data type with `input_x`.
  820. Outputs:
  821. Tensor, has the same shape and dtype as input.
  822. Examples:
  823. >>> indices = (0, 1)
  824. >>> input_x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
  825. >>> input_v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
  826. >>> inplaceSub = P.InplaceSub(indices)
  827. >>> inplaceSub(input_x, input_v)
  828. [[0.5 1.]
  829. [2. 2.5]
  830. [5. 6.]]
  831. """
  832. @prim_attr_register
  833. def __init__(self, indices):
  834. """init InplaceSub"""
  835. self.init_prim_io_names(inputs=['x', 'v'], outputs=['y'])
  836. self.indices = indices
  837. validator.check_value_type('indices', indices, [tuple, int], self.name)
  838. if isinstance(indices, int):
  839. self.indices = (indices,)
  840. for item in self.indices:
  841. validator.check_value_type("item of indices", item, [int], self.name)
  842. def infer_dtype(self, x_dtype, v_dtype):
  843. args = {'x': x_dtype, 'v': v_dtype}
  844. valid_type = [mstype.int32, mstype.float16, mstype.float32]
  845. validator.check_tensor_type_same(args, valid_type, self.name)
  846. return x_dtype
  847. def infer_shape(self, x_shape, v_shape):
  848. validator.check("x", len(x_shape), "v", len(v_shape), Rel.EQ, self.name)
  849. validator.check("size of indices", len(self.indices), "v's first dimension", v_shape[0],
  850. Rel.EQ, self.name)
  851. for i in self.indices:
  852. if i < 0 or i >= x_shape[0]:
  853. raise ValueError(f'The value of indices must be in [0, {x_shape[0]}), but got {i}.')
  854. x_rank = len(x_shape)
  855. for idx in range(x_rank)[1:]:
  856. validator.check('v dim %d' % idx, v_shape[idx], "x dim %d" % idx, x_shape[idx], Rel.EQ, self.name)
  857. return x_shape
  858. class Sub(_MathBinaryOp):
  859. """
  860. Subtracts the second input tensor from the first input tensor element-wise.
  861. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  862. The inputs must be two tensors or one tensor and one scalar.
  863. When the inputs are two tensors,
  864. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  865. When the inputs are one tensor and one scalar,
  866. the scalar could only be a constant.
  867. Inputs:
  868. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  869. a bool or a tensor whose data type is number or bool.
  870. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  871. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  872. Outputs:
  873. Tensor, the shape is the same as the one after broadcasting,
  874. and the data type is the one with high precision or high digits among the two inputs.
  875. Examples:
  876. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  877. >>> input_y = Tensor(np.array([4, 5, 6]), mindspore.int32)
  878. >>> sub = P.Sub()
  879. >>> sub(input_x, input_y)
  880. [-3, -3, -3]
  881. """
  882. def infer_value(self, x, y):
  883. if x is not None and y is not None:
  884. x = x.asnumpy()
  885. y = y.asnumpy()
  886. out = x - y
  887. out = np.array(out, x.dtype)
  888. return Tensor(out)
  889. return None
  890. class Mul(_MathBinaryOp):
  891. """
  892. Multiplies two tensors element-wise.
  893. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  894. The inputs must be two tensors or one tensor and one scalar.
  895. When the inputs are two tensors,
  896. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  897. When the inputs are one tensor and one scalar,
  898. the scalar could only be a constant.
  899. Inputs:
  900. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  901. a bool or a tensor whose data type is number or bool.
  902. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  903. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  904. Outputs:
  905. Tensor, the shape is the same as the one after broadcasting,
  906. and the data type is the one with high precision or high digits among the two inputs.
  907. Examples:
  908. >>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
  909. >>> input_y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
  910. >>> mul = P.Mul()
  911. >>> mul(input_x, input_y)
  912. [4, 10, 18]
  913. """
  914. def infer_value(self, x, y):
  915. if x is not None and y is not None:
  916. x = x.asnumpy()
  917. y = y.asnumpy()
  918. out = x * y
  919. out = np.array(out, x.dtype)
  920. return Tensor(out)
  921. return None
  922. class SquaredDifference(_MathBinaryOp):
  923. """
  924. Subtracts the second input tensor from the first input tensor element-wise and returns square of it.
  925. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  926. The inputs must be two tensors or one tensor and one scalar.
  927. When the inputs are two tensors,
  928. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  929. When the inputs are one tensor and one scalar,
  930. the scalar could only be a constant.
  931. Inputs:
  932. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  933. a bool or a tensor whose data type is float16, float32, int32 or bool.
  934. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  935. a bool when the first input is a tensor or a tensor whose data type is
  936. float16, float32, int32 or bool.
  937. Outputs:
  938. Tensor, the shape is the same as the one after broadcasting,
  939. and the data type is the one with high precision or high digits among the two inputs.
  940. Examples:
  941. >>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
  942. >>> input_y = Tensor(np.array([2.0, 4.0, 6.0]), mindspore.float32)
  943. >>> squared_difference = P.SquaredDifference()
  944. >>> squared_difference(input_x, input_y)
  945. [1.0, 4.0, 9.0]
  946. """
  947. def infer_dtype(self, x_dtype, y_dtype):
  948. valid_type = [mstype.float16, mstype.float32, mstype.int32]
  949. return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, valid_type, self.name)
  950. class Square(PrimitiveWithInfer):
  951. """
  952. Returns square of a tensor element-wise.
  953. Inputs:
  954. - **input_x** (Tensor) - The input tensor whose dtype is number.
  955. Outputs:
  956. Tensor, has the same shape and dtype as the `input_x`.
  957. Examples:
  958. >>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
  959. >>> square = P.Square()
  960. >>> square(input_x)
  961. [1.0, 4.0, 9.0]
  962. """
  963. @prim_attr_register
  964. def __init__(self):
  965. """init Square"""
  966. self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
  967. def infer_shape(self, x_shape):
  968. return x_shape
  969. def infer_dtype(self, x_type):
  970. validator.check_tensor_type_same({"x": x_type}, mstype.number_type, self.name)
  971. return x_type
  972. def infer_value(self, x):
  973. if x is not None:
  974. x = x.asnumpy()
  975. out = x * x
  976. out = np.array(out, x.dtype)
  977. return Tensor(out)
  978. return None
  979. class Rsqrt(PrimitiveWithInfer):
  980. """
  981. Computes reciprocal of square root of input tensor element-wise.
  982. Inputs:
  983. - **input_x** (Tensor) - The input of Rsqrt. Each element should be a non-negative number.
  984. Outputs:
  985. Tensor, has the same type and shape as `input_x`.
  986. Examples:
  987. >>> input_tensor = Tensor([[4, 4], [9, 9]], mindspore.float32)
  988. >>> rsqrt = P.Rsqrt()
  989. >>> rsqrt(input_tensor)
  990. [[0.5, 0.5], [0.333333, 0.333333]]
  991. """
  992. @prim_attr_register
  993. def __init__(self):
  994. """init Rsqrt"""
  995. self.init_prim_io_names(inputs=['x'], outputs=['output'])
  996. def infer_shape(self, x_shape):
  997. return x_shape
  998. def infer_dtype(self, x_type):
  999. validator.check_tensor_type_same({"x": x_type}, mstype.number_type, self.name)
  1000. return x_type
  1001. def infer_value(self, x):
  1002. if x is not None:
  1003. x = x.asnumpy()
  1004. out = 1.0 / np.sqrt(x)
  1005. out = np.array(out, x.dtype)
  1006. return Tensor(out)
  1007. return None
  1008. class Sqrt(PrimitiveWithCheck):
  1009. """
  1010. Returns square root of a tensor element-wise.
  1011. Inputs:
  1012. - **input_x** (Tensor) - The input tensor whose dtype is number.
  1013. Outputs:
  1014. Tensor, has the same shape as the `input_x`.
  1015. Examples:
  1016. >>> input_x = Tensor(np.array([1.0, 4.0, 9.0]), mindspore.float32)
  1017. >>> sqrt = P.Sqrt()
  1018. >>> sqrt(input_x)
  1019. [1.0, 2.0, 3.0]
  1020. """
  1021. @prim_attr_register
  1022. def __init__(self):
  1023. """init Sqrt"""
  1024. self.init_prim_io_names(inputs=['x'], outputs=['output'])
  1025. def check_dtype(self, x_type):
  1026. validator.check_tensor_type_same({"x": x_type}, mstype.number_type, self.name)
  1027. def infer_value(self, x):
  1028. if x is not None:
  1029. x = x.asnumpy()
  1030. out = np.sqrt(x)
  1031. out = np.array(out, x.dtype)
  1032. return Tensor(out)
  1033. return None
  1034. class Reciprocal(PrimitiveWithInfer):
  1035. """
  1036. Returns reciprocal of a tensor element-wise.
  1037. Inputs:
  1038. - **input_x** (Tensor) - The input tensor.
  1039. Outputs:
  1040. Tensor, has the same shape as the `input_x`.
  1041. Examples:
  1042. >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  1043. >>> reciprocal = P.Reciprocal()
  1044. >>> reciprocal(input_x)
  1045. [1.0, 0.5, 0.25]
  1046. """
  1047. @prim_attr_register
  1048. def __init__(self):
  1049. """init Reciprocal"""
  1050. if context.get_context("device_target") == "GPU":
  1051. self.target = "GPU"
  1052. else:
  1053. self.target = "OTHER"
  1054. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1055. def infer_shape(self, x):
  1056. return x
  1057. def infer_dtype(self, x):
  1058. validator.check_subclass("x", x, mstype.tensor, self.name)
  1059. return x
  1060. def infer_value(self, x):
  1061. if x is not None:
  1062. x = x.asnumpy()
  1063. out = 1.0 / x
  1064. out = np.array(out, x.dtype)
  1065. return Tensor(out)
  1066. return None
  1067. class Pow(_MathBinaryOp):
  1068. """
  1069. Computes a tensor to the power of the second input.
  1070. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1071. The inputs must be two tensors or one tensor and one scalar.
  1072. When the inputs are two tensors,
  1073. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1074. When the inputs are one tensor and one scalar,
  1075. the scalar could only be a constant.
  1076. Inputs:
  1077. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1078. a bool or a tensor whose data type is number or bool.
  1079. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1080. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1081. Outputs:
  1082. Tensor, the shape is the same as the one after broadcasting,
  1083. and the data type is the one with high precision or high digits among the two inputs.
  1084. Examples:
  1085. >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  1086. >>> input_y = 3.0
  1087. >>> pow = P.Pow()
  1088. >>> pow(input_x, input_y)
  1089. [1.0, 8.0, 64.0]
  1090. >>>
  1091. >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  1092. >>> input_y = Tensor(np.array([2.0, 4.0, 3.0]), mindspore.float32)
  1093. >>> pow = P.Pow()
  1094. >>> pow(input_x, input_y)
  1095. [1.0, 16.0, 64.0]
  1096. """
  1097. def infer_value(self, x, power):
  1098. if x is not None and power is not None:
  1099. x = x.asnumpy()
  1100. power = power.asnumpy()
  1101. out = np.power(x, power)
  1102. out = np.array(out, x.dtype)
  1103. return Tensor(out)
  1104. return None
  1105. class Exp(PrimitiveWithInfer):
  1106. """
  1107. Returns exponential of a tensor element-wise.
  1108. Inputs:
  1109. - **input_x** (Tensor) - The input tensor. The data type mast be float16 or float32.
  1110. Outputs:
  1111. Tensor, has the same shape and dtype as the `input_x`.
  1112. Examples:
  1113. >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  1114. >>> exp = P.Exp()
  1115. >>> exp(input_x)
  1116. [ 2.71828183, 7.3890561 , 54.59815003]
  1117. """
  1118. @prim_attr_register
  1119. def __init__(self):
  1120. """init Exp"""
  1121. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1122. def infer_shape(self, x_shape):
  1123. return x_shape
  1124. def infer_dtype(self, x_type):
  1125. validator.check_subclass("x", x_type, mstype.tensor, self.name)
  1126. return x_type
  1127. def infer_value(self, x):
  1128. if x is not None:
  1129. x = x.asnumpy()
  1130. out = np.exp(x)
  1131. out = np.array(out, x.dtype)
  1132. return Tensor(out)
  1133. return None
  1134. class Expm1(PrimitiveWithInfer):
  1135. """
  1136. Returns exponential then minus 1 of a tensor element-wise.
  1137. Inputs:
  1138. - **input_x** (Tensor) - The input tensor. With float16 or float32 data type.
  1139. Outputs:
  1140. Tensor, has the same shape as the `input_x`.
  1141. Examples:
  1142. >>> input_x = Tensor(np.array([0.0, 1.0, 2.0, 4.0]), mindspore.float32)
  1143. >>> expm1 = P.Expm1()
  1144. >>> expm1(input_x)
  1145. [ 0., 1.71828183, 6.3890561 , 53.59815003]
  1146. """
  1147. @prim_attr_register
  1148. def __init__(self):
  1149. """init Exp"""
  1150. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1151. def infer_shape(self, x_shape):
  1152. return x_shape
  1153. def infer_dtype(self, x_type):
  1154. validator.check_subclass("x", x_type, mstype.tensor, self.name)
  1155. validator.check_tensor_type_same({"x": x_type}, [mstype.float16, mstype.float32], self.name)
  1156. return x_type
  1157. class HistogramFixedWidth(PrimitiveWithInfer):
  1158. """
  1159. Returns a rank 1 histogram counting the number of entries in values that fall into every bin. The bins are equal
  1160. width and determined by the arguments range and nbins.
  1161. Args:
  1162. dtype (str): An optional attribute. Must be one of the following types: "int32", "int64". Default: "int32".
  1163. nbins (int): The number of histogram bins, the type is a positive integer.
  1164. Inputs:
  1165. - **x** (Tensor) - Numeric Tensor. Must be one of the following types: int32, float32, float16.
  1166. - **range** (Tensor) - Must has the same data type as `x`, and the shape is [2].
  1167. x <= range[0] will be mapped to hist[0], x >= range[1] will be mapped to hist[-1].
  1168. Outputs:
  1169. Tensor, the type is int32.
  1170. Examples:
  1171. >>> x = Tensor([-1.0, 0.0, 1.5, 2.0, 5.0, 15], mindspore.float16)
  1172. >>> range = Tensor([0.0, 5.0], mindspore.float16)
  1173. >>> hist = P.HistogramFixedWidth(5)
  1174. >>> hist(x, range)
  1175. [2 1 1 0 2]
  1176. """
  1177. @prim_attr_register
  1178. def __init__(self, nbins, dtype='int32'):
  1179. self.nbins = validator.check_value_type("nbins", nbins, [int], self.name)
  1180. validator.check_integer("nbins", nbins, 1, Rel.GE, self.name)
  1181. valid_values = ['int32', 'int64']
  1182. self.dtype = validator.check_string("dtype", dtype, valid_values, self.name)
  1183. self.init_prim_io_names(inputs=['x', 'range'], outputs=['y'])
  1184. def infer_shape(self, x_shape, range_shape):
  1185. return (self.nbins,)
  1186. def infer_dtype(self, x_dtype, range_dtype):
  1187. validator.check_subclass("x", x_dtype, mstype.tensor, self.name)
  1188. valid_types = (mstype.float16, mstype.float32, mstype.int32)
  1189. validator.check_tensor_type_same({"x": x_dtype}, valid_types, self.name)
  1190. validator.check_tensor_type_same({"range": range_dtype}, valid_types, self.name)
  1191. y_dtype = mstype.int32
  1192. return y_dtype
  1193. class Log(PrimitiveWithInfer):
  1194. """
  1195. Returns the natural logarithm of a tensor element-wise.
  1196. Inputs:
  1197. - **input_x** (Tensor) - The input tensor.
  1198. Outputs:
  1199. Tensor, has the same shape as the `input_x`.
  1200. Examples:
  1201. >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  1202. >>> log = P.Log()
  1203. >>> log(input_x)
  1204. [0.0, 0.69314718, 1.38629436]
  1205. """
  1206. @prim_attr_register
  1207. def __init__(self):
  1208. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1209. def infer_shape(self, x):
  1210. return x
  1211. def infer_dtype(self, x):
  1212. validator.check_subclass("x", x, mstype.tensor, self.name)
  1213. return x
  1214. def infer_value(self, x):
  1215. if x is not None:
  1216. x = x.asnumpy()
  1217. out = np.log(x)
  1218. out = np.array(out, x.dtype)
  1219. return Tensor(out)
  1220. return None
  1221. class Log1p(PrimitiveWithInfer):
  1222. """
  1223. Returns the natural logarithm of one plus the input tensor element-wise.
  1224. Inputs:
  1225. - **input_x** (Tensor) - The input tensor. With float16 or float32 data type.
  1226. Outputs:
  1227. Tensor, has the same shape as the `input_x`.
  1228. Examples:
  1229. >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  1230. >>> log1p = P.Log1p()
  1231. >>> log1p(input_x)
  1232. [0.6931472, 1.0986123, 1.609438]
  1233. """
  1234. @prim_attr_register
  1235. def __init__(self):
  1236. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1237. def infer_shape(self, x):
  1238. return x
  1239. def infer_dtype(self, x):
  1240. validator.check_subclass("x", x, mstype.tensor, self.name)
  1241. validator.check_tensor_type_same({"x": x}, [mstype.float16, mstype.float32], self.name)
  1242. return x
  1243. class Erf(PrimitiveWithInfer):
  1244. r"""
  1245. Computes the Gauss error function of `input_x` element-wise.
  1246. Inputs:
  1247. - **input_x** (Tensor) - The input tensor. The data type must be float16 or float32.
  1248. Outputs:
  1249. Tensor, has the same shape and dtype as the `input_x`.
  1250. Examples:
  1251. >>> input_x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
  1252. >>> erf = P.Erf()
  1253. >>> erf(input_x)
  1254. [-0.8427168, 0., 0.8427168, 0.99530876, 0.99997765]
  1255. """
  1256. @prim_attr_register
  1257. def __init__(self):
  1258. """init Erf"""
  1259. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1260. def infer_shape(self, x_shape):
  1261. return x_shape
  1262. def infer_dtype(self, x_type):
  1263. validator.check_tensor_type_same({"x": x_type}, [mstype.float16, mstype.float32], self.name)
  1264. return x_type
  1265. class Erfc(PrimitiveWithInfer):
  1266. r"""
  1267. Computes the complementary error function of `input_x` element-wise.
  1268. Inputs:
  1269. - **input_x** (Tensor) - The input tensor. The data type must be float16 or float32.
  1270. Outputs:
  1271. Tensor, has the same shape and dtype as the `input_x`.
  1272. Examples:
  1273. >>> input_x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
  1274. >>> erfc = P.Erfc()
  1275. >>> erfc(input_x)
  1276. [1.8427168, 0., 0.1572832, 0.00469124, 0.00002235]
  1277. """
  1278. @prim_attr_register
  1279. def __init__(self):
  1280. """init Erfc"""
  1281. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1282. def infer_shape(self, x_shape):
  1283. return x_shape
  1284. def infer_dtype(self, x_type):
  1285. validator.check_tensor_type_same({"x": x_type}, [mstype.float16, mstype.float32], self.name)
  1286. return x_type
  1287. class Minimum(_MathBinaryOp):
  1288. """
  1289. Computes the element-wise minimum of input tensors.
  1290. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1291. The inputs must be two tensors or one tensor and one scalar.
  1292. When the inputs are two tensors,
  1293. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1294. When the inputs are one tensor and one scalar,
  1295. the scalar could only be a constant.
  1296. Inputs:
  1297. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1298. a bool or a tensor whose data type is number or bool.
  1299. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1300. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1301. Outputs:
  1302. Tensor, the shape is the same as the one after broadcasting,
  1303. and the data type is the one with high precision or high digits among the two inputs.
  1304. Examples:
  1305. >>> input_x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
  1306. >>> input_y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
  1307. >>> minimum = P.Minimum()
  1308. >>> minimum(input_x, input_y)
  1309. [1.0, 2.0, 3.0]
  1310. """
  1311. def infer_value(self, x, y):
  1312. if x is not None and y is not None:
  1313. x = x.asnumpy()
  1314. y = y.asnumpy()
  1315. out = np.minimum(x, y)
  1316. out = np.array(out, x.dtype)
  1317. return Tensor(out)
  1318. return None
  1319. class Maximum(_MathBinaryOp):
  1320. """
  1321. Computes the element-wise maximum of input tensors.
  1322. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1323. The inputs must be two tensors or one tensor and one scalar.
  1324. When the inputs are two tensors,
  1325. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1326. When the inputs are one tensor and one scalar,
  1327. the scalar could only be a constant.
  1328. Inputs:
  1329. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1330. a bool or a tensor whose data type is number or bool.
  1331. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1332. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1333. Outputs:
  1334. Tensor, the shape is the same as the one after broadcasting,
  1335. and the data type is the one with high precision or high digits among the two inputs.
  1336. Examples:
  1337. >>> input_x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
  1338. >>> input_y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
  1339. >>> maximum = P.Maximum()
  1340. >>> maximum(input_x, input_y)
  1341. [4.0, 5.0, 6.0]
  1342. """
  1343. def infer_value(self, x, y):
  1344. if x is not None and y is not None:
  1345. x = x.asnumpy()
  1346. y = y.asnumpy()
  1347. out = np.maximum(x, y)
  1348. out = np.array(out, x.dtype)
  1349. return Tensor(out)
  1350. return None
  1351. class RealDiv(_MathBinaryOp):
  1352. """
  1353. Divide the first input tensor by the second input tensor in floating-point type element-wise.
  1354. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1355. The inputs must be two tensors or one tensor and one scalar.
  1356. When the inputs are two tensors,
  1357. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1358. When the inputs are one tensor and one scalar,
  1359. the scalar could only be a constant.
  1360. Inputs:
  1361. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1362. a bool or a tensor whose data type is number or bool.
  1363. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1364. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1365. Outputs:
  1366. Tensor, the shape is the same as the one after broadcasting,
  1367. and the data type is the one with high precision or high digits among the two inputs.
  1368. Examples:
  1369. >>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
  1370. >>> input_y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
  1371. >>> realdiv = P.RealDiv()
  1372. >>> realdiv(input_x, input_y)
  1373. [0.25, 0.4, 0.5]
  1374. """
  1375. def infer_value(self, x, y):
  1376. if x is not None and y is not None:
  1377. x = x.asnumpy()
  1378. y = y.asnumpy()
  1379. out = x / y
  1380. out = np.array(out, x.dtype)
  1381. return Tensor(out)
  1382. return None
  1383. class Div(_MathBinaryOp):
  1384. """
  1385. Computes the quotient of dividing the first input tensor by the second input tensor element-wise.
  1386. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1387. The inputs must be two tensors or one tensor and one scalar.
  1388. When the inputs are two tensors,
  1389. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1390. When the inputs are one tensor and one scalar,
  1391. the scalar could only be a constant.
  1392. Inputs:
  1393. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1394. a bool or a tensor whose data type is number or bool.
  1395. - **input_y** (Union[Tensor, Number, bool]) - When the first input is a tensor, The second input
  1396. could be a number, a bool, or a tensor whose data type is number or bool. When the first input
  1397. is a number or a bool, the second input should be a tensor whose data type is number or bool.
  1398. Outputs:
  1399. Tensor, the shape is the same as the one after broadcasting,
  1400. and the data type is the one with high precision or high digits among the two inputs.
  1401. Raises:
  1402. ValueError: When `input_x` and `input_y` do not have the same dtype.
  1403. Examples:
  1404. >>> input_x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
  1405. >>> input_y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
  1406. >>> div = P.Div()
  1407. >>> div(input_x, input_y)
  1408. [-1.3, 2.5, 2.0]
  1409. """
  1410. def infer_value(self, x, y):
  1411. if x is not None and y is not None:
  1412. x = x.asnumpy()
  1413. y = y.asnumpy()
  1414. out = np.array(x / y, x.dtype)
  1415. return Tensor(out)
  1416. return None
  1417. class DivNoNan(_MathBinaryOp):
  1418. """
  1419. Computes a safe divide which returns 0 if the y is zero.
  1420. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1421. The inputs must be two tensors or one tensor and one scalar.
  1422. When the inputs are two tensors,
  1423. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1424. When the inputs are one tensor and one scalar,
  1425. the scalar could only be a constant.
  1426. Inputs:
  1427. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1428. a bool or a tensor whose data type is number or bool.
  1429. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1430. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1431. Outputs:
  1432. Tensor, the shape is the same as the one after broadcasting,
  1433. and the data type is the one with high precision or high digits among the two inputs.
  1434. Raises:
  1435. ValueError: When `input_x` and `input_y` do not have the same dtype.
  1436. Examples:
  1437. >>> input_x = Tensor(np.array([-1.0, 0., 1.0, 5.0, 6.0]), mindspore.float32)
  1438. >>> input_y = Tensor(np.array([0., 0., 0., 2.0, 3.0]), mindspore.float32)
  1439. >>> div_no_nan = P.DivNoNan()
  1440. >>> div_no_nan(input_x, input_y)
  1441. [0., 0., 0., 2.5, 2.0]
  1442. """
  1443. def infer_value(self, x, y):
  1444. if x is not None and y is not None:
  1445. x = x.asnumpy()
  1446. y = y.asnumpy()
  1447. with np.errstate(divide='ignore', invalid='ignore'):
  1448. out = np.true_divide(x, y)
  1449. out[~np.isfinite(out)] = 0
  1450. return out
  1451. return None
  1452. class FloorDiv(_MathBinaryOp):
  1453. """
  1454. Divide the first input tensor by the second input tensor element-wise and round down to the closest integer.
  1455. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1456. The inputs must be two tensors or one tensor and one scalar.
  1457. When the inputs are two tensors,
  1458. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1459. When the inputs are one tensor and one scalar,
  1460. the scalar could only be a constant.
  1461. Inputs:
  1462. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1463. a bool or a tensor whose data type is number or bool.
  1464. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1465. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1466. Outputs:
  1467. Tensor, the shape is the same as the one after broadcasting,
  1468. and the data type is the one with high precision or high digits among the two inputs.
  1469. Examples:
  1470. >>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
  1471. >>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
  1472. >>> floor_div = P.FloorDiv()
  1473. >>> floor_div(input_x, input_y)
  1474. [0, 1, -1]
  1475. """
  1476. class TruncateDiv(_MathBinaryOp):
  1477. """
  1478. Divide the first input tensor by the second input tensor element-wise for integer types, negative numbers will
  1479. round fractional quantities towards zero.
  1480. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1481. The inputs must be two tensors or one tensor and one scalar.
  1482. When the inputs are two tensors,
  1483. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1484. When the inputs are one tensor and one scalar,
  1485. the scalar could only be a constant.
  1486. Inputs:
  1487. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1488. a bool or a tensor whose data type is number or bool.
  1489. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1490. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1491. Outputs:
  1492. Tensor, the shape is the same as the one after broadcasting,
  1493. and the data type is the one with high precision or high digits among the two inputs.
  1494. Examples:
  1495. >>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
  1496. >>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
  1497. >>> truncate_div = P.TruncateDiv()
  1498. >>> truncate_div(input_x, input_y)
  1499. [0, 1, 0]
  1500. """
  1501. class TruncateMod(_MathBinaryOp):
  1502. """
  1503. Returns element-wise remainder of division.
  1504. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1505. The inputs must be two tensors or one tensor and one scalar.
  1506. When the inputs are two tensors,
  1507. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1508. When the inputs are one tensor and one scalar,
  1509. the scalar could only be a constant.
  1510. Inputs:
  1511. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1512. a bool or a tensor whose data type is number or bool.
  1513. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1514. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1515. Outputs:
  1516. Tensor, the shape is the same as the one after broadcasting,
  1517. and the data type is the one with high precision or high digits among the two inputs.
  1518. Examples:
  1519. >>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
  1520. >>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
  1521. >>> truncate_mod = P.TruncateMod()
  1522. >>> truncate_mod(input_x, input_y)
  1523. [2, 1, -1]
  1524. """
  1525. class Mod(_MathBinaryOp):
  1526. """
  1527. Computes the remainder of dividing the first input tensor by the second input tensor element-wise.
  1528. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1529. The inputs must be two tensors or one tensor and one scalar. When the inputs are two tensors,
  1530. both dtypes cannot be bool, and the shapes of them could be broadcast. When the inputs are one tensor
  1531. and one scalar, the scalar could only be a constant.
  1532. Inputs:
  1533. - **input_x** (Union[Tensor, Number]) - The first input is a number or a tensor whose data type is number.
  1534. - **input_y** (Union[Tensor, Number]) - When the first input is a tensor, The second input
  1535. could be a number or a tensor whose data type is number. When the first input is a number,
  1536. the second input should be a tensor whose data type is number.
  1537. Outputs:
  1538. Tensor, the shape is the same as the one after broadcasting,
  1539. and the data type is the one with high precision or high digits among the two inputs.
  1540. Raises:
  1541. ValueError: When `input_x` and `input_y` are not the same dtype.
  1542. Examples:
  1543. >>> input_x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
  1544. >>> input_y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
  1545. >>> mod = P.Mod()
  1546. >>> mod(input_x, input_y)
  1547. """
  1548. def infer_value(self, x, y):
  1549. if x is not None and y is not None:
  1550. x = x.asnumpy()
  1551. y = y.asnumpy()
  1552. return Tensor(np.fmod(x, y))
  1553. return None
  1554. class Floor(PrimitiveWithInfer):
  1555. """
  1556. Round a tensor down to the closest integer element-wise.
  1557. Inputs:
  1558. - **input_x** (Tensor) - The input tensor. Its element data type must be float.
  1559. Outputs:
  1560. Tensor, has the same shape as `input_x`.
  1561. Examples:
  1562. >>> input_x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
  1563. >>> floor = P.Floor()
  1564. >>> floor(input_x)
  1565. [1.0, 2.0, -2.0]
  1566. """
  1567. @prim_attr_register
  1568. def __init__(self):
  1569. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1570. def infer_shape(self, x_shape):
  1571. return x_shape
  1572. def infer_dtype(self, x_dtype):
  1573. validator.check_tensor_type_same({"x": x_dtype}, mstype.float_type, self.name)
  1574. return x_dtype
  1575. class FloorMod(_MathBinaryOp):
  1576. """
  1577. Compute the remainder of division element-wise.
  1578. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1579. The inputs must be two tensors or one tensor and one scalar.
  1580. When the inputs are two tensors,
  1581. dtypes of them cannot be both bool , and the shapes of them could be broadcast.
  1582. When the inputs are one tensor and one scalar,
  1583. the scalar could only be a constant.
  1584. Inputs:
  1585. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1586. a bool or a tensor whose data type is number or bool.
  1587. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1588. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1589. Outputs:
  1590. Tensor, the shape is the same as the one after broadcasting,
  1591. and the data type is the one with high precision or high digits among the two inputs.
  1592. Examples:
  1593. >>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
  1594. >>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
  1595. >>> floor_mod = P.FloorMod()
  1596. >>> floor_mod(input_x, input_y)
  1597. [2, 1, 2]
  1598. """
  1599. class Ceil(PrimitiveWithInfer):
  1600. """
  1601. Round a tensor up to the closest integer element-wise.
  1602. Inputs:
  1603. - **input_x** (Tensor) - The input tensor. It's element data type must be float16 or float32.
  1604. Outputs:
  1605. Tensor, has the same shape as `input_x`.
  1606. Examples:
  1607. >>> input_x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
  1608. >>> ceil_op = P.Ceil()
  1609. >>> ceil_op(input_x)
  1610. [2.0, 3.0, -1.0]
  1611. """
  1612. @prim_attr_register
  1613. def __init__(self):
  1614. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1615. def infer_shape(self, x_shape):
  1616. return x_shape
  1617. def infer_dtype(self, x_dtype):
  1618. validator.check_tensor_type_same({"x": x_dtype}, [mstype.float16, mstype.float32], self.name)
  1619. return x_dtype
  1620. class Xdivy(_MathBinaryOp):
  1621. """
  1622. Divide the first input tensor by the second input tensor element-wise. Returns zero when `x` is zero.
  1623. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1624. The inputs must be two tensors or one tensor and one scalar.
  1625. When the inputs are two tensors,
  1626. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1627. When the inputs are one tensor and one scalar,
  1628. the scalar could only be a constant.
  1629. Inputs:
  1630. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1631. a bool or a tensor whose data type is float16, float32 or bool.
  1632. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1633. a bool when the first input is a tensor or a tensor whose data type is float16, float32 or bool.
  1634. Outputs:
  1635. Tensor, the shape is the same as the one after broadcasting,
  1636. and the data type is the one with high precision or high digits among the two inputs.
  1637. Examples:
  1638. >>> input_x = Tensor(np.array([2, 4, -1]), mindspore.float32)
  1639. >>> input_y = Tensor(np.array([2, 2, 2]), mindspore.float32)
  1640. >>> xdivy = P.Xdivy()
  1641. >>> xdivy(input_x, input_y)
  1642. [1.0, 2.0, -0.5]
  1643. """
  1644. def infer_dtype(self, x_dtype, y_dtype):
  1645. return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, [mstype.float16, mstype.float32], self.name)
  1646. class Xlogy(_MathBinaryOp):
  1647. """
  1648. Computes first input tensor multiplied by the logarithm of second input tensor element-wise.
  1649. Returns zero when `x` is zero.
  1650. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1651. The inputs must be two tensors or one tensor and one scalar.
  1652. When the inputs are two tensors,
  1653. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1654. When the inputs are one tensor and one scalar,
  1655. the scalar could only be a constant.
  1656. Inputs:
  1657. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1658. a bool or a tensor whose data type is float16, float32 or bool.
  1659. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1660. a bool when the first input is a tensor or a tensor whose data type is float16, float32 or bool.
  1661. The value must be positive.
  1662. Outputs:
  1663. Tensor, the shape is the same as the one after broadcasting,
  1664. and the data type is the one with high precision or high digits among the two inputs.
  1665. Examples:
  1666. >>> input_x = Tensor(np.array([-5, 0, 4]), mindspore.float32)
  1667. >>> input_y = Tensor(np.array([2, 2, 2]), mindspore.float32)
  1668. >>> xlogy = P.Xlogy()
  1669. >>> xlogy(input_x, input_y)
  1670. [-3.465736, 0.0, 2.7725887]
  1671. """
  1672. def infer_dtype(self, x_dtype, y_dtype):
  1673. return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, [mstype.float16, mstype.float32], self.name)
  1674. class Acosh(PrimitiveWithInfer):
  1675. """
  1676. Compute inverse hyperbolic cosine of the input element-wise.
  1677. Inputs:
  1678. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  1679. Outputs:
  1680. Tensor, has the same shape as `input_x`.
  1681. Examples:
  1682. >>> acosh = P.Acosh()
  1683. >>> input_x = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32)
  1684. >>> output = acosh(input_x)
  1685. """
  1686. @prim_attr_register
  1687. def __init__(self):
  1688. """init Acosh"""
  1689. def infer_shape(self, x_shape):
  1690. return x_shape
  1691. def infer_dtype(self, x_dtype):
  1692. validator.check_tensor_type_same({'x': x_dtype}, mstype.number_type, self.name)
  1693. return x_dtype
  1694. class Cosh(PrimitiveWithInfer):
  1695. """
  1696. Computes hyperbolic cosine of input element-wise.
  1697. Inputs:
  1698. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  1699. Outputs:
  1700. Tensor, has the same shape as `input_x`.
  1701. Examples:
  1702. >>> cosh = P.Cosh()
  1703. >>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
  1704. >>> output = cosh(input_x)
  1705. [1.0289385 1.364684 1.048436 1.4228927]
  1706. """
  1707. @prim_attr_register
  1708. def __init__(self):
  1709. """init Cosh"""
  1710. def infer_shape(self, x_shape):
  1711. return x_shape
  1712. def infer_dtype(self, x_dtype):
  1713. validator.check_tensor_type_same({'x': x_dtype}, mstype.number_type, self.name)
  1714. return x_dtype
  1715. class Asinh(PrimitiveWithInfer):
  1716. """
  1717. Compute inverse hyperbolic sine of the input element-wise.
  1718. Inputs:
  1719. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  1720. Outputs:
  1721. Tensor, has the same shape as `input_x`.
  1722. Examples:
  1723. >>> asinh = P.Asinh()
  1724. >>> input_x = Tensor(np.array([-5.0, 1.5, 3.0, 100.0]), mindspore.float32)
  1725. >>> output = asinh(input_x)
  1726. [-2.3212, 1.1976, 1.8184, 5.2983]
  1727. """
  1728. @prim_attr_register
  1729. def __init__(self):
  1730. """init Asinh"""
  1731. def infer_shape(self, x_shape):
  1732. return x_shape
  1733. def infer_dtype(self, x_dtype):
  1734. validator.check_tensor_type_same({'x': x_dtype}, mstype.number_type, self.name)
  1735. return x_dtype
  1736. class Sinh(PrimitiveWithInfer):
  1737. """
  1738. Computes hyperbolic sine of input element-wise.
  1739. Inputs:
  1740. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  1741. Outputs:
  1742. Tensor, has the same shape as `input_x`.
  1743. Examples:
  1744. >>> sinh = P.Sinh()
  1745. >>> input_x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
  1746. >>> output = sinh(input_x)
  1747. [0.6604918 0.28367308 0.44337422 0.6604918]
  1748. """
  1749. @prim_attr_register
  1750. def __init__(self):
  1751. """init Sinh"""
  1752. def infer_shape(self, x_shape):
  1753. return x_shape
  1754. def infer_dtype(self, x_dtype):
  1755. validator.check_tensor_type_same({'x': x_dtype}, mstype.number_type, self.name)
  1756. return x_dtype
  1757. class _LogicBinaryOp(_BinaryOp):
  1758. """
  1759. Define logic binary operators.
  1760. """
  1761. @staticmethod
  1762. def do_infer_dtype(x_dtype, y_dtype, valid_type=mstype.number_type, prim_name=None):
  1763. args_dtype = {"x": x_dtype, "y": y_dtype}
  1764. validator.check_tensor_type_same(args_dtype, valid_type, prim_name)
  1765. return mstype.tensor_type(mstype.bool_)
  1766. def infer_dtype(self, x_dtype, y_dtype):
  1767. return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, prim_name=self.name)
  1768. class Equal(_LogicBinaryOp):
  1769. """
  1770. Computes the equivalence between two tensors element-wise.
  1771. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1772. The inputs must be two tensors or one tensor and one scalar.
  1773. When the inputs are two tensors, the shapes of them could be broadcast.
  1774. When the inputs are one tensor and one scalar, the scalar could only be a constant.
  1775. Inputs:
  1776. - **input_x** (Union[Tensor, Number]) - The first input is a number or
  1777. a tensor whose data type is number.
  1778. - **input_y** (Union[Tensor, Number]) - The second input is a number
  1779. when the first input is a tensor or a tensor whose data type is number.
  1780. Outputs:
  1781. Tensor, the shape is the same as the one after broadcasting,and the data type is bool.
  1782. Examples:
  1783. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32)
  1784. >>> equal = P.Equal()
  1785. >>> equal(input_x, 2.0)
  1786. [False, True, False]
  1787. >>>
  1788. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  1789. >>> input_y = Tensor(np.array([1, 2, 4]), mindspore.int32)
  1790. >>> equal = P.Equal()
  1791. >>> equal(input_x, input_y)
  1792. [True, True, False]
  1793. """
  1794. def infer_dtype(self, x_dtype, y_dtype):
  1795. return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type + (mstype.bool_,), self.name)
  1796. class ApproximateEqual(_LogicBinaryOp):
  1797. """
  1798. Returns true if abs(x1-x2) is smaller than tolerance element-wise, otherwise false.
  1799. Inputs of `x1` and `x2` comply with the implicit type conversion rules to make the data types consistent.
  1800. If they have different data types, lower priority data type will be converted to
  1801. relatively highest priority data type.
  1802. RuntimeError exception will be thrown when the data type conversion of Parameter is required.
  1803. Args:
  1804. tolerance (float): The maximum deviation that two elements can be considered equal. Default: 1e-05.
  1805. Inputs:
  1806. - **x1** (Tensor) - A tensor. Must be one of the following types: float32, float16.
  1807. - **x2** (Tensor) - A tensor of the same type and shape as 'x1'.
  1808. Outputs:
  1809. Tensor, the shape is the same as the shape of 'x1', and the data type is bool.
  1810. Examples:
  1811. >>> x1 = Tensor(np.array([1, 2, 3]), mindspore.float32)
  1812. >>> x2 = Tensor(np.array([2, 4, 6]), mindspore.float32)
  1813. >>> approximate_equal = P.ApproximateEqual(2.)
  1814. >>> result = approximate_equal(x1, x2)
  1815. [True True False]
  1816. """
  1817. @prim_attr_register
  1818. def __init__(self, tolerance=1e-05):
  1819. """Init ApproximateEqual"""
  1820. validator.check_value_type("tolerance", tolerance, [float], self.name)
  1821. def infer_shape(self, x_shape, y_shape):
  1822. validator.check("x_shape", x_shape, "y_shape", y_shape, Rel.EQ, self.name)
  1823. return x_shape
  1824. def infer_dtype(self, x_dtype, y_dtype):
  1825. args_dtype = {"x": x_dtype, "y": y_dtype}
  1826. valid_type = [mstype.float32, mstype.float16]
  1827. validator.check_tensor_type_same(args_dtype, valid_type, prim_name=self.name)
  1828. return mstype.tensor_type(mstype.bool_)
  1829. class EqualCount(PrimitiveWithInfer):
  1830. """
  1831. Computes the number of the same elements of two tensors.
  1832. The two input tensors should have the same data type and shape.
  1833. Inputs:
  1834. - **input_x** (Tensor) - The first input tensor.
  1835. - **input_y** (Tensor) - The second input tensor.
  1836. Outputs:
  1837. Tensor, with the type same as input tensor and size as (1,).
  1838. Examples:
  1839. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  1840. >>> input_y = Tensor(np.array([1, 2, 4]), mindspore.int32)
  1841. >>> equal_count = P.EqualCount()
  1842. >>> equal_count(input_x, input_y)
  1843. [2]
  1844. """
  1845. @prim_attr_register
  1846. def __init__(self):
  1847. """init EqualCount"""
  1848. self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
  1849. def infer_shape(self, x_shape, y_shape):
  1850. validator.check("x_shape", x_shape, "y_shape", y_shape, Rel.EQ, self.name)
  1851. output_shape = (1,)
  1852. return output_shape
  1853. def infer_dtype(self, x_dtype, y_dtype):
  1854. args = {'x': x_dtype, 'y': y_dtype}
  1855. validator.check_tensor_type_same(args, mstype.number_type + (mstype.bool_,), self.name)
  1856. return x_dtype
  1857. class NotEqual(_LogicBinaryOp):
  1858. """
  1859. Computes the non-equivalence of two tensors element-wise.
  1860. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1861. The inputs must be two tensors or one tensor and one scalar.
  1862. When the inputs are two tensors, the shapes of them could be broadcast.
  1863. When the inputs are one tensor and one scalar, the scalar could only be a constant.
  1864. Inputs:
  1865. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1866. a bool or a tensor whose data type is number or bool.
  1867. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1868. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1869. Outputs:
  1870. Tensor, the shape is the same as the one after broadcasting,and the data type is bool.
  1871. Examples:
  1872. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32)
  1873. >>> not_equal = P.NotEqual()
  1874. >>> not_equal(input_x, 2.0)
  1875. [True, False, True]
  1876. >>>
  1877. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  1878. >>> input_y = Tensor(np.array([1, 2, 4]), mindspore.int32)
  1879. >>> not_equal = P.NotEqual()
  1880. >>> not_equal(input_x, input_y)
  1881. [False, False, True]
  1882. """
  1883. def infer_dtype(self, x_dtype, y_dtype):
  1884. return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type + (mstype.bool_,), self.name)
  1885. class Greater(_LogicBinaryOp):
  1886. """
  1887. Computes the boolean value of :math:`x > y` element-wise.
  1888. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1889. The inputs must be two tensors or one tensor and one scalar.
  1890. When the inputs are two tensors,
  1891. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1892. When the inputs are one tensor and one scalar,
  1893. the scalar could only be a constant.
  1894. Inputs:
  1895. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1896. a bool or a tensor whose data type is number or bool.
  1897. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1898. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1899. Outputs:
  1900. Tensor, the shape is the same as the one after broadcasting,and the data type is bool.
  1901. Examples:
  1902. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  1903. >>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
  1904. >>> greater = P.Greater()
  1905. >>> greater(input_x, input_y)
  1906. [False, True, False]
  1907. """
  1908. def infer_value(self, x, y):
  1909. if x is not None and y is not None:
  1910. x = x.asnumpy()
  1911. y = y.asnumpy()
  1912. out = np.array(np.greater(x, y))
  1913. return Tensor(out)
  1914. return None
  1915. class GreaterEqual(_LogicBinaryOp):
  1916. """
  1917. Computes the boolean value of :math:`x >= y` element-wise.
  1918. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1919. The inputs must be two tensors or one tensor and one scalar.
  1920. When the inputs are two tensors,
  1921. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1922. When the inputs are one tensor and one scalar,
  1923. the scalar could only be a constant.
  1924. Inputs:
  1925. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1926. a bool or a tensor whose data type is number or bool.
  1927. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1928. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1929. Outputs:
  1930. Tensor, the shape is the same as the one after broadcasting,and the data type is bool.
  1931. Examples:
  1932. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  1933. >>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
  1934. >>> greater_equal = P.GreaterEqual()
  1935. >>> greater_equal(input_x, input_y)
  1936. [True, True, False]
  1937. """
  1938. def infer_value(self, x, y):
  1939. if x is not None and y is not None:
  1940. x = x.asnumpy()
  1941. y = y.asnumpy()
  1942. out = np.array(np.greater_equal(x, y))
  1943. return Tensor(out)
  1944. return None
  1945. class Less(_LogicBinaryOp):
  1946. """
  1947. Computes the boolean value of :math:`x < y` element-wise.
  1948. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1949. The inputs must be two tensors or one tensor and one scalar.
  1950. When the inputs are two tensors,
  1951. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1952. When the inputs are one tensor and one scalar,
  1953. the scalar could only be a constant.
  1954. Inputs:
  1955. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1956. a bool or a tensor whose data type is number or bool.
  1957. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1958. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1959. Outputs:
  1960. Tensor, the shape is the same as the one after broadcasting,and the data type is bool.
  1961. Examples:
  1962. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  1963. >>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
  1964. >>> less = P.Less()
  1965. >>> less(input_x, input_y)
  1966. [False, False, True]
  1967. """
  1968. def infer_value(self, x, y):
  1969. if x is not None and y is not None:
  1970. x = x.asnumpy()
  1971. y = y.asnumpy()
  1972. out = np.array(np.less(x, y))
  1973. return Tensor(out)
  1974. return None
  1975. class LessEqual(_LogicBinaryOp):
  1976. """
  1977. Computes the boolean value of :math:`x <= y` element-wise.
  1978. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1979. The inputs must be two tensors or one tensor and one scalar.
  1980. When the inputs are two tensors,
  1981. dtypes of them cannot be both bool , and the shapes of them could be broadcast.
  1982. When the inputs are one tensor and one scalar,
  1983. the scalar could only be a constant.
  1984. Inputs:
  1985. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1986. a bool or a tensor whose data type is number or bool.
  1987. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1988. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1989. Outputs:
  1990. Tensor, the shape is the same as the one after broadcasting,and the data type is bool.
  1991. Examples:
  1992. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  1993. >>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
  1994. >>> less_equal = P.LessEqual()
  1995. >>> less_equal(input_x, input_y)
  1996. [True, False, True]
  1997. """
  1998. def infer_value(self, x, y):
  1999. if x is not None and y is not None:
  2000. x = x.asnumpy()
  2001. y = y.asnumpy()
  2002. out = np.array(np.less_equal(x, y))
  2003. return Tensor(out)
  2004. return None
  2005. class LogicalNot(PrimitiveWithInfer):
  2006. """
  2007. Computes the "logical NOT" of a tensor element-wise.
  2008. Inputs:
  2009. - **input_x** (Tensor) - The input tensor whose dtype is bool.
  2010. Outputs:
  2011. Tensor, the shape is the same as the `input_x`, and the dtype is bool.
  2012. Examples:
  2013. >>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_)
  2014. >>> logical_not = P.LogicalNot()
  2015. >>> logical_not(input_x)
  2016. [False, True, False]
  2017. """
  2018. @prim_attr_register
  2019. def __init__(self):
  2020. """init LogicalNot"""
  2021. self.init_prim_io_names(inputs=['x'], outputs=['output'])
  2022. def infer_shape(self, x_shape):
  2023. return x_shape
  2024. def infer_dtype(self, x_dtype):
  2025. validator.check_tensor_type_same({"x": x_dtype}, [mstype.bool_], self.name)
  2026. return mstype.tensor_type(mstype.bool_)
  2027. class LogicalAnd(_LogicBinaryOp):
  2028. """
  2029. Computes the "logical AND" of two tensors element-wise.
  2030. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  2031. The inputs must be two tensors or one tensor and one bool.
  2032. When the inputs are two tensors, the shapes of them could be broadcast,
  2033. and the data types of them should be bool.
  2034. When the inputs are one tensor and one bool, the bool object could only be a constant,
  2035. and the data type of the tensor should be bool.
  2036. Inputs:
  2037. - **input_x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type is bool.
  2038. - **input_y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or
  2039. a tensor whose data type is bool.
  2040. Outputs:
  2041. Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
  2042. Examples:
  2043. >>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_)
  2044. >>> input_y = Tensor(np.array([True, True, False]), mindspore.bool_)
  2045. >>> logical_and = P.LogicalAnd()
  2046. >>> logical_and(input_x, input_y)
  2047. [True, False, False]
  2048. """
  2049. def infer_dtype(self, x_dtype, y_dtype):
  2050. return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, (mstype.bool_,), self.name)
  2051. class LogicalOr(_LogicBinaryOp):
  2052. """
  2053. Computes the "logical OR" of two tensors element-wise.
  2054. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  2055. The inputs must be two tensors or one tensor and one bool.
  2056. When the inputs are two tensors, the shapes of them could be broadcast,
  2057. and the data types of them should be bool.
  2058. When the inputs are one tensor and one bool, the bool object could only be a constant,
  2059. and the data type of the tensor should be bool.
  2060. Inputs:
  2061. - **input_x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type is bool.
  2062. - **input_y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or
  2063. a tensor whose data type is bool.
  2064. Outputs:
  2065. Tensor, the shape is the same as the one after broadcasting,and the data type is bool.
  2066. Examples:
  2067. >>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_)
  2068. >>> input_y = Tensor(np.array([True, True, False]), mindspore.bool_)
  2069. >>> logical_or = P.LogicalOr()
  2070. >>> logical_or(input_x, input_y)
  2071. [True, True, True]
  2072. """
  2073. def infer_dtype(self, x_dtype, y_dtype):
  2074. return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, (mstype.bool_,), self.name)
  2075. class IsNan(PrimitiveWithInfer):
  2076. """
  2077. Judge which elements are nan for each position.
  2078. Inputs:
  2079. - **input_x** (Tensor) - The input tensor.
  2080. Outputs:
  2081. Tensor, has the same shape of input, and the dtype is bool.
  2082. Examples:
  2083. >>> is_nan = P.IsNan()
  2084. >>> input_x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
  2085. >>> result = is_nan(input_x)
  2086. """
  2087. @prim_attr_register
  2088. def __init__(self):
  2089. """init IsNan"""
  2090. self.init_prim_io_names(inputs=['x'], outputs=['output'])
  2091. def infer_shape(self, x_shape):
  2092. return x_shape
  2093. def infer_dtype(self, x_dtype):
  2094. return mstype.bool_
  2095. class IsInf(PrimitiveWithInfer):
  2096. """
  2097. Judging which elements are inf or -inf for each position
  2098. Inputs:
  2099. - **input_x** (Tensor) - The input tensor.
  2100. Outputs:
  2101. Tensor, has the same shape of input, and the dtype is bool.
  2102. Examples:
  2103. >>> is_inf = P.IsInf()
  2104. >>> input_x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
  2105. >>> result = is_inf(input_x)
  2106. """
  2107. @prim_attr_register
  2108. def __init__(self):
  2109. """init IsInf"""
  2110. self.init_prim_io_names(inputs=['x'], outputs=['output'])
  2111. def infer_shape(self, x_shape):
  2112. return x_shape
  2113. def infer_dtype(self, x_dtype):
  2114. return mstype.bool_
  2115. class IsFinite(PrimitiveWithInfer):
  2116. """
  2117. Judge which elements are finite for each position.
  2118. Inputs:
  2119. - **input_x** (Tensor) - The input tensor.
  2120. Outputs:
  2121. Tensor, has the same shape of input, and the dtype is bool.
  2122. Examples:
  2123. >>> is_finite = P.IsFinite()
  2124. >>> input_x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
  2125. >>> result = is_finite(input_x)
  2126. [False True False]
  2127. """
  2128. @prim_attr_register
  2129. def __init__(self):
  2130. """init IsFinite"""
  2131. self.init_prim_io_names(inputs=['x'], outputs=['output'])
  2132. def infer_shape(self, x_shape):
  2133. return x_shape
  2134. def infer_dtype(self, x_dtype):
  2135. validator.check_subclass("x", x_dtype, mstype.tensor, self.name)
  2136. validator.check_tensor_type_same({'x': x_dtype}, mstype.number_type + (mstype.bool_,), self.name)
  2137. return mstype.bool_
  2138. class FloatStatus(PrimitiveWithInfer):
  2139. """
  2140. Determine if the elements contain Not a Number(NaN), infinite or negative infinite. 0 for normal, 1 for overflow.
  2141. Inputs:
  2142. - **input_x** (Tensor) - The input tensor. The data type must be float16 or float32.
  2143. Outputs:
  2144. Tensor, has the shape of `(1,)`, and has the same dtype of input `mindspore.dtype.float32` or
  2145. `mindspore.dtype.float16`.
  2146. Examples:
  2147. >>> float_status = P.FloatStatus()
  2148. >>> input_x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
  2149. >>> result = float_status(input_x)
  2150. """
  2151. @prim_attr_register
  2152. def __init__(self):
  2153. """init FloatStatus"""
  2154. self.init_prim_io_names(inputs=['x'], outputs=['output'])
  2155. def infer_shape(self, x_shape):
  2156. return [1]
  2157. def infer_dtype(self, x_dtype):
  2158. validator.check_tensor_type_same({'x': x_dtype}, [mstype.float32, mstype.float16], self.name)
  2159. return x_dtype
  2160. class NPUAllocFloatStatus(PrimitiveWithInfer):
  2161. """
  2162. Allocates a flag to store the overflow status.
  2163. The flag is a tensor whose shape is `(8,)` and data type is `mindspore.dtype.float32`.
  2164. Note:
  2165. Examples: see `NPUGetFloatStatus`.
  2166. Outputs:
  2167. Tensor, has the shape of `(8,)`.
  2168. Examples:
  2169. >>> alloc_status = P.NPUAllocFloatStatus()
  2170. >>> init = alloc_status()
  2171. Tensor([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=(8,), dtype=mindspore.float32)
  2172. """
  2173. @prim_attr_register
  2174. def __init__(self):
  2175. """init NPUAllocFloatStatus"""
  2176. self.add_prim_attr("_side_effect_flag", True)
  2177. def infer_shape(self):
  2178. return [8]
  2179. def infer_dtype(self):
  2180. return mstype.float32
  2181. class NPUGetFloatStatus(PrimitiveWithInfer):
  2182. """
  2183. Updates the flag which is the output tensor of `NPUAllocFloatStatus` with latest overflow status.
  2184. The flag is a tensor whose shape is `(8,)` and data type is `mindspore.dtype.float32`.
  2185. If the sum of the flag equals 0, there is no overflow happened. If the sum of the flag is bigger than 0, there
  2186. is overflow happened.
  2187. Inputs:
  2188. - **input_x** (Tensor) - The output tensor of `NPUAllocFloatStatus`.
  2189. The data type must be float16 or float32.
  2190. Outputs:
  2191. Tensor, has the same shape as `input_x`. All the elements in the tensor will be zero.
  2192. Examples:
  2193. >>> alloc_status = P.NPUAllocFloatStatus()
  2194. >>> get_status = P.NPUGetFloatStatus()
  2195. >>> init = alloc_status()
  2196. >>> flag = get_status(init)
  2197. Tensor([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=(8,), dtype=mindspore.float32)
  2198. """
  2199. @prim_attr_register
  2200. def __init__(self):
  2201. """init NPUGetFloatStatus"""
  2202. self.add_prim_attr("_side_effect_flag", True)
  2203. def infer_shape(self, x_shape):
  2204. cls_name = self.name
  2205. validator.check_integer("len(x_shape)", len(x_shape), 1, Rel.EQ, cls_name)
  2206. validator.check_integer("x_shape[0]", x_shape[0], 8, Rel.EQ, cls_name)
  2207. return [8]
  2208. def infer_dtype(self, x_dtype):
  2209. validator.check_tensor_type_same({'x': x_dtype}, [mstype.float16, mstype.float32], self.name)
  2210. return mstype.float32
  2211. class NPUClearFloatStatus(PrimitiveWithInfer):
  2212. """
  2213. Clear the flag which stores the overflow status.
  2214. Note:
  2215. The flag is in the register on the `Ascend` device. It will be reset and can not be reused again after the
  2216. `NPUClearFloatStatus` is called.
  2217. Examples: see `NPUGetFloatStatus`.
  2218. Inputs:
  2219. - **input_x** (Tensor) - The output tensor of `NPUAllocFloatStatus`.
  2220. The data type must be float16 or float32.
  2221. Outputs:
  2222. Tensor, has the same shape as `input_x`. All the elements in the tensor will be zero.
  2223. Examples:
  2224. >>> alloc_status = P.NPUAllocFloatStatus()
  2225. >>> get_status = P.NPUGetFloatStatus()
  2226. >>> clear_status = P.NPUClearFloatStatus()
  2227. >>> init = alloc_status()
  2228. >>> flag = get_status(init)
  2229. >>> clear = clear_status(init)
  2230. Tensor([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=(8,), dtype=mindspore.float32)
  2231. """
  2232. @prim_attr_register
  2233. def __init__(self):
  2234. """init NPUClearFloatStatus"""
  2235. self.add_prim_attr("_side_effect_flag", True)
  2236. def infer_shape(self, x_shape):
  2237. cls_name = self.name
  2238. validator.check_integer("len(x_shape)", len(x_shape), 1, Rel.EQ, cls_name)
  2239. validator.check_integer("x_shape[0]", x_shape[0], 8, Rel.EQ, cls_name)
  2240. return [8]
  2241. def infer_dtype(self, x_dtype):
  2242. validator.check_tensor_type_same({'x': x_dtype}, [mstype.float16, mstype.float32], self.name)
  2243. return mstype.float32
  2244. class Cos(PrimitiveWithInfer):
  2245. """
  2246. Computes cosine of input element-wise.
  2247. Inputs:
  2248. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  2249. Outputs:
  2250. Tensor, has the same shape as `input_x`.
  2251. Examples:
  2252. >>> cos = P.Cos()
  2253. >>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
  2254. >>> output = cos(input_x)
  2255. """
  2256. @prim_attr_register
  2257. def __init__(self):
  2258. """init Cos"""
  2259. def infer_shape(self, x_shape):
  2260. return x_shape
  2261. def infer_dtype(self, x_dtype):
  2262. validator.check_tensor_type_same({'x': x_dtype}, mstype.number_type, self.name)
  2263. return x_dtype
  2264. class ACos(PrimitiveWithInfer):
  2265. """
  2266. Computes arccosine of input element-wise.
  2267. Inputs:
  2268. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  2269. Outputs:
  2270. Tensor, has the same shape as `input_x`.
  2271. Examples:
  2272. >>> acos = P.ACos()
  2273. >>> input_x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
  2274. >>> output = acos(input_x)
  2275. """
  2276. @prim_attr_register
  2277. def __init__(self):
  2278. """init ACos"""
  2279. def infer_shape(self, x_shape):
  2280. return x_shape
  2281. def infer_dtype(self, x_dtype):
  2282. validator.check_tensor_type_same({'x': x_dtype}, mstype.number_type, self.name)
  2283. return x_dtype
  2284. class Sin(PrimitiveWithInfer):
  2285. """
  2286. Computes sine of input element-wise.
  2287. Inputs:
  2288. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  2289. Outputs:
  2290. Tensor, has the same shape as `input_x`.
  2291. Examples:
  2292. >>> sin = P.Sin()
  2293. >>> input_x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
  2294. >>> output = sin(input_x)
  2295. """
  2296. @prim_attr_register
  2297. def __init__(self):
  2298. """Init Sin."""
  2299. def infer_shape(self, x_shape):
  2300. return x_shape
  2301. def infer_dtype(self, x_dtype):
  2302. validator.check_tensor_type_same({'x': x_dtype}, mstype.number_type, self.name)
  2303. return x_dtype
  2304. class Asin(PrimitiveWithInfer):
  2305. """
  2306. Computes arcsine of input element-wise.
  2307. Inputs:
  2308. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  2309. Outputs:
  2310. Tensor, has the same shape as `input_x`.
  2311. Examples:
  2312. >>> asin = P.Asin()
  2313. >>> input_x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
  2314. >>> output = asin(input_x)
  2315. [0.8331, 0.0400, 0.3047, 0.5944]
  2316. """
  2317. @prim_attr_register
  2318. def __init__(self):
  2319. """init Asin"""
  2320. def infer_shape(self, x_shape):
  2321. return x_shape
  2322. def infer_dtype(self, x_dtype):
  2323. validator.check_tensor_type_same({'x': x_dtype}, mstype.number_type, self.name)
  2324. return x_dtype
  2325. class NMSWithMask(PrimitiveWithInfer):
  2326. """
  2327. Select some bounding boxes in descending order of score.
  2328. Args:
  2329. iou_threshold (float): Specifies the threshold of overlap boxes with respect to
  2330. IOU. Default: 0.5.
  2331. Raises:
  2332. ValueError: If the iou_threshold is not a float number, or if the first dimension
  2333. of input Tensor is less than or equal to 0, or if the data type of the input
  2334. Tensor is not float16 or float32.
  2335. Inputs:
  2336. - **bboxes** (Tensor) - The shape of tensor is :math:`(N, 5)`. Input bounding boxes.
  2337. `N` is the number of input bounding boxes. Every bounding box
  2338. contains 5 values, the first 4 values are the coordinates of bounding
  2339. box, and the last value is the score of this bounding box.
  2340. The data type must be float16 or float32.
  2341. Outputs:
  2342. tuple[Tensor], tuple of three tensors, they are selected_boxes, selected_idx and selected_mask.
  2343. - **selected_boxes** (Tensor) - The shape of tensor is :math:`(N, 5)`. Bounding boxes
  2344. list after non-max suppression calculation.
  2345. - **selected_idx** (Tensor) - The shape of tensor is :math:`(N,)`. The indexes list of
  2346. valid input bounding boxes.
  2347. - **selected_mask** (Tensor) - The shape of tensor is :math:`(N,)`. A mask list of
  2348. valid output bounding boxes.
  2349. Examples:
  2350. >>> bbox = np.random.rand(128, 5)
  2351. >>> bbox[:, 2] += bbox[:, 0]
  2352. >>> bbox[:, 3] += bbox[:, 1]
  2353. >>> inputs = Tensor(bbox, mindspore.float32)
  2354. >>> nms = P.NMSWithMask(0.5)
  2355. >>> output_boxes, indices, mask = nms(inputs)
  2356. """
  2357. @prim_attr_register
  2358. def __init__(self, iou_threshold=0.5):
  2359. """Init NMSWithMask"""
  2360. validator.check_value_type("iou_threshold", iou_threshold, [float], self.name)
  2361. self.init_prim_io_names(inputs=['bboxes'], outputs=['selected_boxes', 'selected_idx', 'selected_mask'])
  2362. self.is_ge = context.get_context("enable_ge")
  2363. def infer_shape(self, bboxes_shape):
  2364. cls_name = self.name
  2365. validator.check_integer("bboxes rank", len(bboxes_shape), 2, Rel.EQ, cls_name)
  2366. validator.check_integer("bboxes.shape[0]", bboxes_shape[0], 0, Rel.GT, cls_name)
  2367. validator.check_integer("bboxes.shape[1]", bboxes_shape[1], 5, Rel.EQ, cls_name)
  2368. num = bboxes_shape[0]
  2369. return (bboxes_shape, (num,), (num,))
  2370. def infer_dtype(self, bboxes_dtype):
  2371. validator.check_tensor_type_same({"bboxes": bboxes_dtype}, [mstype.float16, mstype.float32], self.name)
  2372. return (bboxes_dtype, mstype.int32, mstype.bool_)
  2373. class Abs(PrimitiveWithInfer):
  2374. """
  2375. Returns absolute value of a tensor element-wise.
  2376. Inputs:
  2377. - **input_x** (Tensor) - The input tensor. The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  2378. Outputs:
  2379. Tensor, has the same shape as the `input_x`.
  2380. Examples:
  2381. >>> input_x = Tensor(np.array([-1.0, 1.0, 0.0]), mindspore.float32)
  2382. >>> abs = P.Abs()
  2383. >>> abs(input_x)
  2384. [1.0, 1.0, 0.0]
  2385. """
  2386. @prim_attr_register
  2387. def __init__(self):
  2388. """init Abs"""
  2389. self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
  2390. def infer_shape(self, x_shape):
  2391. return x_shape
  2392. def infer_dtype(self, x_type):
  2393. validator.check_tensor_type_same({'x': x_type}, mstype.number_type, self.name)
  2394. return x_type
  2395. def infer_value(self, x):
  2396. if x is not None:
  2397. x = x.asnumpy()
  2398. out = np.array(np.abs(x, dtype=x.dtype))
  2399. return Tensor(out)
  2400. return None
  2401. class Sign(PrimitiveWithInfer):
  2402. r"""
  2403. Perform :math:`sign` on tensor element-wise.
  2404. Note:
  2405. .. math::
  2406. sign(x) = \begin{cases} -1, &if\ x < 0 \cr
  2407. 0, &if\ x == 0 \cr
  2408. 1, &if\ x > 0\end{cases}
  2409. Inputs:
  2410. - **input_x** (Tensor) - The input tensor.
  2411. Outputs:
  2412. Tensor, has the same shape and type as the `input_x`.
  2413. Examples:
  2414. >>> input_x = Tensor(np.array([[2.0, 0.0, -1.0]]), mindspore.float32)
  2415. >>> sign = P.Sign()
  2416. >>> output = sign(input_x)
  2417. [[1.0, 0.0, -1.0]]
  2418. """
  2419. @prim_attr_register
  2420. def __init__(self):
  2421. pass
  2422. def infer_shape(self, x_shape):
  2423. return x_shape
  2424. def infer_dtype(self, x_dtype):
  2425. validator.check_tensor_type_same({'x': x_dtype}, mstype.number_type, self.name)
  2426. return x_dtype
  2427. class Round(PrimitiveWithInfer):
  2428. """
  2429. Returns half to even of a tensor element-wise.
  2430. Inputs:
  2431. - **input_x** (Tensor) - The input tensor.
  2432. Outputs:
  2433. Tensor, has the same shape and type as the `input_x`.
  2434. Examples:
  2435. >>> input_x = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]), mindspore.float32)
  2436. >>> round = P.Round()
  2437. >>> round(input_x)
  2438. [1.0, 2.0, 2.0, 2.0, -4.0]
  2439. """
  2440. @prim_attr_register
  2441. def __init__(self):
  2442. """init Round"""
  2443. self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
  2444. def infer_shape(self, x_shape):
  2445. return x_shape
  2446. def infer_dtype(self, x_type):
  2447. validator.check_tensor_type_same({'x': x_type}, mstype.number_type, self.name)
  2448. return x_type
  2449. class Tan(PrimitiveWithInfer):
  2450. """
  2451. Computes tangent of `input_x` element-wise.
  2452. Inputs:
  2453. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. Data type should be
  2454. float16, float32 or int32.
  2455. Outputs:
  2456. Tensor, has the same shape as `input_x`.
  2457. Examples:
  2458. >>> tan = P.Tan()
  2459. >>> input_x = Tensor(np.array([-1.0, 0.0, 1.0]), mindspore.float32)
  2460. >>> output = tan(input_x)
  2461. """
  2462. @prim_attr_register
  2463. def __init__(self):
  2464. """init Tan"""
  2465. def infer_shape(self, x_shape):
  2466. return x_shape
  2467. def infer_dtype(self, x_type):
  2468. valid_types = [mstype.float16, mstype.float32, mstype.int32]
  2469. validator.check_tensor_type_same({'x': x_type}, valid_types, self.name)
  2470. return x_type
  2471. class Atan(PrimitiveWithInfer):
  2472. """
  2473. Computes the trigonometric inverse tangent of the input element-wise.
  2474. Inputs:
  2475. - **input_x** (Tensor): The input tensor.
  2476. Outputs:
  2477. A Tensor, has the same type as the input.
  2478. Examples:
  2479. >>> input_x = Tensor(np.array([1.047, 0.785]), mindspore.float32)
  2480. >>> tan = P.Tan()
  2481. >>> output_y = tan(input_x)
  2482. >>> atan = P.Atan()
  2483. >>> atan(output_y)
  2484. [[1.047, 07850001]]
  2485. """
  2486. @prim_attr_register
  2487. def __init__(self):
  2488. pass
  2489. def infer_shape(self, x_shape):
  2490. return x_shape
  2491. def infer_dtype(self, x_type):
  2492. validator.check_tensor_type_same({'x': x_type}, mstype.number_type, self.name)
  2493. return x_type
  2494. class Atanh(PrimitiveWithInfer):
  2495. """
  2496. Computes inverse hyperbolic tangent of the input element-wise.
  2497. Inputs:
  2498. - **input_x** (Tensor): The input tensor.
  2499. Outputs:
  2500. A Tensor, has the same type as the input.
  2501. Examples:
  2502. >>> input_x = Tensor(np.array([1.047, 0.785]), mindspore.float32)
  2503. >>> atanh = P.Atanh()
  2504. >>> atanh(input_x)
  2505. [[1.8869909 1.058268]]
  2506. """
  2507. @prim_attr_register
  2508. def __init__(self):
  2509. pass
  2510. def infer_shape(self, x_shape):
  2511. return x_shape
  2512. def infer_dtype(self, x_type):
  2513. validator.check_tensor_type_same({'x': x_type}, mstype.number_type, self.name)
  2514. return x_type
class Atan2(_MathBinaryOp):
    r"""
    Returns arctangent of input_x/input_y element-wise.

    It returns :math:`\theta\ \in\ [-\pi, \pi]`
    such that :math:`x = r*\sin(\theta), y = r*\cos(\theta)`, where :math:`r = \sqrt{x^2 + y^2}`.

    Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the
    data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Inputs:
        - **input_x** (Tensor) - The input tensor.
        - **input_y** (Tensor) - The input tensor.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting, and the data type is same as
        `input_x`.

    Examples:
        >>> input_x = Tensor(np.array([[0, 1]]), mindspore.float32)
        >>> input_y = Tensor(np.array([[1, 1]]), mindspore.float32)
        >>> atan2 = P.Atan2()
        >>> atan2(input_x, input_y)
        [[0. 0.7853982]]
    """
  2536. class SquareSumAll(PrimitiveWithInfer):
  2537. """
  2538. Returns square sum all of a tensor element-wise
  2539. Inputs:
  2540. - **input_x1** (Tensor) - The input tensor. The data type must be float16 or float32.
  2541. - **input_x2** (Tensor) - The input tensor same type and shape as the `input_x1`.
  2542. Note:
  2543. SquareSumAll only supports float16 and float32 data type.
  2544. Outputs:
  2545. - **output_y1** (Tensor) - The same type as the `input_x1`.
  2546. - **output_y2** (Tensor) - The same type as the `input_x1`.
  2547. Examples:
  2548. >>> input_x1 = Tensor(np.random.randint([3, 2, 5, 7]), mindspore.float32)
  2549. >>> input_x2 = Tensor(np.random.randint([3, 2, 5, 7]), mindspore.float32)
  2550. >>> square_sum_all = P.SquareSumAll()
  2551. >>> square_sum_all(input_x1, input_x2)
  2552. """
  2553. @prim_attr_register
  2554. def __init__(self):
  2555. """init SquareSumAll"""
  2556. def infer_shape(self, x_shape, y_shape):
  2557. validator.check("x1_shape", x_shape, "x2_shape", y_shape, Rel.EQ, self.name)
  2558. return [], []
  2559. def infer_dtype(self, x_type, y_type):
  2560. validator.check_tensor_type_same({'x1_type': x_type}, [mstype.float16, mstype.float32], self.name)
  2561. validator.check_tensor_type_same({'x2_type': y_type}, [mstype.float16, mstype.float32], self.name)
  2562. return x_type, y_type
class BitwiseAnd(_BitwiseBinaryOp):
    """
    Returns bitwise `and` of two tensors element-wise.

    Inputs of `input_x1` and `input_x2` comply with the implicit type conversion rules to
    make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Inputs:
        - **input_x1** (Tensor) - The input tensor with int16, int32 or uint16 data type.
        - **input_x2** (Tensor) - The input tensor with same type as the `input_x1`.

    Outputs:
        - **y** (Tensor) - The same type as the `input_x1`.

    Examples:
        >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16)
        >>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mstype.int16)
        >>> bitwise_and = P.BitwiseAnd()
        >>> bitwise_and(input_x1, input_x2)
        [0, 0, 1, -1, 1, 0, 1]
    """
class BitwiseOr(_BitwiseBinaryOp):
    """
    Returns bitwise `or` of two tensors element-wise.

    Inputs of `input_x1` and `input_x2` comply with the implicit type conversion rules to
    make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Inputs:
        - **input_x1** (Tensor) - The input tensor with int16, int32 or uint16 data type.
        - **input_x2** (Tensor) - The input tensor with same type as the `input_x1`.

    Outputs:
        - **y** (Tensor) - The same type as the `input_x1`.

    Examples:
        >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16)
        >>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mstype.int16)
        >>> bitwise_or = P.BitwiseOr()
        >>> bitwise_or(input_x1, input_x2)
        [0, 1, 1, -1, -1, 3, 3]
    """
class BitwiseXor(_BitwiseBinaryOp):
    """
    Returns bitwise `xor` of two tensors element-wise.

    Inputs of `input_x1` and `input_x2` comply with the implicit type conversion rules to
    make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Inputs:
        - **input_x1** (Tensor) - The input tensor with int16, int32 or uint16 data type.
        - **input_x2** (Tensor) - The input tensor with same type as the `input_x1`.

    Outputs:
        - **y** (Tensor) - The same type as the `input_x1`.

    Examples:
        >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16)
        >>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mstype.int16)
        >>> bitwise_xor = P.BitwiseXor()
        >>> bitwise_xor(input_x1, input_x2)
        [0, 1, 0, 0, -2, 3, 2]
    """
  2623. class BesselI0e(PrimitiveWithInfer):
  2624. """
  2625. Computes BesselI0e of input element-wise.
  2626. Inputs:
  2627. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  2628. Outputs:
  2629. Tensor, has the same shape as `input_x`. Data type should be float16 or float32.
  2630. Examples:
  2631. >>> bessel_i0e = P.BesselI0e()
  2632. >>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
  2633. >>> output = bessel_i0e(input_x)
  2634. [0.7979961, 0.5144438, 0.75117415, 0.9157829]
  2635. """
  2636. @prim_attr_register
  2637. def __init__(self):
  2638. """init BesselI0e"""
  2639. def infer_shape(self, x):
  2640. return x
  2641. def infer_dtype(self, x):
  2642. validator.check_tensor_type_same({'x': x}, mstype.number_type, self.name)
  2643. return x
  2644. class BesselI1e(PrimitiveWithInfer):
  2645. """
  2646. Computes BesselI1e of input element-wise.
  2647. Inputs:
  2648. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  2649. Outputs:
  2650. Tensor, has the same shape as `input_x`. Data type should be float16 or float32.
  2651. Examples:
  2652. >>> bessel_i1e = P.BesselI1e()
  2653. >>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
  2654. >>> output = bessel_i1e(input_x)
  2655. [0.09507662, 0.19699717, 0.11505538, 0.04116856]
  2656. """
  2657. @prim_attr_register
  2658. def __init__(self):
  2659. """init BesselI1e"""
  2660. def infer_shape(self, x):
  2661. return x
  2662. def infer_dtype(self, x):
  2663. validator.check_tensor_type_same({'x': x}, mstype.number_type, self.name)
  2664. return x
  2665. class Inv(PrimitiveWithInfer):
  2666. """
  2667. Computes Inv(Reciprocal) of input tensor element-wise.
  2668. Inputs:
  2669. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  2670. Must be one of the following types: float16, float32, int32.
  2671. Outputs:
  2672. Tensor, has the same shape and data type as `input_x`.
  2673. Examples:
  2674. >>> inv = P.Inv()
  2675. >>> input_x = Tensor(np.array([0.25, 0.4, 0.31, 0.52]), mindspore.float32)
  2676. >>> output = inv(input_x)
  2677. [4., 2.5, 3.2258065, 1.923077]
  2678. """
  2679. @prim_attr_register
  2680. def __init__(self):
  2681. pass
  2682. def infer_shape(self, x_shape):
  2683. return x_shape
  2684. def infer_dtype(self, x_dtype):
  2685. validator.check_tensor_type_same({'x_dtype': x_dtype}, [mstype.float16, mstype.float32,
  2686. mstype.int32], self.name)
  2687. return x_dtype
  2688. class Invert(PrimitiveWithInfer):
  2689. """
  2690. Flips all bits of input tensor element-wise.
  2691. Inputs:
  2692. - **input_x** (Tensor[int16], Tensor[uint16]) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  2693. Outputs:
  2694. Tensor, has the same shape as `input_x`.
  2695. Examples:
  2696. >>> invert = P.Invert()
  2697. >>> input_x = Tensor(np.array([25, 4, 13, 9]), mindspore.int16)
  2698. >>> output = invert(input_x)
  2699. [-26, -5, -14, -10]
  2700. """
  2701. @prim_attr_register
  2702. def __init__(self):
  2703. pass
  2704. def infer_shape(self, x_shape):
  2705. return x_shape
  2706. def infer_dtype(self, x_dtype):
  2707. validator.check_tensor_type_same({'x_dtype': x_dtype}, [mstype.int16, mstype.uint16], self.name)
  2708. return x_dtype
  2709. class Eps(PrimitiveWithInfer):
  2710. """
  2711. Creates a tensor filled with `input_x` dtype minimum val.
  2712. Inputs:
  2713. - **input_x** (Tensor) - Input tensor. The data type must be float16 or float32.
  2714. Outputs:
  2715. Tensor, has the same type and shape as `input_x`, but filled with `input_x` dtype minimum val.
  2716. Examples:
  2717. >>> out = P.Eps()(input_x)
  2718. """
  2719. @prim_attr_register
  2720. def __init__(self):
  2721. """init Eps"""
  2722. self.init_prim_io_names(inputs=['input_x'], outputs=['y'])
  2723. def __infer__(self, input_x):
  2724. valid_types = [mstype.float16, mstype.float32]
  2725. validator.check_tensor_type_same({'input_x': input_x['dtype']}, valid_types, self.name)
  2726. x_nptype = mstype.dtype_to_nptype(input_x['dtype'].element_type())
  2727. if x_nptype == np.float16:
  2728. min_val = 2 ** (-14)
  2729. else:
  2730. min_val = 2 ** (-16)
  2731. res = np.full(input_x['shape'], min_val, x_nptype)
  2732. out = {
  2733. 'value': Tensor(res),
  2734. 'shape': input_x['shape'],
  2735. 'dtype': input_x['dtype'],
  2736. }
  2737. return out