You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

math_ops.py 143 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
6 years ago
6 years ago
6 years ago
6 years ago
5 years ago
6 years ago
6 years ago
6 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
737783779378037813782378337843785378637873788378937903791379237933794379537963797379837993800380138023803380438053806380738083809381038113812381338143815381638173818381938203821382238233824382538263827382838293830383138323833383438353836383738383839384038413842384338443845384638473848384938503851385238533854385538563857385838593860386138623863386438653866386738683869387038713872387338743875387638773878387938803881388238833884388538863887388838893890389138923893389438953896389738983899390039013902390339043905390639073908390939103911391239133914391539163917391839193920392139223923392439253926392739283929393039313932393339343935393639373938393939403941394239433944394539463947394839493950395139523953395439553956395739583959396039613962396339643965396639673968396939703971397239733974397539763977397839793980398139823983398439853986398739883989399039913992399339943995399639973998399940004001400240034004400540064007400840094010401140124013401440154016401740184019402040214022402340244025402640274028402940304031403240334034403540364037403840394040404140424043404440454046404740484049405040514052405340544055405640574058405940604061406240634064406540664067406840694070407140724073407440754076407740784079408040814082408340844085408640874088408940904091409240934094409540964097409840994100410141024103410441054106410741084109411041114112411341144115411641174118411941204121412241234124412541264127412841294130413141324133413441354136413741384139414041414142414341444145414641474148414941504151415241534154415541564157415841594160416141624163416441654166416741684169417041714172417341744175417641774178417941804181418241834184418541864187418841894190419141924193419441954196419741984199420042014202
  1. # Copyright 2020-2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """Operators for math."""
  16. import copy
  17. import numpy as np
  18. from ... import context
  19. from .. import signature as sig
  20. from ..._checkparam import Validator as validator
  21. from ..._checkparam import Rel
  22. from ...common import dtype as mstype
  23. from ...common.tensor import Tensor
  24. from ...common._decorator import deprecated
  25. from .._utils import get_broadcast_shape
  26. from ..primitive import PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_register, _run_op
  27. def _infer_shape_reduce(x, axis, keep_dims, prim_name):
  28. """Common infer for reduce operator"""
  29. def reduce_one_axis(one_axis):
  30. validator.check_int_range(one_axis, -dim, dim, Rel.INC_LEFT, 'axis', prim_name)
  31. if one_axis < 0:
  32. one_axis += dim
  33. axis_reduce.add(one_axis)
  34. validator.check_value_type('axis', axis, [int, tuple, list], prim_name)
  35. dim = len(x)
  36. axis_reduce = set()
  37. if isinstance(axis, int):
  38. reduce_one_axis(axis)
  39. else:
  40. if not axis:
  41. if keep_dims:
  42. return [1] * dim
  43. return []
  44. for index, one_axis in enumerate(axis):
  45. validator.check_value_type('axis[%d]' % index, one_axis, [int], prim_name)
  46. reduce_one_axis(one_axis)
  47. out_shape = []
  48. for i in range(dim):
  49. if i in axis_reduce:
  50. if keep_dims:
  51. out_shape.append(1)
  52. else:
  53. out_shape.append(x[i])
  54. return out_shape
  55. class _BinaryOp(PrimitiveWithInfer):
  56. """
  57. Define binary operators.
  58. """
  59. __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
  60. @prim_attr_register
  61. def __init__(self):
  62. """Initialize _BinaryOp"""
  63. self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
  64. def infer_shape(self, x_shape, y_shape):
  65. return get_broadcast_shape(x_shape, y_shape, self.name)
  66. class _MathBinaryOp(_BinaryOp):
  67. """
  68. Define math binary operators.
  69. """
  70. @staticmethod
  71. def do_infer_dtype(x_dtype, y_dtype, valid_dtype=mstype.number_type, prim_name=None):
  72. args_type = {"x": x_dtype, "y": y_dtype}
  73. validator.check_tensors_dtypes_same_and_valid(args_type, valid_dtype, prim_name)
  74. return x_dtype
  75. def infer_dtype(self, x_dtype, y_dtype):
  76. return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type, self.name)
  77. class _BitwiseBinaryOp(_MathBinaryOp):
  78. """
  79. Define bitwise binary operators.
  80. """
  81. @prim_attr_register
  82. def __init__(self):
  83. """Initialize _BitwiseBinaryOp"""
  84. self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])
  85. @staticmethod
  86. def _check_bitwise_op_input_type(x1_type, x2_type, prim):
  87. args = {'x1': x1_type, 'x2': x2_type}
  88. valid_dtypes = mstype.int_type + mstype.uint_type
  89. validator.check_tensors_dtypes_same_and_valid(args, valid_dtypes, prim)
  90. return x1_type
  91. def infer_dtype(self, x1_type, x2_type):
  92. return _BitwiseBinaryOp._check_bitwise_op_input_type(x1_type, x2_type, self.name)
  93. class Add(_MathBinaryOp):
  94. r"""
  95. Adds two input tensors element-wise.
  96. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  97. The inputs must be two tensors or one tensor and one scalar.
  98. When the inputs are two tensors,
  99. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  100. When the inputs are one tensor and one scalar,
  101. the scalar could only be a constant.
  102. .. math::
  103. out_{i} = x_{i} + y_{i}
  104. Inputs:
  105. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
  106. or a tensor whose data type is number or bool.
  107. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number, or a bool when the first input
  108. is a tensor, or a tensor whose data type is number or bool.
  109. Outputs:
  110. Tensor, the shape is the same as the one after broadcasting,
  111. and the data type is the one with higher precision or higher digits among the two inputs.
  112. Supported Platforms:
  113. ``Ascend`` ``GPU`` ``CPU``
  114. Examples:
  115. >>> add = ops.Add()
  116. >>> input_x = Tensor(np.array([1, 2, 3]).astype(np.float32))
  117. >>> input_y = Tensor(np.array([4, 5, 6]).astype(np.float32))
  118. >>> output = add(input_x, input_y)
  119. >>> print(output)
  120. [5. 7. 9.]
  121. """
  122. def infer_value(self, x, y):
  123. if x is not None and y is not None:
  124. x = x.asnumpy()
  125. y = y.asnumpy()
  126. out = x + y
  127. out = np.array(out, x.dtype)
  128. return Tensor(out)
  129. return None
  130. class TensorAdd(_MathBinaryOp):
  131. """
  132. Same as operator Add. TensorAdd will be deprecated in the future.
  133. Please use Add instead.
  134. """
  135. #deprecate_new_name = "Add"
  136. @deprecated("1.1", "Add", True)
  137. @prim_attr_register
  138. def __init__(self):
  139. _MathBinaryOp.__init__(self)
  140. def infer_value(self, x, y):
  141. if x is not None and y is not None:
  142. x = x.asnumpy()
  143. y = y.asnumpy()
  144. out = x + y
  145. out = np.array(out, x.dtype)
  146. return Tensor(out)
  147. return None
  148. class AssignAdd(PrimitiveWithInfer):
  149. """
  150. Updates a `Parameter` by adding a value to it.
  151. Inputs of `variable` and `value` comply with the implicit type conversion rules to make the data types consistent.
  152. If they have different data types, lower priority data type will be converted to
  153. relatively highest priority data type.
  154. If `value` is a number, the number is automatically converted to Tensor,
  155. and the data type is consistent with the Tensor data type involved in the operation.
  156. RuntimeError exception will be thrown when the data type conversion of Parameter is required.
  157. Inputs:
  158. - **variable** (Parameter) - The `Parameter`.
  159. - **value** (Union[numbers.Number, Tensor]) - The value to be added to the `variable`.
  160. It must have the same shape as `variable` if it is a Tensor.
  161. Supported Platforms:
  162. ``Ascend`` ``GPU`` ``CPU``
  163. Examples:
  164. >>> class Net(nn.Cell):
  165. ... def __init__(self):
  166. ... super(Net, self).__init__()
  167. ... self.AssignAdd = ops.AssignAdd()
  168. ... self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int64), name="global_step")
  169. ...
  170. ... def construct(self, x):
  171. ... self.AssignAdd(self.variable, x)
  172. ... return self.variable
  173. ...
  174. >>> net = Net()
  175. >>> value = Tensor(np.ones([1]).astype(np.int64)*100)
  176. >>> output = net(value)
  177. >>> print(output)
  178. Parameter (name=global_step, shape=(1,), dtype=Int64, requires_grad=True)
  179. """
  180. __mindspore_signature__ = (
  181. sig.make_sig('x', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
  182. sig.make_sig('value', dtype=sig.sig_dtype.T)
  183. )
  184. @prim_attr_register
  185. def __init__(self):
  186. """Initialize AssignAdd"""
  187. self.init_prim_io_names(inputs=['ref', 'value'], outputs=['output'])
  188. self.add_prim_attr('side_effect_mem', True)
  189. def infer_shape(self, variable, value):
  190. return value
  191. def infer_dtype(self, variable, value):
  192. args = {"variable": variable, "value": value}
  193. validator.check_scalar_or_tensor_types_same(args, mstype.number_type, self.name)
  194. return value
  195. class AssignSub(PrimitiveWithInfer):
  196. """
  197. Updates a `Parameter` by subtracting a value from it.
  198. Inputs of `variable` and `value` comply with the implicit type conversion rules to make the data types consistent.
  199. If they have different data types, lower priority data type will be converted to
  200. relatively highest priority data type.
  201. If `value` is a number, the number is automatically converted to Tensor,
  202. and the data type is consistent with the Tensor data type involved in the operation.
  203. RuntimeError exception will be thrown when the data type conversion of Parameter is required.
  204. Inputs:
  205. - **variable** (Parameter) - The `Parameter`.
  206. - **value** (Union[numbers.Number, Tensor]) - The value to be subtracted from the `variable`.
  207. It must have the same shape as `variable` if it is a Tensor.
  208. Supported Platforms:
  209. ``Ascend``
  210. Examples:
  211. >>> class Net(nn.Cell):
  212. ... def __init__(self):
  213. ... super(Net, self).__init__()
  214. ... self.AssignSub = ops.AssignSub()
  215. ... self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int32), name="global_step")
  216. ...
  217. ... def construct(self, x):
  218. ... self.AssignSub(self.variable, x)
  219. ... return self.variable
  220. ...
  221. >>> net = Net()
  222. >>> value = Tensor(np.ones([1]).astype(np.int32)*100)
  223. >>> output = net(value)
  224. >>> print(output)
  225. Parameter (name=global_step, shape=(1,), dtype=Int32, requires_grad=True)
  226. """
  227. __mindspore_signature__ = (
  228. sig.make_sig('variable', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
  229. sig.make_sig('value', dtype=sig.sig_dtype.T)
  230. )
  231. @prim_attr_register
  232. def __init__(self):
  233. """Initialize AssignSub"""
  234. self.init_prim_io_names(inputs=['ref', 'value'], outputs=['output'])
  235. self.add_prim_attr('side_effect_mem', True)
  236. def infer_shape(self, variable, value):
  237. return value
  238. def infer_dtype(self, variable, value):
  239. args = {"variable": variable, "value": value}
  240. validator.check_scalar_or_tensor_types_same(args, mstype.number_type, self.name)
  241. return value
  242. class _Reduce(PrimitiveWithInfer):
  243. """
  244. Definition of base class of reduction class operators.
  245. Args:
  246. keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
  247. If false, don't keep these dimensions.
  248. """
  249. __mindspore_signature__ = (
  250. sig.make_sig('input_x'),
  251. sig.make_sig('axis', default=())
  252. )
  253. @prim_attr_register
  254. def __init__(self, keep_dims=False):
  255. """Initialize Reduce"""
  256. validator.check_value_type('keep_dims', keep_dims, [bool], self.name)
  257. self.init_prim_io_names(inputs=['input_x', 'axis'], outputs=['y'])
  258. def __call__(self, x, axis=()):
  259. args = [x, axis]
  260. output = _run_op(self, self.name, args)
  261. return output
  262. def do_infer(self, input_x, axis, valid_dtype=mstype.number_type):
  263. """ return meta infos of input parameters """
  264. axis_v = axis['value']
  265. input_shp = input_x['shape']
  266. args = {'input_x': input_x['dtype']}
  267. validator.check_tensors_dtypes_same_and_valid(args, valid_dtype, self.name)
  268. if axis_v is None:
  269. raise ValueError(f"For {self.name}, axis must be const.")
  270. input_shp = _infer_shape_reduce(input_shp, axis_v, self.keep_dims, self.name)
  271. value = None
  272. if input_x['value'] is not None:
  273. prim_map = {
  274. 'ReduceSum': np.sum,
  275. 'ReduceMax': np.max,
  276. 'ReduceMin': np.min,
  277. }
  278. np_reduce_func = prim_map.get(self.name, None)
  279. if np_reduce_func is not None:
  280. value = input_x['value'].asnumpy()
  281. if not axis_v:
  282. axis_v = [i for i in range(len(input_x['shape']))]
  283. axis_v = tuple(axis_v)
  284. value = np_reduce_func(value, axis_v, keepdims=self.keep_dims)
  285. value = np.array(value)
  286. value = Tensor(value)
  287. if 'max_shape' and 'min_shape' in input_x:
  288. output_max_shape = _infer_shape_reduce(input_x['max_shape'], axis_v, self.keep_dims, self.name)
  289. output_min_shape = _infer_shape_reduce(input_x['min_shape'], axis_v, self.keep_dims, self.name)
  290. else:
  291. output_max_shape = input_shp
  292. output_min_shape = input_shp
  293. return {'shape': input_shp,
  294. 'min_shape': output_min_shape,
  295. 'max_shape': output_max_shape,
  296. 'dtype': input_x['dtype'],
  297. 'value': value}
  298. def __infer__(self, input_x, axis):
  299. return self.do_infer(input_x, axis)
class ReduceMean(_Reduce):
    """
    Reduces a dimension of a tensor by averaging all elements in the dimension.

    The dtype of the tensor to be reduced is number.

    Args:
        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
            If false, don't keep these dimensions. Default: False.

    Inputs:
        - **input_x** (Tensor[Number]) - The input tensor.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed. Must be in the range [-rank(input_x), rank(input_x)).

    Outputs:
        Tensor, has the same dtype as the `input_x`.

        - If axis is (), and keep_dims is False,
          the output is a 0-D tensor representing the mean of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is False,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is False,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
        >>> op = ops.ReduceMean(keep_dims=True)
        >>> output = op(input_x, 1)
        >>> result = output.shape
        >>> print(result)
        (3, 1, 5, 6)
    """
  329. class ReduceSum(_Reduce):
  330. """
  331. Reduces a dimension of a tensor by summing all elements in the dimension.
  332. The dtype of the tensor to be reduced is number.
  333. Args:
  334. keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
  335. If false, don't keep these dimensions. Default: False.
  336. Inputs:
  337. - **input_x** (Tensor[Number]) - The input tensor.
  338. - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
  339. Only constant value is allowed. Must be in the range [-rank(input_x), rank(input_x)).
  340. Outputs:
  341. Tensor, has the same dtype as the `input_x`.
  342. - If axis is (), and keep_dims is False,
  343. the output is a 0-D tensor representing the sum of all elements in the input tensor.
  344. - If axis is int, set as 2, and keep_dims is False,
  345. the shape of output is :math:`(x_1, x_3, ..., x_R)`.
  346. - If axis is tuple(int), set as (2, 3), and keep_dims is False,
  347. the shape of output is :math:`(x_1, x_4, ..., x_R)`.
  348. Supported Platforms:
  349. ``Ascend`` ``GPU`` ``CPU``
  350. Examples:
  351. >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
  352. >>> op = ops.ReduceSum(keep_dims=True)
  353. >>> output = op(input_x, 1)
  354. >>> output.shape
  355. (3, 1, 5, 6)
  356. """
  357. @prim_attr_register
  358. def __init__(self, keep_dims=False):
  359. """Initialize ReduceSum"""
  360. super(ReduceSum, self).__init__(keep_dims)
  361. self.__setattr_flag__ = True
  362. class ReduceAll(_Reduce):
  363. """
  364. Reduces a dimension of a tensor by the "logicalAND" of all elements in the dimension.
  365. The dtype of the tensor to be reduced is bool.
  366. Args:
  367. keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
  368. If false, don't keep these dimensions.
  369. Default : False, don't keep these reduced dimensions.
  370. Inputs:
  371. - **input_x** (Tensor[bool]) - The input tensor.
  372. - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
  373. Only constant value is allowed. Must be in the range [-rank(input_x), rank(input_x)).
  374. Outputs:
  375. Tensor, the dtype is bool.
  376. - If axis is (), and keep_dims is False,
  377. the output is a 0-D tensor representing the "logical and" of all elements in the input tensor.
  378. - If axis is int, set as 2, and keep_dims is False,
  379. the shape of output is :math:`(x_1, x_3, ..., x_R)`.
  380. - If axis is tuple(int), set as (2, 3), and keep_dims is False,
  381. the shape of output is :math:`(x_1, x_4, ..., x_R)`.
  382. Supported Platforms:
  383. ``Ascend`` ``GPU`` ``CPU``
  384. Examples:
  385. >>> input_x = Tensor(np.array([[True, False], [True, True]]))
  386. >>> op = ops.ReduceAll(keep_dims=True)
  387. >>> output = op(input_x, 1)
  388. >>> print(output)
  389. [[False]
  390. [ True]]
  391. """
  392. def __infer__(self, input_x, axis):
  393. return self.do_infer(input_x, axis, (mstype.bool_,))
  394. class ReduceAny(_Reduce):
  395. """
  396. Reduces a dimension of a tensor by the "logical OR" of all elements in the dimension.
  397. The dtype of the tensor to be reduced is bool.
  398. Args:
  399. keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
  400. If false, don't keep these dimensions.
  401. Default : False, don't keep these reduced dimensions.
  402. Inputs:
  403. - **input_x** (Tensor[bool]) - The input tensor.
  404. - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
  405. Only constant value is allowed. Must be in the range [-rank(input_x), rank(input_x)).
  406. Outputs:
  407. Tensor, the dtype is bool.
  408. - If axis is (), and keep_dims is False,
  409. the output is a 0-D tensor representing the "logical or" of all elements in the input tensor.
  410. - If axis is int, set as 2, and keep_dims is False,
  411. the shape of output is :math:`(x_1, x_3, ..., x_R)`.
  412. - If axis is tuple(int), set as (2, 3), and keep_dims is False,
  413. the shape of output is :math:`(x_1, x_4, ..., x_R)`.
  414. Supported Platforms:
  415. ``Ascend`` ``GPU`` ``CPU``
  416. Examples:
  417. >>> input_x = Tensor(np.array([[True, False], [True, True]]))
  418. >>> op = ops.ReduceAny(keep_dims=True)
  419. >>> output = op(input_x, 1)
  420. >>> print(output)
  421. [[ True]
  422. [ True]]
  423. """
  424. def __infer__(self, input_x, axis):
  425. return self.do_infer(input_x, axis, (mstype.bool_,))
  426. class ReduceMax(_Reduce):
  427. """
  428. Reduces a dimension of a tensor by the maximum value in this dimension.
  429. The dtype of the tensor to be reduced is number.
  430. Args:
  431. keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
  432. If false, don't keep these dimensions.
  433. Default : False, don't keep these reduced dimensions.
  434. Inputs:
  435. - **input_x** (Tensor[Number]) - The input tensor.
  436. - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
  437. Only constant value is allowed. Must be in the range [-rank(input_x), rank(input_x)).
  438. Outputs:
  439. Tensor, has the same dtype as the `input_x`.
  440. - If axis is (), and keep_dims is False,
  441. the output is a 0-D tensor representing the maximum of all elements in the input tensor.
  442. - If axis is int, set as 2, and keep_dims is False,
  443. the shape of output is :math:`(x_1, x_3, ..., x_R)`.
  444. - If axis is tuple(int), set as (2, 3), and keep_dims is False,
  445. the shape of output is :math:`(x_1, x_4, ..., x_R)`.
  446. Supported Platforms:
  447. ``Ascend`` ``GPU`` ``CPU``
  448. Examples:
  449. >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
  450. >>> op = ops.ReduceMax(keep_dims=True)
  451. >>> output = op(input_x, 1)
  452. >>> result = output.shape
  453. >>> print(result)
  454. (3, 1, 5, 6)
  455. """
  456. @prim_attr_register
  457. def __init__(self, keep_dims=False):
  458. """ReduceMax"""
  459. super(ReduceMax, self).__init__(keep_dims)
  460. self.__setattr_flag__ = True
  461. def __infer__(self, input_x, axis):
  462. return self.do_infer(input_x, axis, mstype.number_type + (mstype.bool_,))
class ReduceMin(_Reduce):
    """
    Reduces a dimension of a tensor by the minimum value in the dimension.

    The dtype of the tensor to be reduced is number.

    Args:
        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
            If false, don't keep these dimensions.
            Default : False, don't keep these reduced dimensions.

    Inputs:
        - **input_x** (Tensor[Number]) - The input tensor.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed. Must be in the range [-rank(input_x), rank(input_x)).

    Outputs:
        Tensor, has the same dtype as the `input_x`.

        - If axis is (), and keep_dims is False,
          the output is a 0-D tensor representing the minimum of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is False,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is False,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
        >>> op = ops.ReduceMin(keep_dims=True)
        >>> output = op(input_x, 1)
        >>> result = output.shape
        >>> print(result)
        (3, 1, 5, 6)
    """
class ReduceProd(_Reduce):
    """
    Reduces a dimension of a tensor by multiplying all elements in the dimension.

    The dtype of the tensor to be reduced is number.

    Args:
        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
            If false, don't keep these dimensions.
            Default : False, don't keep these reduced dimensions.

    Inputs:
        - **input_x** (Tensor[Number]) - The input tensor.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed. Must be in the range [-rank(input_x), rank(input_x)).

    Outputs:
        Tensor, has the same dtype as the `input_x`.

        - If axis is (), and keep_dims is False,
          the output is a 0-D tensor representing the product of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is False,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is False,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
        >>> op = ops.ReduceProd(keep_dims=True)
        >>> output = op(input_x, 1)
        >>> result = output.shape
        >>> print(result)
        (3, 1, 5, 6)
    """
  523. class CumProd(PrimitiveWithInfer):
  524. """
  525. Computes the cumulative product of the tensor x along axis.
  526. Args:
  527. exclusive (bool): If true, perform exclusive cumulative product. Default: False.
  528. reverse (bool): If true, reverse the result along axis. Default: False
  529. Inputs:
  530. - **input_x** (Tensor[Number]) - The input tensor.
  531. - **axis** (int) - The dimensions to compute the cumulative product.
  532. Only constant value is allowed.
  533. Outputs:
  534. Tensor, has the same shape and dtype as the `input_x`.
  535. Supported Platforms:
  536. ``Ascend``
  537. Examples:
  538. >>> a, b, c, = 1, 2, 3
  539. >>> input_x = Tensor(np.array([a, b, c]).astype(np.float32))
  540. >>> op0 = ops.CumProd()
  541. >>> output0 = op0(input_x, 0) # output=[a, a * b, a * b * c]
  542. >>> op1 = ops.CumProd(exclusive=True)
  543. >>> output1 = op1(input_x, 0) # output=[1, a, a * b]
  544. >>> op2 = ops.CumProd(reverse=True)
  545. >>> output2 = op2(input_x, 0) # output=[a * b * c, b * c, c]
  546. >>> op3 = ops.CumProd(exclusive=True, reverse=True)
  547. >>> output3 = op3(input_x, 0) # output=[b * c, c, 1]
  548. >>> print(output0)
  549. [1. 2. 6.]
  550. >>> print(output1)
  551. [1. 1. 2.]
  552. >>> print(output2)
  553. [6. 6. 3.]
  554. >>> print(output3)
  555. [6. 3. 1.]
  556. """
  557. @prim_attr_register
  558. def __init__(self, exclusive=False, reverse=False):
  559. cls_name = self.name
  560. self.exclusive = validator.check_value_type("exclusive", exclusive, [bool], cls_name)
  561. self.reverse = validator.check_value_type("reverse", reverse, [bool], cls_name)
  562. self.init_prim_io_names(inputs=['x', 'axis'], outputs=['y'])
  563. def infer_shape(self, x_shape, axis_shape):
  564. return x_shape
  565. def infer_dtype(self, x_type, axis_type):
  566. cls_name = self.name
  567. validator.check_tensor_dtype_valid('x', x_type, mstype.number_type, cls_name)
  568. validator.check_subclass("axis", axis_type, mstype.int_, cls_name)
  569. return x_type
  570. def infer_value(self, x, axis):
  571. if axis is None:
  572. raise ValueError(f"For {self.name}, axis must be const.")
  573. class MatMul(PrimitiveWithCheck):
  574. r"""
  575. Multiplies matrix `a` and matrix `b`.
  576. The rank of input tensors must equal to `2`.
  577. Args:
  578. transpose_a (bool): If true, `a` is transposed before multiplication. Default: False.
  579. transpose_b (bool): If true, `b` is transposed before multiplication. Default: False.
  580. Inputs:
  581. - **input_x** (Tensor) - The first tensor to be multiplied. The shape of the tensor is :math:`(N, C)`. If
  582. `transpose_a` is True, its shape must be :math:`(N, C)` after transpose.
  583. - **input_y** (Tensor) - The second tensor to be multiplied. The shape of the tensor is :math:`(C, M)`. If
  584. `transpose_b` is True, its shape must be :math:`(C, M)` after transpose.
  585. Outputs:
  586. Tensor, the shape of the output tensor is :math:`(N, M)`.
  587. Supported Platforms:
  588. ``Ascend`` ``GPU`` ``CPU``
  589. Examples:
  590. >>> input_x1 = Tensor(np.ones(shape=[1, 3]), mindspore.float32)
  591. >>> input_x2 = Tensor(np.ones(shape=[3, 4]), mindspore.float32)
  592. >>> matmul = ops.MatMul()
  593. >>> output = matmul(input_x1, input_x2)
  594. >>> print(output)
  595. [[3. 3. 3. 3.]]
  596. """
  597. @prim_attr_register
  598. def __init__(self, transpose_a=False, transpose_b=False):
  599. self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['output'])
  600. cls_name = self.name
  601. validator.check_value_type("transpose_a", transpose_a, [bool], cls_name)
  602. validator.check_value_type("transpose_b", transpose_b, [bool], cls_name)
  603. def check_shape_size(self, x1, x2):
  604. if len(x1) != 2 or len(x2) != 2:
  605. raise ValueError('P.MatMul inputs x1, x2 should have the same dimension size and '
  606. + f'equal to 2, while x1 size is ({len(x1)}) and x2 size is ({len(x2)}).')
  607. def check_shape(self, x1, x2):
  608. self.check_shape_size(x1, x2)
  609. cls_name = self.name
  610. # expected dimension of x, y, x:[...,a,b] y:[..., c,d], the dim size should be the same except the last two
  611. for i in range(len(x1) - 2):
  612. if x1[i] != x2[i]:
  613. raise ValueError(f'For \'{cls_name}\' shape in dim[{i}] not the same, '
  614. + f'while x1 is {x1[i]}, x2 is {x2[i]}')
  615. # validate whether last two dims satisfying matrix multiply
  616. x1_last = x1[-2:]
  617. x2_last = x2[-2:]
  618. x1_col = x1_last[not self.transpose_a]
  619. x2_row = x2_last[self.transpose_b]
  620. if np.all(np.array(x1) != -1) and np.all(np.array(x2) != -1):
  621. if x1_col != x2_row:
  622. raise ValueError(f'For \'{cls_name}\' evaluator shapes of inputs can not do this operator,'
  623. + f' got {x1_col} and {x2_row}, with x1 shape {x1}(transpose_a={self.transpose_a})'
  624. + f', x2 shape {x2}(transpose_b={self.transpose_b}).')
  625. # set attribute
  626. self.add_prim_attr('transpose_x1', self.transpose_a)
  627. self.add_prim_attr('transpose_x2', self.transpose_b)
  628. def check_dtype(self, x1, x2):
  629. args = {"x1": x1, "x2": x2}
  630. validator.check_tensors_dtypes_same_and_valid(args, mstype.float_type + mstype.int_type, self.name)
  631. class BatchMatMul(MatMul):
  632. """
  633. Computes matrix multiplication between two tensors by batch.
  634. .. math::
  635. \text{output}[..., :, :] = \text{matrix}(a[..., :, :]) * \text{matrix}(b[..., :, :])
  636. The two input tensors must have the same rank and the rank must be not less than `3`.
  637. Args:
  638. transpose_a (bool): If true, the last two dimensions of `a` is transposed before multiplication.
  639. Default: False.
  640. transpose_b (bool): If true, the last two dimensions of `b` is transposed before multiplication.
  641. Default: False.
  642. Inputs:
  643. - **input_x** (Tensor) - The first tensor to be multiplied. The shape of the tensor is :math:`(*B, N, C)`,
  644. where :math:`*B` represents the batch size which can be multidimensional, :math:`N` and :math:`C` are the
  645. size of the last two dimensions. If `transpose_a` is True, its shape must be :math:`(*B, C, N)`.
  646. - **input_y** (Tensor) - The second tensor to be multiplied. The shape of the tensor is :math:`(*B, C, M)`. If
  647. `transpose_b` is True, its shape must be :math:`(*B, M, C)`.
  648. Outputs:
  649. Tensor, the shape of the output tensor is :math:`(*B, N, M)`.
  650. Supported Platforms:
  651. ``Ascend`` ``GPU`` ``CPU``
  652. Examples:
  653. >>> input_x = Tensor(np.ones(shape=[2, 4, 1, 3]), mindspore.float32)
  654. >>> input_y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
  655. >>> batmatmul = ops.BatchMatMul()
  656. >>> output = batmatmul(input_x, input_y)
  657. >>> print(output)
  658. [[[[3. 3. 3. 3.]]
  659. [[3. 3. 3. 3.]]
  660. [[3. 3. 3. 3.]]
  661. [[3. 3. 3. 3.]]],
  662. [[[3. 3. 3. 3.]]
  663. [[3. 3. 3. 3.]]
  664. [[3. 3. 3. 3.]]
  665. [[3. 3. 3. 3.]]]]
  666. >>>
  667. >>> input_x = Tensor(np.ones(shape=[2, 4, 3, 1]), mindspore.float32)
  668. >>> input_y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
  669. >>> batmatmul = ops.BatchMatMul(transpose_a=True)
  670. >>> output = batmatmul(input_x, input_y)
  671. >>> print(output)
  672. [[[[3. 3. 3. 3.]]
  673. [[3. 3. 3. 3.]]
  674. [[3. 3. 3. 3.]]
  675. [[3. 3. 3. 3.]]],
  676. [[[3. 3. 3. 3.]]
  677. [[3. 3. 3. 3.]]
  678. [[3. 3. 3. 3.]]
  679. [[3. 3. 3. 3.]]]]
  680. """
  681. @prim_attr_register
  682. def __init__(self, transpose_a=False, transpose_b=False):
  683. self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['output'])
  684. cls_name = self.name
  685. validator.check_value_type("transpose_a", transpose_a, [bool], cls_name)
  686. validator.check_value_type("transpose_b", transpose_b, [bool], cls_name)
  687. def check_shape_size(self, x, y):
  688. if len(x) != len(y) or len(x) < 3:
  689. raise ValueError('For \'BatchMatMul\' input x, y should be the same dimension size and should be '
  690. 'greater or equal to 3,' + f' while x size = {len(x)}, y size= {len(y)}')
  691. class CumSum(PrimitiveWithInfer):
  692. """
  693. Computes the cumulative sum of input tensor along axis.
  694. .. math::
  695. y_i = x_1 + x_2 + x_3 + ... + x_i
  696. Args:
  697. exclusive (bool): If true, perform exclusive mode. Default: False.
  698. reverse (bool): If true, perform inverse cumulative sum. Default: False.
  699. Inputs:
  700. - **input** (Tensor) - The input tensor to accumulate.
  701. - **axis** (int) - The axis to accumulate the tensor's value. Only constant value is allowed.
  702. Must be in the range [-rank(input), rank(input)).
  703. Outputs:
  704. Tensor, the shape of the output tensor is consistent with the input tensor's.
  705. Supported Platforms:
  706. ``Ascend`` ``GPU`` ``CPU``
  707. Examples:
  708. >>> input = Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))
  709. >>> cumsum = ops.CumSum()
  710. >>> output = cumsum(input, 1)
  711. >>> print(output)
  712. [[ 3. 7. 13. 23.]
  713. [ 1. 7. 14. 23.]
  714. [ 4. 7. 15. 22.]
  715. [ 1. 4. 11. 20.]]
  716. """
  717. @prim_attr_register
  718. def __init__(self, exclusive=False, reverse=False):
  719. """Initialize cumsum"""
  720. cls_name = self.name
  721. validator.check_value_type('exclusive', exclusive, [bool], cls_name)
  722. validator.check_value_type('reverse', reverse, [bool], cls_name)
  723. self.init_prim_io_names(inputs=['x', 'axis'], outputs=['y'])
  724. def __infer__(self, x, axis):
  725. cls_name = self.name
  726. x_shp = x['shape']
  727. if axis['value'] is None:
  728. raise ValueError(f"For {self.name}, axis must be const.")
  729. validator.check_value_type('axis', axis['value'], [int], cls_name)
  730. valid_dtypes = [mstype.uint8, mstype.int8, mstype.int32, mstype.float16, mstype.float32]
  731. validator.check_tensor_dtype_valid('x', x['dtype'], valid_dtypes, cls_name)
  732. return {'shape': x_shp,
  733. 'dtype': x['dtype'],
  734. 'value': None}
  735. class AddN(PrimitiveWithInfer):
  736. """
  737. Computes addition of all input tensors element-wise.
  738. All input tensors must have the same shape.
  739. Inputs:
  740. - **input_x** (Union(tuple[Tensor], list[Tensor])) - The input tuple or list
  741. is made up of multiple tensors whose dtype is number or bool to be added together.
  742. Outputs:
  743. Tensor, has the same shape and dtype as each entry of the `input_x`.
  744. Supported Platforms:
  745. ``Ascend`` ``GPU`` ``CPU``
  746. Examples:
  747. >>> class NetAddN(nn.Cell):
  748. ... def __init__(self):
  749. ... super(NetAddN, self).__init__()
  750. ... self.addN = ops.AddN()
  751. ...
  752. ... def construct(self, *z):
  753. ... return self.addN(z)
  754. ...
  755. >>> net = NetAddN()
  756. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32)
  757. >>> input_y = Tensor(np.array([4, 5, 6]), mindspore.float32)
  758. >>> output = net(input_x, input_y, input_x, input_y)
  759. >>> print(output)
  760. [10. 14. 18.]
  761. """
  762. @prim_attr_register
  763. def __init__(self):
  764. self.init_prim_io_names(inputs=["inputs"], outputs=["sum"])
  765. def check_elim(self, inputs):
  766. if len(inputs) != 1:
  767. return (False, None)
  768. if isinstance(inputs[0], Tensor):
  769. return (True, inputs[0])
  770. raise TypeError("Expecting Tensor, got : {}".format(type(inputs[0])))
  771. def infer_shape(self, inputs):
  772. cls_name = self.name
  773. validator.check_int(len(inputs), 1, Rel.GE, "inputs", cls_name)
  774. self.add_prim_attr('n', len(inputs))
  775. shp0 = inputs[0]
  776. for i, shp in enumerate(inputs):
  777. validator.check(f"shape of inputs[{i}]", shp, 'shape of inputs[0]', shp0, Rel.EQ, cls_name)
  778. return shp0
  779. def infer_dtype(self, inputs):
  780. cls_name = self.name
  781. validator.check_value_type("inputs", inputs, [tuple, list], cls_name)
  782. validator.check_int(len(inputs), 1, Rel.GE, "inputs", cls_name)
  783. args = {}
  784. contains_undetermined = False
  785. for i, dtype in enumerate(inputs):
  786. args[f"inputs[{i}]"] = dtype
  787. if dtype == mstype.undetermined:
  788. contains_undetermined = True
  789. if not contains_undetermined:
  790. validator.check_tensors_dtypes_same_and_valid(args, mstype.number_type + (mstype.bool_,), cls_name)
  791. return inputs[0]
  792. def infer_value(self, inputs):
  793. if inputs is None:
  794. return None
  795. for x in inputs:
  796. if x is None:
  797. return None
  798. added = copy.deepcopy(inputs[0].asnumpy())
  799. for x in inputs[1:]:
  800. added += x.asnumpy()
  801. out = np.array(added, inputs[0].asnumpy().dtype)
  802. return Tensor(out)
  803. class AccumulateNV2(PrimitiveWithInfer):
  804. """
  805. Computes accumulation of all input tensors element-wise.
  806. AccumulateNV2 is similar to AddN, but there is a significant difference
  807. among them: AccumulateNV2 will not wait for all of its inputs to be ready
  808. before summing. That is to say, AccumulateNV2 is able to save
  809. memory when inputs are ready at different time since the minimum temporary
  810. storage is proportional to the output size rather than the input size.
  811. Inputs:
  812. - **input_x** (Union(tuple[Tensor], list[Tensor])) - The input tuple or list
  813. is made up of multiple tensors whose dtype is number to be added together.
  814. Outputs:
  815. Tensor, has the same shape and dtype as each entry of the `input_x`.
  816. Supported Platforms:
  817. ``Ascend``
  818. Examples:
  819. >>> class NetAccumulateNV2(nn.Cell):
  820. ... def __init__(self):
  821. ... super(NetAccumulateNV2, self).__init__()
  822. ... self.accumulateNV2 = ops.AccumulateNV2()
  823. ...
  824. ... def construct(self, *z):
  825. ... return self.accumulateNV2(z)
  826. ...
  827. >>> net = NetAccumulateNV2()
  828. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32)
  829. >>> input_y = Tensor(np.array([4, 5, 6]), mindspore.float32)
  830. >>> output = net(input_x, input_y, input_x, input_y)
  831. >>> print(output)
  832. [10. 14. 18.]
  833. """
  834. @prim_attr_register
  835. def __init__(self):
  836. self.__setattr_flag__ = True
  837. self.init_prim_io_names(inputs=["inputs"], outputs=["sum"])
  838. def check_elim(self, inputs):
  839. if len(inputs) != 1:
  840. return (False, None)
  841. if isinstance(inputs[0], Tensor):
  842. return (True, inputs[0])
  843. raise TypeError("Expecting Tensor, got : {}".format(type(inputs[0])))
  844. def infer_shape(self, inputs):
  845. cls_name = self.name
  846. validator.check_int(len(inputs), 1, Rel.GE, "inputs", cls_name)
  847. self.add_prim_attr('n', len(inputs))
  848. shp0 = inputs[0]
  849. for i, shp in enumerate(inputs):
  850. validator.check(f"shape of inputs[{i}]", shp, 'shape of inputs[0]', shp0, Rel.EQ, cls_name)
  851. return shp0
  852. def infer_dtype(self, inputs):
  853. cls_name = self.name
  854. validator.check_value_type("inputs", inputs, [tuple, list], cls_name)
  855. validator.check_int(len(inputs), 1, Rel.GE, "inputs", cls_name)
  856. args = {}
  857. for i, dtype in enumerate(inputs):
  858. args[f"inputs[{i}]"] = dtype
  859. validator.check_tensors_dtypes_same_and_valid(args, mstype.number_type + (mstype.bool_,), cls_name)
  860. return inputs[0]
  861. class Neg(PrimitiveWithInfer):
  862. """
  863. Returns a tensor with negative values of the input tensor element-wise.
  864. Inputs:
  865. - **input_x** (Tensor) - The input tensor whose dtype is number.
  866. Outputs:
  867. Tensor, has the same shape and dtype as input.
  868. Supported Platforms:
  869. ``Ascend`` ``GPU`` ``CPU``
  870. Examples:
  871. >>> neg = ops.Neg()
  872. >>> input_x = Tensor(np.array([1, 2, -1, 2, 0, -3.5]), mindspore.float32)
  873. >>> output = neg(input_x)
  874. >>> print(output)
  875. [-1. -2. 1. -2. 0. 3.5]
  876. """
  877. @prim_attr_register
  878. def __init__(self):
  879. """Initialize Neg"""
  880. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  881. def infer_shape(self, x_shape):
  882. return x_shape
  883. def infer_dtype(self, x_dtype):
  884. validator.check_tensor_dtype_valid("x", x_dtype, mstype.number_type, self.name)
  885. return x_dtype
  886. def infer_value(self, input_x):
  887. if input_x is not None:
  888. input_x = input_x.asnumpy()
  889. out = np.array(-input_x, input_x.dtype)
  890. return Tensor(out)
  891. return None
  892. class InplaceAdd(PrimitiveWithInfer):
  893. """
  894. Adds v into specified rows of x. Computes y = x; y[i,] += v.
  895. Args:
  896. indices (Union[int, tuple]): Indices into the left-most dimension of x, and determines which rows of x
  897. to add with v. It is an integer or a tuple, whose value is in [0, the first dimension size of x).
  898. Inputs:
  899. - **input_x** (Tensor) - The first input is a tensor whose data type is float16, float32 or int32.
  900. - **input_v** (Tensor) - The second input is a tensor that has the same dimension sizes as x except
  901. the first dimension, which must be the same as indices's size. It has the same data type with `input_x`.
  902. Outputs:
  903. Tensor, has the same shape and dtype as input_x.
  904. Supported Platforms:
  905. ``Ascend``
  906. Examples:
  907. >>> indices = (0, 1)
  908. >>> input_x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
  909. >>> input_v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
  910. >>> inplaceAdd = ops.InplaceAdd(indices)
  911. >>> output = inplaceAdd(input_x, input_v)
  912. >>> print(output)
  913. [[1.5 3. ]
  914. [4. 5.5]
  915. [5. 6. ]]
  916. """
  917. @prim_attr_register
  918. def __init__(self, indices):
  919. """Initialize InplaceAdd"""
  920. self.init_prim_io_names(inputs=['x', 'v'], outputs=['y'])
  921. self.indices = indices
  922. validator.check_value_type('indices', indices, [tuple, int], self.name)
  923. if isinstance(indices, int):
  924. self.indices = (indices,)
  925. for item in self.indices:
  926. validator.check_value_type("item of indices", item, [int], self.name)
  927. def infer_dtype(self, x_dtype, v_dtype):
  928. args = {'x': x_dtype, 'v': v_dtype}
  929. valid_type = [mstype.int32, mstype.float16, mstype.float32]
  930. validator.check_tensors_dtypes_same_and_valid(args, valid_type, self.name)
  931. return x_dtype
  932. def infer_shape(self, x_shape, v_shape):
  933. validator.check("x", len(x_shape), "v", len(v_shape), Rel.EQ, self.name)
  934. validator.check("size of indices", len(self.indices), "v's first dimension", v_shape[0],
  935. Rel.EQ, self.name)
  936. for i in self.indices:
  937. if i < 0 or i >= x_shape[0]:
  938. raise ValueError(f'The value of indices must be in [0, {x_shape[0]}), but got {i}.')
  939. x_rank = len(x_shape)
  940. for idx in range(x_rank)[1:]:
  941. validator.check('v dim %d' % idx, v_shape[idx], "x dim %d" % idx, x_shape[idx], Rel.EQ, self.name)
  942. return x_shape
  943. class InplaceSub(PrimitiveWithInfer):
  944. """
  945. Subtracts v into specified rows of x. Computes y = x; y[i, :] -= v.
  946. Args:
  947. indices (Union[int, tuple]): Indices into the left-most dimension of x, and determines which rows of x
  948. to subtract with v. It is a int or tuple, whose value is in [0, the first dimension size of x).
  949. Inputs:
  950. - **input_x** (Tensor) - The first input is a tensor whose data type is float16, float32 or int32.
  951. - **input_v** (Tensor) - The second input is a tensor who has the same dimension sizes as x except
  952. the first dimension, which must be the same as indices's size. It has the same data type with `input_x`.
  953. Outputs:
  954. Tensor, has the same shape and dtype as input_x.
  955. Supported Platforms:
  956. ``Ascend``
  957. Examples:
  958. >>> indices = (0, 1)
  959. >>> input_x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
  960. >>> input_v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
  961. >>> inplaceSub = ops.InplaceSub(indices)
  962. >>> output = inplaceSub(input_x, input_v)
  963. >>> print(output)
  964. [[0.5 1. ]
  965. [2. 2.5]
  966. [5. 6. ]]
  967. """
  968. @prim_attr_register
  969. def __init__(self, indices):
  970. """Initialize InplaceSub"""
  971. self.init_prim_io_names(inputs=['x', 'v'], outputs=['y'])
  972. self.indices = indices
  973. validator.check_value_type('indices', indices, [tuple, int], self.name)
  974. if isinstance(indices, int):
  975. self.indices = (indices,)
  976. for item in self.indices:
  977. validator.check_value_type("item of indices", item, [int], self.name)
  978. def infer_dtype(self, x_dtype, v_dtype):
  979. args = {'x': x_dtype, 'v': v_dtype}
  980. valid_type = [mstype.int32, mstype.float16, mstype.float32]
  981. validator.check_tensors_dtypes_same_and_valid(args, valid_type, self.name)
  982. return x_dtype
  983. def infer_shape(self, x_shape, v_shape):
  984. validator.check("x", len(x_shape), "v", len(v_shape), Rel.EQ, self.name)
  985. validator.check("size of indices", len(self.indices), "v's first dimension", v_shape[0],
  986. Rel.EQ, self.name)
  987. for i in self.indices:
  988. if i < 0 or i >= x_shape[0]:
  989. raise ValueError(f'The value of indices must be in [0, {x_shape[0]}), but got {i}.')
  990. x_rank = len(x_shape)
  991. for idx in range(x_rank)[1:]:
  992. validator.check('v dim %d' % idx, v_shape[idx], "x dim %d" % idx, x_shape[idx], Rel.EQ, self.name)
  993. return x_shape
  994. class Sub(_MathBinaryOp):
  995. """
  996. Subtracts the second input tensor from the first input tensor element-wise.
  997. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  998. The inputs must be two tensors or one tensor and one scalar.
  999. When the inputs are two tensors,
  1000. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1001. When the inputs are one tensor and one scalar,
  1002. the scalar could only be a constant.
  1003. Inputs:
  1004. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
  1005. or a tensor whose data type is number or bool.
  1006. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number, or a bool when the first input
  1007. is a tensor, or a tensor whose data type is number or bool.
  1008. Outputs:
  1009. Tensor, the shape is the same as the one after broadcasting,
  1010. and the data type is the one with higher precision or higher digits among the two inputs.
  1011. Supported Platforms:
  1012. ``Ascend`` ``GPU`` ``CPU``
  1013. Examples:
  1014. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  1015. >>> input_y = Tensor(np.array([4, 5, 6]), mindspore.int32)
  1016. >>> sub = ops.Sub()
  1017. >>> output = sub(input_x, input_y)
  1018. >>> print(output)
  1019. [-3 -3 -3]
  1020. """
  1021. def infer_value(self, x, y):
  1022. if x is not None and y is not None:
  1023. x = x.asnumpy()
  1024. y = y.asnumpy()
  1025. out = x - y
  1026. out = np.array(out, x.dtype)
  1027. return Tensor(out)
  1028. return None
  1029. class Mul(_MathBinaryOp):
  1030. """
  1031. Multiplies two tensors element-wise.
  1032. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1033. The inputs must be two tensors or one tensor and one scalar.
  1034. When the inputs are two tensors,
  1035. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1036. When the inputs are one tensor and one scalar,
  1037. the scalar could only be a constant.
  1038. .. math::
  1039. out_{i} = x_{i} * y_{i}
  1040. Inputs:
  1041. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1042. a bool or a tensor whose data type is number or bool.
  1043. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1044. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1045. Outputs:
  1046. Tensor, the shape is the same as the one after broadcasting,
  1047. and the data type is the one with higher precision or higher digits among the two inputs.
  1048. Supported Platforms:
  1049. ``Ascend`` ``GPU`` ``CPU``
  1050. Examples:
  1051. >>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
  1052. >>> input_y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
  1053. >>> mul = ops.Mul()
  1054. >>> output = mul(input_x, input_y)
  1055. >>> print(output)
  1056. [ 4. 10. 18.]
  1057. """
  1058. def infer_value(self, x, y):
  1059. if x is not None and y is not None:
  1060. x = x.asnumpy()
  1061. y = y.asnumpy()
  1062. out = x * y
  1063. out = np.array(out, x.dtype)
  1064. return Tensor(out)
  1065. return None
  1066. class SquaredDifference(_MathBinaryOp):
  1067. """
  1068. Subtracts the second input tensor from the first input tensor element-wise and returns square of it.
  1069. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1070. The inputs must be two tensors or one tensor and one scalar.
  1071. When the inputs are two tensors,
  1072. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1073. When the inputs are one tensor and one scalar,
  1074. the scalar could only be a constant.
  1075. Inputs:
  1076. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
  1077. or a tensor whose data type is float16, float32, int32 or bool.
  1078. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number, or a bool when the first input
  1079. is a tensor or a tensor whose data type is float16, float32, int32 or bool.
  1080. Outputs:
  1081. Tensor, the shape is the same as the one after broadcasting,
  1082. and the data type is the one with higher precision or higher digits among the two inputs.
  1083. Supported Platforms:
  1084. ``Ascend`` ``GPU`` ``CPU``
  1085. Examples:
  1086. >>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
  1087. >>> input_y = Tensor(np.array([2.0, 4.0, 6.0]), mindspore.float32)
  1088. >>> squared_difference = ops.SquaredDifference()
  1089. >>> output = squared_difference(input_x, input_y)
  1090. >>> print(output)
  1091. [1. 4. 9.]
  1092. """
  1093. def infer_dtype(self, x_dtype, y_dtype):
  1094. valid_type = [mstype.float16, mstype.float32, mstype.int32]
  1095. return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, valid_type, self.name)
  1096. class Square(PrimitiveWithCheck):
  1097. """
  1098. Returns square of a tensor element-wise.
  1099. Inputs:
  1100. - **input_x** (Tensor) - The input tensor whose dtype is number.
  1101. Outputs:
  1102. Tensor, has the same shape and dtype as the `input_x`.
  1103. Supported Platforms:
  1104. ``Ascend`` ``GPU`` ``CPU``
  1105. Examples:
  1106. >>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
  1107. >>> square = ops.Square()
  1108. >>> output = square(input_x)
  1109. >>> print(output)
  1110. [1. 4. 9.]
  1111. """
  1112. @prim_attr_register
  1113. def __init__(self):
  1114. """Initialize Square"""
  1115. self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
  1116. def __check__(self, x):
  1117. x_dtype = x["dtype"]
  1118. validator.check_tensor_dtype_valid("x", x_dtype, mstype.number_type, self.name)
  1119. def infer_value(self, x):
  1120. if x is not None:
  1121. x = x.asnumpy()
  1122. out = x * x
  1123. out = np.array(out, x.dtype)
  1124. return Tensor(out)
  1125. return None
  1126. class Rsqrt(PrimitiveWithInfer):
  1127. """
  1128. Computes reciprocal of square root of input tensor element-wise.
  1129. Inputs:
  1130. - **input_x** (Tensor) - The input of Rsqrt. Each element must be a non-negative number.
  1131. Outputs:
  1132. Tensor, has the same type and shape as `input_x`.
  1133. Supported Platforms:
  1134. ``Ascend`` ``GPU``
  1135. Examples:
  1136. >>> input_tensor = Tensor([[4, 4], [9, 9]], mindspore.float32)
  1137. >>> rsqrt = ops.Rsqrt()
  1138. >>> output = rsqrt(input_tensor)
  1139. >>> print(output)
  1140. [[0.5 0.5 ]
  1141. [0.33333334 0.33333334]]
  1142. """
  1143. @prim_attr_register
  1144. def __init__(self):
  1145. """Initialize Rsqrt"""
  1146. self.init_prim_io_names(inputs=['x'], outputs=['output'])
  1147. def infer_shape(self, x_shape):
  1148. return x_shape
  1149. def infer_dtype(self, x_dtype):
  1150. validator.check_tensor_dtype_valid("x", x_dtype, mstype.number_type, self.name)
  1151. return x_dtype
  1152. def infer_value(self, x):
  1153. if x is not None:
  1154. x = x.asnumpy()
  1155. out = 1.0 / np.sqrt(x)
  1156. out = np.array(out, x.dtype)
  1157. return Tensor(out)
  1158. return None
  1159. class Sqrt(PrimitiveWithCheck):
  1160. """
  1161. Returns square root of a tensor element-wise.
  1162. Inputs:
  1163. - **input_x** (Tensor) - The input tensor whose dtype is number.
  1164. Outputs:
  1165. Tensor, has the same shape as the `input_x`.
  1166. Supported Platforms:
  1167. ``Ascend`` ``GPU`` ``CPU``
  1168. Examples:
  1169. >>> input_x = Tensor(np.array([1.0, 4.0, 9.0]), mindspore.float32)
  1170. >>> sqrt = ops.Sqrt()
  1171. >>> output = sqrt(input_x)
  1172. >>> print(output)
  1173. [1. 2. 3.]
  1174. """
  1175. @prim_attr_register
  1176. def __init__(self):
  1177. """Initialize Sqrt"""
  1178. self.init_prim_io_names(inputs=['x'], outputs=['output'])
  1179. def check_dtype(self, x_type):
  1180. validator.check_tensor_dtype_valid("x", x_type, mstype.number_type, self.name)
  1181. def infer_value(self, x):
  1182. if x is not None:
  1183. x = x.asnumpy()
  1184. out = np.sqrt(x)
  1185. out = np.array(out, x.dtype)
  1186. return Tensor(out)
  1187. return None
  1188. class Reciprocal(PrimitiveWithInfer):
  1189. """
  1190. Returns reciprocal of a tensor element-wise.
  1191. Inputs:
  1192. - **input_x** (Tensor) - The input tensor.
  1193. Outputs:
  1194. Tensor, has the same shape as the `input_x`.
  1195. Supported Platforms:
  1196. ``Ascend`` ``GPU`` ``CPU``
  1197. Examples:
  1198. >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  1199. >>> reciprocal = ops.Reciprocal()
  1200. >>> output = reciprocal(input_x)
  1201. >>> print(output)
  1202. [1. 0.5 0.25]
  1203. """
  1204. @prim_attr_register
  1205. def __init__(self):
  1206. """Initialize Reciprocal"""
  1207. if context.get_context("device_target") == "GPU":
  1208. self.target = "GPU"
  1209. else:
  1210. self.target = "OTHER"
  1211. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1212. def infer_shape(self, x):
  1213. return x
  1214. def infer_dtype(self, x):
  1215. validator.check_subclass("x", x, mstype.tensor, self.name)
  1216. return x
  1217. def infer_value(self, x):
  1218. if x is not None:
  1219. x = x.asnumpy()
  1220. out = 1.0 / x
  1221. out = np.array(out, x.dtype)
  1222. return Tensor(out)
  1223. return None
  1224. class Pow(_MathBinaryOp):
  1225. """
  1226. Computes a tensor to the power of the second input.
  1227. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1228. The inputs must be two tensors or one tensor and one scalar.
  1229. When the inputs are two tensors,
  1230. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1231. When the inputs are one tensor and one scalar,
  1232. the scalar could only be a constant.
  1233. Inputs:
  1234. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1235. a bool or a tensor whose data type is number or bool.
  1236. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1237. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1238. Outputs:
  1239. Tensor, the shape is the same as the one after broadcasting,
  1240. and the data type is the one with higher precision or higher digits among the two inputs.
  1241. Supported Platforms:
  1242. ``Ascend`` ``GPU`` ``CPU``
  1243. Examples:
  1244. >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  1245. >>> input_y = 3.0
  1246. >>> pow = ops.Pow()
  1247. >>> output = pow(input_x, input_y)
  1248. >>> print(output)
  1249. [ 1. 8. 64.]
  1250. >>>
  1251. >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  1252. >>> input_y = Tensor(np.array([2.0, 4.0, 3.0]), mindspore.float32)
  1253. >>> pow = ops.Pow()
  1254. >>> output = pow(input_x, input_y)
  1255. >>> print(output)
  1256. [ 1. 16. 64.]
  1257. """
  1258. def infer_value(self, x, power):
  1259. if x is not None and power is not None:
  1260. x = x.asnumpy()
  1261. power = power.asnumpy()
  1262. out = np.power(x, power)
  1263. out = np.array(out, x.dtype)
  1264. return Tensor(out)
  1265. return None
  1266. class Exp(PrimitiveWithInfer):
  1267. r"""
  1268. Returns exponential of a tensor element-wise.
  1269. .. math::
  1270. out_i = e^{x_i}
  1271. Inputs:
  1272. - **input_x** (Tensor) - The input tensor. The data type mast be float16 or float32.
  1273. Outputs:
  1274. Tensor, has the same shape and dtype as the `input_x`.
  1275. Supported Platforms:
  1276. ``Ascend`` ``GPU`` ``CPU``
  1277. Examples:
  1278. >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  1279. >>> exp = ops.Exp()
  1280. >>> output = exp(input_x)
  1281. >>> print(output)
  1282. [ 2.718282 7.389056 54.598152]
  1283. """
  1284. @prim_attr_register
  1285. def __init__(self):
  1286. """Initialize Exp"""
  1287. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1288. def infer_shape(self, x_shape):
  1289. return x_shape
  1290. def infer_dtype(self, x_type):
  1291. validator.check_subclass("x", x_type, mstype.tensor, self.name)
  1292. return x_type
  1293. def infer_value(self, x):
  1294. if x is not None:
  1295. x = x.asnumpy()
  1296. out = np.exp(x)
  1297. out = np.array(out, x.dtype)
  1298. return Tensor(out)
  1299. return None
  1300. class Expm1(PrimitiveWithInfer):
  1301. r"""
  1302. Returns exponential then minus 1 of a tensor element-wise.
  1303. .. math::
  1304. out_i = e^{x_i} - 1
  1305. Inputs:
  1306. - **input_x** (Tensor) - The input tensor. With float16 or float32 data type.
  1307. Outputs:
  1308. Tensor, has the same shape as the `input_x`.
  1309. Supported Platforms:
  1310. ``Ascend`` ``GPU`` ``CPU``
  1311. Examples:
  1312. >>> input_x = Tensor(np.array([0.0, 1.0, 2.0, 4.0]), mindspore.float32)
  1313. >>> expm1 = ops.Expm1()
  1314. >>> output = expm1(input_x)
  1315. >>> print(output)
  1316. [ 0. 1.718282 6.389056 53.598152]
  1317. """
  1318. @prim_attr_register
  1319. def __init__(self):
  1320. """Initialize Exp"""
  1321. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1322. def infer_shape(self, x_shape):
  1323. return x_shape
  1324. def infer_dtype(self, x_type):
  1325. validator.check_tensor_dtype_valid("x", x_type, [mstype.float16, mstype.float32], self.name)
  1326. return x_type
  1327. class HistogramFixedWidth(PrimitiveWithInfer):
  1328. """
  1329. Returns a rank 1 histogram counting the number of entries in values that fall into every bin. The bins are equal
  1330. width and determined by the arguments range and nbins.
  1331. Args:
  1332. dtype (str): An optional attribute. Must be one of the following types: "int32", "int64". Default: "int32".
  1333. nbins (int): The number of histogram bins, the type is a positive integer.
  1334. Inputs:
  1335. - **x** (Tensor) - Numeric Tensor. Must be one of the following types: int32, float32, float16.
  1336. - **range** (Tensor) - Must has the same data type as `x`, and the shape is [2].
  1337. x <= range[0] will be mapped to hist[0], x >= range[1] will be mapped to hist[-1].
  1338. Outputs:
  1339. Tensor, the type is int32.
  1340. Supported Platforms:
  1341. ``Ascend``
  1342. Examples:
  1343. >>> x = Tensor([-1.0, 0.0, 1.5, 2.0, 5.0, 15], mindspore.float16)
  1344. >>> range = Tensor([0.0, 5.0], mindspore.float16)
  1345. >>> hist = ops.HistogramFixedWidth(5)
  1346. >>> output = hist(x, range)
  1347. >>> print(output)
  1348. [2 1 1 0 2]
  1349. """
  1350. @prim_attr_register
  1351. def __init__(self, nbins, dtype='int32'):
  1352. self.nbins = validator.check_value_type("nbins", nbins, [int], self.name)
  1353. validator.check_int(nbins, 1, Rel.GE, "nbins", self.name)
  1354. valid_values = ['int32', 'int64']
  1355. self.dtype = validator.check_string(dtype, valid_values, "dtype", self.name)
  1356. self.init_prim_io_names(inputs=['x', 'range'], outputs=['y'])
  1357. def infer_shape(self, x_shape, range_shape):
  1358. return (self.nbins,)
  1359. def infer_dtype(self, x_dtype, range_dtype):
  1360. valid_dtypes = (mstype.float16, mstype.float32, mstype.int32)
  1361. validator.check_tensor_dtype_valid("x", x_dtype, valid_dtypes, self.name)
  1362. validator.check_tensor_dtype_valid("range", range_dtype, valid_dtypes, self.name)
  1363. y_dtype = mstype.int32
  1364. return y_dtype
  1365. class Log(PrimitiveWithInfer):
  1366. """
  1367. Returns the natural logarithm of a tensor element-wise.
  1368. Inputs:
  1369. - **input_x** (Tensor) - The input tensor. With float16 or float32 data type. The value must be greater than 0.
  1370. Outputs:
  1371. Tensor, has the same shape as the `input_x`.
  1372. Supported Platforms:
  1373. ``Ascend`` ``GPU`` ``CPU``
  1374. Examples:
  1375. >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  1376. >>> log = ops.Log()
  1377. >>> output = log(input_x)
  1378. >>> print(output)
  1379. [0. 0.6931472 1.3862944]
  1380. """
  1381. @prim_attr_register
  1382. def __init__(self):
  1383. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1384. def infer_shape(self, x):
  1385. return x
  1386. def infer_dtype(self, x):
  1387. validator.check_subclass("x", x, mstype.tensor, self.name)
  1388. return x
  1389. def infer_value(self, x):
  1390. if x is not None:
  1391. x = x.asnumpy()
  1392. out = np.log(x)
  1393. out = np.array(out, x.dtype)
  1394. return Tensor(out)
  1395. return None
  1396. class Log1p(PrimitiveWithInfer):
  1397. """
  1398. Returns the natural logarithm of one plus the input tensor element-wise.
  1399. Inputs:
  1400. - **input_x** (Tensor) - The input tensor. With float16 or float32 data type. The value must be greater than -1.
  1401. Outputs:
  1402. Tensor, has the same shape as the `input_x`.
  1403. Supported Platforms:
  1404. ``Ascend``
  1405. Examples:
  1406. >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  1407. >>> log1p = ops.Log1p()
  1408. >>> output = log1p(input_x)
  1409. >>> print(output)
  1410. [0.6931472 1.0986123 1.609438 ]
  1411. """
  1412. @prim_attr_register
  1413. def __init__(self):
  1414. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1415. def infer_shape(self, x_shape):
  1416. return x_shape
  1417. def infer_dtype(self, x_dtype):
  1418. validator.check_subclass("x", x_dtype, mstype.tensor, self.name)
  1419. validator.check_tensor_dtype_valid("x", x_dtype, [mstype.float16, mstype.float32], self.name)
  1420. return x_dtype
  1421. class Erf(PrimitiveWithInfer):
  1422. r"""
  1423. Computes the Gauss error function of `input_x` element-wise.
  1424. .. math::
  1425. erf(x)=\frac{2} {\sqrt{\pi}} \int\limits_0^{x} e^{-t^{2}} dt
  1426. Inputs:
  1427. - **input_x** (Tensor) - The input tensor. The data type must be float16 or float32.
  1428. Outputs:
  1429. Tensor, has the same shape and dtype as the `input_x`.
  1430. Supported Platforms:
  1431. ``Ascend``
  1432. Examples:
  1433. >>> input_x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
  1434. >>> erf = ops.Erf()
  1435. >>> output = erf(input_x)
  1436. >>> print(output)
  1437. [-0.8427168 0. 0.8427168 0.99530876 0.99997765]
  1438. """
  1439. @prim_attr_register
  1440. def __init__(self):
  1441. """Initialize Erf"""
  1442. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1443. def infer_shape(self, x_shape):
  1444. return x_shape
  1445. def infer_dtype(self, x_dtype):
  1446. validator.check_tensor_dtype_valid("x", x_dtype, [mstype.float16, mstype.float32], self.name)
  1447. return x_dtype
  1448. class Erfc(PrimitiveWithInfer):
  1449. r"""
  1450. Computes the complementary error function of `input_x` element-wise.
  1451. .. math::
  1452. erfc(x) = 1 - \frac{2} {\sqrt{\pi}} \int\limits_0^{x} e^{-t^{2}} dt
  1453. Inputs:
  1454. - **input_x** (Tensor) - The input tensor. The data type must be float16 or float32.
  1455. Outputs:
  1456. Tensor, has the same shape and dtype as the `input_x`.
  1457. Supported Platforms:
  1458. ``Ascend``
  1459. Examples:
  1460. >>> input_x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
  1461. >>> erfc = ops.Erfc()
  1462. >>> output = erfc(input_x)
  1463. >>> print(output)
  1464. [1.8427168e+00 1.0000000e+00 1.5728319e-01 4.6912432e-03 2.2351742e-05]
  1465. """
  1466. @prim_attr_register
  1467. def __init__(self):
  1468. """Initialize Erfc"""
  1469. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1470. def infer_shape(self, x_shape):
  1471. return x_shape
  1472. def infer_dtype(self, x_type):
  1473. validator.check_tensor_dtype_valid("x", x_type, [mstype.float16, mstype.float32], self.name)
  1474. return x_type
  1475. class Minimum(_MathBinaryOp):
  1476. """
  1477. Computes the minimum of input tensors element-wise.
  1478. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1479. The inputs must be two tensors or one tensor and one scalar.
  1480. When the inputs are two tensors,
  1481. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1482. When the inputs are one tensor and one scalar,
  1483. the scalar could only be a constant.
  1484. Inputs:
  1485. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1486. a bool or a tensor whose data type is number or bool.
  1487. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1488. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1489. Outputs:
  1490. Tensor, the shape is the same as the one after broadcasting,
  1491. and the data type is the one with higher precision or higher digits among the two inputs.
  1492. Supported Platforms:
  1493. ``Ascend`` ``GPU`` ``CPU``
  1494. Examples:
  1495. >>> input_x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
  1496. >>> input_y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
  1497. >>> minimum = ops.Minimum()
  1498. >>> output = minimum(input_x, input_y)
  1499. >>> print(output)
  1500. [1. 2. 3.]
  1501. """
  1502. def infer_value(self, x, y):
  1503. if x is not None and y is not None:
  1504. x = x.asnumpy()
  1505. y = y.asnumpy()
  1506. out = np.minimum(x, y)
  1507. out = np.array(out, x.dtype)
  1508. return Tensor(out)
  1509. return None
  1510. class Maximum(_MathBinaryOp):
  1511. """
  1512. Computes the maximum of input tensors element-wise.
  1513. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1514. The inputs must be two tensors or one tensor and one scalar.
  1515. When the inputs are two tensors,
  1516. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1517. When the inputs are one tensor and one scalar,
  1518. the scalar could only be a constant.
  1519. Inputs:
  1520. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1521. a bool or a tensor whose data type is number or bool.
  1522. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1523. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1524. Outputs:
  1525. Tensor, the shape is the same as the one after broadcasting,
  1526. and the data type is the one with higher precision or higher digits among the two inputs.
  1527. Supported Platforms:
  1528. ``Ascend`` ``GPU`` ``CPU``
  1529. Examples:
  1530. >>> input_x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
  1531. >>> input_y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
  1532. >>> maximum = ops.Maximum()
  1533. >>> output = maximum(input_x, input_y)
  1534. >>> print(output)
  1535. [4. 5. 6.]
  1536. """
  1537. def infer_value(self, x, y):
  1538. if x is not None and y is not None:
  1539. x = x.asnumpy()
  1540. y = y.asnumpy()
  1541. out = np.maximum(x, y)
  1542. out = np.array(out, x.dtype)
  1543. return Tensor(out)
  1544. return None
  1545. class RealDiv(_MathBinaryOp):
  1546. """
  1547. Divides the first input tensor by the second input tensor in floating-point type element-wise.
  1548. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1549. The inputs must be two tensors or one tensor and one scalar.
  1550. When the inputs are two tensors,
  1551. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1552. When the inputs are one tensor and one scalar,
  1553. the scalar could only be a constant.
  1554. Inputs:
  1555. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1556. a bool or a tensor whose data type is number or bool.
  1557. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1558. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1559. Outputs:
  1560. Tensor, the shape is the same as the one after broadcasting,
  1561. and the data type is the one with higher precision or higher digits among the two inputs.
  1562. Supported Platforms:
  1563. ``Ascend`` ``GPU`` ``CPU``
  1564. Examples:
  1565. >>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
  1566. >>> input_y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
  1567. >>> realdiv = ops.RealDiv()
  1568. >>> output = realdiv(input_x, input_y)
  1569. >>> print(output)
  1570. [0.25 0.4 0.5 ]
  1571. """
  1572. def infer_value(self, x, y):
  1573. if x is not None and y is not None:
  1574. x = x.asnumpy()
  1575. y = y.asnumpy()
  1576. out = x / y
  1577. out = np.array(out, x.dtype)
  1578. return Tensor(out)
  1579. return None
  1580. class Div(_MathBinaryOp):
  1581. r"""
  1582. Computes the quotient of dividing the first input tensor by the second input tensor element-wise.
  1583. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1584. The inputs must be two tensors or one tensor and one scalar.
  1585. When the inputs are two tensors,
  1586. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1587. When the inputs are one tensor and one scalar,
  1588. the scalar could only be a constant.
  1589. .. math::
  1590. out_{i} = \frac{x_i}{y_i}
  1591. Inputs:
  1592. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1593. a bool or a tensor whose data type is number or bool.
  1594. - **input_y** (Union[Tensor, Number, bool]) - When the first input is a tensor, The second input
  1595. could be a number, a bool, or a tensor whose data type is number or bool. When the first input
  1596. is a number or a bool, the second input must be a tensor whose data type is number or bool.
  1597. Outputs:
  1598. Tensor, the shape is the same as the one after broadcasting,
  1599. and the data type is the one with higher precision or higher digits among the two inputs.
  1600. Supported Platforms:
  1601. ``Ascend`` ``GPU`` ``CPU``
  1602. Examples:
  1603. >>> input_x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
  1604. >>> input_y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
  1605. >>> div = ops.Div()
  1606. >>> output = div(input_x, input_y)
  1607. >>> print(output)
  1608. [-1.3333334 2.5 2. ]
  1609. """
  1610. def infer_value(self, x, y):
  1611. if x is not None and y is not None:
  1612. x = x.asnumpy()
  1613. y = y.asnumpy()
  1614. out = np.array(x / y, x.dtype)
  1615. return Tensor(out)
  1616. return None
  1617. class DivNoNan(_MathBinaryOp):
  1618. """
  1619. Computes a safe divide and returns 0 if the y is zero.
  1620. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1621. The inputs must be two tensors or one tensor and one scalar.
  1622. When the inputs are two tensors,
  1623. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1624. When the inputs are one tensor and one scalar,
  1625. the scalar could only be a constant.
  1626. Inputs:
  1627. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1628. a bool or a tensor whose data type is number or bool.
  1629. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1630. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1631. Outputs:
  1632. Tensor, the shape is the same as the one after broadcasting,
  1633. and the data type is the one with higher precision or higher digits among the two inputs.
  1634. Supported Platforms:
  1635. ``Ascend`` ``GPU``
  1636. Examples:
  1637. >>> input_x = Tensor(np.array([-1.0, 0., 1.0, 5.0, 6.0]), mindspore.float32)
  1638. >>> input_y = Tensor(np.array([0., 0., 0., 2.0, 3.0]), mindspore.float32)
  1639. >>> div_no_nan = ops.DivNoNan()
  1640. >>> output = div_no_nan(input_x, input_y)
  1641. >>> print(output)
  1642. [0. 0. 0. 2.5 2. ]
  1643. """
  1644. @prim_attr_register
  1645. def __init__(self):
  1646. """Initialize _BinaryOp"""
  1647. self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
  1648. def infer_value(self, x, y):
  1649. if x is not None and y is not None:
  1650. x = x.asnumpy()
  1651. y = y.asnumpy()
  1652. with np.errstate(divide='ignore', invalid='ignore'):
  1653. out = np.true_divide(x, y)
  1654. out[~np.isfinite(out)] = 0
  1655. return out
  1656. return None
  1657. class MulNoNan(_MathBinaryOp):
  1658. r"""
  1659. Computes x * y element-wise. if y is zero, No matter what x is, it will return 0.
  1660. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1661. The inputs must be two tensors or one tensor and one scalar.
  1662. When the inputs are two tensors, the shapes of them could be broadcast.
  1663. When the inputs are one tensor and one scalar, the scalar could only be a constant.
  1664. Note:
  1665. The shapes of X and y should be same or can be broadcasting.
  1666. Inputs:
  1667. - **input_x** (Union[Tensor]) - The first input is a tensor whose data type is number.
  1668. - **input_y** (Union[Tensor]) - The second input is a tensor whose data type is number.
  1669. Outputs:
  1670. Tensor, the shape is the same as the one after broadcasting,
  1671. and the data type is the one with higher precision or higher digits among the two inputs.
  1672. Supported Platforms:
  1673. ``Ascend``
  1674. Raise:
  1675. TypeError: If x or y is a bool tensor.
  1676. Examples:
  1677. >>> x = Tensor(np.array([[-1.0, 6.0, np.inf], [np.nan, -7.0, 4.0]]), ms.float32)
  1678. >>> y = Tensor(np.array([[-1.0, 4.0, 0], [0, -3.0, 1.0]]), ms.float32)
  1679. >>> mul_no_nan = ops.MulNoNan()
  1680. >>> output = mul_no_nan(x, y)
  1681. >>> print(output)
  1682. [[ 1. 24. 0.]
  1683. [ 0. 21. 4.]]
  1684. """
  1685. @prim_attr_register
  1686. def __init__(self):
  1687. """Initialize _BinaryOp"""
  1688. self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
  1689. def infer_value(self, x, y):
  1690. if x is not None and y is not None:
  1691. x = x.asnumpy()
  1692. y = y.asnumpy()
  1693. with np.errstate(divide='ignore', invalid='ignore'):
  1694. out = np.multiply(x, y)
  1695. out[y == 0] = 0
  1696. return out
  1697. return None
  1698. class FloorDiv(_MathBinaryOp):
  1699. """
  1700. Divides the first input tensor by the second input tensor element-wise and round down to the closest integer.
  1701. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1702. The inputs must be two tensors or one tensor and one scalar.
  1703. When the inputs are two tensors,
  1704. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1705. When the inputs are one tensor and one scalar,
  1706. the scalar could only be a constant.
  1707. Inputs:
  1708. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1709. a bool or a tensor whose data type is number or bool.
  1710. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1711. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1712. Outputs:
  1713. Tensor, the shape is the same as the one after broadcasting,
  1714. and the data type is the one with higher precision or higher digits among the two inputs.
  1715. Supported Platforms:
  1716. ``Ascend`` ``GPU`` ``CPU``
  1717. Examples:
  1718. >>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
  1719. >>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
  1720. >>> floor_div = ops.FloorDiv()
  1721. >>> output = floor_div(input_x, input_y)
  1722. >>> print(output)
  1723. [ 0 1 -1]
  1724. """
class TruncateDiv(_MathBinaryOp):
    """
    Divides the first input tensor by the second input tensor element-wise. For integer
    types, negative quotients are rounded towards zero (truncating division).

    Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make
    the data types consistent.
    The inputs must be two tensors or one tensor and one scalar.
    When the inputs are two tensors,
    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
    When the inputs are one tensor and one scalar,
    the scalar could only be a constant.

    Inputs:
        - **input_x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
          or a tensor whose data type is number or bool.
        - **input_y** (Union[Tensor, Number, bool]) - The second input is a number, or a bool
          when the first input is a tensor, or a tensor whose data type is number or bool.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting,
        and the data type is the one with higher precision or higher digits among the two inputs.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
        >>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
        >>> truncate_div = ops.TruncateDiv()
        >>> output = truncate_div(input_x, input_y)
        >>> print(output)
        [0 1 0]
    """
class TruncateMod(_MathBinaryOp):
    """
    Returns the remainder of division element-wise. The result takes the sign of the
    dividend (truncated-division remainder).

    Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make
    the data types consistent.
    The inputs must be two tensors or one tensor and one scalar.
    When the inputs are two tensors,
    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
    When the inputs are one tensor and one scalar,
    the scalar could only be a constant.

    Inputs:
        - **input_x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
          or a tensor whose data type is number or bool.
        - **input_y** (Union[Tensor, Number, bool]) - The second input is a number, or a bool
          when the first input is a tensor, or a tensor whose data type is number or bool.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting,
        and the data type is the one with higher precision or higher digits among the two inputs.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
        >>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
        >>> truncate_mod = ops.TruncateMod()
        >>> output = truncate_mod(input_x, input_y)
        >>> print(output)
        [ 2 1 -1]
    """
  1780. class Mod(_MathBinaryOp):
  1781. """
  1782. Computes the remainder of dividing the first input tensor by the second input tensor element-wise.
  1783. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1784. The inputs must be two tensors or one tensor and one scalar. When the inputs are two tensors,
  1785. both dtypes cannot be bool, and the shapes of them could be broadcast. When the inputs are one tensor
  1786. and one scalar, the scalar could only be a constant.
  1787. Inputs:
  1788. - **input_x** (Union[Tensor, Number]) - The first input is a number or a tensor whose data type is number.
  1789. - **input_y** (Union[Tensor, Number]) - When the first input is a tensor, The second input
  1790. could be a number or a tensor whose data type is number. When the first input is a number,
  1791. the second input must be a tensor whose data type is number.
  1792. Outputs:
  1793. Tensor, the shape is the same as the one after broadcasting,
  1794. and the data type is the one with higher precision or higher digits among the two inputs.
  1795. Raises:
  1796. ValueError: When `input_x` and `input_y` are not the same dtype.
  1797. Supported Platforms:
  1798. ``Ascend`` ``CPU``
  1799. Examples:
  1800. >>> input_x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
  1801. >>> input_y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
  1802. >>> mod = ops.Mod()
  1803. >>> output = mod(input_x, input_y)
  1804. >>> print(output)
  1805. [-1. 1. 0.]
  1806. """
  1807. def infer_value(self, x, y):
  1808. if x is not None and y is not None:
  1809. x = x.asnumpy()
  1810. y = y.asnumpy()
  1811. return Tensor(np.fmod(x, y))
  1812. return None
  1813. class Floor(PrimitiveWithInfer):
  1814. r"""
  1815. Rounds a tensor down to the closest integer element-wise.
  1816. .. math::
  1817. out_i = \lfloor x_i \rfloor
  1818. Inputs:
  1819. - **input_x** (Tensor) - The input tensor. Its element data type must be float.
  1820. Outputs:
  1821. Tensor, has the same shape as `input_x`.
  1822. Supported Platforms:
  1823. ``Ascend`` ``GPU`` ``CPU``
  1824. Examples:
  1825. >>> input_x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
  1826. >>> floor = ops.Floor()
  1827. >>> output = floor(input_x)
  1828. >>> print(output)
  1829. [ 1. 2. -2.]
  1830. """
  1831. @prim_attr_register
  1832. def __init__(self):
  1833. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1834. def infer_shape(self, x_shape):
  1835. return x_shape
  1836. def infer_dtype(self, x_dtype):
  1837. validator.check_tensor_dtype_valid("x", x_dtype, mstype.float_type, self.name)
  1838. return x_dtype
class FloorMod(_MathBinaryOp):
    """
    Computes the remainder of division element-wise (floored-division remainder).

    Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make
    the data types consistent.
    The inputs must be two tensors or one tensor and one scalar.
    When the inputs are two tensors,
    dtypes of them cannot be both bool, and the shapes of them could be broadcast.
    When the inputs are one tensor and one scalar,
    the scalar could only be a constant.

    Inputs:
        - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
          a bool or a tensor whose data type is number or bool.
        - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
          a bool when the first input is a tensor, or a tensor whose data type is number or bool.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting,
        and the data type is the one with higher precision or higher digits among the two inputs.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
        >>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
        >>> floor_mod = ops.FloorMod()
        >>> output = floor_mod(input_x, input_y)
        >>> print(output)
        [2 1 2]
    """
  1866. class Ceil(PrimitiveWithInfer):
  1867. r"""
  1868. Rounds a tensor up to the closest integer element-wise.
  1869. .. math::
  1870. out_i = \lceil x_i \rceil = \lfloor x_i \rfloor + 1
  1871. Inputs:
  1872. - **input_x** (Tensor) - The input tensor. It's element data type must be float16 or float32.
  1873. Outputs:
  1874. Tensor, has the same shape as `input_x`.
  1875. Supported Platforms:
  1876. ``Ascend``
  1877. Examples:
  1878. >>> input_x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
  1879. >>> ceil_op = ops.Ceil()
  1880. >>> output = ceil_op(input_x)
  1881. >>> print(output)
  1882. [ 2. 3. -1.]
  1883. """
  1884. @prim_attr_register
  1885. def __init__(self):
  1886. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1887. def infer_shape(self, x_shape):
  1888. return x_shape
  1889. def infer_dtype(self, x_dtype):
  1890. validator.check_tensor_dtype_valid("x", x_dtype, [mstype.float16, mstype.float32], self.name)
  1891. return x_dtype
  1892. class Xdivy(_MathBinaryOp):
  1893. """
  1894. Divides the first input tensor by the second input tensor element-wise. Returns zero when `x` is zero.
  1895. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1896. The inputs must be two tensors or one tensor and one scalar.
  1897. When the inputs are two tensors,
  1898. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1899. When the inputs are one tensor and one scalar,
  1900. the scalar could only be a constant.
  1901. Inputs:
  1902. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
  1903. or a tensor whose data type is float16, float32 or bool.
  1904. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number,
  1905. or a bool when the first input is a tensor, or a tensor whose data type is float16, float32 or bool.
  1906. Outputs:
  1907. Tensor, the shape is the same as the one after broadcasting,
  1908. and the data type is the one with higher precision or higher digits among the two inputs.
  1909. Supported Platforms:
  1910. ``Ascend``
  1911. Examples:
  1912. >>> input_x = Tensor(np.array([2, 4, -1]), mindspore.float32)
  1913. >>> input_y = Tensor(np.array([2, 2, 2]), mindspore.float32)
  1914. >>> xdivy = ops.Xdivy()
  1915. >>> output = xdivy(input_x, input_y)
  1916. >>> print(output)
  1917. [ 1. 2. -0.5]
  1918. """
  1919. def infer_dtype(self, x_dtype, y_dtype):
  1920. return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, [mstype.float16, mstype.float32], self.name)
  1921. class Xlogy(_MathBinaryOp):
  1922. """
  1923. Computes the first input tensor multiplied by the logarithm of second input tensor element-wise.
  1924. Returns zero when `x` is zero.
  1925. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1926. The inputs must be two tensors or one tensor and one scalar.
  1927. When the inputs are two tensors,
  1928. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1929. When the inputs are one tensor and one scalar,
  1930. the scalar could only be a constant.
  1931. Inputs:
  1932. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1933. a bool or a tensor whose data type is float16, float32 or bool.
  1934. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1935. a bool when the first input is a tensor or a tensor whose data type is float16, float32 or bool.
  1936. The value must be positive.
  1937. Outputs:
  1938. Tensor, the shape is the same as the one after broadcasting,
  1939. and the data type is the one with higher precision or higher digits among the two inputs.
  1940. Supported Platforms:
  1941. ``Ascend``
  1942. Examples:
  1943. >>> input_x = Tensor(np.array([-5, 0, 4]), mindspore.float32)
  1944. >>> input_y = Tensor(np.array([2, 2, 2]), mindspore.float32)
  1945. >>> xlogy = ops.Xlogy()
  1946. >>> output = xlogy(input_x, input_y)
  1947. >>> print(output)
  1948. [-3.465736 0. 2.7725887]
  1949. """
  1950. def infer_dtype(self, x_dtype, y_dtype):
  1951. return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, [mstype.float16, mstype.float32], self.name)
  1952. class Acosh(PrimitiveWithInfer):
  1953. """
  1954. Computes inverse hyperbolic cosine of the input element-wise.
  1955. .. math::
  1956. out_i = cosh^{-1}(input_i)
  1957. Inputs:
  1958. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type should be one of
  1959. the following types: float16, float32.
  1960. Outputs:
  1961. Tensor, has the same shape and type as `input_x`.
  1962. Supported Platforms:
  1963. ``Ascend`` ``GPU``
  1964. Examples:
  1965. >>> acosh = ops.Acosh()
  1966. >>> input_x = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32)
  1967. >>> output = acosh(input_x)
  1968. >>> print(output)
  1969. [0. 0.9624236 1.7627472 5.298292]
  1970. """
  1971. @prim_attr_register
  1972. def __init__(self):
  1973. """Initialize Acosh"""
  1974. def infer_shape(self, x_shape):
  1975. return x_shape
  1976. def infer_dtype(self, x_dtype):
  1977. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
  1978. return x_dtype
  1979. class Cosh(PrimitiveWithInfer):
  1980. """
  1981. Computes hyperbolic cosine of input element-wise.
  1982. Inputs:
  1983. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  1984. Outputs:
  1985. Tensor, has the same shape as `input_x`.
  1986. Supported Platforms:
  1987. ``Ascend``
  1988. Examples:
  1989. >>> cosh = ops.Cosh()
  1990. >>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
  1991. >>> output = cosh(input_x)
  1992. >>> print(output)
  1993. [1.0289385 1.364684 1.048436 1.0040528]
  1994. """
  1995. @prim_attr_register
  1996. def __init__(self):
  1997. """Initialize Cosh"""
  1998. def infer_shape(self, x_shape):
  1999. return x_shape
  2000. def infer_dtype(self, x_dtype):
  2001. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
  2002. return x_dtype
  2003. class Asinh(PrimitiveWithInfer):
  2004. r"""
  2005. Computes inverse hyperbolic sine of the input element-wise.
  2006. .. math::
  2007. out_i = sinh^{-1}(input_i)
  2008. Inputs:
  2009. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type should be one of
  2010. the following types: float16, float32.
  2011. Outputs:
  2012. Tensor, has the same shape and type as `input_x`.
  2013. Supported Platforms:
  2014. ``Ascend`` ``GPU``
  2015. Examples:
  2016. >>> asinh = ops.Asinh()
  2017. >>> input_x = Tensor(np.array([-5.0, 1.5, 3.0, 100.0]), mindspore.float32)
  2018. >>> output = asinh(input_x)
  2019. >>> print(output)
  2020. [-2.3124385 1.1947632 1.8184465 5.298342 ]
  2021. """
  2022. @prim_attr_register
  2023. def __init__(self):
  2024. """Initialize Asinh"""
  2025. def infer_shape(self, x_shape):
  2026. return x_shape
  2027. def infer_dtype(self, x_dtype):
  2028. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
  2029. return x_dtype
  2030. class Sinh(PrimitiveWithInfer):
  2031. """
  2032. Computes hyperbolic sine of the input element-wise.
  2033. Inputs:
  2034. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  2035. Outputs:
  2036. Tensor, has the same shape as `input_x`.
  2037. Supported Platforms:
  2038. ``Ascend``
  2039. Examples:
  2040. >>> sinh = ops.Sinh()
  2041. >>> input_x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
  2042. >>> output = sinh(input_x)
  2043. >>> print(output)
  2044. [0.6604918 0.28367308 0.44337422 0.6604918 ]
  2045. """
  2046. @prim_attr_register
  2047. def __init__(self):
  2048. """Initialize Sinh"""
  2049. def infer_shape(self, x_shape):
  2050. return x_shape
  2051. def infer_dtype(self, x_dtype):
  2052. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
  2053. return x_dtype
  2054. class _LogicBinaryOp(_BinaryOp):
  2055. """
  2056. Define logic binary operators.
  2057. """
  2058. @staticmethod
  2059. def do_infer_dtype(x_dtype, y_dtype, valid_type=mstype.number_type, prim_name=None):
  2060. args_dtype = {"x": x_dtype, "y": y_dtype}
  2061. validator.check_tensors_dtypes_same_and_valid(args_dtype, valid_type, prim_name)
  2062. return mstype.tensor_type(mstype.bool_)
  2063. def infer_dtype(self, x_dtype, y_dtype):
  2064. return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, prim_name=self.name)
  2065. class Equal(_LogicBinaryOp):
  2066. """
  2067. Computes the equivalence between two tensors element-wise.
  2068. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  2069. The inputs must be two tensors or one tensor and one scalar.
  2070. When the inputs are two tensors, the shapes of them could be broadcast.
  2071. When the inputs are one tensor and one scalar, the scalar could only be a constant.
  2072. Inputs:
  2073. - **input_x** (Union[Tensor, Number]) - The first input is a number or
  2074. a tensor whose data type is number.
  2075. - **input_y** (Union[Tensor, Number]) - The second input is a number
  2076. when the first input is a tensor or a tensor whose data type is number.
  2077. The data type is the same as the first input.
  2078. Outputs:
  2079. Tensor, the shape is the same as the one after broadcasting,and the data type is bool.
  2080. Supported Platforms:
  2081. ``Ascend`` ``GPU`` ``CPU``
  2082. Examples:
  2083. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32)
  2084. >>> equal = ops.Equal()
  2085. >>> output = equal(input_x, 2.0)
  2086. >>> print(output)
  2087. [False True False]
  2088. >>>
  2089. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  2090. >>> input_y = Tensor(np.array([1, 2, 4]), mindspore.int32)
  2091. >>> equal = ops.Equal()
  2092. >>> output = equal(input_x, input_y)
  2093. >>> print(output)
  2094. [ True True False]
  2095. """
  2096. def infer_dtype(self, x_dtype, y_dtype):
  2097. return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type + (mstype.bool_,), self.name)
  2098. def infer_value(self, x, y):
  2099. if x is None or y is None:
  2100. return None
  2101. if isinstance(x, Tensor) and x.has_init:
  2102. x = x.init_data()
  2103. if isinstance(y, Tensor) and y.has_init:
  2104. y = y.init_data()
  2105. return Tensor(x.asnumpy() == y.asnumpy())
  2106. class ApproximateEqual(_LogicBinaryOp):
  2107. """
  2108. Returns True if abs(x1-x2) is smaller than tolerance element-wise, otherwise False.
  2109. Inputs of `x1` and `x2` comply with the implicit type conversion rules to make the data types consistent.
  2110. If they have different data types, lower priority data type will be converted to
  2111. relatively highest priority data type.
  2112. RuntimeError exception will be thrown when the data type conversion of Parameter is required.
  2113. Args:
  2114. tolerance (float): The maximum deviation that two elements can be considered equal. Default: 1e-05.
  2115. Inputs:
  2116. - **x1** (Tensor) - A tensor. Must be one of the following types: float32, float16.
  2117. - **x2** (Tensor) - A tensor of the same type and shape as 'x1'.
  2118. Outputs:
  2119. Tensor, the shape is the same as the shape of 'x1', and the data type is bool.
  2120. Supported Platforms:
  2121. ``Ascend``
  2122. Examples:
  2123. >>> x1 = Tensor(np.array([1, 2, 3]), mindspore.float32)
  2124. >>> x2 = Tensor(np.array([2, 4, 6]), mindspore.float32)
  2125. >>> approximate_equal = ops.ApproximateEqual(2.)
  2126. >>> output = approximate_equal(x1, x2)
  2127. >>> print(output)
  2128. [ True True False]
  2129. """
  2130. @prim_attr_register
  2131. def __init__(self, tolerance=1e-05):
  2132. """Initialize ApproximateEqual"""
  2133. validator.check_value_type("tolerance", tolerance, [float], self.name)
  2134. def infer_shape(self, x_shape, y_shape):
  2135. validator.check("x_shape", x_shape, "y_shape", y_shape, Rel.EQ, self.name)
  2136. return x_shape
  2137. def infer_dtype(self, x_dtype, y_dtype):
  2138. args_dtype = {"x": x_dtype, "y": y_dtype}
  2139. valid_type = [mstype.float32, mstype.float16]
  2140. validator.check_tensors_dtypes_same_and_valid(args_dtype, valid_type, prim_name=self.name)
  2141. return mstype.tensor_type(mstype.bool_)
  2142. class EqualCount(PrimitiveWithInfer):
  2143. """
  2144. Computes the number of the same elements of two tensors.
  2145. The two input tensors must have the same data type and shape.
  2146. Inputs:
  2147. - **input_x** (Tensor) - The first input tensor.
  2148. - **input_y** (Tensor) - The second input tensor.
  2149. Outputs:
  2150. Tensor, with the type same as input tensor and size as (1,).
  2151. Supported Platforms:
  2152. ``GPU`` ``CPU``
  2153. Examples:
  2154. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  2155. >>> input_y = Tensor(np.array([1, 2, 4]), mindspore.int32)
  2156. >>> equal_count = ops.EqualCount()
  2157. >>> output = equal_count(input_x, input_y)
  2158. >>> print(output)
  2159. [2]
  2160. """
  2161. @prim_attr_register
  2162. def __init__(self):
  2163. """Initialize EqualCount"""
  2164. self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
  2165. def infer_shape(self, x_shape, y_shape):
  2166. validator.check("x_shape", x_shape, "y_shape", y_shape, Rel.EQ, self.name)
  2167. output_shape = (1,)
  2168. return output_shape
  2169. def infer_dtype(self, x_dtype, y_dtype):
  2170. args = {'x': x_dtype, 'y': y_dtype}
  2171. validator.check_tensors_dtypes_same_and_valid(args, mstype.number_type + (mstype.bool_,), self.name)
  2172. return x_dtype
  2173. class NotEqual(_LogicBinaryOp):
  2174. """
  2175. Computes the non-equivalence of two tensors element-wise.
  2176. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  2177. The inputs must be two tensors or one tensor and one scalar.
  2178. When the inputs are two tensors, the shapes of them could be broadcast.
  2179. When the inputs are one tensor and one scalar, the scalar could only be a constant.
  2180. Inputs:
  2181. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  2182. a bool or a tensor whose data type is number or bool.
  2183. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  2184. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  2185. Outputs:
  2186. Tensor, the shape is the same as the one after broadcasting,and the data type is bool.
  2187. Supported Platforms:
  2188. ``Ascend`` ``GPU`` ``CPU``
  2189. Examples:
  2190. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32)
  2191. >>> not_equal = ops.NotEqual()
  2192. >>> output = not_equal(input_x, 2.0)
  2193. >>> print(output)
  2194. [ True False True]
  2195. >>>
  2196. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  2197. >>> input_y = Tensor(np.array([1, 2, 4]), mindspore.int32)
  2198. >>> not_equal = ops.NotEqual()
  2199. >>> output = not_equal(input_x, input_y)
  2200. >>> print(output)
  2201. [False False True]
  2202. """
  2203. def infer_dtype(self, x_dtype, y_dtype):
  2204. return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type + (mstype.bool_,), self.name)
  2205. class Greater(_LogicBinaryOp):
  2206. """
  2207. Computes the boolean value of :math:`x > y` element-wise.
  2208. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  2209. The inputs must be two tensors or one tensor and one scalar.
  2210. When the inputs are two tensors,
  2211. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  2212. When the inputs are one tensor and one scalar,
  2213. the scalar could only be a constant.
  2214. Inputs:
  2215. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  2216. a bool or a tensor whose data type is number or bool.
  2217. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  2218. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  2219. Outputs:
  2220. Tensor, the shape is the same as the one after broadcasting,and the data type is bool.
  2221. Supported Platforms:
  2222. ``Ascend`` ``GPU`` ``CPU``
  2223. Examples:
  2224. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  2225. >>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
  2226. >>> greater = ops.Greater()
  2227. >>> output = greater(input_x, input_y)
  2228. >>> print(output)
  2229. [False True False]
  2230. """
  2231. def infer_value(self, x, y):
  2232. if x is not None and y is not None:
  2233. x = x.asnumpy()
  2234. y = y.asnumpy()
  2235. out = np.array(np.greater(x, y))
  2236. return Tensor(out)
  2237. return None
  2238. class GreaterEqual(_LogicBinaryOp):
  2239. """
  2240. Computes the boolean value of :math:`x >= y` element-wise.
  2241. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  2242. The inputs must be two tensors or one tensor and one scalar.
  2243. When the inputs are two tensors,
  2244. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  2245. When the inputs are one tensor and one scalar,
  2246. the scalar could only be a constant.
  2247. Inputs:
  2248. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  2249. a bool or a tensor whose data type is number or bool.
  2250. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  2251. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  2252. Outputs:
  2253. Tensor, the shape is the same as the one after broadcasting,and the data type is bool.
  2254. Supported Platforms:
  2255. ``Ascend`` ``GPU`` ``CPU``
  2256. Examples:
  2257. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  2258. >>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
  2259. >>> greater_equal = ops.GreaterEqual()
  2260. >>> output = greater_equal(input_x, input_y)
  2261. >>> print(output)
  2262. [ True True False]
  2263. """
  2264. def infer_value(self, x, y):
  2265. if x is not None and y is not None:
  2266. x = x.asnumpy()
  2267. y = y.asnumpy()
  2268. out = np.array(np.greater_equal(x, y))
  2269. return Tensor(out)
  2270. return None
  2271. class Less(_LogicBinaryOp):
  2272. """
  2273. Computes the boolean value of :math:`x < y` element-wise.
  2274. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  2275. The inputs must be two tensors or one tensor and one scalar.
  2276. When the inputs are two tensors,
  2277. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  2278. When the inputs are one tensor and one scalar,
  2279. the scalar could only be a constant.
  2280. Inputs:
  2281. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  2282. a bool or a tensor whose data type is number or bool.
  2283. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  2284. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  2285. Outputs:
  2286. Tensor, the shape is the same as the one after broadcasting,and the data type is bool.
  2287. Supported Platforms:
  2288. ``Ascend`` ``GPU`` ``CPU``
  2289. Examples:
  2290. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  2291. >>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
  2292. >>> less = ops.Less()
  2293. >>> output = less(input_x, input_y)
  2294. >>> print(output)
  2295. [False False True]
  2296. """
  2297. def infer_value(self, x, y):
  2298. if x is not None and y is not None:
  2299. x = x.asnumpy()
  2300. y = y.asnumpy()
  2301. out = np.array(np.less(x, y))
  2302. return Tensor(out)
  2303. return None
  2304. class LessEqual(_LogicBinaryOp):
  2305. """
  2306. Computes the boolean value of :math:`x <= y` element-wise.
  2307. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  2308. The inputs must be two tensors or one tensor and one scalar.
  2309. When the inputs are two tensors,
  2310. dtypes of them cannot be both bool , and the shapes of them could be broadcast.
  2311. When the inputs are one tensor and one scalar,
  2312. the scalar could only be a constant.
  2313. Inputs:
  2314. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  2315. a bool or a tensor whose data type is number or bool.
  2316. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  2317. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  2318. Outputs:
  2319. Tensor, the shape is the same as the one after broadcasting,and the data type is bool.
  2320. Supported Platforms:
  2321. ``Ascend`` ``GPU`` ``CPU``
  2322. Examples:
  2323. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  2324. >>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
  2325. >>> less_equal = ops.LessEqual()
  2326. >>> output = less_equal(input_x, input_y)
  2327. >>> print(output)
  2328. [ True False True]
  2329. """
  2330. def infer_value(self, x, y):
  2331. if x is not None and y is not None:
  2332. x = x.asnumpy()
  2333. y = y.asnumpy()
  2334. out = np.array(np.less_equal(x, y))
  2335. return Tensor(out)
  2336. return None
  2337. class LogicalNot(PrimitiveWithInfer):
  2338. """
  2339. Computes the "logical NOT" of a tensor element-wise.
  2340. Inputs:
  2341. - **input_x** (Tensor) - The input tensor whose dtype is bool.
  2342. Outputs:
  2343. Tensor, the shape is the same as the `input_x`, and the dtype is bool.
  2344. Supported Platforms:
  2345. ``Ascend`` ``GPU`` ``CPU``
  2346. Examples:
  2347. >>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_)
  2348. >>> logical_not = ops.LogicalNot()
  2349. >>> output = logical_not(input_x)
  2350. >>> print(output)
  2351. [False True False]
  2352. """
  2353. @prim_attr_register
  2354. def __init__(self):
  2355. """Initialize LogicalNot"""
  2356. self.init_prim_io_names(inputs=['x'], outputs=['output'])
  2357. def infer_shape(self, x_shape):
  2358. return x_shape
  2359. def infer_dtype(self, x_dtype):
  2360. validator.check_tensor_dtype_valid("x", x_dtype, [mstype.bool_], self.name)
  2361. return mstype.tensor_type(mstype.bool_)
  2362. class LogicalAnd(_LogicBinaryOp):
  2363. """
  2364. Computes the "logical AND" of two tensors element-wise.
  2365. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  2366. The inputs must be two tensors or one tensor and one bool.
  2367. When the inputs are two tensors, the shapes of them could be broadcast,
  2368. and the data types of them must be bool.
  2369. When the inputs are one tensor and one bool, the bool object could only be a constant,
  2370. and the data type of the tensor must be bool.
  2371. Inputs:
  2372. - **input_x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type is bool.
  2373. - **input_y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or
  2374. a tensor whose data type is bool.
  2375. Outputs:
  2376. Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
  2377. Supported Platforms:
  2378. ``Ascend`` ``GPU`` ``CPU``
  2379. Examples:
  2380. >>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_)
  2381. >>> input_y = Tensor(np.array([True, True, False]), mindspore.bool_)
  2382. >>> logical_and = ops.LogicalAnd()
  2383. >>> output = logical_and(input_x, input_y)
  2384. >>> print(output)
  2385. [ True False False]
  2386. """
  2387. def infer_dtype(self, x_dtype, y_dtype):
  2388. return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, (mstype.bool_,), self.name)
  2389. class LogicalOr(_LogicBinaryOp):
  2390. """
  2391. Computes the "logical OR" of two tensors element-wise.
  2392. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  2393. The inputs must be two tensors or one tensor and one bool.
  2394. When the inputs are two tensors, the shapes of them could be broadcast,
  2395. and the data types of them must be bool.
  2396. When the inputs are one tensor and one bool, the bool object could only be a constant,
  2397. and the data type of the tensor must be bool.
  2398. Inputs:
  2399. - **input_x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type is bool.
  2400. - **input_y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or
  2401. a tensor whose data type is bool.
  2402. Outputs:
  2403. Tensor, the shape is the same as the one after broadcasting,and the data type is bool.
  2404. Supported Platforms:
  2405. ``Ascend`` ``GPU`` ``CPU``
  2406. Examples:
  2407. >>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_)
  2408. >>> input_y = Tensor(np.array([True, True, False]), mindspore.bool_)
  2409. >>> logical_or = ops.LogicalOr()
  2410. >>> output = logical_or(input_x, input_y)
  2411. >>> print(output)
  2412. [ True True True]
  2413. """
  2414. def infer_dtype(self, x_dtype, y_dtype):
  2415. return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, (mstype.bool_,), self.name)
  2416. class IsNan(PrimitiveWithInfer):
  2417. """
  2418. Determines which elements are NaN for each position.
  2419. Inputs:
  2420. - **input_x** (Tensor) - The input tensor.
  2421. Outputs:
  2422. Tensor, has the same shape of input, and the dtype is bool.
  2423. Supported Platforms:
  2424. ``GPU``
  2425. Examples:
  2426. >>> is_nan = ops.IsNan()
  2427. >>> input_x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
  2428. >>> output = is_nan(input_x)
  2429. >>> print(output)
  2430. [True False False]
  2431. """
  2432. @prim_attr_register
  2433. def __init__(self):
  2434. """Initialize IsNan"""
  2435. self.init_prim_io_names(inputs=['x'], outputs=['output'])
  2436. def infer_shape(self, x_shape):
  2437. return x_shape
  2438. def infer_dtype(self, x_dtype):
  2439. return mstype.tensor_type(mstype.bool_)
  2440. class IsInf(PrimitiveWithInfer):
  2441. """
  2442. Determines which elements are inf or -inf for each position
  2443. Inputs:
  2444. - **input_x** (Tensor) - The input tensor.
  2445. Outputs:
  2446. Tensor, has the same shape of input, and the dtype is bool.
  2447. Supported Platforms:
  2448. ``GPU``
  2449. Examples:
  2450. >>> is_inf = ops.IsInf()
  2451. >>> input_x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
  2452. >>> output = is_inf(input_x)
  2453. >>> print(output)
  2454. [False False True]
  2455. """
  2456. @prim_attr_register
  2457. def __init__(self):
  2458. """Initialize IsInf"""
  2459. self.init_prim_io_names(inputs=['x'], outputs=['output'])
  2460. def infer_shape(self, x_shape):
  2461. return x_shape
  2462. def infer_dtype(self, x_dtype):
  2463. return mstype.tensor_type(mstype.bool_)
  2464. class IsFinite(PrimitiveWithInfer):
  2465. """
  2466. Determines which elements are finite for each position.
  2467. Inputs:
  2468. - **input_x** (Tensor) - The input tensor.
  2469. Outputs:
  2470. Tensor, has the same shape of input, and the dtype is bool.
  2471. Supported Platforms:
  2472. ``Ascend`` ``GPU`` ``CPU``
  2473. Examples:
  2474. >>> is_finite = ops.IsFinite()
  2475. >>> input_x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
  2476. >>> output = is_finite(input_x)
  2477. >>> print(output)
  2478. [False True False]
  2479. """
  2480. @prim_attr_register
  2481. def __init__(self):
  2482. """Initialize IsFinite"""
  2483. self.init_prim_io_names(inputs=['x'], outputs=['output'])
  2484. def infer_shape(self, x_shape):
  2485. return x_shape
  2486. def infer_dtype(self, x_dtype):
  2487. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type + (mstype.bool_,), self.name)
  2488. return mstype.tensor_type(mstype.bool_)
  2489. class FloatStatus(PrimitiveWithInfer):
  2490. """
  2491. Determines if the elements contain Not a Number(NaN), infinite or negative infinite. 0 for normal, 1 for overflow.
  2492. Inputs:
  2493. - **input_x** (Tensor) - The input tensor. The data type must be float16 or float32.
  2494. Outputs:
  2495. Tensor, has the shape of `(1,)`, and has the same dtype of input `mindspore.dtype.float32` or
  2496. `mindspore.dtype.float16`.
  2497. Supported Platforms:
  2498. ``GPU``
  2499. Examples:
  2500. >>> float_status = ops.FloatStatus()
  2501. >>> input_x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
  2502. >>> result = float_status(input_x)
  2503. >>> print(result)
  2504. [1.]
  2505. """
  2506. @prim_attr_register
  2507. def __init__(self):
  2508. """Initialize FloatStatus"""
  2509. self.init_prim_io_names(inputs=['x'], outputs=['output'])
  2510. def infer_shape(self, x_shape):
  2511. return [1]
  2512. def infer_dtype(self, x_dtype):
  2513. validator.check_tensor_dtype_valid('x', x_dtype, [mstype.float32, mstype.float16], self.name)
  2514. return x_dtype
  2515. class NPUAllocFloatStatus(PrimitiveWithInfer):
  2516. """
  2517. Allocates a flag to store the overflow status.
  2518. The flag is a tensor whose shape is `(8,)` and data type is `mindspore.dtype.float32`.
  2519. Note:
  2520. Examples: see `NPUGetFloatStatus`.
  2521. Outputs:
  2522. Tensor, has the shape of `(8,)`.
  2523. Supported Platforms:
  2524. ``Ascend``
  2525. Examples:
  2526. >>> alloc_status = ops.NPUAllocFloatStatus()
  2527. >>> output = alloc_status()
  2528. >>> print(output)
  2529. [0. 0. 0. 0. 0. 0. 0. 0.]
  2530. """
  2531. @prim_attr_register
  2532. def __init__(self):
  2533. """Initialize NPUAllocFloatStatus"""
  2534. self.add_prim_attr("_side_effect_flag", True)
  2535. def infer_shape(self):
  2536. return [8]
  2537. def infer_dtype(self):
  2538. return mstype.float32
  2539. class NPUGetFloatStatus(PrimitiveWithInfer):
  2540. """
  2541. Updates the flag which is the output tensor of `NPUAllocFloatStatus` with the latest overflow status.
  2542. The flag is a tensor whose shape is `(8,)` and data type is `mindspore.dtype.float32`.
  2543. If the sum of the flag equals to 0, there is no overflow happened. If the sum of the flag is bigger than 0, there
  2544. is overflow happened.
  2545. Inputs:
  2546. - **input_x** (Tensor) - The output tensor of `NPUAllocFloatStatus`.
  2547. The data type must be float16 or float32.
  2548. Outputs:
  2549. Tensor, has the same shape as `input_x`. All the elements in the tensor will be zero.
  2550. Supported Platforms:
  2551. ``Ascend``
  2552. Examples:
  2553. >>> alloc_status = ops.NPUAllocFloatStatus()
  2554. >>> get_status = ops.NPUGetFloatStatus()
  2555. >>> init = alloc_status()
  2556. >>> get_status(init)
  2557. Tensor(shape=[8], dtype=Float32, value= [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
  2558. 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00])
  2559. >>> print(init)
  2560. [1. 1. 1. 1. 1. 1. 1. 1.]
  2561. """
  2562. @prim_attr_register
  2563. def __init__(self):
  2564. """Initialize NPUGetFloatStatus"""
  2565. self.add_prim_attr("_side_effect_flag", True)
  2566. def infer_shape(self, x_shape):
  2567. cls_name = self.name
  2568. validator.check_equal_int(len(x_shape), 1, "len(x_shape)", cls_name)
  2569. validator.check_equal_int(x_shape[0], 8, "x_shape[0]", cls_name)
  2570. return [8]
  2571. def infer_dtype(self, x_dtype):
  2572. validator.check_tensor_dtype_valid('x', x_dtype, [mstype.float16, mstype.float32], self.name)
  2573. return mstype.float32
  2574. class NPUClearFloatStatus(PrimitiveWithInfer):
  2575. """
  2576. Clears the flag which stores the overflow status.
  2577. Note:
  2578. The flag is in the register on the `Ascend` device. It will be reset and can not be reused again after the
  2579. `NPUClearFloatStatus` is called.
  2580. Examples: see `NPUGetFloatStatus`.
  2581. Inputs:
  2582. - **input_x** (Tensor) - The output tensor of `NPUAllocFloatStatus`.
  2583. The data type must be float16 or float32.
  2584. Outputs:
  2585. Tensor, has the same shape as `input_x`. All the elements in the tensor will be zero.
  2586. Supported Platforms:
  2587. ``Ascend``
  2588. Examples:
  2589. >>> alloc_status = ops.NPUAllocFloatStatus()
  2590. >>> get_status = ops.NPUGetFloatStatus()
  2591. >>> clear_status = ops.NPUClearFloatStatus()
  2592. >>> init = alloc_status()
  2593. >>> flag = get_status(init)
  2594. >>> clear_status(init)
  2595. Tensor(shape=[8], dtype=Float32, value= [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
  2596. 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00])
  2597. >>> print(init)
  2598. [1. 1. 1. 1. 1. 1. 1. 1.]
  2599. """
  2600. @prim_attr_register
  2601. def __init__(self):
  2602. """Initialize NPUClearFloatStatus"""
  2603. self.add_prim_attr("_side_effect_flag", True)
  2604. def infer_shape(self, x_shape):
  2605. cls_name = self.name
  2606. validator.check_equal_int(len(x_shape), 1, "len(x_shape)", cls_name)
  2607. validator.check_equal_int(x_shape[0], 8, "x_shape[0]", cls_name)
  2608. return [8]
  2609. def infer_dtype(self, x_dtype):
  2610. validator.check_tensor_dtype_valid('x', x_dtype, [mstype.float16, mstype.float32], self.name)
  2611. return mstype.float32
  2612. class Cos(PrimitiveWithInfer):
  2613. """
  2614. Computes cosine of input element-wise.
  2615. Inputs:
  2616. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  2617. Outputs:
  2618. Tensor, has the same shape as `input_x`.
  2619. Supported Platforms:
  2620. ``Ascend`` ``GPU``
  2621. Examples:
  2622. >>> cos = ops.Cos()
  2623. >>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
  2624. >>> output = cos(input_x)
  2625. >>> print(output)
  2626. [0.971338 0.67487574 0.95233357 0.9959527 ]
  2627. """
  2628. @prim_attr_register
  2629. def __init__(self):
  2630. """Initialize Cos"""
  2631. def infer_shape(self, x_shape):
  2632. return x_shape
  2633. def infer_dtype(self, x_dtype):
  2634. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
  2635. return x_dtype
  2636. class ACos(PrimitiveWithInfer):
  2637. r"""
  2638. Computes arccosine of input tensors element-wise.
  2639. .. math::
  2640. out_i = cos^{-1}(x_i)
  2641. Inputs:
  2642. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  2643. Outputs:
  2644. Tensor, has the same shape as `input_x`.
  2645. Supported Platforms:
  2646. ``Ascend`` ``GPU``
  2647. Examples:
  2648. >>> acos = ops.ACos()
  2649. >>> input_x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
  2650. >>> output = acos(input_x)
  2651. >>> print(output)
  2652. [0.7377037 1.5307858 1.2661037 0.97641146]
  2653. """
  2654. @prim_attr_register
  2655. def __init__(self):
  2656. """Initialize ACos"""
  2657. def infer_shape(self, x_shape):
  2658. return x_shape
  2659. def infer_dtype(self, x_dtype):
  2660. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
  2661. return x_dtype
  2662. class Sin(PrimitiveWithInfer):
  2663. """
  2664. Computes sine of the input element-wise.
  2665. Inputs:
  2666. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  2667. Outputs:
  2668. Tensor, has the same shape as `input_x`.
  2669. Supported Platforms:
  2670. ``Ascend`` ``GPU``
  2671. Examples:
  2672. >>> sin = ops.Sin()
  2673. >>> input_x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
  2674. >>> output = sin(input_x)
  2675. >>> print(output)
  2676. [0.5810352 0.27635565 0.41687083 0.5810352 ]
  2677. """
  2678. @prim_attr_register
  2679. def __init__(self):
  2680. """Initialize Sin."""
  2681. def infer_shape(self, x_shape):
  2682. return x_shape
  2683. def infer_dtype(self, x_dtype):
  2684. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
  2685. return x_dtype
  2686. class Asin(PrimitiveWithInfer):
  2687. r"""
  2688. Computes arcsine of input tensors element-wise.
  2689. .. math::
  2690. out_i = sin^{-1}(x_i)
  2691. Inputs:
  2692. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  2693. Outputs:
  2694. Tensor, has the same shape as `input_x`.
  2695. Supported Platforms:
  2696. ``Ascend`` ``GPU``
  2697. Examples:
  2698. >>> asin = ops.Asin()
  2699. >>> input_x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
  2700. >>> output = asin(input_x)
  2701. >>> print(output)
  2702. [0.8330927 0.04001068 0.30469266 0.59438497]
  2703. """
  2704. @prim_attr_register
  2705. def __init__(self):
  2706. """Initialize Asin"""
  2707. def infer_shape(self, x_shape):
  2708. return x_shape
  2709. def infer_dtype(self, x_dtype):
  2710. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
  2711. return x_dtype
  2712. class NMSWithMask(PrimitiveWithInfer):
  2713. """
  2714. Selects some bounding boxes in descending order of score.
  2715. Args:
  2716. iou_threshold (float): Specifies the threshold of overlap boxes with respect to
  2717. IOU. Default: 0.5.
  2718. Raises:
  2719. ValueError: If the iou_threshold is not a float number, or if the first dimension
  2720. of input Tensor is less than or equal to 0, or if the data type of the input
  2721. Tensor is not float16 or float32.
  2722. Inputs:
  2723. - **bboxes** (Tensor) - The shape of tensor is :math:`(N, 5)`. Input bounding boxes.
  2724. `N` is the number of input bounding boxes. Every bounding box
  2725. contains 5 values, the first 4 values are the coordinates of bounding
  2726. box, and the last value is the score of this bounding box.
  2727. The data type must be float16 or float32.
  2728. Outputs:
  2729. tuple[Tensor], tuple of three tensors, they are selected_boxes, selected_idx and selected_mask.
  2730. - **selected_boxes** (Tensor) - The shape of tensor is :math:`(N, 5)`. The list of bounding boxes
  2731. after non-max suppression calculation.
  2732. - **selected_idx** (Tensor) - The shape of tensor is :math:`(N,)`. The indexes list of
  2733. valid input bounding boxes.
  2734. - **selected_mask** (Tensor) - The shape of tensor is :math:`(N,)`. A mask list of
  2735. valid output bounding boxes.
  2736. Supported Platforms:
  2737. ``Ascend`` ``GPU``
  2738. Examples:
  2739. >>> bbox = np.array([[0.4, 0.2, 0.4, 0.3, 0.1], [0.4, 0.3, 0.6, 0.8, 0.7]])
  2740. >>> bbox[:, 2] += bbox[:, 0]
  2741. >>> bbox[:, 3] += bbox[:, 1]
  2742. >>> inputs = Tensor(bbox, mindspore.float32)
  2743. >>> nms = ops.NMSWithMask(0.5)
  2744. >>> output_boxes, indices, mask = nms(inputs)
  2745. >>> indices_np = indices.asnumpy()
  2746. >>> print(indices_np[mask.asnumpy()])
  2747. [0 1]
  2748. """
  2749. @prim_attr_register
  2750. def __init__(self, iou_threshold=0.5):
  2751. """Initialize NMSWithMask"""
  2752. validator.check_value_type("iou_threshold", iou_threshold, [float], self.name)
  2753. self.init_prim_io_names(inputs=['bboxes'], outputs=['selected_boxes', 'selected_idx', 'selected_mask'])
  2754. self.is_ge = context.get_context("enable_ge")
  2755. def infer_shape(self, bboxes_shape):
  2756. cls_name = self.name
  2757. validator.check_equal_int(len(bboxes_shape), 2, "bboxes rank", cls_name)
  2758. validator.check_positive_int(bboxes_shape[0], "bboxes.shape[0]", cls_name)
  2759. validator.check_equal_int(bboxes_shape[1], 5, "bboxes.shape[1]", cls_name)
  2760. num = bboxes_shape[0]
  2761. return (bboxes_shape, (num,), (num,))
  2762. def infer_dtype(self, bboxes_dtype):
  2763. validator.check_tensor_dtype_valid("bboxes", bboxes_dtype, [mstype.float16, mstype.float32], self.name)
  2764. return (bboxes_dtype, mstype.int32, mstype.bool_)
  2765. class Abs(PrimitiveWithInfer):
  2766. r"""
  2767. Returns absolute value of a tensor element-wise.
  2768. .. math::
  2769. out_i = |x_i|
  2770. Inputs:
  2771. - **input_x** (Tensor) - The input tensor. The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  2772. Outputs:
  2773. Tensor, has the same shape as the `input_x`.
  2774. Supported Platforms:
  2775. ``Ascend`` ``GPU`` ``CPU``
  2776. Examples:
  2777. >>> input_x = Tensor(np.array([-1.0, 1.0, 0.0]), mindspore.float32)
  2778. >>> abs = ops.Abs()
  2779. >>> output = abs(input_x)
  2780. >>> print(output)
  2781. [1. 1. 0.]
  2782. """
  2783. @prim_attr_register
  2784. def __init__(self):
  2785. """Initialize Abs"""
  2786. self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
  2787. def infer_shape(self, x_shape):
  2788. return x_shape
  2789. def infer_dtype(self, x_type):
  2790. validator.check_tensor_dtype_valid('x', x_type, mstype.number_type, self.name)
  2791. return x_type
  2792. def infer_value(self, x):
  2793. if x is not None:
  2794. x = x.asnumpy()
  2795. out = np.array(np.abs(x, dtype=x.dtype))
  2796. return Tensor(out)
  2797. return None
  2798. class Sign(PrimitiveWithInfer):
  2799. r"""
  2800. Performs sign on the tensor element-wise.
  2801. Note:
  2802. .. math::
  2803. sign(x) = \begin{cases} -1, &if\ x < 0 \cr
  2804. 0, &if\ x = 0 \cr
  2805. 1, &if\ x > 0\end{cases}
  2806. Inputs:
  2807. - **input_x** (Tensor) - The input tensor.
  2808. Outputs:
  2809. Tensor, has the same shape and type as the `input_x`.
  2810. Supported Platforms:
  2811. ``Ascend`` ``CPU``
  2812. Examples:
  2813. >>> input_x = Tensor(np.array([[2.0, 0.0, -1.0]]), mindspore.float32)
  2814. >>> sign = ops.Sign()
  2815. >>> output = sign(input_x)
  2816. >>> print(output)
  2817. [[ 1. 0. -1.]]
  2818. """
  2819. @prim_attr_register
  2820. def __init__(self):
  2821. pass
  2822. def infer_shape(self, x_shape):
  2823. return x_shape
  2824. def infer_dtype(self, x_dtype):
  2825. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
  2826. return x_dtype
  2827. class Round(PrimitiveWithInfer):
  2828. """
  2829. Returns half to even of a tensor element-wise.
  2830. Inputs:
  2831. - **input_x** (Tensor) - The input tensor.
  2832. Outputs:
  2833. Tensor, has the same shape and type as the `input_x`.
  2834. Supported Platforms:
  2835. ``Ascend``
  2836. Examples:
  2837. >>> input_x = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]), mindspore.float32)
  2838. >>> round = ops.Round()
  2839. >>> output = round(input_x)
  2840. >>> print(output)
  2841. [ 1. 2. 2. 2. -4.]
  2842. """
  2843. @prim_attr_register
  2844. def __init__(self):
  2845. """Initialize Round"""
  2846. self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
  2847. def infer_shape(self, x_shape):
  2848. return x_shape
  2849. def infer_dtype(self, x_dtype):
  2850. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
  2851. return x_dtype
  2852. class Tan(PrimitiveWithInfer):
  2853. """
  2854. Computes tangent of `input_x` element-wise.
  2855. Inputs:
  2856. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. Data type must be
  2857. float16, float32 or int32.
  2858. Outputs:
  2859. Tensor, has the same shape as `input_x`.
  2860. Supported Platforms:
  2861. ``Ascend``
  2862. Examples:
  2863. >>> tan = ops.Tan()
  2864. >>> input_x = Tensor(np.array([-1.0, 0.0, 1.0]), mindspore.float32)
  2865. >>> output = tan(input_x)
  2866. >>> print(output)
  2867. [-1.5574081 0. 1.5574081]
  2868. """
  2869. @prim_attr_register
  2870. def __init__(self):
  2871. """Initialize Tan"""
  2872. def infer_shape(self, x_shape):
  2873. return x_shape
  2874. def infer_dtype(self, x_type):
  2875. valid_dtypes = [mstype.float16, mstype.float32, mstype.int32]
  2876. validator.check_tensor_dtype_valid('x', x_type, valid_dtypes, self.name)
  2877. return x_type
  2878. class Atan(PrimitiveWithInfer):
  2879. r"""
  2880. Computes the trigonometric inverse tangent of the input element-wise.
  2881. .. math::
  2882. out_i = tan^{-1}(x_i)
  2883. Inputs:
  2884. - **input_x** (Tensor): The input tensor. The data type should be one of the following types: float16, float32.
  2885. Outputs:
  2886. A Tensor, has the same type as the input.
  2887. Supported Platforms:
  2888. ``Ascend`` ``GPU``
  2889. Examples:
  2890. >>> input_x = Tensor(np.array([1.0, 0.0]), mindspore.float32)
  2891. >>> atan = ops.Atan()
  2892. >>> output = atan(input_x)
  2893. >>> print(output)
  2894. [0.7853982 0. ]
  2895. """
  2896. @prim_attr_register
  2897. def __init__(self):
  2898. pass
  2899. def infer_shape(self, x_shape):
  2900. return x_shape
  2901. def infer_dtype(self, x_type):
  2902. validator.check_tensor_dtype_valid('x', x_type, mstype.number_type, self.name)
  2903. return x_type
  2904. class Atanh(PrimitiveWithInfer):
  2905. """
  2906. Computes inverse hyperbolic tangent of the input element-wise.
  2907. Inputs:
  2908. - **input_x** (Tensor): The input tensor.
  2909. Outputs:
  2910. A Tensor, has the same type as the input.
  2911. Supported Platforms:
  2912. ``Ascend``
  2913. Examples:
  2914. >>> input_x = Tensor(np.array([1.047, 0.785]), mindspore.float32)
  2915. >>> atanh = ops.Atanh()
  2916. >>> output = atanh(input_x)
  2917. >>> print(output)
  2918. [1.8869909 1.058268 ]
  2919. """
  2920. @prim_attr_register
  2921. def __init__(self):
  2922. pass
  2923. def infer_shape(self, x_shape):
  2924. return x_shape
  2925. def infer_dtype(self, x_type):
  2926. validator.check_tensor_dtype_valid('x', x_type, mstype.number_type, self.name)
  2927. return x_type
  2928. class Atan2(_MathBinaryOp):
  2929. r"""
  2930. Returns arctangent of input_x/input_y element-wise.
  2931. It returns :math:`\theta\ \in\ [-\pi, \pi]`
  2932. such that :math:`x = r*\sin(\theta), y = r*\cos(\theta)`, where :math:`r = \sqrt{x^2 + y^2}`.
  2933. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  2934. If they have different data types, lower priority data type will be converted to
  2935. relatively highest priority data type.
  2936. RuntimeError exception will be thrown when the data type conversion of Parameter is required.
  2937. Inputs:
  2938. - **input_x** (Tensor) - The input tensor.
  2939. - **input_y** (Tensor) - The input tensor.
  2940. Outputs:
  2941. Tensor, the shape is the same as the one after broadcasting,and the data type is same as `input_x`.
  2942. Supported Platforms:
  2943. ``Ascend``
  2944. Examples:
  2945. >>> input_x = Tensor(np.array([0, 1]), mindspore.float32)
  2946. >>> input_y = Tensor(np.array([1, 1]), mindspore.float32)
  2947. >>> atan2 = ops.Atan2()
  2948. >>> output = atan2(input_x, input_y)
  2949. >>> print(output)
  2950. [0. 0.7853982]
  2951. """
  2952. class SquareSumAll(PrimitiveWithInfer):
  2953. """
  2954. Returns the square sum of a tensor element-wise
  2955. Inputs:
  2956. - **input_x1** (Tensor) - The input tensor. The data type must be float16 or float32.
  2957. - **input_x2** (Tensor) - The input tensor has the same type and shape as the `input_x1`.
  2958. Note:
  2959. SquareSumAll only supports float16 and float32 data type.
  2960. Outputs:
  2961. - **output_y1** (Tensor) - The same type as the `input_x1`.
  2962. - **output_y2** (Tensor) - The same type as the `input_x1`.
  2963. Supported Platforms:
  2964. ``Ascend``
  2965. Examples:
  2966. >>> input_x1 = Tensor(np.array([0, 0, 2, 0]), mindspore.float32)
  2967. >>> input_x2 = Tensor(np.array([0, 0, 2, 4]), mindspore.float32)
  2968. >>> square_sum_all = ops.SquareSumAll()
  2969. >>> output = square_sum_all(input_x1, input_x2)
  2970. >>> print(output)
  2971. (Tensor(shape=[], dtype=Float32, value= 4),
  2972. Tensor(shape=[], dtype=Float32, value= 20))
  2973. """
  2974. @prim_attr_register
  2975. def __init__(self):
  2976. """Initialize SquareSumAll"""
  2977. def infer_shape(self, x_shape, y_shape):
  2978. validator.check("x1_shape", x_shape, "x2_shape", y_shape, Rel.EQ, self.name)
  2979. return [], []
  2980. def infer_dtype(self, x_type, y_type):
  2981. valid_types = (mstype.float16, mstype.float32)
  2982. args = {"x1_type": x_type, "x2_type": y_type}
  2983. validator.check_tensors_dtypes_same_and_valid(args, valid_types, self.name)
  2984. return x_type, y_type
  2985. class BitwiseAnd(_BitwiseBinaryOp):
  2986. """
  2987. Returns bitwise `and` of two tensors element-wise.
  2988. Inputs of `input_x1` and `input_x2` comply with the implicit type conversion rules to
  2989. make the data types consistent.
  2990. If they have different data types, lower priority data type will be converted to
  2991. relatively highest priority data type.
  2992. RuntimeError exception will be thrown when the data type conversion of Parameter is required.
  2993. Inputs:
  2994. - **input_x1** (Tensor) - The input tensor with int16, int32 or uint16 data type.
  2995. - **input_x2** (Tensor) - The input tensor with same type as the `input_x1`.
  2996. Outputs:
  2997. Tensor, has the same type as the `input_x1`.
  2998. Supported Platforms:
  2999. ``Ascend``
  3000. Examples:
  3001. >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
  3002. >>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
  3003. >>> bitwise_and = ops.BitwiseAnd()
  3004. >>> output = bitwise_and(input_x1, input_x2)
  3005. >>> print(output)
  3006. [ 0 0 1 -1 1 0 1]
  3007. """
  3008. class BitwiseOr(_BitwiseBinaryOp):
  3009. """
  3010. Returns bitwise `or` of two tensors element-wise.
  3011. Inputs of `input_x1` and `input_x2` comply with the implicit type conversion rules to
  3012. make the data types consistent.
  3013. If they have different data types, lower priority data type will be converted to
  3014. relatively highest priority data type.
  3015. RuntimeError exception will be thrown when the data type conversion of Parameter is required.
  3016. Inputs:
  3017. - **input_x1** (Tensor) - The input tensor with int16, int32 or uint16 data type.
  3018. - **input_x2** (Tensor) - The input tensor with same type as the `input_x1`.
  3019. Outputs:
  3020. Tensor, has the same type as the `input_x1`.
  3021. Supported Platforms:
  3022. ``Ascend``
  3023. Examples:
  3024. >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
  3025. >>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
  3026. >>> bitwise_or = ops.BitwiseOr()
  3027. >>> output = bitwise_or(input_x1, input_x2)
  3028. >>> print(output)
  3029. [ 0 1 1 -1 -1 3 3]
  3030. """
  3031. class BitwiseXor(_BitwiseBinaryOp):
  3032. """
  3033. Returns bitwise `xor` of two tensors element-wise.
  3034. Inputs of `input_x1` and `input_x2` comply with the implicit type conversion rules to
  3035. make the data types consistent.
  3036. If they have different data types, lower priority data type will be converted to
  3037. relatively highest priority data type.
  3038. RuntimeError exception will be thrown when the data type conversion of Parameter is required.
  3039. Inputs:
  3040. - **input_x1** (Tensor) - The input tensor with int16, int32 or uint16 data type.
  3041. - **input_x2** (Tensor) - The input tensor with same type as the `input_x1`.
  3042. Outputs:
  3043. Tensor, has the same type as the `input_x1`.
  3044. Supported Platforms:
  3045. ``Ascend``
  3046. Examples:
  3047. >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
  3048. >>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
  3049. >>> bitwise_xor = ops.BitwiseXor()
  3050. >>> output = bitwise_xor(input_x1, input_x2)
  3051. >>> print(output)
  3052. [ 0 1 0 0 -2 3 2]
  3053. """
  3054. class BesselI0e(PrimitiveWithInfer):
  3055. """
  3056. Computes BesselI0e of input element-wise.
  3057. Inputs:
  3058. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. Data type must be float16 or
  3059. float32.
  3060. Outputs:
  3061. Tensor, has the same shape as `input_x`.
  3062. Supported Platforms:
  3063. ``Ascend``
  3064. Examples:
  3065. >>> bessel_i0e = ops.BesselI0e()
  3066. >>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
  3067. >>> output = bessel_i0e(input_x)
  3068. >>> print(output)
  3069. [0.7979961 0.5144438 0.75117415 0.9157829 ]
  3070. """
  3071. @prim_attr_register
  3072. def __init__(self):
  3073. """Initialize BesselI0e"""
  3074. def infer_shape(self, x):
  3075. return x
  3076. def infer_dtype(self, x):
  3077. validator.check_tensor_dtype_valid('x', x, mstype.number_type, self.name)
  3078. return x
  3079. class BesselI1e(PrimitiveWithInfer):
  3080. """
  3081. Computes BesselI1e of input element-wise.
  3082. Inputs:
  3083. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. Data type must be float16 or
  3084. float32.
  3085. Outputs:
  3086. Tensor, has the same shape as `input_x`.
  3087. Supported Platforms:
  3088. ``Ascend``
  3089. Examples:
  3090. >>> bessel_i1e = ops.BesselI1e()
  3091. >>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
  3092. >>> output = bessel_i1e(input_x)
  3093. >>> print(output)
  3094. [0.09507662 0.19699717 0.11505538 0.04116856]
  3095. """
  3096. @prim_attr_register
  3097. def __init__(self):
  3098. """Initialize BesselI1e"""
  3099. def infer_shape(self, x):
  3100. return x
  3101. def infer_dtype(self, x):
  3102. validator.check_tensor_dtype_valid('x', x, mstype.number_type, self.name)
  3103. return x
  3104. class Inv(PrimitiveWithInfer):
  3105. """
  3106. Computes Inv(Reciprocal) of input tensor element-wise.
  3107. Inputs:
  3108. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  3109. Must be one of the following types: float16, float32, int32.
  3110. Outputs:
  3111. Tensor, has the same shape and data type as `input_x`.
  3112. Supported Platforms:
  3113. ``Ascend``
  3114. Examples:
  3115. >>> inv = ops.Inv()
  3116. >>> input_x = Tensor(np.array([0.25, 0.4, 0.31, 0.52]), mindspore.float32)
  3117. >>> output = inv(input_x)
  3118. >>> print(output)
  3119. [4. 2.5 3.2258065 1.923077 ]
  3120. """
  3121. @prim_attr_register
  3122. def __init__(self):
  3123. pass
  3124. def infer_shape(self, x_shape):
  3125. return x_shape
  3126. def infer_dtype(self, x_dtype):
  3127. validator.check_tensor_dtype_valid('x_dtype', x_dtype, [mstype.float16, mstype.float32,
  3128. mstype.int32], self.name)
  3129. return x_dtype
  3130. class Invert(PrimitiveWithInfer):
  3131. """
  3132. Flips all bits of input tensor element-wise.
  3133. Inputs:
  3134. - **input_x** (Tensor[int16], Tensor[uint16]) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  3135. Outputs:
  3136. Tensor, has the same shape as `input_x`.
  3137. Supported Platforms:
  3138. ``Ascend``
  3139. Examples:
  3140. >>> invert = ops.Invert()
  3141. >>> input_x = Tensor(np.array([25, 4, 13, 9]), mindspore.int16)
  3142. >>> output = invert(input_x)
  3143. >>> print(output)
  3144. [-26 -5 -14 -10]
  3145. """
  3146. @prim_attr_register
  3147. def __init__(self):
  3148. pass
  3149. def infer_shape(self, x_shape):
  3150. return x_shape
  3151. def infer_dtype(self, x_dtype):
  3152. validator.check_tensor_dtype_valid('x_dtype', x_dtype, [mstype.int16, mstype.uint16], self.name)
  3153. return x_dtype
  3154. class Eps(PrimitiveWithInfer):
  3155. """
  3156. Creates a tensor filled with `input_x` dtype minimum value.
  3157. Inputs:
  3158. - **input_x** (Tensor) - Input tensor. The data type must be float16 or float32.
  3159. Outputs:
  3160. Tensor, has the same type and shape as `input_x`, but filled with `input_x` dtype minimum val.
  3161. Supported Platforms:
  3162. ``Ascend`` ``GPU``
  3163. Examples:
  3164. >>> input_x = Tensor([4, 1, 2, 3], mindspore.float32)
  3165. >>> output = ops.Eps()(input_x)
  3166. >>> print(output)
  3167. [1.5258789e-05 1.5258789e-05 1.5258789e-05 1.5258789e-05]
  3168. """
  3169. @prim_attr_register
  3170. def __init__(self):
  3171. """Initialize Eps"""
  3172. self.init_prim_io_names(inputs=['input_x'], outputs=['y'])
  3173. def __infer__(self, input_x):
  3174. valid_dtypes = [mstype.float16, mstype.float32]
  3175. validator.check_tensor_dtype_valid('input_x', input_x['dtype'], valid_dtypes, self.name)
  3176. x_nptype = mstype.dtype_to_nptype(input_x['dtype'].element_type())
  3177. if x_nptype == np.float16:
  3178. min_val = 2 ** (-14)
  3179. else:
  3180. min_val = 2 ** (-16)
  3181. res = np.full(input_x['shape'], min_val, x_nptype)
  3182. out = {
  3183. 'value': Tensor(res),
  3184. 'shape': input_x['shape'],
  3185. 'dtype': input_x['dtype'],
  3186. }
  3187. return out
  3188. class LinSpace(PrimitiveWithInfer):
  3189. r"""
  3190. Generates values in an interval (inclusive of start and stop) and returns the corresponding
  3191. interpolated array with **num** number of ticks.
  3192. Inputs:
  3193. - **start** (Tensor[float32]) - Start value of interval, With shape of 0-D.
  3194. - **stop** (Tensor[float32]) - Last value of interval, With shape of 0-D.
  3195. - **num** (int) - Number of ticks in the interval, inclusive of start and stop.
  3196. Outputs:
  3197. Tensor, has the same shape as `start`.
  3198. Supported Platforms:
  3199. ``Ascend`` ``GPU``
  3200. Examples:
  3201. >>> linspace = P.LinSpace()
  3202. >>> start = Tensor(1, mindspore.float32)
  3203. >>> stop = Tensor(10, mindspore.float32)
  3204. >>> num = 5
  3205. >>> output = linspace(start, stop, num)
  3206. >>> print(output)
  3207. [ 1. 3.25 5.5 7.75 10. ]
  3208. """
  3209. @prim_attr_register
  3210. def __init__(self):
  3211. """Initialize LinSpace"""
  3212. def __infer__(self, start, stop, num):
  3213. args = {"start": start['dtype'], "stop": start['dtype']}
  3214. validator.check_tensors_dtypes_same_and_valid(args, (mstype.float32,), self.name)
  3215. start_shape = start['shape']
  3216. stop_shape = stop['shape']
  3217. validator.check_equal_int(len(start_shape), 0, "rank of start_shape", self.name)
  3218. validator.check_equal_int(len(stop_shape), 0, "rank of stop_shape", self.name)
  3219. num_v = num['value']
  3220. validator.check_value_type('num', num_v, [int], self.name)
  3221. validator.check_positive_int(num_v, "num", self.name)
  3222. out_shape = [num_v]
  3223. out = {'shape': out_shape,
  3224. 'dtype': start['dtype'],
  3225. 'value': None}
  3226. return out
  3227. class MatrixInverse(PrimitiveWithInfer):
  3228. """
  3229. Returns the inverse of the input matrix. If the matrix is irreversible, an error may be reported or an unknown
  3230. result may be returned
  3231. Note:
  3232. The parameter 'adjoint' is only supporting False right now. Because complex number is not supported at present.
  3233. Args:
  3234. adjoint (bool) : An optional bool. Default: False.
  3235. Inputs:
  3236. - **x** (Tensor) - A matrix to be calculated.
  3237. types: float32, double.
  3238. Outputs:
  3239. Tensor, has the same type and shape as input `x`.
  3240. Supported Platforms:
  3241. ``GPU``
  3242. Examples:
  3243. >>> mindspore.set_seed(1)
  3244. >>> x = Tensor(np.random.uniform(-2, 2, (2, 2, 2)), mindspore.float32)
  3245. >>> matrix_inverse = P.MatrixInverse(adjoint=False)
  3246. >>> output = matrix_inverse(x)
  3247. >>> print(output)
  3248. [[[-0.39052644 -0.43528939]
  3249. [ 0.98761106 -0.16393748]]
  3250. [[ 0.52641493 -1.3895369 ]
  3251. [-1.0693996 1.2040523 ]]]
  3252. """
  3253. @prim_attr_register
  3254. def __init__(self, adjoint=False):
  3255. """Initialize MatrixInverse"""
  3256. validator.check_type_name("adjoint", adjoint, False, self.name)
  3257. self.adjoint = adjoint
  3258. def infer_dtype(self, x_dtype):
  3259. valid_type = [mstype.float32, mstype.double]
  3260. validator.check_tensor_dtype_valid("x_dtype", x_dtype, valid_type, self.name)
  3261. return x_dtype
  3262. def infer_shape(self, x_shape):
  3263. validator.check_int(len(x_shape), 2, Rel.GE, self.name, None)
  3264. validator.check_equal_int(x_shape[-1], x_shape[-2], self.name, None)
  3265. return x_shape