You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

array_ops.py 200 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
6 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
6 years ago
5 years ago
6 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
6 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
747784779478047814782478347844785478647874788478947904791479247934794479547964797479847994800480148024803480448054806480748084809481048114812481348144815481648174818481948204821482248234824482548264827482848294830483148324833483448354836483748384839484048414842484348444845484648474848484948504851485248534854485548564857485848594860486148624863486448654866486748684869487048714872487348744875487648774878487948804881488248834884488548864887488848894890489148924893489448954896489748984899490049014902490349044905490649074908490949104911491249134914491549164917491849194920492149224923492449254926492749284929493049314932493349344935493649374938493949404941494249434944494549464947494849494950495149524953495449554956495749584959496049614962496349644965496649674968496949704971497249734974497549764977497849794980498149824983498449854986498749884989499049914992499349944995
  1. # coding: utf-8
  2. # Copyright 2020-2021 Huawei Technologies Co., Ltd
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. # ============================================================================
  16. """Operators for array."""
  17. import copy
  18. import functools
  19. import itertools
  20. import numbers
  21. import numpy as np
  22. from mindspore import log as logger
  23. from .._utils import get_concat_offset
  24. from ..operations.math_ops import _infer_shape_reduce
  25. from ..primitive import Primitive, PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_register, _run_op
  26. from .. import signature as sig
  27. from ..._checkparam import Rel
  28. from ..._checkparam import Validator as validator
  29. from ...common import dtype as mstype
  30. from ...common._decorator import deprecated
  31. from ...common.parameter import Parameter
  32. from ...common.tensor import Tensor
  33. class _ScatterOp(PrimitiveWithInfer):
  34. """
  35. Defines Scatter operators
  36. """
  37. __mindspore_signature__ = (
  38. sig.make_sig('x', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
  39. sig.make_sig('indices', dtype=sig.sig_dtype.T1),
  40. sig.make_sig('updates', dtype=sig.sig_dtype.T)
  41. )
  42. def _check_scatter_shape(self, x_shape, indices_shape, updates_shape, prim_name):
  43. if indices_shape != [-1] and updates_shape and updates_shape != indices_shape + x_shape[1:]:
  44. raise ValueError(f"For '{prim_name}', "
  45. f"updates_shape = indices_shape + x_shape[1:], but got x_shape: {x_shape}, "
  46. f"indices_shape: {indices_shape}, updates_shape: {updates_shape}.")
  47. @prim_attr_register
  48. def __init__(self, use_locking=False):
  49. """Initialize _ScatterOp"""
  50. validator.check_value_type('use_locking', use_locking, [bool], self.name)
  51. self.init_prim_io_names(inputs=['x', 'indices', 'updates'], outputs=['y'])
  52. self.add_prim_attr('side_effect_mem', True)
  53. def infer_shape(self, x_shape, indices_shape, updates_shape):
  54. self._check_scatter_shape(x_shape, indices_shape, updates_shape, self.name)
  55. return x_shape
  56. def infer_dtype(self, x_dtype, indices_dtype, updates_dtype):
  57. validator.check_tensor_dtype_valid('indices', indices_dtype, [mstype.int32], self.name)
  58. args = {"x": x_dtype, "updates": updates_dtype}
  59. validator.check_tensors_dtypes_same_and_valid(args, mstype.number_type, self.name)
  60. return x_dtype
  61. class _ScatterOp_Dynamic(PrimitiveWithCheck):
  62. """
  63. Defines Scatter operators with dynamic shape
  64. """
  65. __mindspore_signature__ = (
  66. sig.make_sig('x', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
  67. sig.make_sig('indices', dtype=sig.sig_dtype.T1),
  68. sig.make_sig('updates', dtype=sig.sig_dtype.T)
  69. )
  70. def _check_scatter_shape(self, x_shape, indices_shape, updates_shape, prim_name):
  71. # x_shape cannot be dynamic
  72. if np.any(np.array(x_shape) == -1):
  73. raise ValueError(f"x does not support dynamic shape")
  74. # support indices and updates dynamic
  75. if np.any(np.array(indices_shape) == -1) or np.any(np.array(updates_shape) == -1):
  76. pass
  77. elif indices_shape != [-1] and updates_shape and updates_shape != indices_shape + x_shape[1:]:
  78. raise ValueError(f"For '{prim_name}', "
  79. f"updates_shape = indices_shape + x_shape[1:], but got x_shape: {x_shape}, "
  80. f"indices_shape: {indices_shape}, updates_shape: {updates_shape}.")
  81. @prim_attr_register
  82. def __init__(self, use_locking=False):
  83. """Initialize _ScatterOp_Dynamic"""
  84. validator.check_value_type('use_locking', use_locking, [bool], self.name)
  85. self.init_prim_io_names(inputs=['x', 'indices', 'updates'], outputs=['y'])
  86. self.add_prim_attr('side_effect_mem', True)
  87. def check_shape(self, x_shape, indices_shape, updates_shape):
  88. self._check_scatter_shape(x_shape, indices_shape, updates_shape, self.name)
  89. def check_dtype(self, x_dtype, indices_dtype, updates_dtype):
  90. validator.check_tensor_dtype_valid('indices', indices_dtype, [mstype.int32], self.name)
  91. args = {"x": x_dtype, "updates": updates_dtype}
  92. validator.check_tensors_dtypes_same_and_valid(args, mstype.number_type, self.name)
  93. class _ScatterNdOp(_ScatterOp):
  94. """
  95. Defines _ScatterNd operators
  96. """
  97. def _check_scatter_shape(self, x_shape, indices_shape, updates_shape, prim_name):
  98. validator.check('the dimension of x', len(x_shape),
  99. 'the dimension of indices', indices_shape[-1], Rel.GE)
  100. if indices_shape[:-1] + x_shape[indices_shape[-1]:] != updates_shape:
  101. raise ValueError(f"For '{prim_name}', updates_shape = "
  102. f"indices_shape[:-1] + x_shape[indices_shape[-1]:], but got x_shape: {x_shape}, "
  103. f"indices_shape: {indices_shape}, updates_shape: {updates_shape}.")
  104. def _check_infer_attr_reduce(axis, keep_dims, prim_name):
  105. validator.check_value_type('keep_dims', keep_dims, [bool], prim_name)
  106. validator.check_value_type('axis', axis, [int, tuple], prim_name)
  107. if isinstance(axis, tuple):
  108. for index, value in enumerate(axis):
  109. validator.check_value_type('axis[%d]' % index, value, [int], prim_name)
  110. class ExpandDims(PrimitiveWithInfer):
  111. """
  112. Adds an additional dimension at the given axis.
  113. Note:
  114. If the specified axis is a negative number, the index is counted
  115. backward from the end and starts at 1.
  116. Raises:
  117. ValueError: If axis is not an integer or not in the valid range.
  118. Inputs:
  119. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  120. - **axis** (int) - Specifies the dimension index at which to expand
  121. the shape of `input_x`. The value of axis must be in the range
  122. `[-input_x.ndim-1, input_x.ndim]`. Only constant value is allowed.
  123. Outputs:
  124. Tensor, the shape of tensor is :math:`(1, x_1, x_2, ..., x_R)` if the
  125. value of `axis` is 0. It has the same type as `input_x`.
  126. Supported Platforms:
  127. ``Ascend`` ``GPU`` ``CPU``
  128. Examples:
  129. >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
  130. >>> expand_dims = ops.ExpandDims()
  131. >>> output = expand_dims(input_tensor, 0)
  132. >>> print(output)
  133. [[[2. 2.]
  134. [2. 2.]]]
  135. """
  136. @prim_attr_register
  137. def __init__(self):
  138. """Initialize ExpandDims"""
  139. self.init_prim_io_names(inputs=['x', 'axis'], outputs=['output'])
  140. def __infer__(self, x, axis):
  141. validator.check_subclass("x", x['dtype'], mstype.tensor, self.name)
  142. x_shape = list(x['shape'])
  143. axis_v = axis['value']
  144. rank = len(x_shape)
  145. validator.check_int_range(axis_v, -rank - 1, rank, Rel.INC_BOTH, 'axis', self.name)
  146. value = None
  147. if x['value'] is not None:
  148. value = x['value'].asnumpy()
  149. value = np.expand_dims(value, axis_v)
  150. value = Tensor(value)
  151. if axis_v < 0:
  152. axis_v = rank + 1 + axis_v
  153. x_shape.insert(axis_v, 1)
  154. out = {'shape': x_shape,
  155. 'dtype': x['dtype'],
  156. 'value': value}
  157. if 'min_shape' in x and 'max_shape' in x:
  158. out['min_shape'] = x['min_shape']
  159. out['min_shape'].insert(axis_v, 1)
  160. out['max_shape'] = x['max_shape']
  161. out['max_shape'].insert(axis_v, 1)
  162. return out
  163. class DType(PrimitiveWithInfer):
  164. """
  165. Returns the data type of the input tensor as mindspore.dtype.
  166. Inputs:
  167. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  168. Outputs:
  169. mindspore.dtype, the data type of a tensor.
  170. Supported Platforms:
  171. ``Ascend`` ``GPU`` ``CPU``
  172. Examples:
  173. >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
  174. >>> output = ops.DType()(input_tensor)
  175. >>> print(output)
  176. Float32
  177. """
  178. @prim_attr_register
  179. def __init__(self):
  180. """Initialize DType"""
  181. def __infer__(self, x):
  182. validator.check_subclass("input_x", x['dtype'], mstype.tensor, self.name)
  183. out = {'shape': (),
  184. 'dtype': mstype.type_type,
  185. 'value': x['dtype'].element_type()}
  186. return out
  187. class SameTypeShape(PrimitiveWithInfer):
  188. """
  189. Checks whether the data type and shape of two tensors are the same.
  190. Raises:
  191. TypeError: If the data types of two tensors are not the same.
  192. ValueError: If the shapes of two tensors are not the same.
  193. Inputs:
  194. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  195. - **input_y** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_S)`.
  196. Outputs:
  197. Tensor, the shape of tensor is :math:`(x_1, x_2, ..., x_R)`,
  198. if data type and shape of `input_x` and `input_y` are the same.
  199. Supported Platforms:
  200. ``Ascend`` ``GPU`` ``CPU``
  201. Examples:
  202. >>> input_x = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
  203. >>> input_y = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
  204. >>> output = ops.SameTypeShape()(input_x, input_y)
  205. >>> print(output)
  206. [[2. 2.]
  207. [2. 2.]]
  208. """
  209. @prim_attr_register
  210. def __init__(self):
  211. """Initialize Same"""
  212. def __call__(self, x, y):
  213. """run in PyNative mode"""
  214. validator.check_value_type('x', x, Tensor, self.name)
  215. validator.check_value_type('y', y, Tensor, self.name)
  216. validator.check('x dtype', x.dtype, 'y dtype', y.dtype, Rel.EQ, self.name, TypeError)
  217. validator.check('x shape', x.shape, 'y shape', y.shape, Rel.EQ, self.name)
  218. return x
  219. def __infer__(self, x, y):
  220. validator.check_subclass('x', x['dtype'], mstype.tensor, self.name)
  221. validator.check_subclass('y', y['dtype'], mstype.tensor, self.name)
  222. validator.check('x dtype', x['dtype'], 'y dtype', y['dtype'], Rel.EQ, self.name, TypeError)
  223. validator.check('x shape', x['shape'], 'y shape', y['shape'], Rel.EQ, self.name)
  224. return x
class Cast(PrimitiveWithInfer):
    """
    Returns a tensor with the new specified data type.

    Inputs:
        - **input_x** (Union[Tensor, Number]) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
          The tensor to be cast.
        - **type** (dtype.Number) - The valid data type of the output tensor. Only constant value is allowed.

    Outputs:
        Tensor, the shape of tensor is the same as `input_x`, :math:`(x_1, x_2, ..., x_R)`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
        >>> input_x = Tensor(input_np)
        >>> type_dst = mindspore.int32
        >>> cast = ops.Cast()
        >>> output = cast(input_x, type_dst)
        >>> print(output.dtype)
        Int32
        >>> print(output.shape)
        (2, 3, 4, 5)
    """

    @prim_attr_register
    def __init__(self):
        # if primitive need setattr in __infer__ need add this flag
        """Initialize Cast"""
        self.init_prim_io_names(inputs=['x', 'dst_type'], outputs=['output'])

    def check_elim(self, x, dtype):
        # Constant-elimination hook: return (True, replacement) when the cast
        # can be removed or evaluated eagerly, (False, None) otherwise.
        if isinstance(x, (Tensor, numbers.Number, Parameter)):
            if isinstance(x, Tensor) and x.dtype == dtype:
                # Already the target dtype: the cast is the identity.
                return (True, x)
            if isinstance(x, numbers.Number):
                # Python scalar: build the constant tensor directly.
                return (True, Tensor(x, dtype=dtype))
            if isinstance(x, Parameter):
                data = x.data
                if data.dtype == dtype:
                    return (True, x)
        return (False, None)

    def __infer__(self, x, t):
        src_type = x['dtype']
        dst_type = t['value']
        validator.check_subclass("input_x", src_type, [mstype.tensor, mstype.number], self.name)
        validator.check_subclass("type", dst_type, mstype.number, self.name)
        # Unwrap tensor types to element types before recording SrcT/DstT attrs.
        if isinstance(src_type, type(mstype.tensor)):
            src_type = x['dtype'].element_type()
        if isinstance(dst_type, type(mstype.tensor)):
            dst_type = dst_type.element_type()
        self.add_prim_attr('DstT', dst_type)
        self.add_prim_attr('SrcT', src_type)
        self.add_prim_attr('dst_type', dst_type)
        value = None
        if x['value'] is not None:
            # Constant input: fold the cast at compile time via numpy.
            np_dst_type = mstype.dtype_to_nptype(dst_type)
            if isinstance(x['value'], (int, float)):
                value = Tensor(np.array(x['value']).astype(np_dst_type))
            else:
                value = Tensor(x['value'].asnumpy().astype(np_dst_type))
        out = {'shape': x['shape'],
               'dtype': mstype.tensor_type(t['value']),
               'value': value}
        # Propagate dynamic-shape bounds unchanged when present.
        if 'min_shape' in x and 'max_shape' in x:
            out['min_shape'] = x['min_shape']
            out['max_shape'] = x['max_shape']
        return out
  289. class IsSubClass(PrimitiveWithInfer):
  290. """
  291. Checks whether this type is a sub-class of another type.
  292. Inputs:
  293. - **sub_type** (mindspore.dtype) - The type to be checked. Only constant value is allowed.
  294. - **type_** (mindspore.dtype) - The target type. Only constant value is allowed.
  295. Outputs:
  296. bool, the check result.
  297. Supported Platforms:
  298. ``Ascend`` ``GPU`` ``CPU``
  299. Examples:
  300. >>> output = ops.IsSubClass()(mindspore.int32, mindspore.intc)
  301. >>> print(output)
  302. True
  303. """
  304. @prim_attr_register
  305. def __init__(self):
  306. pass
  307. def __infer__(self, sub_type, type_):
  308. sub_type_t = sub_type['value']
  309. type_v = type_['value']
  310. validator.check_value_type("sub_type", sub_type_t, [mstype.Type], self.name)
  311. validator.check_value_type("type_", type_v, [mstype.Type], self.name)
  312. value = mstype.issubclass_(sub_type_t, type_v)
  313. out = {'shape': (),
  314. 'dtype': mstype.type_type,
  315. 'value': value}
  316. return out
  317. class IsInstance(PrimitiveWithInfer):
  318. """
  319. Checks whether an object is an instance of a target type.
  320. Inputs:
  321. - **inst** (Any Object) - The instance to be checked. Only constant value is allowed.
  322. - **type_** (mindspore.dtype) - The target type. Only constant value is allowed.
  323. Outputs:
  324. bool, the check result.
  325. Supported Platforms:
  326. ``Ascend`` ``GPU`` ``CPU``
  327. Examples:
  328. >>> a = 1
  329. >>> output = ops.IsInstance()(a, mindspore.int32)
  330. >>> print(output)
  331. False
  332. """
  333. @prim_attr_register
  334. def __init__(self):
  335. pass
  336. def __infer__(self, inst, type_):
  337. sub_type_t = inst['dtype']
  338. type_v = type_['value']
  339. validator.check_value_type("type_", type_v, [mstype.Type], self.name)
  340. if type_v == mstype.list_:
  341. value = isinstance(sub_type_t, list)
  342. elif type_v == mstype.tuple_:
  343. value = isinstance(sub_type_t, tuple)
  344. else:
  345. value = mstype.issubclass_(sub_type_t, type_v)
  346. out = {'shape': (),
  347. 'dtype': mstype.type_type,
  348. 'value': value}
  349. return out
class Reshape(PrimitiveWithInfer):
    """
    Reshapes the input tensor with the same values based on a given shape tuple.

    Raises:
        ValueError: Given a shape tuple, if it has several -1; or if the product
            of its elements is less than or equal to 0 or cannot be divided by the product
            of the input tensor shape; or if it does not match the input's array size.

    Inputs:
        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
        - **input_shape** (tuple[int]) - The input tuple is constructed by multiple
          integers, i.e., :math:`(y_1, y_2, ..., y_S)`. Only constant value is allowed.

    Outputs:
        Tensor, the shape of tensor is :math:`(y_1, y_2, ..., y_S)`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
        >>> reshape = ops.Reshape()
        >>> output = reshape(input_tensor, (3, 2))
        >>> print(output)
        [[-0.1 0.3]
         [ 3.6 0.4]
         [ 0.5 -3.2]]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Reshape"""
        self.init_prim_io_names(inputs=['tensor', 'shape'], outputs=['output'])

    def __infer__(self, x, shape):
        shape_v = shape['value']
        x_shp = x['shape']
        validator.check_subclass("x", x['dtype'], mstype.tensor, self.name)
        validator.check_value_type("shape", shape_v, [tuple], self.name)
        shape_v = list(shape_v)
        # neg_index: position of the single allowed -1 (dimension to infer);
        # dim_prod: product of all explicitly specified dimensions.
        neg_index = -1
        dim_prod = 1
        for i, shp_i in enumerate(shape_v):
            validator.check_value_type("shape[%d]" % i, shp_i, [int], self.name)
            if shp_i == -1:
                if neg_index != -1:
                    raise ValueError(f'The shape can only has one -1 at most, but {shape_v}.')
                neg_index = i
            else:
                dim_prod *= shp_i
        arr_prod = np.prod(x_shp)
        if arr_prod <= 0:
            # Dynamic-shape path: some input dimension is unknown (encoded as a
            # non-positive entry), so the -1 dimension is resolved against the
            # min/max shape bounds instead of the static shape.
            if 'max_shape' in x:
                x_max_shape = x['max_shape']
            else:
                x_max_shape = x['shape']
            if 'min_shape' in x:
                x_min_shape = x['min_shape']
            else:
                x_min_shape = x['shape']
            max_arr_prod = np.prod(x_max_shape)
            min_arr_prod = np.prod(x_min_shape)
            max_shape = list(shape_v)
            min_shape = list(shape_v)
            if neg_index != -1:
                max_shape[neg_index] = int(max_arr_prod / dim_prod)
                min_shape[neg_index] = int(min_arr_prod / dim_prod)
            else:
                raise ValueError(f'For dynamic shape, Reshape must have neg index')
            out = {'shape': shape['value'],
                   'dtype': x['dtype'],
                   'value': None,
                   'max_shape': tuple(max_shape),
                   'min_shape': tuple(min_shape)}
        else:
            # Static path: the requested shape must exactly tile the input.
            if dim_prod <= 0 or arr_prod % dim_prod != 0:
                raise ValueError(f'For \'{self.name}\' input_x\'s shape is {x_shp}, input_shape\'s value is {shape_v}.'
                                 f'The product of input_x\'s shape should > 0, '
                                 f'and can be divided by product of input_shape, but '
                                 f'product of input_x\'s shape is {arr_prod}, product of input_shape is {dim_prod}.')
            if neg_index != -1:
                # Infer the -1 dimension from the remaining element count.
                shape_v[neg_index] = int(arr_prod / dim_prod)
                dim_prod *= shape_v[neg_index]
            if dim_prod != arr_prod:
                raise ValueError(f'For \'{self.name}\' input_x\'s shape is {x_shp}, input_shape\'s value is {shape_v}.'
                                 f'The product of input_x\'s shape should be equal to product of input_shape, but '
                                 f'product of input_x\'s shape is {arr_prod}, product of input_shape is {dim_prod}.')
            value = None
            if x['value'] is not None:
                # Constant input: fold the reshape at compile time.
                value = Tensor(x['value'].asnumpy().reshape(shape_v))
            out = {'shape': tuple(shape_v),
                   'dtype': x['dtype'],
                   'value': value}
        return out
  438. class Shape(PrimitiveWithInfer):
  439. """
  440. Returns the shape of the input tensor.
  441. Inputs:
  442. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  443. Outputs:
  444. tuple[int], the output tuple is constructed by multiple integers,
  445. :math:`(x_1, x_2, ..., x_R)`.
  446. Supported Platforms:
  447. ``Ascend`` ``GPU`` ``CPU``
  448. Examples:
  449. >>> input_tensor = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
  450. >>> shape = ops.Shape()
  451. >>> output = shape(input_tensor)
  452. >>> print(output)
  453. (3, 2, 1)
  454. """
  455. @prim_attr_register
  456. def __init__(self):
  457. """Initialize Shape"""
  458. def __infer__(self, x):
  459. validator.check_subclass("input_x", x['dtype'], mstype.tensor, self.name)
  460. out = {'shape': (),
  461. 'dtype': mstype.tuple_,
  462. 'value': tuple(x['shape'])}
  463. return out
  464. class DynamicShape(Primitive):
  465. """
  466. Returns the shape of the input tensor.
  467. Inputs:
  468. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  469. Outputs:
  470. Tensor[int], 1-dim Tensor of type int32
  471. Supported Platforms:
  472. ``Ascend`` ``GPU`` ``CPU``
  473. Examples:
  474. >>> input_tensor = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
  475. >>> shape = ops.DynamicShape()
  476. >>> output = shape(input_tensor)
  477. >>> print(output)
  478. [3 2 1]
  479. """
  480. @prim_attr_register
  481. def __init__(self):
  482. """init Shape"""
  483. self.init_prim_io_names(inputs=['tensor'], outputs=['output'])
  484. self.add_prim_attr('is_dynamic_shape', True)
  485. self.add_prim_attr("dynamic_shape_depends", [0])
class Squeeze(PrimitiveWithInfer):
    """
    Returns a tensor with the same type but dimensions of 1 are removed based on `axis`.

    Note:
        The dimension index starts at 0 and must be in the range `[-input.ndim, input.ndim)`.

    Args:
        axis (Union[int, tuple(int)]): Specifies the dimension indexes of shape to be removed, which will remove
            all the dimensions that are equal to 1. If specified, it must be int32 or int64.
            Default: (), an empty tuple.

    Inputs:
        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.

    Outputs:
        Tensor, the shape of tensor is :math:`(x_1, x_2, ..., x_S)`.

    Raises:
        ValueError: If the corresponding dimension of the specified axis does not equal to 1.
        TypeError: If `axis` is neither an int nor tuple.
        TypeError: If `axis` is a tuple whose elements are not all int.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> input_tensor = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
        >>> squeeze = ops.Squeeze(2)
        >>> output = squeeze(input_tensor)
        >>> print(output)
        [[1. 1.]
         [1. 1.]
         [1. 1.]]
    """

    @prim_attr_register
    def __init__(self, axis=()):
        """Initialize Squeeze"""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])
        validator.check_value_type('axis', axis, [int, tuple], self.name)
        if isinstance(axis, tuple):
            # NOTE(review): for a tuple axis, self.axis appears to be bound
            # from the init argument by prim_attr_register — confirm.
            for idx, item in enumerate(axis):
                validator.check_value_type("axis[%d]" % idx, item, [int], self.name)
        else:
            # Normalize a scalar axis to a one-element tuple.
            self.axis = (axis,)
            self.add_prim_attr("axis", (axis,))

    def infer_shape(self, x_shape):
        axis = self.axis
        x_shape = list(x_shape)
        ndim = len(x_shape)
        if not axis:
            # Empty axis: drop every dimension of size 1.
            ret = [d for d in x_shape if d != 1]
        else:
            for a in axis:
                validator.check_int_range(a, -ndim, ndim - 1, Rel.INC_BOTH, 'axis or its elements', self.name)
                if x_shape[a] != 1:
                    raise ValueError('Cannot select an axis to squeeze out which has size not equal to one.')
            # Keep dims whose index matches neither the positive nor the
            # negative spelling of any requested axis.
            ret = [x_shape[i] for i in range(ndim) if not (i in axis or (i - ndim) in axis)]
        return ret

    def infer_dtype(self, x_dtype):
        validator.check_subclass("x", x_dtype, mstype.tensor, self.name)
        return x_dtype
class Transpose(PrimitiveWithInfer):
    """
    Permutes the dimensions of the input tensor according to input permutation.

    Inputs:
        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
        - **input_perm** (tuple[int]) - The permutation to be converted. The input tuple is constructed by multiple
          indexes. The length of `input_perm` and the shape of `input_x` must be the same. Only constant value is
          allowed. Must be in the range [0, rank(input_x)).

    Outputs:
        Tensor, the type of output tensor is the same as `input_x` and the shape of output tensor is decided by the
        shape of `input_x` and the value of `input_perm`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> input_tensor = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]), mindspore.float32)
        >>> perm = (0, 2, 1)
        >>> transpose = ops.Transpose()
        >>> output = transpose(input_tensor, perm)
        >>> print(output)
        [[[ 1. 4.]
          [ 2. 5.]
          [ 3. 6.]]
         [[ 7. 10.]
          [ 8. 11.]
          [ 9. 12.]]]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Transpose"""
        self.init_prim_io_names(inputs=['x', 'perm'], outputs=['output'])

    def __infer__(self, x, perm):
        x_shape = x['shape']
        p_value = perm['value']
        x_type = x['dtype']
        validator.check_value_type("p_value", p_value, [tuple], self.name)
        validator.check_subclass("x_type", x_type, mstype.tensor, self.name)
        if len(x_shape) != len(p_value):
            raise ValueError('The dimension of x and perm must be equal.')
        # Duplicate detection: remove the current element once; if it is still
        # present, perm contains it twice and is not a valid permutation.
        tmp = list(p_value)
        for i, dim in enumerate(p_value):
            validator.check_int(dim, 0, Rel.GE, f'perm[{i}]', self.name)
            validator.check_int(dim, len(p_value), Rel.LT, f'perm[{i}]', self.name)
            tmp.remove(dim)
            if dim in tmp:
                raise ValueError('The value of perm is wrong.')
        out_shapes = []
        for i in p_value:
            out_shapes.append(x_shape[i])
        out = {'shape': tuple(out_shapes),
               'dtype': x['dtype'],
               'value': None}
        # Permute dynamic-shape bounds alongside the static shape.
        if 'min_shape' in x and 'max_shape' in x:
            min_vec = []
            max_vec = []
            for i in p_value:
                min_vec.append(x['min_shape'][i])
                max_vec.append(x['max_shape'][i])
            out['min_shape'] = tuple(min_vec)
            out['max_shape'] = tuple(max_vec)
        return out
class Unique(Primitive):
    """
    Returns the unique elements of input tensor and also return a tensor containing the index of each value of input
    tensor corresponding to the output unique tensor.

    Inputs:
        - **x** (Tensor) - The input tensor.

    Outputs:
        Tuple, containing Tensor objects `(y, idx)`. `y` is a tensor with the
        same type as `x`, and contains the unique elements in `x`, sorted in
        ascending order. `idx` is a tensor containing indices of elements in
        the input corresponding to the output tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
        >>> output = ops.Unique()(x)
        >>> print(output)
        (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 1]))
        >>>
        >>> # note that for GPU, this operator must be wrapped inside a model, and executed in graph mode.
        >>> class UniqueNet(nn.Cell):
        ...     def __init__(self):
        ...         super(UniqueNet, self).__init__()
        ...         self.unique_op = ops.Unique()
        ...
        ...     def construct(self, x):
        ...         output, indices = self.unique_op(x)
        ...         return output, indices
        ...
        >>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
        >>> net = UniqueNet()
        >>> output = net(x)
        >>> print(output)
        (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 1]))
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Unique; shape/type inference is done by the backend."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])
  640. class Gather(PrimitiveWithCheck):
  641. """
  642. Returns a slice of the input tensor based on the specified indices and axis.
  643. Inputs:
  644. - **input_params** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  645. The original Tensor.
  646. - **input_indices** (Tensor) - The shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
  647. Specifies the indices of elements of the original Tensor. Must be in the range
  648. `[0, input_param.shape[axis])`.
  649. - **axis** (int) - Specifies the dimension index to gather indices.
  650. Outputs:
  651. Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`.
  652. Supported Platforms:
  653. ``Ascend`` ``GPU``
  654. Examples:
  655. >>> input_params = Tensor(np.array([[1, 2, 7, 42], [3, 4, 54, 22], [2, 2, 55, 3]]), mindspore.float32)
  656. >>> input_indices = Tensor(np.array([1, 2]), mindspore.int32)
  657. >>> axis = 1
  658. >>> output = ops.Gather()(input_params, input_indices, axis)
  659. >>> print(output)
  660. [[ 2. 7.]
  661. [ 4. 54.]
  662. [ 2. 55.]]
  663. """
  664. @prim_attr_register
  665. def __init__(self):
  666. """Initialize index_select"""
  667. self.init_prim_io_names(inputs=['params', 'indices', 'axis'], outputs=['output'])
  668. self.add_prim_attr("dynamic_shape_depends", [2])
  669. def __check__(self, params, indices, axis):
  670. validator.check_subclass("params", params['dtype'], mstype.tensor, self.name)
  671. validator.check_tensor_dtype_valid("indices", indices['dtype'], mstype.int_type, self.name)
  672. validator.check_subclass("axis", axis['dtype'], [mstype.number], self.name)
  673. axis_v = axis['value']
  674. validator.check_value_type('axis', axis_v, [int], self.name)
  675. rank = len(params['shape'])
  676. validator.check_int_range(axis_v, -rank, rank, Rel.INC_LEFT, "axis", self.name)
class GatherV2(PrimitiveWithCheck):
    """
    Same as operator Gather. GatherV2 will be deprecated in the future.
    Please use Gather instead.
    """
    # deprecate_new_name = "Gather"

    @deprecated("1.1", "Gather", True)
    @prim_attr_register
    def __init__(self):
        """Initialize index_select"""
        self.init_prim_io_names(inputs=['params', 'indices', 'axis'], outputs=['output'])
        # Shape inference needs the concrete value of input 2 (axis).
        self.add_prim_attr("dynamic_shape_depends", [2])

    def __check__(self, params, indices, axis):
        # Validation mirrors Gather.__check__.
        validator.check_subclass("params", params['dtype'], mstype.tensor, self.name)
        validator.check_tensor_dtype_valid("indices", indices['dtype'], mstype.int_type, self.name)
        validator.check_subclass("axis", axis['dtype'], [mstype.number], self.name)
        axis_v = axis['value']
        validator.check_value_type('axis', axis_v, [int], self.name)
        rank = len(params['shape'])
        validator.check_int_range(axis_v, -rank, rank, Rel.INC_LEFT, "axis", self.name)
class SparseGatherV2(Gather):
    """
    Returns a slice of input tensor based on the specified indices and axis.

    Behaves like Gather; all checks and IO names are inherited from it.

    Inputs:
        - **input_params** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
          The original Tensor.
        - **input_indices** (Tensor) - The shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
          Specifies the indices of elements of the original Tensor, must be in the range
          `[0, input_param.shape[axis])`.
        - **axis** (int) - Specifies the dimension index to gather indices.

    Outputs:
        Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> input_params = Tensor(np.array([[1, 2, 7, 42], [3, 4, 54, 22], [2, 2, 55, 3]]), mindspore.float32)
        >>> input_indices = Tensor(np.array([1, 2]), mindspore.int32)
        >>> axis = 1
        >>> out = ops.SparseGatherV2()(input_params, input_indices, axis)
        >>> print(out)
        [[2. 7.]
         [4. 54.]
         [2. 55.]]
    """
  721. class Padding(PrimitiveWithInfer):
  722. """
  723. Extends the last dimension of the input tensor from 1 to pad_dim_size, by filling with 0.
  724. Args:
  725. pad_dim_size (int): The value of the last dimension of x to be extended, which must be positive.
  726. Inputs:
  727. - **x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The rank of x must be at least 2.
  728. The last dimension of x must be 1.
  729. Outputs:
  730. Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`.
  731. Supported Platforms:
  732. ``Ascend``
  733. Examples:
  734. >>> x = Tensor(np.array([[8], [10]]), mindspore.float32)
  735. >>> pad_dim_size = 4
  736. >>> output = ops.Padding(pad_dim_size)(x)
  737. >>> print(output)
  738. [[ 8. 0. 0. 0.]
  739. [10. 0. 0. 0.]]
  740. """
  741. @prim_attr_register
  742. def __init__(self, pad_dim_size=8):
  743. """Initialize padding"""
  744. validator.check_value_type("pad_dim_size", pad_dim_size, [int], self.name)
  745. validator.check_positive_int(pad_dim_size, "pad_dim_size", self.name)
  746. self.pad_dim_size = pad_dim_size
  747. def __infer__(self, x):
  748. validator.check_subclass("x", x['dtype'], mstype.tensor, self.name)
  749. x_shape = list(x['shape'])
  750. validator.check_int(len(x_shape), 1, Rel.GT, "rank of x", self.name)
  751. validator.check_int(x_shape[-1], 1, Rel.EQ, "last dim of x", self.name)
  752. out_shape = x_shape
  753. out_shape[-1] = self.pad_dim_size
  754. out = {'shape': out_shape,
  755. 'dtype': x['dtype'],
  756. 'value': None}
  757. return out
  758. class UniqueWithPad(PrimitiveWithInfer):
  759. """
  760. Returns unique elements and relative indexes in 1-D tensor, filled with padding num.
  761. Inputs:
  762. - **x** (Tensor) - The tensor need to be unique. Must be 1-D vector with types: int32, int64.
  763. - **pad_num** (int) - Pad num.
  764. Outputs:
  765. tuple(Tensor), tuple of 2 tensors, y and idx.
  766. - y (Tensor) - The unique elements filled with pad_num, the shape and type same as x.
  767. - idx (Tensor) - The index of each value of x in the unique output y, the shape and type same as x.
  768. Supported Platforms:
  769. ``Ascend`` ``CPU``
  770. Examples:
  771. >>> x = Tensor(np.array([1, 1, 5, 5, 4, 4, 3, 3, 2, 2,]), mindspore.int32)
  772. >>> pad_num = 8
  773. >>> output = ops.UniqueWithPad()(x, pad_num)
  774. >>> print(output)
  775. (Tensor(shape=[10], dtype=Int32, value= [1, 5, 4, 3, 2, 8, 8, 8, 8, 8]),
  776. Tensor(shape=[10], dtype=Int32, value= [0, 0, 1, 1, 2, 2, 3, 3, 4, 4]))
  777. """
  778. @prim_attr_register
  779. def __init__(self):
  780. """init UniqueWithPad"""
  781. def __infer__(self, x, pad_num):
  782. validator.check_tensor_dtype_valid("x", x['dtype'], [mstype.int32, mstype.int64], self.name)
  783. validator.check_subclass("pad_num", pad_num['dtype'], [mstype.int32, mstype.int64], self.name)
  784. x_shape = list(x['shape'])
  785. validator.check("rank of x", len(x_shape), "expected", 1, Rel.EQ, self.name)
  786. out_shape = x_shape
  787. out = {'shape': (out_shape, out_shape),
  788. 'dtype': (x['dtype'], x['dtype']),
  789. 'value': None}
  790. return out
class Split(PrimitiveWithCheck):
    """
    Splits the input tensor into output_num of tensors along the given axis and output numbers.

    Args:
        axis (int): Index of the split position. Default: 0.
        output_num (int): The number of output tensors. Must be positive int. Default: 1.

    Raises:
        ValueError: If `axis` is out of the range [-len(`input_x.shape`), len(`input_x.shape`)),
            or if the `output_num` is less than or equal to 0.

    Inputs:
        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.

    Outputs:
        tuple[Tensor], the shape of each output tensor is the same, which is
        :math:`(y_1, y_2, ..., y_S)`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> split = ops.Split(1, 2)
        >>> x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]), mindspore.int32)
        >>> output = split(x)
        >>> print(output)
        (Tensor(shape=[2, 2], dtype=Int32, value=
        [[1, 1],
         [2, 2]]), Tensor(shape=[2, 2], dtype=Int32, value=
        [[1, 1],
         [2, 2]]))
    """

    @prim_attr_register
    def __init__(self, axis=0, output_num=1):
        """Initialize Split"""
        validator.check_value_type("axis", axis, [int], self.name)
        validator.check_value_type("output_num", output_num, [int], self.name)
        validator.check_positive_int(output_num, "output_num", self.name)
        self.axis = axis
        self.output_num = output_num

    def __check__(self, x):
        validator.check_subclass("x", x['dtype'], mstype.tensor, self.name)
        x_shape = list(x['shape'])
        dim = len(x_shape)
        validator.check_int_range(self.axis, -dim, dim, Rel.INC_LEFT, 'axis value', self.name)
        if -1 not in x_shape:
            # only validate when shape fully known
            output_valid_check = x_shape[self.axis] % self.output_num
            if output_valid_check != 0:
                raise ValueError(f"x_shape[{self.axis}] {x_shape[self.axis]} must be divide exactly by"
                                 f" output_num {self.output_num}")
        # Equal split: record per-output sizes for the backend.
        size_splits = [x_shape[self.axis] // self.output_num] * self.output_num
        self.add_prim_attr('size_splits', size_splits)
  839. class Rank(PrimitiveWithInfer):
  840. """
  841. Returns the rank of a tensor.
  842. Returns a 0-D int32 Tensor representing the rank of input; the rank of a tensor
  843. is the number of indices required to uniquely select each element of the tensor.
  844. Inputs:
  845. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  846. Outputs:
  847. Tensor. 0-D int32 Tensor representing the rank of input, i.e., :math:`R`.
  848. Supported Platforms:
  849. ``Ascend`` ``GPU`` ``CPU``
  850. Examples:
  851. >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
  852. >>> rank = ops.Rank()
  853. >>> output = rank(input_tensor)
  854. >>> print(output)
  855. 2
  856. """
  857. @prim_attr_register
  858. def __init__(self):
  859. """Initialize Rank"""
  860. def __infer__(self, x):
  861. validator.check_subclass("x", x['dtype'], mstype.tensor, self.name)
  862. out = {'shape': None,
  863. 'dtype': None,
  864. 'value': len(x['shape'])}
  865. return out
class TruncatedNormal(PrimitiveWithInfer):
    """
    Returns a tensor of the specified shape filled with truncated normal values.

    The generated values follow a normal distribution.

    Args:
        seed (int): A integer number used to create random seed. Default: 0.
        dtype (:class:`mindspore.dtype`): Data type. Default: mindspore.float32.

    Inputs:
        - **shape** (tuple[int]) - The shape of the output tensor, is a tuple of positive integer.

    Outputs:
        Tensor, the data type of output tensor is the same as attribute `dtype`.

    Examples:
        >>> shape = (1, 2, 3)
        >>> truncated_normal = ops.TruncatedNormal()
        >>> output = truncated_normal(shape)
    """

    @prim_attr_register
    def __init__(self, seed=0, dtype=mstype.float32):
        """Initialize TruncatedNormal"""
        validator.check_value_type('seed', seed, [int], self.name)
        validator.check_types_same_and_valid({'dtype': dtype}, mstype.number_type, self.name)

    def __infer__(self, shape):
        shape_value = shape['value']
        validator.check_value_type("shape", shape_value, [tuple], self.name)
        # Every dimension must be a positive constant.
        for i, value in enumerate(shape_value):
            validator.check_positive_int(value, f'{i}th value of shape', self.name)
        # NOTE(review): self.dtype is presumably bound from the init argument
        # by prim_attr_register — confirm.
        out = {'shape': shape_value,
               'dtype': mstype.tensor_type(self.dtype),
               'value': None}
        return out
  896. class Size(PrimitiveWithInfer):
  897. r"""
  898. Returns the size of a tensor.
  899. Returns an int scalar representing the elements size of input, the total number of elements in the tensor.
  900. Inputs:
  901. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  902. Outputs:
  903. int, a scalar representing the elements size of `input_x`, tensor is the number of elements
  904. in a tensor, :math:`size=x_1*x_2*...x_R`.
  905. Supported Platforms:
  906. ``Ascend`` ``GPU`` ``CPU``
  907. Examples:
  908. >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
  909. >>> size = ops.Size()
  910. >>> output = size(input_tensor)
  911. >>> print(output)
  912. 4
  913. """
  914. @prim_attr_register
  915. def __init__(self):
  916. """Initialize Size"""
  917. def __infer__(self, x):
  918. size = 1
  919. validator.check_subclass("x", x['dtype'], mstype.tensor, self.name)
  920. shp = x['shape']
  921. if not shp:
  922. size = 0
  923. else:
  924. size = functools.reduce(lambda x, y: x * y, x['shape'])
  925. out = {'shape': None,
  926. 'dtype': mstype.int32,
  927. 'value': size}
  928. return out
class Fill(PrimitiveWithInfer):
    """
    Creates a tensor filled with a scalar value.

    Creates a tensor with shape described by the first argument and fills it with values in the second argument.

    Inputs:
        - **type** (mindspore.dtype) - The specified type of output tensor. Only constant value is allowed.
        - **shape** (tuple) - The specified shape of output tensor. Only constant value is allowed.
        - **value** (scalar) - Value to fill the returned tensor. Only constant value is allowed.

    Outputs:
        Tensor, has the same type and shape as input value.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> fill = ops.Fill()
        >>> output = fill(mindspore.float32, (2, 2), 1)
        >>> print(output)
        [[1. 1.]
         [1. 1.]]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Fill"""

    def __infer__(self, dtype, dims, x):
        validator.check_value_type("shape", dims['value'], [tuple], self.name)
        validator.check_value_type("value", x['value'], [numbers.Number, bool], self.name)
        # Every dimension must be a positive constant.
        for i, item in enumerate(dims['value']):
            validator.check_positive_int(item, f'dims[{i}]', self.name)
        valid_dtypes = [mstype.bool_, mstype.int8, mstype.int16, mstype.int32, mstype.int64,
                        mstype.uint8, mstype.uint16, mstype.uint32, mstype.uint64,
                        mstype.float16, mstype.float32, mstype.float64]
        validator.check_types_same_and_valid({"value": dtype['value']}, valid_dtypes, self.name)
        # The result is a compile-time constant: materialize it with numpy.
        x_nptype = mstype.dtype_to_nptype(dtype['value'])
        ret = np.full(dims['value'], x['value'], x_nptype)
        out = {
            'value': Tensor(ret),
            # NOTE(review): reports the fill value's dtype rather than the
            # `type` input — confirm this is intended.
            'shape': dims['value'],
            'dtype': x['dtype'],
        }
        return out
  968. class Ones(PrimitiveWithInfer):
  969. r"""
  970. Creates a tensor filled with value ones.
  971. Creates a tensor with shape described by the first argument and
  972. fills it with value ones in type of the second argument.
  973. Inputs:
  974. - **shape** (Union[tuple[int], int]) - The specified shape of output tensor.
  975. Only constant positive int is allowed.
  976. - **type** (mindspore.dtype) - The specified type of output tensor. Only constant value is allowed.
  977. Outputs:
  978. Tensor, has the same type and shape as input shape value.
  979. Supported Platforms:
  980. ``Ascend`` ``GPU`` ``CPU``
  981. Examples:
  982. >>> from mindspore.ops import operations as ops
  983. >>> ones = ops.Ones()
  984. >>> output = ones((2, 2), mindspore.float32)
  985. >>> print(output)
  986. [[1. 1.]
  987. [1. 1.]]
  988. """
  989. @prim_attr_register
  990. def __init__(self):
  991. """Initialize Ones"""
  992. def __infer__(self, dims, dtype):
  993. if isinstance(dims['value'], int):
  994. shape = (dims['value'],)
  995. else:
  996. shape = dims['value']
  997. validator.check_value_type("shape", shape, [tuple], self.name)
  998. for i, item in enumerate(shape):
  999. validator.check_non_negative_int(item, shape[i], self.name)
  1000. valid_types = [mstype.bool_, mstype.int8, mstype.int16, mstype.int32, mstype.int64,
  1001. mstype.uint8, mstype.uint16, mstype.uint32, mstype.uint64,
  1002. mstype.float16, mstype.float32, mstype.float64]
  1003. validator.check_types_same_and_valid({"value": dtype['value']}, valid_types, self.name)
  1004. x_nptype = mstype.dtype_to_nptype(dtype['value'])
  1005. ret = np.ones(shape, x_nptype)
  1006. out = {
  1007. 'value': Tensor(ret),
  1008. 'shape': shape,
  1009. 'dtype': x_nptype,
  1010. }
  1011. return out
  1012. class Zeros(PrimitiveWithInfer):
  1013. r"""
  1014. Creates a tensor filled with value zeros.
  1015. Creates a tensor with shape described by the first argument and
  1016. fills it with value zeros in type of the second argument.
  1017. Inputs:
  1018. - **shape** (Union[tuple[int], int]) - The specified shape of output tensor.
  1019. Only constant positive int is allowed.
  1020. - **type** (mindspore.dtype) - The specified type of output tensor. Only constant value is allowed.
  1021. Outputs:
  1022. Tensor, has the same type and shape as input shape value.
  1023. Supported Platforms:
  1024. ``Ascend`` ``GPU`` ``CPU``
  1025. Examples:
  1026. >>> from mindspore.ops import operations as ops
  1027. >>> zeros = ops.Zeros()
  1028. >>> output = zeros((2, 2), mindspore.float32)
  1029. >>> print(output)
  1030. [[0. 0.]
  1031. [0. 0.]]
  1032. """
  1033. @prim_attr_register
  1034. def __init__(self):
  1035. """Initialize Zeros"""
  1036. def __infer__(self, dims, dtype):
  1037. if isinstance(dims['value'], int):
  1038. shape = (dims['value'],)
  1039. else:
  1040. shape = dims['value']
  1041. validator.check_value_type("shape", shape, [tuple], self.name)
  1042. for i, item in enumerate(shape):
  1043. validator.check_non_negative_int(item, shape[i], self.name)
  1044. valid_types = [mstype.bool_, mstype.int8, mstype.int16, mstype.int32, mstype.int64,
  1045. mstype.uint8, mstype.uint16, mstype.uint32, mstype.uint64,
  1046. mstype.float16, mstype.float32, mstype.float64]
  1047. validator.check_types_same_and_valid({"value": dtype['value']}, valid_types, self.name)
  1048. x_nptype = mstype.dtype_to_nptype(dtype['value'])
  1049. ret = np.zeros(shape, x_nptype)
  1050. out = {
  1051. 'value': Tensor(ret),
  1052. 'shape': shape,
  1053. 'dtype': x_nptype,
  1054. }
  1055. return out
  1056. class OnesLike(PrimitiveWithInfer):
  1057. """
  1058. Creates a new tensor. The values of all elements are 1.
  1059. Returns a tensor of ones with the same shape and type as the input.
  1060. Inputs:
  1061. - **input_x** (Tensor) - Input tensor.
  1062. Outputs:
  1063. Tensor, has the same shape and type as `input_x` but filled with ones.
  1064. Supported Platforms:
  1065. ``Ascend`` ``GPU`` ``CPU``
  1066. Examples:
  1067. >>> oneslike = ops.OnesLike()
  1068. >>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
  1069. >>> output = oneslike(x)
  1070. >>> print(output)
  1071. [[1 1]
  1072. [1 1]]
  1073. """
  1074. @prim_attr_register
  1075. def __init__(self):
  1076. """Initialize OnesLike"""
  1077. def infer_shape(self, x_shape):
  1078. return x_shape
  1079. def infer_dtype(self, x_dtype):
  1080. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type + (mstype.bool_,), self.name)
  1081. return x_dtype
  1082. class ZerosLike(PrimitiveWithCheck):
  1083. """
  1084. Creates a new tensor. All elements value are 0.
  1085. Returns a tensor of zeros with the same shape and data type as the input tensor.
  1086. Inputs:
  1087. - **input_x** (Tensor) - Input tensor.
  1088. Outputs:
  1089. Tensor, has the same shape and data type as `input_x` but filled with zeros.
  1090. Supported Platforms:
  1091. ``Ascend`` ``GPU`` ``CPU``
  1092. Examples:
  1093. >>> zeroslike = ops.ZerosLike()
  1094. >>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
  1095. >>> output = zeroslike(x)
  1096. >>> print(output)
  1097. [[0. 0.]
  1098. [0. 0.]]
  1099. """
  1100. @prim_attr_register
  1101. def __init__(self):
  1102. """Initialize ZerosLike"""
  1103. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1104. def check_dtype(self, x_dtype):
  1105. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type + (mstype.bool_,), self.name)
  1106. class TupleToArray(PrimitiveWithInfer):
  1107. """
  1108. Converts a tuple to a tensor.
  1109. If the type of the first number in the tuple is integer, the data type of the output tensor is int.
  1110. Otherwise, the data type of the output tensor is float.
  1111. Inputs:
  1112. - **input_x** (tuple) - A tuple of numbers. These numbers have the same type. Only constant value is allowed.
  1113. Outputs:
  1114. Tensor, if the input tuple contains `N` numbers, then the shape of the output tensor is (N,).
  1115. Supported Platforms:
  1116. ``Ascend`` ``GPU`` ``CPU``
  1117. Examples:
  1118. >>> type = ops.TupleToArray()((1,2,3))
  1119. >>> print(type)
  1120. [1 2 3]
  1121. """
  1122. @prim_attr_register
  1123. def __init__(self):
  1124. """Initialize TupleToArray"""
  1125. def infer_value(self, x):
  1126. validator.check_value_type("x", x, [tuple], self.name)
  1127. validator.check("size of x", len(x), '', 0, Rel.GT, self.name)
  1128. dtype = type(x[0])
  1129. for i, item in enumerate(x):
  1130. validator.check_value_type(f"x[{i}]", item, [numbers.Number], self.name)
  1131. if not all(isinstance(item, dtype) for item in x):
  1132. raise TypeError("For \'{self.name}\' all elements of input x must be have same type.")
  1133. if isinstance(x[0], int):
  1134. ret = np.array(x, np.int32)
  1135. else:
  1136. ret = np.array(x, np.float32)
  1137. return Tensor(ret)
  1138. def __call__(self, x):
  1139. args = list()
  1140. if isinstance(x, range):
  1141. args.append(tuple(x))
  1142. else:
  1143. args.append(x)
  1144. return _run_op(self, self.name, args)
  1145. class ScalarToArray(PrimitiveWithInfer):
  1146. """
  1147. Converts a scalar to a `Tensor`.
  1148. Inputs:
  1149. - **input_x** (Union[int, float]) - The input is a scalar. Only constant value is allowed.
  1150. Outputs:
  1151. Tensor. 0-D Tensor and the content is the input.
  1152. Supported Platforms:
  1153. ``Ascend`` ``GPU`` ``CPU``
  1154. Examples:
  1155. >>> op = ops.ScalarToArray()
  1156. >>> data = 1.0
  1157. >>> output = op(data)
  1158. >>> print(output)
  1159. 1.0
  1160. """
  1161. @prim_attr_register
  1162. def __init__(self):
  1163. pass
  1164. def infer_value(self, x):
  1165. validator.check_value_type("x", x, [int, float], self.name)
  1166. if isinstance(x, int):
  1167. ret = np.array(x, np.int32)
  1168. else:
  1169. ret = np.array(x, np.float32)
  1170. return Tensor(ret)
  1171. class ScalarToTensor(PrimitiveWithInfer):
  1172. """
  1173. Converts a scalar to a `Tensor`, and converts the data type to the specified type.
  1174. Inputs:
  1175. - **input_x** (Union[int, float]) - The input is a scalar. Only constant value is allowed.
  1176. - **dtype** (mindspore.dtype) - The target data type. Default: mindspore.float32. Only
  1177. constant value is allowed.
  1178. Outputs:
  1179. Tensor. 0-D Tensor and the content is the input.
  1180. Supported Platforms:
  1181. ``Ascend`` ``GPU`` ``CPU``
  1182. Examples:
  1183. >>> op = ops.ScalarToTensor()
  1184. >>> data = 1
  1185. >>> output = op(data, mindspore.float32)
  1186. >>> print(output)
  1187. 1.0
  1188. """
  1189. @prim_attr_register
  1190. def __init__(self):
  1191. pass
  1192. def infer_value(self, x, dtype=mstype.float32):
  1193. validator.check_value_type("x", x, [int, float], self.name)
  1194. validator.check_subclass("dtype", dtype, mstype.number, self.name)
  1195. data_type = mstype.dtype_to_nptype(dtype)
  1196. return Tensor(np.array(x, data_type))
class InvertPermutation(PrimitiveWithInfer):
    r"""
    Computes the inverse of an index permutation.

    This operation calculates the inverse of the index replacement. It requires a
    1-dimensional tuple x, which represents the array starting at zero,
    and swaps each value with its index position. In other words, for the output
    tuple y and the input tuple x, this operation calculates the following:
    :math:`y[x[i]] = i, \quad i \in [0, 1, \ldots, \text{len}(x)-1]`.

    Note:
        These values must include 0. There must be no duplicate values and the
        values can not be negative.

    Inputs:
        - **input_x** (Union(tuple[int], list[int]) - The input is constructed by multiple
          integers, i.e., :math:`(y_1, y_2, ..., y_S)` representing the indices.
          The values must include 0. There can be no duplicate values or negative values.
          Only constant value is allowed. The maximum value must be equal to length of input_x.

    Outputs:
        tuple[int]. It has the same length as the input.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> invert = ops.InvertPermutation()
        >>> input_data = (3, 4, 0, 2, 1)
        >>> output = invert(input_data)
        >>> print(output)
        (2, 4, 3, 0, 1)
    """

    @prim_attr_register
    def __init__(self):
        """Initialize InvertPermutation"""
        # The result is fully determined at compile time.
        self.set_const_prim(True)

    def __infer__(self, x):
        # The permutation must be a compile-time constant tuple/list of ints.
        x_shp = x['shape']
        x_value = x['value']
        if x_value is None:
            raise ValueError(f'For \'{self.name}\' the input value must be const.')
        validator.check_value_type("shape", x_shp, [tuple, list], self.name)
        if mstype.issubclass_(x['dtype'], mstype.tensor):
            raise ValueError(f'For \'{self.name}\' the input value must be non-Tensor.')
        # Any non-empty entry in the shape means the input is not a flat 1-D sequence.
        for shp in x_shp:
            if shp != []:
                x_rank = len(np.array(x_value, np.int64).shape)
                raise ValueError(f'For \'{self.name}\' the rank of input must be 1, but got {x_rank}.')
        for i, value in enumerate(x_value):
            validator.check_value_type("input[%d]" % i, value, [int], self.name)
        # Sorted copy: a valid permutation of 0..n-1 must sort to exactly [0, 1, ..., n-1].
        z = [x_value[i] for i in range(len(x_value))]
        z.sort()
        # Reject duplicates.
        for i in range(1, len(z)):
            if z[i - 1] == z[i]:
                raise ValueError(f"For {self.name}, {z[i]} is duplicated in the input.")
        # Together with the duplicate check, these pin the values to exactly 0..n-1.
        validator.check(f'value min', min(x_value), '', 0, Rel.EQ, self.name)
        validator.check(f'value max', max(x_value), '', len(x_value) - 1, Rel.EQ, self.name)
        # Build the inverse mapping: y[x[i]] = i.
        y = [None] * len(x_value)
        for i, value in enumerate(x_value):
            validator.check_value_type("input[%d]" % i, value, [int], self.name)
            validator.check(f'value', z[i], f'index', i, Rel.EQ, self.name)
            y[value] = i
            # NOTE(review): appending to the already-sorted `z` looks vestigial — the loop
            # only reads z[i] for i < len(x_value), so appended entries are never read.
            # Confirm before removing.
            z.append(value)
        return {'shape': x_shp,
                'dtype': x['dtype'],
                'value': tuple(y)}
  1259. class Argmax(PrimitiveWithInfer):
  1260. """
  1261. Returns the indices of the maximum value of a tensor across the axis.
  1262. If the shape of input tensor is :math:`(x_1, ..., x_N)`, the shape of the output tensor will be
  1263. :math:`(x_1, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.
  1264. Args:
  1265. axis (int): Axis where the Argmax operation applies to. Default: -1.
  1266. output_type (:class:`mindspore.dtype`): An optional data type of `mindspore.dtype.int32`.
  1267. Default: `mindspore.dtype.int32`.
  1268. Inputs:
  1269. - **input_x** (Tensor) - Input tensor.
  1270. Outputs:
  1271. Tensor, indices of the max value of input tensor across the axis.
  1272. Supported Platforms:
  1273. ``Ascend`` ``GPU`` ``CPU``
  1274. Examples:
  1275. >>> input_x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
  1276. >>> output = ops.Argmax(output_type=mindspore.int32)(input_x)
  1277. >>> print(output)
  1278. [1 0 0]
  1279. """
  1280. @prim_attr_register
  1281. def __init__(self, axis=-1, output_type=mstype.int32):
  1282. """Initialize Argmax"""
  1283. self.init_prim_io_names(inputs=['x'], outputs=['output'])
  1284. validator.check_value_type("axis", axis, [int], self.name)
  1285. validator.check_types_same_and_valid({'output': output_type}, [mstype.int32], self.name)
  1286. self.axis = axis
  1287. self.add_prim_attr('output_type', output_type)
  1288. def infer_shape(self, x_shape):
  1289. axis = self.axis
  1290. if axis is None:
  1291. axis = 0
  1292. x_rank = len(x_shape)
  1293. validator.check_int_range(axis, -x_rank, x_rank, Rel.INC_LEFT, "axis", self.name)
  1294. axis = axis + x_rank if axis < 0 else axis
  1295. ouput_shape = [x_shape[i] for i in range(x_rank) if i != axis]
  1296. return ouput_shape
  1297. def infer_dtype(self, x_dtype):
  1298. validator.check_subclass("input_x", x_dtype, mstype.tensor, self.name)
  1299. return mstype.tensor_type(self.output_type)
  1300. class Argmin(PrimitiveWithInfer):
  1301. """
  1302. Returns the indices of the minimum value of a tensor across the axis.
  1303. If the shape of input tensor is :math:`(x_1, ..., x_N)`, the shape of the output tensor is
  1304. :math:`(x_1, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.
  1305. Args:
  1306. axis (int): Axis where the Argmin operation applies to. Default: -1.
  1307. output_type (:class:`mindspore.dtype`): An optional data type of `mindspore.dtype.int32`.
  1308. Default: `mindspore.dtype.int32`.
  1309. Inputs:
  1310. - **input_x** (Tensor) - Input tensor.
  1311. Outputs:
  1312. Tensor, indices of the min value of input tensor across the axis.
  1313. Supported Platforms:
  1314. ``Ascend``
  1315. Examples:
  1316. >>> input_x = Tensor(np.array([2.0, 3.1, 1.2]), mindspore.float32)
  1317. >>> index = ops.Argmin()(input_x)
  1318. >>> print(index)
  1319. 2
  1320. """
  1321. @prim_attr_register
  1322. def __init__(self, axis=-1, output_type=mstype.int32):
  1323. """Initialize Argmin"""
  1324. self.init_prim_io_names(inputs=['x'], outputs=['output'])
  1325. validator.check_value_type("axis", axis, [int], self.name)
  1326. validator.check_type_name("output_type", output_type, [mstype.int32, mstype.int64], self.name)
  1327. self.axis = axis
  1328. self.add_prim_attr('output_type', output_type)
  1329. def infer_shape(self, x_shape):
  1330. axis = self.axis
  1331. if axis is None:
  1332. axis = 0
  1333. x_rank = len(x_shape)
  1334. validator.check_int_range(axis, -x_rank, x_rank, Rel.INC_LEFT, "axis", self.name)
  1335. axis = axis + x_rank if axis < 0 else axis
  1336. ouput_shape = [x_shape[i] for i in range(x_rank) if i != axis]
  1337. return ouput_shape
  1338. def infer_dtype(self, x_dtype):
  1339. validator.check_subclass("input_x", x_dtype, mstype.tensor, self.name)
  1340. return mstype.tensor_type(self.output_type)
  1341. class ArgMaxWithValue(PrimitiveWithInfer):
  1342. """
  1343. Calculates the maximum value with the corresponding index.
  1344. Calculates the maximum value along with the given axis for the input tensor. It returns the maximum values and
  1345. indices.
  1346. Note:
  1347. In auto_parallel and semi_auto_parallel mode, the first output index can not be used.
  1348. Args:
  1349. axis (int): The dimension to reduce. Default: 0.
  1350. keep_dims (bool): Whether to reduce dimension, if true, the output will keep same dimension with the input,
  1351. the output will reduce dimension if false. Default: False.
  1352. Inputs:
  1353. - **input_x** (Tensor) - The input tensor, can be any dimension. Set the shape of input tensor as
  1354. :math:`(x_1, x_2, ..., x_N)`.
  1355. Outputs:
  1356. tuple (Tensor), tuple of 2 tensors, containing the corresponding index and the maximum value of the input
  1357. tensor.
  1358. - index (Tensor) - The index for the maximum value of the input tensor. If `keep_dims` is true, the shape of
  1359. output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`. Otherwise, the shape is
  1360. :math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.
  1361. - output_x (Tensor) - The maximum value of input tensor, with the same shape as index.
  1362. Supported Platforms:
  1363. ``Ascend`` ``GPU``
  1364. Examples:
  1365. >>> input_x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
  1366. >>> index, output = ops.ArgMaxWithValue()(input_x)
  1367. >>> print(index, output)
  1368. 3 0.7
  1369. """
  1370. @prim_attr_register
  1371. def __init__(self, axis=0, keep_dims=False):
  1372. """Initialize ArgMaxWithValue"""
  1373. self.axis = axis
  1374. self.keep_dims = keep_dims
  1375. validator.check_value_type('keep_dims', keep_dims, [bool], self.name)
  1376. validator.check_value_type('axis', axis, [int], self.name)
  1377. def infer_shape(self, x_shape):
  1378. axis = self.axis
  1379. x_rank = len(x_shape)
  1380. validator.check_int_range(axis, -x_rank, x_rank, Rel.INC_LEFT, "axis", self.name)
  1381. ouput_shape = _infer_shape_reduce(x_shape, self.axis, self.keep_dims, self.name)
  1382. return ouput_shape, ouput_shape
  1383. def infer_dtype(self, x_dtype):
  1384. validator.check_subclass("input_x", x_dtype, mstype.tensor, self.name)
  1385. return mstype.tensor_type(mstype.int32), x_dtype
  1386. class ArgMinWithValue(PrimitiveWithInfer):
  1387. """
  1388. Calculates the minimum value with corresponding index, and returns indices and values.
  1389. Calculates the minimum value along with the given axis for the input tensor. It returns the minimum values and
  1390. indices.
  1391. Note:
  1392. In auto_parallel and semi_auto_parallel mode, the first output index can not be used.
  1393. Args:
  1394. axis (int): The dimension to reduce. Default: 0.
  1395. keep_dims (bool): Whether to reduce dimension, if true the output will keep the same dimension as the input,
  1396. the output will reduce dimension if false. Default: False.
  1397. Inputs:
  1398. - **input_x** (Tensor) - The input tensor, can be any dimension. Set the shape of input tensor as
  1399. :math:`(x_1, x_2, ..., x_N)`.
  1400. Outputs:
  1401. tuple (Tensor), tuple of 2 tensors, containing the corresponding index and the minimum value of the input
  1402. tensor.
  1403. - index (Tensor) - The index for the minimum value of the input tensor. If `keep_dims` is true, the shape of
  1404. output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`. Otherwise, the shape is
  1405. :math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.
  1406. - output_x (Tensor) - The minimum value of input tensor, with the same shape as index.
  1407. Supported Platforms:
  1408. ``Ascend`` ``CPU``
  1409. Examples:
  1410. >>> input_x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
  1411. >>> output = ops.ArgMinWithValue()(input_x)
  1412. >>> print(output)
  1413. (Tensor(shape=[], dtype=Int32, value= 0), Tensor(shape=[], dtype=Float32, value= 0.0))
  1414. """
  1415. @prim_attr_register
  1416. def __init__(self, axis=0, keep_dims=False):
  1417. """Initialize ArgMinWithValue"""
  1418. self.axis = axis
  1419. self.keep_dims = keep_dims
  1420. validator.check_value_type('keep_dims', keep_dims, [bool], self.name)
  1421. validator.check_value_type('axis', axis, [int], self.name)
  1422. def infer_shape(self, x_shape):
  1423. axis = self.axis
  1424. x_rank = len(x_shape)
  1425. validator.check_int_range(axis, -x_rank, x_rank, Rel.INC_LEFT, "axis", self.name)
  1426. ouput_shape = _infer_shape_reduce(x_shape, self.axis, self.keep_dims, self.name)
  1427. return ouput_shape, ouput_shape
  1428. def infer_dtype(self, x_dtype):
  1429. validator.check_subclass("input_x", x_dtype, mstype.tensor, self.name)
  1430. return mstype.tensor_type(mstype.int32), x_dtype
  1431. class Tile(PrimitiveWithInfer):
  1432. r"""
  1433. Replicates a tensor with given multiples times.
  1434. Creates a new tensor by replicating input multiples times. The dimension of
  1435. output tensor is the larger of the input tensor dimension and the length of `multiples`.
  1436. Inputs:
  1437. - **input_x** (Tensor) - 1-D or higher Tensor. Set the shape of input tensor as
  1438. :math:`(x_1, x_2, ..., x_S)`.
  1439. - **multiples** (tuple[int]) - The input tuple is constructed by multiple
  1440. integers, i.e., :math:`(y_1, y_2, ..., y_S)`. The length of `multiples`
  1441. cannot be smaller than the length of the shape of `input_x`.
  1442. Only constant value is allowed.
  1443. Outputs:
  1444. Tensor, has the same data type as the `input_x`.
  1445. - If the length of `multiples` is the same as the length of shape of `input_x`,
  1446. then the shape of their corresponding positions can be multiplied, and
  1447. the shape of Outputs is :math:`(x_1*y_1, x_2*y_2, ..., x_S*y_R)`.
  1448. - If the length of `multiples` is larger than the length of shape of `input_x`,
  1449. fill in multiple 1 in the length of the shape of `input_x` until their lengths are consistent.
  1450. Such as set the shape of `input_x` as :math:`(1, ..., x_1, x_2, ..., x_S)`,
  1451. then the shape of their corresponding positions can be multiplied, and
  1452. the shape of Outputs is :math:`(1*y_1, ..., x_S*y_R)`.
  1453. Supported Platforms:
  1454. ``Ascend`` ``GPU`` ``CPU``
  1455. Examples:
  1456. >>> tile = ops.Tile()
  1457. >>> input_x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.float32)
  1458. >>> multiples = (2, 3)
  1459. >>> output = tile(input_x, multiples)
  1460. >>> print(output)
  1461. [[1. 2. 1. 2. 1. 2.]
  1462. [3. 4. 3. 4. 3. 4.]
  1463. [1. 2. 1. 2. 1. 2.]
  1464. [3. 4. 3. 4. 3. 4.]]
  1465. """
  1466. @prim_attr_register
  1467. def __init__(self):
  1468. """Initialize Tile"""
  1469. self.init_prim_io_names(inputs=['x', 'multiples'], outputs=['output'])
  1470. def check_elim(self, base_tensor, multiplier):
  1471. if (not isinstance(base_tensor, Tensor)) or (not isinstance(multiplier, tuple)):
  1472. raise TypeError("Expecting (Tensor, tuple), got: ({}, {})".format(base_tensor, multiplier))
  1473. if all(v == 1 for v in multiplier):
  1474. return (True, base_tensor)
  1475. return (False, None)
  1476. def __infer__(self, x, multiples):
  1477. multiples_v = multiples['value']
  1478. x_shp = x['shape']
  1479. validator.check_value_type("multiples", multiples_v, [tuple], self.name)
  1480. for i, multiple in enumerate(multiples_v):
  1481. validator.check_positive_int(multiple, "multiples[%d]" % i, self.name)
  1482. validator.check_value_type("x[\'dtype\']", x["dtype"], mstype.tensor_type, self.name)
  1483. len_sub = len(multiples_v) - len(x_shp)
  1484. multiples_w = None
  1485. if len_sub == 0:
  1486. multiples_w = multiples_v
  1487. if len_sub > 0:
  1488. for i in range(0, len_sub):
  1489. x_shp.insert(0, 1)
  1490. multiples_w = multiples_v
  1491. elif len_sub < 0:
  1492. raise ValueError(f'For \'{self.name}\' the length of multiples can not be smaller than '
  1493. f'the length of dimension in input_x.')
  1494. for i, a in enumerate(multiples_w):
  1495. x_shp[i] *= a
  1496. value = None
  1497. if x['value'] is not None:
  1498. value = Tensor(np.tile(x['value'].asnumpy(), multiples_w))
  1499. return {'shape': x_shp,
  1500. 'dtype': x['dtype'],
  1501. 'value': value}
class UnsortedSegmentSum(PrimitiveWithInfer):
    r"""
    Computes the sum of a tensor along segments.

    Calculates a tensor such that :math:`\text{output}[i] = \sum_{segment\_ids[j] == i} \text{data}[j, \ldots]`, where
    :math:`j` is a tuple describing the index of element in data. `segment_ids` selects which elements in data to sum
    up. Segment_ids does not need to be sorted, and it does not need to cover all values in the entire valid value
    range.

    Note:
        If the segment_id i is absent in the segment_ids, then output[i] will be filled with 0.

    If the sum of the given segment_ids :math:`i` is empty, then :math:`\text{output}[i] = 0`. If the given segment_ids
    is negative, the value will be ignored. 'num_segments' must be equal to the number of different segment_ids.

    Inputs:
        - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.
        - **segment_ids** (Tensor) - Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R. Type must be int.
        - **num_segments** (int) - Set :math:`z` as num_segments.

    Outputs:
        Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> input_x = Tensor([1, 2, 3, 4], mindspore.float32)
        >>> segment_ids = Tensor([0, 0, 1, 2], mindspore.int32)
        >>> num_segments = 4
        >>> output = ops.UnsortedSegmentSum()(input_x, segment_ids, num_segments)
        >>> print(output)
        [3. 3. 4. 0.]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize UnsortedSegmentSum"""
        self.init_prim_io_names(inputs=['x', 'segment_ids', 'num_segments'], outputs=['y'])

    def __infer__(self, x, segment_ids, num_segments):
        """Infer output shape/dtype, handling dynamic shapes via min/max shape tracking."""
        x_type = x['dtype']
        x_shp = x['shape']
        validator.check_subclass("input_x", x_type, mstype.tensor, self.name)
        validator.check_value_type("x_shape", x_shp, [list], self.name)
        x_shp_len = len(x_shp)
        validator.check_positive_int(x_shp_len, "rank of input_x", self.name)
        segment_ids_shp = segment_ids['shape']
        segment_ids_type = segment_ids['dtype']
        validator.check_subclass("segment_ids", segment_ids_type, mstype.tensor, self.name)
        validator.check_value_type("segment_ids", segment_ids_shp, [list], self.name)
        segment_ids_shp_len = len(segment_ids_shp)
        validator.check_positive_int(segment_ids_shp_len, "rank of segment_ids", self.name)
        # segment_ids indexes a prefix of x's axes, so its rank cannot exceed x's rank.
        validator.check(f'rank of input_x', len(x_shp),
                        'rank of segments_id', len(segment_ids_shp), Rel.GE, self.name)
        if (not -1 in x_shp and not -1 in segment_ids_shp):
            # only validate when both shapes fully known (-1 marks a dynamic dimension)
            for i, value in enumerate(segment_ids_shp):
                validator.check("ids[%d]" % i, value, 'input[%d]' % i, x_shp[i], Rel.EQ, self.name)
        num_segments_v = num_segments['value']
        num_segments_type = num_segments['dtype']
        validator.check_subclass("num_segments", num_segments_type, [mstype.tensor, mstype.number], self.name)
        # A tensor-typed num_segments is unknown at compile time -> dynamic first dimension.
        if isinstance(num_segments_type, type(mstype.tensor)):
            validator.check_tensor_dtype_valid("num_segments", num_segments_type, [mstype.int32, mstype.int64],
                                               self.name)
            shp = [-1]
        else:
            validator.check_value_type('num_segments', num_segments_v, [int], self.name)
            validator.check_positive_int(num_segments_v, "num_segments", self.name)
            shp = [num_segments_v]
        # Remaining axes of x (those not covered by segment_ids) pass through unchanged.
        shp += x_shp[segment_ids_shp_len:]
        # min/max bounds for the dynamic first dimension, if the frontend supplied them.
        if "max_value" in num_segments and "min_value" in num_segments:
            output_max_shape = list(num_segments['max_value'])
            output_min_shape = list(num_segments['min_value'])
        else:
            if isinstance(num_segments_type, type(mstype.tensor)):
                raise ValueError("Num_segments only support int type when it is not a dynamic value")
            output_max_shape = [num_segments_v]
            output_min_shape = [num_segments_v]
        # min/max bounds for x's pass-through axes; fall back to the static shape.
        if 'max_shape' in x and 'min_shape' in x:
            max_output_incoming = x['max_shape']
            min_output_incoming = x['min_shape']
        else:
            max_output_incoming = x_shp
            min_output_incoming = x_shp
        output_max_shape += max_output_incoming[segment_ids_shp_len:]
        output_min_shape += min_output_incoming[segment_ids_shp_len:]
        return {'shape': shp,
                'max_shape': output_max_shape,
                'min_shape': output_min_shape,
                'dtype': mstype.tensor_type(x_type.element_type()),
                'value': None}
  1585. class UnsortedSegmentMin(PrimitiveWithCheck):
  1586. """
  1587. Computes the minimum of a tensor along segments.
  1588. Inputs:
  1589. - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.
  1590. The data type must be float16, float32 or int32.
  1591. - **segment_ids** (Tensor) - A `1-D` tensor whose shape is :math:`(x_1)`, the value must be >= 0.
  1592. The data type must be int32.
  1593. - **num_segments** (int) - The value specifies the number of distinct `segment_ids`.
  1594. Note:
  1595. If the segment_id i is absent in the segment_ids, then output[i] will be filled with
  1596. the maximum value of the input_x's type.
  1597. Outputs:
  1598. Tensor, set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.
  1599. Supported Platforms:
  1600. ``Ascend`` ``GPU``
  1601. Examples:
  1602. >>> input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))
  1603. >>> segment_ids = Tensor(np.array([0, 1, 1]).astype(np.int32))
  1604. >>> num_segments = 2
  1605. >>> unsorted_segment_min = ops.UnsortedSegmentMin()
  1606. >>> output = unsorted_segment_min(input_x, segment_ids, num_segments)
  1607. >>> print(output)
  1608. [[1. 2. 3.]
  1609. [4. 2. 1.]]
  1610. """
  1611. @prim_attr_register
  1612. def __init__(self):
  1613. """Initialize UnsortedSegmentMin"""
  1614. self.init_prim_io_names(inputs=['x', 'segment_ids', 'num_segments'], outputs=['y'])
  1615. self.add_prim_attr("dynamic_shape_depends", [2])
  1616. def __check__(self, x, segment_ids, num_segments):
  1617. x_shape = x['shape']
  1618. segment_ids_shape = segment_ids['shape']
  1619. valid_type = [mstype.float16, mstype.float32, mstype.int32]
  1620. validator.check_tensor_dtype_valid("x", x['dtype'], valid_type, self.name)
  1621. validator.check_tensor_dtype_valid("segment_ids", segment_ids['dtype'], [mstype.int32], self.name)
  1622. validator.check_equal_int(len(segment_ids_shape), 1, "rank of segment_ids_shape", self.name)
  1623. num_segments_type = num_segments['dtype']
  1624. validator.check_subclass("num_segments", num_segments_type, [mstype.number], self.name)
  1625. if (not -1 in x_shape and not -1 in segment_ids_shape):
  1626. # only validate when both shapes fully known
  1627. validator.check(f'first shape of input_x', x_shape[0],
  1628. 'length of segments_id', segment_ids_shape[0], Rel.EQ, self.name)
  1629. num_segments_v = num_segments['value']
  1630. validator.check_value_type('num_segments', num_segments_v, [int], self.name)
  1631. validator.check_positive_int(num_segments_v, "num_segments", self.name)
  1632. class UnsortedSegmentMax(PrimitiveWithCheck):
  1633. """
  1634. Computes the maximum along segments of a tensor.
  1635. Inputs:
  1636. - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.
  1637. The data type must be float16, float32 or int32.
  1638. - **segment_ids** (Tensor) - A `1-D` tensor whose shape is :math:`(x_1)`, the value must be >= 0.
  1639. The data type must be int32.
  1640. - **num_segments** (int) - The value specifies the number of distinct `segment_ids`.
  1641. Note:
  1642. If the segment_id i is absent in the segment_ids, then output[i] will be filled with
  1643. the minimum value of the input_x's type.
  1644. Outputs:
  1645. Tensor, set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.
  1646. Supported Platforms:
  1647. ``Ascend`` ``GPU``
  1648. Examples:
  1649. >>> input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))
  1650. >>> segment_ids = Tensor(np.array([0, 1, 1]).astype(np.int32))
  1651. >>> num_segments = 2
  1652. >>> unsorted_segment_max = ops.UnsortedSegmentMax()
  1653. >>> output = unsorted_segment_max(input_x, segment_ids, num_segments)
  1654. >>> print(output)
  1655. [[1. 2. 3.]
  1656. [4. 5. 6.]]
  1657. """
  1658. @prim_attr_register
  1659. def __init__(self):
  1660. """Initialize UnsortedSegmentMax"""
  1661. self.init_prim_io_names(inputs=['x', 'segment_ids', 'num_segments'], outputs=['y'])
  1662. self.add_prim_attr("dynamic_shape_depends", [2])
  1663. def __check__(self, x, segment_ids, num_segments):
  1664. x_shape = x['shape']
  1665. segment_ids_shape = segment_ids['shape']
  1666. valid_type = [mstype.float16, mstype.float32, mstype.int32]
  1667. validator.check_tensor_dtype_valid("x", x['dtype'], valid_type, self.name)
  1668. validator.check_tensors_dtypes_same_and_valid({"segment_ids": segment_ids['dtype']},
  1669. [mstype.int32, mstype.int64], self.name)
  1670. validator.check_equal_int(len(segment_ids_shape), 1, "rank of segment_ids_shape", self.name)
  1671. num_segments_type = num_segments['dtype']
  1672. validator.check_subclass("num_segments", num_segments_type, [mstype.number], self.name)
  1673. if (not -1 in x_shape and not -1 in segment_ids_shape):
  1674. # only validate when both shapes fully known
  1675. validator.check(f'first shape of input_x', x_shape[0],
  1676. 'length of segments_id', segment_ids_shape[0], Rel.EQ, self.name)
  1677. num_segments_v = num_segments['value']
  1678. validator.check_value_type('num_segments', num_segments_v, [int], self.name)
  1679. validator.check_positive_int(num_segments_v, "num_segments", self.name)
  1680. class UnsortedSegmentProd(PrimitiveWithInfer):
  1681. """
  1682. Computes the product of a tensor along segments.
  1683. Inputs:
  1684. - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.
  1685. With float16, float32 or int32 data type.
  1686. - **segment_ids** (Tensor) - A `1-D` tensor whose shape is :math:`(x_1)`, the value must be >= 0.
  1687. Data type must be int32.
  1688. - **num_segments** (int) - The value specifies the number of distinct `segment_ids`,
  1689. must be greater than 0.
  1690. Outputs:
  1691. Tensor, set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.
  1692. Supported Platforms:
  1693. ``Ascend``
  1694. Examples:
  1695. >>> input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))
  1696. >>> segment_ids = Tensor(np.array([0, 1, 0]).astype(np.int32))
  1697. >>> num_segments = 2
  1698. >>> unsorted_segment_prod = ops.UnsortedSegmentProd()
  1699. >>> output = unsorted_segment_prod(input_x, segment_ids, num_segments)
  1700. >>> print(output)
  1701. [[4. 4. 3.]
  1702. [4. 5. 6.]]
  1703. """
  1704. @prim_attr_register
  1705. def __init__(self):
  1706. """Initialize UnsortedSegmentProd"""
  1707. self.init_prim_io_names(inputs=['x', 'segment_ids', 'num_segments'], outputs=['y'])
  1708. def __infer__(self, x, segment_ids, num_segments):
  1709. x_type = x['dtype']
  1710. x_shape = x['shape']
  1711. segment_ids_shape = segment_ids['shape']
  1712. validator.check_subclass("input_x", x_type, mstype.tensor, self.name)
  1713. validator.check_value_type("x_shape", x_shape, [list], self.name)
  1714. valid_type = [mstype.float16, mstype.float32, mstype.int32]
  1715. validator.check_tensor_dtype_valid("x", x['dtype'], valid_type, self.name)
  1716. validator.check_tensor_dtype_valid("segment_ids", segment_ids['dtype'], [mstype.int32], self.name)
  1717. validator.check_equal_int(len(segment_ids_shape), 1, "rank of segment_ids_shape", self.name)
  1718. validator.check(f'first shape of input_x', x_shape[0],
  1719. 'length of segments_id', segment_ids_shape[0], Rel.EQ, self.name)
  1720. num_segments_v = num_segments['value']
  1721. validator.check_value_type('num_segments', num_segments_v, [int], self.name)
  1722. validator.check_positive_int(num_segments_v, "num_segments", self.name)
  1723. segment_ids_shape_len = len(segment_ids_shape)
  1724. out_shape = [num_segments_v]
  1725. out_shape += x_shape[segment_ids_shape_len:]
  1726. out = {'shape': out_shape,
  1727. 'dtype': mstype.tensor_type(x_type.element_type()),
  1728. 'value': None}
  1729. return out
  1730. class Concat(PrimitiveWithInfer):
  1731. r"""
  1732. Connect tensor in the specified axis.
  1733. Connect input tensors along with the given axis.
  1734. The input data is a tuple of tensors. These tensors have the same rank `R`. Set the given axis as `m`, and
  1735. :math:`0 \le m < R`. Set the number of input tensors as `N`. For the :math:`i`-th tensor :math:`t_i`, it has
  1736. the shape of :math:`(x_1, x_2, ..., x_{mi}, ..., x_R)`. :math:`x_{mi}` is the :math:`m`-th dimension of the
  1737. :math:`i`-th tensor. Then, the shape of the output tensor is
  1738. .. math::
  1739. (x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)
  1740. Args:
  1741. axis (int): The specified axis. Default: 0.
  1742. Inputs:
  1743. - **input_x** (tuple, list) - A tuple or a list of input tensors.
  1744. Outputs:
  1745. Tensor, the shape is :math:`(x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)`.
  1746. Supported Platforms:
  1747. ``Ascend`` ``GPU`` ``CPU``
  1748. Examples:
  1749. >>> data1 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
  1750. >>> data2 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
  1751. >>> op = ops.Concat()
  1752. >>> output = op((data1, data2))
  1753. >>> print(output)
  1754. [[0. 1.]
  1755. [2. 1.]
  1756. [0. 1.]
  1757. [2. 1.]]
  1758. """
  1759. @prim_attr_register
  1760. def __init__(self, axis=0):
  1761. """Initialize Concat"""
  1762. validator.check_value_type("axis", axis, [int], self.name)
  1763. def __infer__(self, input_x):
  1764. axis = self.axis
  1765. x_shp = input_x['shape']
  1766. x_type = input_x['dtype']
  1767. _, all_shp, _ = get_concat_offset(x_shp, x_type, axis, self.name)
  1768. self.add_prim_attr('inputNums', len(x_shp))
  1769. ret_shp = x_shp[0].copy()
  1770. value = None
  1771. if input_x['value'] is not None:
  1772. value = Tensor(np.concatenate([x.asnumpy() for x in input_x['value']], axis=axis))
  1773. ret_shp[axis] = all_shp
  1774. out = {'shape': ret_shp,
  1775. 'dtype': x_type[0],
  1776. 'value': value}
  1777. if -1 in x_shp[0]:
  1778. x_min_shp = input_x['min_shape']
  1779. ret_min_shp = x_min_shp[0].copy()
  1780. ret_min_shp[axis] = 0
  1781. for all_min_shp in x_min_shp:
  1782. ret_min_shp[axis] += all_min_shp[axis]
  1783. out['min_shape'] = ret_min_shp
  1784. x_max_shp = input_x['max_shape']
  1785. ret_max_shp = x_max_shp[0].copy()
  1786. ret_max_shp[axis] = 0
  1787. for all_max_shp in x_max_shp:
  1788. ret_max_shp[axis] += all_max_shp[axis]
  1789. out['max_shape'] = ret_max_shp
  1790. return out
  1791. class ParallelConcat(PrimitiveWithInfer):
  1792. r"""
  1793. Concats tensor in the first dimension.
  1794. Concats input tensors along with the first dimension.
  1795. Note:
  1796. The input tensors are all required to have size 1 in the first dimension.
  1797. Inputs:
  1798. - **values** (tuple, list) - A tuple or a list of input tensors. The data type and shape of these
  1799. tensors must be the same.
  1800. Outputs:
  1801. Tensor, data type is the same as `values`.
  1802. Supported Platforms:
  1803. ``Ascend``
  1804. Examples:
  1805. >>> data1 = Tensor(np.array([[0, 1]]).astype(np.int32))
  1806. >>> data2 = Tensor(np.array([[2, 1]]).astype(np.int32))
  1807. >>> op = ops.ParallelConcat()
  1808. >>> output = op((data1, data2))
  1809. >>> print(output)
  1810. [[0 1]
  1811. [2 1]]
  1812. """
  1813. @prim_attr_register
  1814. def __init__(self):
  1815. """Initialize ParallelConcat"""
  1816. def __infer__(self, values):
  1817. x_shp = values['shape']
  1818. x_type = values['dtype']
  1819. validator.check_int(len(x_shp), 1, Rel.GE, f'x_shp length', self.name)
  1820. args = {f"x_type[{i}]": elem for i, elem in enumerate(x_type)}
  1821. validator.check_tensors_dtypes_same_and_valid(args, mstype.number_type + (mstype.bool_,), self.name)
  1822. first_elem = x_shp[0]
  1823. for i, elem in enumerate(x_shp[1:]):
  1824. j = i + 1
  1825. validator.check_equal_int(elem[0], 1, f'x_shp[{j}][0]', self.name)
  1826. validator.check(f"x_shp[0] shape", first_elem, f"x_shp[{j}] shape", elem, Rel.EQ, self.name)
  1827. ret_shp = x_shp[0].copy()
  1828. ret_shp[0] = len(x_shp)
  1829. self.add_prim_attr('shape', ret_shp)
  1830. self.add_prim_attr('N', len(x_shp))
  1831. out = {'shape': ret_shp,
  1832. 'dtype': x_type[0],
  1833. 'value': None}
  1834. return out
  1835. def _get_stack_shape(x_shape, x_type, axis, prim_name):
  1836. """for stack output shape"""
  1837. validator.check_value_type("shape", x_shape, [tuple, list], prim_name)
  1838. validator.check_int(len(x_shape), 1, Rel.GE, "len of input_x", prim_name)
  1839. validator.check_subclass("input_x[0]", x_type[0], mstype.tensor, prim_name)
  1840. rank_base = len(x_shape[0])
  1841. N = len(x_shape)
  1842. out_shape = x_shape[0]
  1843. validator.check_int_range(axis, -rank_base - 1, rank_base, Rel.INC_BOTH, 'axis', prim_name)
  1844. if axis < 0:
  1845. axis = axis + rank_base + 1
  1846. for i in range(1, N):
  1847. validator.check('x_type[%d]' % i, x_type[i], 'base', x_type[0], Rel.EQ, prim_name, TypeError)
  1848. if x_shape[i] != x_shape[0]:
  1849. raise ValueError(f"For \'{prim_name}\' element {i} shape in input can not pack with first element")
  1850. out_shape.insert(axis, N)
  1851. return out_shape
  1852. class Pack(PrimitiveWithInfer):
  1853. """
  1854. Same as operator Stack. Pack will be deprecated in the future.
  1855. Please use Stack instead.
  1856. """
  1857. @deprecated("1.1", "Stack", True)
  1858. @prim_attr_register
  1859. def __init__(self, axis=0):
  1860. """Initialize Pack"""
  1861. validator.check_value_type("axis", axis, [int], self.name)
  1862. self.axis = axis
  1863. def __infer__(self, value):
  1864. x_shape = value['shape']
  1865. x_type = value['dtype']
  1866. self.add_prim_attr('num', len(x_shape))
  1867. all_shape = _get_stack_shape(x_shape, x_type, self.axis, self.name)
  1868. out = {'shape': all_shape,
  1869. 'dtype': x_type[0],
  1870. 'value': None}
  1871. return out
  1872. class Stack(PrimitiveWithInfer):
  1873. r"""
  1874. Stacks a list of tensors in specified axis.
  1875. Stacks the list of input tensors with the same rank `R`, output is a tensor of rank `(R+1)`.
  1876. Given input tensors of shape :math:`(x_1, x_2, ..., x_R)`. Set the number of input tensors as `N`.
  1877. If :math:`0 \le axis`, the shape of the output tensor is :math:`(x_1, x_2, ..., x_{axis}, N, x_{axis+1}, ..., x_R)`.
  1878. Args:
  1879. axis (int): Dimension to stack. Default: 0.
  1880. Negative values wrap around. The range is [-(R+1), R+1).
  1881. Inputs:
  1882. - **input_x** (Union[tuple, list]) - A Tuple or list of Tensor objects with the same shape and type.
  1883. Outputs:
  1884. Tensor. A stacked Tensor with the same type as `input_x`.
  1885. Raises:
  1886. TypeError: If the data types of elements in `input_x` are not the same.
  1887. ValueError: If the length of `input_x` is not greater than 1;
  1888. or if axis is out of the range [-(R+1), R+1);
  1889. or if the shapes of elements in input_x are not the same.
  1890. Supported Platforms:
  1891. ``Ascend`` ``GPU`` ``CPU``
  1892. Examples:
  1893. >>> data1 = Tensor(np.array([0, 1]).astype(np.float32))
  1894. >>> data2 = Tensor(np.array([2, 3]).astype(np.float32))
  1895. >>> stack = ops.Stack()
  1896. >>> output = stack([data1, data2])
  1897. >>> print(output)
  1898. [[0. 1.]
  1899. [2. 3.]]
  1900. """
  1901. @prim_attr_register
  1902. def __init__(self, axis=0):
  1903. """Initialize Stack"""
  1904. validator.check_value_type("axis", axis, [int], self.name)
  1905. self.axis = axis
  1906. def __infer__(self, value):
  1907. x_shape = value['shape']
  1908. x_type = value['dtype']
  1909. self.add_prim_attr('num', len(x_shape))
  1910. all_shape = _get_stack_shape(x_shape, x_type, self.axis, self.name)
  1911. out = {'shape': all_shape,
  1912. 'dtype': x_type[0],
  1913. 'value': None}
  1914. return out
class Unpack(PrimitiveWithInfer):
    """
    Same as operator Unstack. Unpack will be deprecated in the future.
    Please use Unstack instead.
    """

    @deprecated("1.1", "Unstack", True)
    @prim_attr_register
    def __init__(self, axis=0):
        """Initialize Unpack"""
        validator.check_value_type("axis", axis, [int], self.name)
        self.axis = axis

    def __infer__(self, x):
        # Unstacking along `axis` yields x_shape[axis] outputs, each with
        # that axis removed; returns a tuple of shapes and a tuple of dtypes.
        validator.check_subclass("x", x['dtype'], mstype.tensor, self.name)
        x_shape = list(x['shape'])
        dim = len(x_shape)
        validator.check_int_range(self.axis, -dim, dim, Rel.INC_LEFT, 'axis value', self.name)
        if self.axis < 0:
            # Normalize a negative axis in place; subsequent uses see the
            # non-negative value (and the attribute keeps it across calls).
            self.axis = self.axis + dim
        output_num = x_shape[self.axis]
        validator.check_value_type("num", output_num, [int], self.name)
        validator.check_positive_int(output_num, "output_num", self.name)
        self.add_prim_attr('num', output_num)
        # NOTE(review): always 0 by construction (output_num == x_shape[self.axis]);
        # kept to preserve the original validation flow.
        output_valid_check = x_shape[self.axis] - output_num
        validator.check_int(output_valid_check, 0, Rel.EQ,
                            "The dimension which to unstack divides output_num", self.name)
        out_shapes = []
        out_dtypes = []
        # Every output shares the same shape: input shape minus the unstacked axis.
        out_shape = x_shape[:self.axis] + x_shape[self.axis + 1:]
        for _ in range(output_num):
            out_shapes.append(tuple(out_shape))
            out_dtypes.append(x['dtype'])
        out_shapes = tuple(out_shapes)
        out_dtypes = tuple(out_dtypes)
        out = {'shape': out_shapes,
               'dtype': out_dtypes,
               'value': None}
        return out
  1952. class Unstack(PrimitiveWithInfer):
  1953. r"""
  1954. Unstacks tensor in specified axis.
  1955. Unstacks a tensor of rank `R` along axis dimension, output tensors will have rank `(R-1)`.
  1956. Given a tensor of shape :math:`(x_1, x_2, ..., x_R)`. If :math:`0 \le axis`,
  1957. the shape of tensor in output is :math:`(x_1, x_2, ..., x_{axis}, x_{axis+2}, ..., x_R)`.
  1958. This is the opposite of pack.
  1959. Args:
  1960. axis (int): Dimension along which to pack. Default: 0.
  1961. Negative values wrap around. The range is [-R, R).
  1962. Inputs:
  1963. - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.
  1964. A tensor to be unstacked and the rank of the tensor must be greater than 0.
  1965. Outputs:
  1966. A tuple of tensors, the shape of each objects is the same.
  1967. Raises:
  1968. ValueError: If axis is out of the range [-len(input_x.shape), len(input_x.shape)).
  1969. Supported Platforms:
  1970. ``Ascend`` ``GPU`` ``CPU``
  1971. Examples:
  1972. >>> unstack = ops.Unstack()
  1973. >>> input_x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]))
  1974. >>> output = unstack(input_x)
  1975. >>> print(output)
  1976. (Tensor(shape=[4], dtype=Int32, value= [1, 1, 1, 1]),
  1977. Tensor(shape=[4], dtype=Int32, value= [2, 2, 2, 2]))
  1978. """
  1979. @prim_attr_register
  1980. def __init__(self, axis=0):
  1981. """Initialize Unstack"""
  1982. validator.check_value_type("axis", axis, [int], self.name)
  1983. self.axis = axis
  1984. def __infer__(self, x):
  1985. validator.check_subclass("x", x['dtype'], mstype.tensor, self.name)
  1986. x_shape = list(x['shape'])
  1987. dim = len(x_shape)
  1988. validator.check_int_range(self.axis, -dim, dim, Rel.INC_LEFT, 'axis value', self.name)
  1989. if self.axis < 0:
  1990. self.axis = self.axis + dim
  1991. output_num = x_shape[self.axis]
  1992. validator.check_value_type("num", output_num, [int], self.name)
  1993. validator.check_positive_int(output_num, "output_num", self.name)
  1994. self.add_prim_attr('num', output_num)
  1995. output_valid_check = x_shape[self.axis] - output_num
  1996. validator.check_int(output_valid_check, 0, Rel.EQ,
  1997. "The dimension which to unstack divides output_num", self.name)
  1998. out_shapes = []
  1999. out_dtypes = []
  2000. out_shape = x_shape[:self.axis] + x_shape[self.axis + 1:]
  2001. for _ in range(output_num):
  2002. out_shapes.append(tuple(out_shape))
  2003. out_dtypes.append(x['dtype'])
  2004. out_shapes = tuple(out_shapes)
  2005. out_dtypes = tuple(out_dtypes)
  2006. out = {'shape': out_shapes,
  2007. 'dtype': out_dtypes,
  2008. 'value': None}
  2009. return out
  2010. class Slice(PrimitiveWithInfer):
  2011. """
  2012. Slices a tensor in the specified shape.
  2013. Inputs:
  2014. - **x** (Tensor): The target tensor.
  2015. - **begin** (tuple, list): The beginning of the slice. Only constant value is allowed.
  2016. - **size** (tuple, list): The size of the slice. Only constant value is allowed.
  2017. Outputs:
  2018. Tensor, the shape is : input `size`, the data type is the same as input `x`.
  2019. Supported Platforms:
  2020. ``Ascend`` ``GPU`` ``CPU``
  2021. Examples:
  2022. >>> data = Tensor(np.array([[[1, 1, 1], [2, 2, 2]],
  2023. ... [[3, 3, 3], [4, 4, 4]],
  2024. ... [[5, 5, 5], [6, 6, 6]]]).astype(np.int32))
  2025. >>> slice = ops.Slice()
  2026. >>> output = slice(data, (1, 0, 0), (1, 1, 3))
  2027. >>> print(output)
  2028. [[[3 3 3]]]
  2029. """
  2030. @prim_attr_register
  2031. def __init__(self):
  2032. """Initialize slice"""
  2033. self.init_prim_io_names(inputs=['x', 'begin', 'size'], outputs=['output'])
  2034. def __infer__(self, x, begin, size):
  2035. x_shape = x['shape']
  2036. x_shp_len = len(x_shape)
  2037. validator.check_const_input('begin', begin['value'], self.name)
  2038. validator.check_const_input('size', size['value'], self.name)
  2039. begin_v, size_v = begin['value'], size['value']
  2040. if begin_v is None or size_v is None:
  2041. return {'shape': None,
  2042. 'dtype': x['dtype'],
  2043. 'value': None}
  2044. validator.check_value_type("input begin", begin_v, [tuple, list], self.name)
  2045. validator.check_value_type("input size", size_v, [tuple, list], self.name)
  2046. for key, value in zip(('begin', 'size'), (begin_v, size_v)):
  2047. validator.check(f'len of {key}', len(value),
  2048. 'len x\'s dim', x_shp_len)
  2049. for i in range(x_shp_len):
  2050. validator.check_positive_int(size_v[i], f'input size[{i}]')
  2051. if x_shape[i] < begin_v[i] + size_v[i]:
  2052. y = begin_v[i] + size_v[i]
  2053. raise ValueError("For '%s' slice shape can not bigger than origin shape %d, %d." %
  2054. (self.name, x_shape[i], y))
  2055. return {'shape': size_v,
  2056. 'dtype': x['dtype'],
  2057. 'value': None}
class ReverseV2(PrimitiveWithInfer):
    """
    Reverses specific dimensions of a tensor.

    Args:
        axis (Union[tuple(int), list(int)]): The indices of the dimensions to reverse.

    Inputs:
        - **input_x** (Tensor) - The target tensor.

    Outputs:
        Tensor, has the same shape and type as `input_x`.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
        >>> op = ops.ReverseV2(axis=[1])
        >>> output = op(input_x)
        >>> print(output)
        [[4 3 2 1]
         [8 7 6 5]]
    """

    @prim_attr_register
    def __init__(self, axis):
        """Initialize ReverseV2: axis must be a list/tuple of ints."""
        validator.check_value_type('axis', axis, [list, tuple], self.name)
        for i, each in enumerate(axis):
            validator.check_value_type(f'axis[{i}]', each, [int], self.name)
        self.axis = axis
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, x_shape):
        # Reversing does not change the shape; only validate axis bounds here.
        dim = len(x_shape)
        for i, each in enumerate(self.axis):
            validator.check_int_range(each, -dim, dim, Rel.INC_LEFT, f'axis[{i}]', self.name)
        return x_shape

    def infer_dtype(self, x_dtype):
        # Element order change only: any bool/numeric dtype passes through.
        validator.check_tensor_dtype_valid('x', x_dtype, (mstype.bool_,) + mstype.number_type, self.name)
        return x_dtype
  2092. class Rint(PrimitiveWithInfer):
  2093. """
  2094. Returns an integer that is closest to x element-wise.
  2095. Inputs:
  2096. - **input_x** (Tensor) - The target tensor, which must be one of the following types:
  2097. float16, float32.
  2098. Outputs:
  2099. Tensor, has the same shape and type as `input_x`.
  2100. Supported Platforms:
  2101. ``Ascend``
  2102. Examples:
  2103. >>> input_x = Tensor(np.array([-1.6, -0.1, 1.5, 2.0]), mindspore.float32)
  2104. >>> op = ops.Rint()
  2105. >>> output = op(input_x)
  2106. >>> print(output)
  2107. [-2. 0. 2. 2.]
  2108. """
  2109. @prim_attr_register
  2110. def __init__(self):
  2111. self.init_prim_io_names(inputs=['x'], outputs=['output'])
  2112. def infer_shape(self, x_shape):
  2113. return x_shape
  2114. def infer_dtype(self, x_dtype):
  2115. validator.check_tensor_dtype_valid('x', x_dtype, [mstype.float16, mstype.float32], self.name)
  2116. return x_dtype
  2117. class Select(PrimitiveWithInfer):
  2118. r"""
  2119. Returns the selected elements, either from input :math:`x` or input :math:`y`, depending on the `condition`.
  2120. Given a tensor as input, this operation inserts a dimension of 1 at the dimension,
  2121. if both :math:`x` and :math:`y` are none, the operation returns the coordinates of the true
  2122. element in the `condition`, the coordinates are returned as a two-dimensional
  2123. tensor, where the first dimension (row) represents the number of true elements
  2124. and the second dimension (columns) represents the coordinates of the true
  2125. elements. Keep in mind that the shape of the output tensor can vary depending
  2126. on how many true values are in the input. Indexes are output in row-first
  2127. order.
  2128. If neither is None, :math:`x` and :math:`y` must have the same shape. If :math:`x` and :math:`y` are
  2129. scalars, the conditional tensor must be a scalar. If :math:`x` and :math:`y` are
  2130. higher-dimensional vectors, the `condition` must be a vector whose size matches the
  2131. first dimension of :math:`x`, or must have the same shape as :math:`y`.
  2132. The conditional tensor acts as an optional compensation (mask), which
  2133. determines whether the corresponding element / row in the output must be
  2134. selected from :math:`x` (if true) or :math:`y` (if false) based on the value of each
  2135. element.
  2136. If condition is a vector, then :math:`x` and :math:`y` are higher-dimensional matrices, then it
  2137. chooses to copy that row (external dimensions) from :math:`x` and :math:`y`. If condition has
  2138. the same shape as :math:`x` and :math:`y`, you can choose to copy these elements from :math:`x`
  2139. and :math:`y`.
  2140. Inputs:
  2141. - **input_cond** (Tensor[bool]) - The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
  2142. The condition tensor, decides which element is chosen.
  2143. - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
  2144. The first input tensor.
  2145. - **input_y** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
  2146. The second input tensor.
  2147. Outputs:
  2148. Tensor, has the same shape as `input_x`. The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
  2149. Supported Platforms:
  2150. ``Ascend`` ``GPU`` ``CPU``
  2151. Examples:
  2152. >>> select = ops.Select()
  2153. >>> input_cond = Tensor([True, False])
  2154. >>> input_x = Tensor([2,3], mindspore.float32)
  2155. >>> input_y = Tensor([1,2], mindspore.float32)
  2156. >>> output = select(input_cond, input_x, input_y)
  2157. >>> print(output)
  2158. [2. 2.]
  2159. """
  2160. @prim_attr_register
  2161. def __init__(self):
  2162. """init"""
  2163. self.init_prim_io_names(inputs=['condition', 'x', 'y'], outputs=['output'])
  2164. def infer_shape(self, cond_shape, x_shape, y_shape):
  2165. if cond_shape != x_shape or x_shape != y_shape:
  2166. raise ValueError('The x_shape and y_shape must be the same as cond_shape.')
  2167. return x_shape
  2168. def infer_dtype(self, cond_type, x_type, y_type):
  2169. validator.check_subclass("x_type", x_type, mstype.tensor, self.name)
  2170. validator.check_subclass("y_type", y_type, mstype.tensor, self.name)
  2171. validator.check_tensor_dtype_valid("cond", cond_type, [mstype.bool_], self.name)
  2172. if x_type != y_type:
  2173. raise TypeError('\'%s\' the x_type %s must be the same as y_type %s.' % (self.name, x_type, y_type))
  2174. return x_type
  2175. def infer_value(self, cond, x, y):
  2176. if cond is not None and x is not None and y is not None:
  2177. cond = cond.asnumpy()
  2178. x = x.asnumpy()
  2179. y = y.asnumpy()
  2180. out = np.where(cond, x, y)
  2181. return Tensor(out)
  2182. return None
  2183. def _compute_slicing_length(begin, end, stride, x_shape, i):
  2184. """Computes the length of the slicing."""
  2185. if i >= len(x_shape):
  2186. raise ValueError(f"For 'StridedSlice', When their is no new axis, the index length must be less or "
  2187. f"equal than the dim of x.")
  2188. x_dim = x_shape[i]
  2189. if stride > 0:
  2190. # When slicing forward, convert begin and end to positive numbers.
  2191. if begin >= x_dim or end < -x_dim:
  2192. # When slicing forward, if begin >= x_dim or end < -x_dim, the length of the slicing is 0.
  2193. slicing_length = 0
  2194. else:
  2195. if -x_dim <= begin < 0:
  2196. begin += x_dim
  2197. if begin < -x_dim:
  2198. # When slicing forward, if begin < -x_dim, set begin = 0, which means start from the 0th element.
  2199. begin = 0
  2200. if -x_dim <= end < 0:
  2201. end += x_dim
  2202. if end > x_dim:
  2203. # When slicing forward, if end > x_dim, set end = x_dims, which means slice to the last element.
  2204. end = x_dim
  2205. if begin >= end:
  2206. # When slicing forward, if begin >= end, the length of the slicing is 0.
  2207. slicing_length = 0
  2208. else:
  2209. slicing_length = 1 + (end - 1 - begin) // stride
  2210. else:
  2211. # When slicing backward, convert begin and end to negative numbers.
  2212. if begin < -x_dim or end >= x_dim:
  2213. # When slicing backward, if begin < -x_dim or end >= x_dim, the length of the slicing is 0.
  2214. slicing_length = 0
  2215. else:
  2216. if 0 <= begin < x_dim:
  2217. begin += -x_dim
  2218. if begin >= x_dim:
  2219. begin = -1
  2220. if 0 <= end < x_dim:
  2221. end += -x_dim
  2222. if end < -x_dim - 1:
  2223. # When slicing backward, if end < -x_dim - 1, set end = -x_dim - 1, which means
  2224. # slicing to the 0th element.
  2225. end = -x_dim - 1
  2226. if begin <= end:
  2227. # When slicing backward, if begin <= end, the length of the slicing is 0.
  2228. slicing_length = 0
  2229. else:
  2230. slicing_length = 1 + (end + 1 - begin) // stride
  2231. return slicing_length
  2232. class StridedSlice(PrimitiveWithInfer):
  2233. r"""
  2234. Extracts a strided slice of a tensor.
  2235. Given an input tensor, this operation inserts a dimension of length 1 at the dimension.
  2236. This operation extracts a fragment of size (end-begin)/stride from the given 'input_tensor'.
  2237. Starting from the beginning position, the fragment continues adding stride to the index until
  2238. all dimensions are not less than the ending position.
  2239. Note:
  2240. The stride may be negative value, which causes reverse slicing.
  2241. The shape of `begin`, `end` and `strides` must be the same.
  2242. Args:
  2243. begin_mask (int): Starting index of the slice. Default: 0.
  2244. end_mask (int): Ending index of the slice. Default: 0.
  2245. ellipsis_mask (int): An int mask. Default: 0.
  2246. new_axis_mask (int): An int mask. Default: 0.
  2247. shrink_axis_mask (int): An int mask. Default: 0.
  2248. Inputs:
  2249. - **input_x** (Tensor) - The input Tensor.
  2250. - **begin** (tuple[int]) - A tuple which represents the location where to start. Only
  2251. constant value is allowed.
  2252. - **end** (tuple[int]) - A tuple or which represents the maximum location where to end.
  2253. Only constant value is allowed.
  2254. - **strides** (tuple[int]) - A tuple which represents the stride is continuously added
  2255. before reaching the maximum location. Only constant value is allowed.
  2256. Outputs:
  2257. Tensor.
  2258. The output is explained by following example.
  2259. - In the 0th dimension, begin is 1, end is 2, and strides is 1,
  2260. because :math:`1+1=2\geq2`, the interval is :math:`[1,2)`.
  2261. Thus, return the element with :math:`index = 1` in 0th dimension, i.e., [[3, 3, 3], [4, 4, 4]].
  2262. - In the 1st dimension, similarly, the interval is :math:`[0,1)`.
  2263. Based on the return value of the 0th dimension, return the element with :math:`index = 0`,
  2264. i.e., [3, 3, 3].
  2265. - In the 2nd dimension, similarly, the interval is :math:`[0,3)`.
  2266. Based on the return value of the 1st dimension, return the element with :math:`index = 0,1,2`,
  2267. i.e., [3, 3, 3].
  2268. - Finally, the output is [3, 3, 3].
  2269. Supported Platforms:
  2270. ``Ascend`` ``GPU`` ``CPU``
  2271. Examples:
  2272. >>> input_x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],
  2273. ... [[5, 5, 5], [6, 6, 6]]], mindspore.float32)
  2274. >>> slice = ops.StridedSlice()
  2275. >>> output = slice(input_x, (1, 0, 0), (2, 1, 3), (1, 1, 1))
  2276. >>> print(output)
  2277. [[[3. 3. 3.]]]
  2278. """
  2279. @prim_attr_register
  2280. def __init__(self,
  2281. begin_mask=0,
  2282. end_mask=0,
  2283. ellipsis_mask=0,
  2284. new_axis_mask=0,
  2285. shrink_axis_mask=0):
  2286. """Initialize StridedSlice"""
  2287. self.init_prim_io_names(inputs=['x', 'begin', 'end', 'strides'], outputs=['output'])
  2288. validator.check_non_negative_int(begin_mask, 'begin_mask', self.name)
  2289. validator.check_non_negative_int(end_mask, 'end_mask', self.name)
  2290. validator.check_non_negative_int(ellipsis_mask, 'ellipsis_mask', self.name)
  2291. if len(tuple(filter(lambda x: x == '1', bin(ellipsis_mask)[-1:1:-1]))) > 1:
  2292. raise ValueError(f"For '{self.name}', only support one ellipsis in the index, but got {end_mask}.")
  2293. validator.check_non_negative_int(new_axis_mask, 'new_axis_mask', self.name)
  2294. validator.check_non_negative_int(shrink_axis_mask, 'shrink_axis_mask', self.name)
def __infer__(self, x, begin, end, strides):
    # Infer the sliced output shape; begin/end/strides must be constant
    # int tuples of equal length.
    begin_v, end_v, strides_v = begin['value'], end['value'], strides['value']
    validator.check_value_type("begin", begin_v, [tuple], self.name)
    validator.check_value_type("end", end_v, [tuple], self.name)
    validator.check_value_type("strides", strides_v, [tuple], self.name)
    if tuple(filter(lambda x: not isinstance(x, int), begin_v + end_v + strides_v)):
        raise TypeError(f"For {self.name}, both the begins, ends, and strides must be a tuple of int, "
                        f"but got begins: {begin_v}, ends: {end_v}, strides: {strides_v}.")
    if tuple(filter(lambda x: x == 0, strides_v)):
        raise ValueError(f"For '{self.name}', the strides cannot contain 0, but got strides: {strides_v}.")
    if len(end_v) != len(begin_v) or len(strides_v) != len(begin_v):
        raise ValueError(f"For '{self.name}' the length of begin index: {begin_v}, end index: {end_v} and "
                         f"strides: {strides_v} must be equal.")
    ret_shape = self._compute_slicing_shape(x['shape'], begin_v, end_v, strides_v)
    # A 0 in the output shape means the result is a known empty tensor.
    value = None if all(ret_shape) else Tensor(np.array([]).reshape(ret_shape), x['dtype'].element_type())
    if "max_value" in x and "min_value" in x:
        # NOTE(review): min_value/max_value appear to be per-element value bounds
        # carried for dynamic shapes; they are sliced with the same
        # begin/end/strides so the bounds track the sliced elements — confirm
        # against the dynamic-shape inference callers.
        validator.check_value_type("min_value", x["min_value"], [tuple, list], self.name)
        validator.check_value_type("max_value", x["max_value"], [tuple, list], self.name)
        max_value_np = np.array(x["max_value"])
        min_value_np = np.array(x["min_value"])
        slice_index = []
        for begin_i, end_i, strides_i in zip(begin_v, end_v, strides_v):
            s = slice(begin_i, end_i, strides_i)
            slice_index.append(s)
        slice_index = tuple(slice_index)
        max_value_slice = max_value_np[slice_index]
        min_value_slice = min_value_np[slice_index]
        max_value_slice = tuple(max_value_slice.tolist())
        min_value_slice = tuple(min_value_slice.tolist())
        return {'shape': ret_shape,
                'dtype': x['dtype'],
                'value': value,
                'max_value': max_value_slice,
                'min_value': min_value_slice}
    return {'shape': ret_shape,
            'dtype': x['dtype'],
            'value': value}
def _compute_slicing_shape(self, x_shape, begin_v, end_v, strides_v):
    """Computes the shape of the slicing.

    Walks two cursors in lockstep: `i` over the dimensions of `x_shape` and
    `j` over the slice specs (`begin_v`/`end_v`/`strides_v`), consulting the
    bit masks stored on `self` at each step.

    Args:
        x_shape: Shape of the input tensor (sequence of ints).
        begin_v (tuple): Begin index per slice spec.
        end_v (tuple): End index per slice spec.
        strides_v (tuple): Stride per slice spec (validated non-zero by caller).

    Returns:
        list, the inferred output shape.

    Raises:
        ValueError: If a shrink-axis position has a negative stride or an
            out-of-range begin index.
    """
    x_rank = len(x_shape)
    slice_len = len(begin_v)
    # After the integer is converted to binary, it is a str and the first two chars are the flag char '0b'.
    # Reversing with [-1:1:-1] makes index j of each string correspond to bit j of the mask.
    begin_pos = bin(self.begin_mask)[-1:1:-1]
    end_pos = bin(self.end_mask)[-1:1:-1]
    ellipsis_pos = bin(self.ellipsis_mask)[-1:1:-1]
    new_axis_pos = bin(self.new_axis_mask)[-1:1:-1]
    shrink_axis_pos = bin(self.shrink_axis_mask)[-1:1:-1]
    ret_shape = []
    i, j = 0, 0
    has_ellipsis = False
    while i < x_rank or j < slice_len:
        if j < slice_len:
            begin, end, stride = begin_v[j], end_v[j], strides_v[j]
            if j < len(ellipsis_pos) and ellipsis_pos[j] == '1':
                # When there is ellipsis, the latter part of the ellipsis will be processed separately.
                has_ellipsis = True
                break
            if j < len(begin_pos) and begin_pos[j] == '1':
                # begin_mask bit set: use the widest start for this stride direction.
                begin = -1 if strides_v[j] < 0 else 0
            if j < len(end_pos) and end_pos[j] == '1':
                # end_mask bit set: use the widest end for this stride direction.
                end = -(x_shape[i] + 1) if strides_v[j] < 0 else x_shape[i]
            if j < len(new_axis_pos) and new_axis_pos[j] == '1':
                # new_axis_mask bit set: emit a size-1 dim without consuming an input dim.
                ret_shape.append(1)
                j += 1
                continue
            if j < len(shrink_axis_pos) and shrink_axis_pos[j] == '1':
                # shrink_axis_mask bit set: the input dim is consumed but emits
                # no output dim; only validate begin/stride here.
                if (not -x_shape[i] <= begin < x_shape[i]) or stride < 0:
                    raise ValueError(f"For {self.name}, when shrink axis, the stride cannot be negative number, "
                                     f"and begin should be in [-{x_shape[i]}, {x_shape[i]}), "
                                     f"but got stride: {stride}, begin: {begin}.")
                j += 1
                i += 1
                continue
        else:
            # More input dims than slice specs: remaining dims pass through whole.
            begin, end, stride = 0, x_shape[i], 1
        slicing_length = _compute_slicing_length(begin, end, stride, x_shape, i)
        ret_shape.append(slicing_length)
        i += 1
        j += 1
    if has_ellipsis:
        # When there is ellipsis, handle the second half of the ellipsis split.
        # Dims absorbed by the ellipsis = input dims not claimed by the specs
        # after it (new-axis specs after the ellipsis consume no input dim).
        ellipsis_occupied_dims = x_rank - i - (slice_len - (j + 1)) + \
            len(tuple(filter(lambda x: x == '1', new_axis_pos[j + 1:slice_len])))
        ret_shape.extend(x_shape[i:i + ellipsis_occupied_dims])
        j += 1
        i += ellipsis_occupied_dims
        # Same per-spec handling as above for the dims after the ellipsis.
        while i < x_rank or j < slice_len:
            begin, end, stride = begin_v[j], end_v[j], strides_v[j]
            if j < len(begin_pos) and begin_pos[j] == '1':
                begin = -1 if strides_v[j] < 0 else 0
            if j < len(end_pos) and end_pos[j] == '1':
                end = -(x_shape[i] + 1) if strides_v[j] < 0 else x_shape[i]
            if j < len(new_axis_pos) and new_axis_pos[j] == '1':
                ret_shape.append(1)
                j += 1
                continue
            if j < len(shrink_axis_pos) and shrink_axis_pos[j] == '1':
                if (not -x_shape[i] <= begin < x_shape[i]) or stride < 0:
                    raise ValueError(f"For {self.name}, when shrink axis, the stride cannot be negative number, "
                                     f"and begin should be in [-{x_shape[i]}, {x_shape[i]}), "
                                     f"but got stride: {stride}, begin: {begin}.")
                j += 1
                i += 1
                continue
            slicing_length = _compute_slicing_length(begin, end, stride, x_shape, i)
            ret_shape.append(slicing_length)
            i += 1
            j += 1
    return ret_shape
  2404. class Diag(PrimitiveWithInfer):
  2405. r"""
  2406. Constructs a diagonal tensor with a given diagonal values.
  2407. Assume `input_x` has dimensions :math:`[D_1,... D_k]`, the output is a tensor of
  2408. rank 2k with dimensions :math:`[D_1,..., D_k, D_1,..., D_k]` where:
  2409. :math:`output[i_1,..., i_k, i_1,..., i_k] = input_x[i_1,..., i_k]` and 0 everywhere else.
  2410. Inputs:
  2411. - **input_x** (Tensor) - The input tensor. The input shape must be less than 5d.
  2412. Outputs:
  2413. Tensor, has the same dtype as the `input_x`.
  2414. Examples:
  2415. >>> input_x = Tensor([1, 2, 3, 4])
  2416. >>> diag = ops.Diag()
  2417. >>> output = diag(input_x)
  2418. >>> print(output)
  2419. [[1, 0, 0, 0],
  2420. [0, 2, 0, 0],
  2421. [0, 0, 3, 0],
  2422. [0, 0, 0, 4]]
  2423. """
  2424. @prim_attr_register
  2425. def __init__(self):
  2426. """Initialize Diag"""
  2427. def infer_dtype(self, x_type):
  2428. validator.check_subclass('input_x', x_type, mstype.tensor, self.name)
  2429. return x_type
  2430. def infer_shape(self, x_shape):
  2431. validator.check("x rank", len(x_shape), "", 1, Rel.GE)
  2432. ret_shape = copy.deepcopy(x_shape)
  2433. ret_shape = ret_shape + ret_shape
  2434. return ret_shape
  2435. def infer_value(self, x):
  2436. if x is None:
  2437. return None
  2438. # do constant-folding only when x rank is 1
  2439. if len(x.shape) != 1:
  2440. return None
  2441. ret = np.diag(x.asnumpy())
  2442. return Tensor(ret)
class DiagPart(PrimitiveWithInfer):
    r"""
    Extracts the diagonal part from given tensor.

    Assume input has dimensions :math:`[D_1,..., D_k, D_1,..., D_k]`, the output is a tensor
    of rank k with dimensions :math:`[D_1,..., D_k]` where:
    :math:`output[i_1,..., i_k] = input[i_1,..., i_k, i_1,..., i_k]`.

    Inputs:
        - **input_x** (Tensor) - tensor of rank k where k is even and not zero.

    Outputs:
        Tensor, the extracted diagonal has the same dtype as the `input_x`.

    Examples:
        >>> input_x = Tensor([[1, 0, 0, 0],
        ...                   [0, 2, 0, 0],
        ...                   [0, 0, 3, 0],
        ...                   [0, 0, 0, 4]])
        >>> diag_part = ops.DiagPart()
        >>> output = diag_part(input_x)
        >>> print(output)
        [1 2 3 4]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize DiagPart"""

    def infer_dtype(self, x_type):
        # Output dtype equals input dtype; only require a tensor input.
        validator.check_subclass('input_x', x_type, mstype.tensor, self.name)
        return x_type

    def infer_shape(self, x_shape):
        # Rank must be even and non-zero, and the second half of the shape
        # must mirror the first half; the output keeps the first half.
        if len(x_shape) % 2 != 0 or \
                not x_shape:
            raise ValueError(f"For \'{self.name}\' input rank must be non-zero and even, but got rank {len(x_shape)}, "
                             f"with shapes {x_shape}")
        length = len(x_shape) // 2
        for i in range(length):
            validator.check('input_shape[i + len(input_shape)/2]', x_shape[i + length],
                            'input_shape[i]', x_shape[i], Rel.EQ, self.name)
        ret_shape = x_shape[0:length]
        return ret_shape

    def infer_value(self, x):
        """Constant-fold DiagPart when the input value is known at compile time."""
        if x is None:
            return None
        # do constant-folding only when x rank is 2
        if len(x.shape) != 2:
            return None
        ret = np.diag(x.asnumpy())
        return Tensor(ret)
  2488. class Eye(PrimitiveWithInfer):
  2489. """
  2490. Creates a tensor with ones on the diagonal and zeros the rest.
  2491. Inputs:
  2492. - **n** (int) - The number of rows of returned tensor
  2493. - **m** (int) - The number of columns of returned tensor
  2494. - **t** (mindspore.dtype) - MindSpore's dtype, The data type of the returned tensor.
  2495. Outputs:
  2496. Tensor, a tensor with ones on the diagonal and the rest of elements are zero.
  2497. Supported Platforms:
  2498. ``Ascend`` ``GPU`` ``CPU``
  2499. Examples:
  2500. >>> eye = ops.Eye()
  2501. >>> output = eye(2, 2, mindspore.int32)
  2502. >>> print(output)
  2503. [[1 0]
  2504. [0 1]]
  2505. """
  2506. @prim_attr_register
  2507. def __init__(self):
  2508. """Initialize Eye"""
  2509. def infer_value(self, n, m, t):
  2510. validator.check_positive_int(n, "n", self.name)
  2511. validator.check_positive_int(m, "m", self.name)
  2512. args = {"dtype": t}
  2513. validator.check_types_same_and_valid(args, mstype.number_type + (mstype.bool_,), self.name)
  2514. np_type = mstype.dtype_to_nptype(t)
  2515. ret = np.eye(n, m, dtype=np_type)
  2516. return Tensor(ret)
  2517. class ScatterNd(PrimitiveWithInfer):
  2518. r"""
  2519. Scatters a tensor into a new tensor depending on the specified indices.
  2520. Creates an empty tensor with the given `shape`, and set values by scattering the update tensor depending on indices.
  2521. The empty tensor has rank P and `indices` has rank Q where `Q >= 2`.
  2522. `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)` where `N <= P`.
  2523. The last dimension of `indices` (with length `N` ) indicates slices along the `N` th dimension of the empty tensor.
  2524. `updates` is a tensor of rank `Q-1+P-N`. Its shape is: :math:`(i_0, i_1, ..., i_{Q-2}, shape_N, ..., shape_{P-1})`.
  2525. Inputs:
  2526. - **indices** (Tensor) - The index of scattering in the new tensor with int32 data type.
  2527. The rank of indices must be at least 2 and `indices_shape[-1] <= len(shape)`.
  2528. - **updates** (Tensor) - The source Tensor to be scattered.
  2529. It has shape `indices_shape[:-1] + shape[indices_shape[-1]:]`.
  2530. - **shape** (tuple[int]) - Define the shape of the output tensor, has the same type as indices.
  2531. Outputs:
  2532. Tensor, the new tensor, has the same type as `update` and the same shape as `shape`.
  2533. Supported Platforms:
  2534. ``Ascend`` ``GPU``
  2535. Examples:
  2536. >>> op = ops.ScatterNd()
  2537. >>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)
  2538. >>> updates = Tensor(np.array([3.2, 1.1]), mindspore.float32)
  2539. >>> shape = (3, 3)
  2540. >>> output = op(indices, updates, shape)
  2541. >>> print(output)
  2542. [[0. 3.2 0. ]
  2543. [0. 1.1 0. ]
  2544. [0. 0. 0. ]]
  2545. """
  2546. @prim_attr_register
  2547. def __init__(self):
  2548. """Initialize ScatterNd"""
  2549. self.init_prim_io_names(inputs=['indices', 'update', 'shape'], outputs=['output'])
  2550. def __infer__(self, indices, update, shape):
  2551. shp = shape['value']
  2552. validator.check_subclass("update_dtype", update['dtype'], mstype.tensor, self.name)
  2553. validator.check_tensor_dtype_valid("indices", indices['dtype'], [mstype.int32, mstype.int64], self.name)
  2554. validator.check_value_type("shape", shp, [tuple], self.name)
  2555. for i, x in enumerate(shp):
  2556. validator.check_positive_int(x, f'shape[{i}]', self.name)
  2557. indices_shape, update_shape = indices["shape"], update["shape"]
  2558. if indices_shape[0] != update_shape[0]:
  2559. raise ValueError(f'For \'{self.name}\' The indices_shape[0] and update_shape[0] must be equal.')
  2560. return {'shape': shp,
  2561. 'dtype': update['dtype'],
  2562. 'value': None}
  2563. class ResizeNearestNeighbor(PrimitiveWithInfer):
  2564. r"""
  2565. Resizes the input tensor by using the nearest neighbor algorithm.
  2566. Resizes the input tensor to a given size by using the nearest neighbor algorithm. The nearest
  2567. neighbor algorithm selects the value of the nearest point and does not consider the
  2568. values of neighboring points at all, yielding a piecewise-constant interpolant.
  2569. Args:
  2570. size (Union[tuple, list]): The target size. The dimension of size must be 2.
  2571. align_corners (bool): Whether the centers of the 4 corner pixels of the input
  2572. and output tensors are aligned. Default: False.
  2573. Inputs:
  2574. - **input_x** (Tensor) - The input tensor. The shape of the tensor is :math:`(N, C, H, W)`.
  2575. Outputs:
  2576. Tensor, the shape of the output tensor is :math:`(N, C, NEW\_H, NEW\_W)`.
  2577. Supported Platforms:
  2578. ``Ascend`` ``GPU``
  2579. Examples:
  2580. >>> input_tensor = Tensor(np.array([[[[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]]]), mindspore.float32)
  2581. >>> resize = ops.ResizeNearestNeighbor((2, 2))
  2582. >>> output = resize(input_tensor)
  2583. >>> print(output)
  2584. [[[[-0.1 0.3]
  2585. [ 0.4 0.5]]]]
  2586. """
  2587. @prim_attr_register
  2588. def __init__(self, size, align_corners=False):
  2589. """Initialize ResizeNearestNeighbor"""
  2590. validator.check_value_type("size", size, [tuple, list], self.name)
  2591. validator.check_value_type("align_corners", align_corners, [bool], self.name)
  2592. validator.check_equal_int(len(size), 2, "length of size", self.name)
  2593. for i, value in enumerate(size):
  2594. validator.check_non_negative_int(value, f'{i}th value of size', self.name)
  2595. self.init_prim_io_names(inputs=['image_in'], outputs=['image_out'])
  2596. def infer_shape(self, x_shape):
  2597. validator.check('the dimension of input_x', len(x_shape), '', 4, Rel.EQ, self.name)
  2598. return tuple(x_shape)[:-2] + tuple(self.size)
  2599. def infer_dtype(self, x_dtype):
  2600. validator.check_tensor_dtype_valid("x", x_dtype, mstype.number_type, self.name)
  2601. return x_dtype
  2602. class GatherNd(PrimitiveWithInfer):
  2603. """
  2604. Gathers slices from a tensor by indices.
  2605. Using given indices to gather slices from a tensor with a specified shape.
  2606. Inputs:
  2607. - **input_x** (Tensor) - The target tensor to gather values.
  2608. - **indices** (Tensor) - The index tensor, with int data type.
  2609. Outputs:
  2610. Tensor, has the same type as `input_x` and the shape is indices_shape[:-1] + x_shape[indices_shape[-1]:].
  2611. Supported Platforms:
  2612. ``Ascend`` ``GPU`` ``CPU``
  2613. Examples:
  2614. >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
  2615. >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
  2616. >>> op = ops.GatherNd()
  2617. >>> output = op(input_x, indices)
  2618. >>> print(output)
  2619. [-0.1 0.5]
  2620. """
  2621. @prim_attr_register
  2622. def __init__(self):
  2623. """Initialize GatherNd"""
  2624. self.init_prim_io_names(inputs=['input_x', 'indices'], outputs=['y'])
  2625. def infer_shape(self, x_shape, indices_shape):
  2626. validator.check('the dimension of x', len(x_shape),
  2627. 'the dimension of indices', indices_shape[-1], Rel.GE, self.name)
  2628. return indices_shape[:-1] + x_shape[indices_shape[-1]:]
  2629. def infer_dtype(self, x_dtype, indices_dtype):
  2630. validator.check_tensor_dtype_valid("indices", indices_dtype, mstype.int_type, self.name)
  2631. return x_dtype
  2632. class TensorScatterUpdate(PrimitiveWithInfer):
  2633. """
  2634. Updates tensor values using given values, along with the input indices.
  2635. Inputs:
  2636. - **input_x** (Tensor) - The target tensor. The dimension of input_x must be equal to indices.shape[-1].
  2637. - **indices** (Tensor) - The index of input tensor whose data type is int32.
  2638. - **update** (Tensor) - The tensor to update the input tensor, has the same type as input,
  2639. and update.shape = indices.shape[:-1] + input_x.shape[indices.shape[-1]:].
  2640. Outputs:
  2641. Tensor, has the same shape and type as `input_x`.
  2642. Supported Platforms:
  2643. ``Ascend``
  2644. Examples:
  2645. >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
  2646. >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
  2647. >>> update = Tensor(np.array([1.0, 2.2]), mindspore.float32)
  2648. >>> op = ops.TensorScatterUpdate()
  2649. >>> output = op(input_x, indices, update)
  2650. >>> print(output)
  2651. [[ 1. 0.3 3.6]
  2652. [ 0.4 2.2 -3.2]]
  2653. """
  2654. @prim_attr_register
  2655. def __init__(self):
  2656. """Initialize TensorScatterUpdate"""
  2657. self.init_prim_io_names(inputs=['x', 'indices', 'value'], outputs=['y'])
  2658. def infer_shape(self, x_shape, indices_shape, value_shape):
  2659. validator.check('the dimension of x', len(x_shape),
  2660. 'the dimension of indices', indices_shape[-1], Rel.GE)
  2661. if indices_shape[:-1] + x_shape[indices_shape[-1]:] != value_shape:
  2662. raise ValueError("For 'TensorScatterUpdate', input value are not match with input indices.")
  2663. return x_shape
  2664. def infer_dtype(self, x_dtype, indices_dtype, value_dtype):
  2665. validator.check_tensor_dtype_valid('indices', indices_dtype, [mstype.int32, mstype.int64], self.name)
  2666. args = {"x": x_dtype, "value": value_dtype}
  2667. validator.check_tensors_dtypes_same_and_valid(args, (mstype.bool_,) + mstype.number_type, self.name)
  2668. return x_dtype
class ScatterUpdate(_ScatterOp_Dynamic):
    r"""
    Updates tensor values by using input indices and value.

    Using given values to update tensor value, along with the input indices.

    for each `i, ..., j` in `indices.shape`:

    .. math::
        \text{input_x}[\text{indices}[i, ..., j], :] = \text{updates}[i, ..., j, :]

    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Args:
        use_locking (bool): Whether protect the assignment by a lock. Default: True.

    Inputs:
        - **input_x** (Parameter) - The target tensor, with data type of Parameter.
        - **indices** (Tensor) - The index of input tensor. With int32 data type.
          If there are duplicates in indices, the order for updating is undefined.
        - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,
          and updates.shape = indices.shape + input_x.shape[1:].

    Outputs:
        Tensor, has the same shape and type as `input_x`.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> np_x = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]])
        >>> input_x = mindspore.Parameter(Tensor(np_x, mindspore.float32), name="x")
        >>> indices = Tensor(np.array([0, 1]), mindspore.int32)
        >>> np_updates = np.array([[2.0, 1.2, 1.0], [3.0, 1.2, 1.0]])
        >>> updates = Tensor(np_updates, mindspore.float32)
        >>> op = ops.ScatterUpdate()
        >>> output = op(input_x, indices, updates)
        >>> print(output)
        [[2. 1.2 1. ]
         [3. 1.2 1. ]]
    """

    @prim_attr_register
    def __init__(self, use_locking=True):
        """Initialize ScatterUpdate"""
        validator.check_value_type('use_locking', use_locking, [bool], self.name)
        self.init_prim_io_names(inputs=['x', 'indices', 'updates'], outputs=['y'])
        # The op mutates its Parameter input in place, so flag the memory side effect.
        self.add_prim_attr('side_effect_mem', True)
  2710. class ScatterNdUpdate(_ScatterNdOp):
  2711. r"""
  2712. Updates tensor values by using input indices and value.
  2713. Using given values to update tensor value, along with the input indices.
  2714. `input_x` has rank P and `indices` has rank Q where `Q >= 2`.
  2715. `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)` where `N <= P`.
  2716. The last dimension of `indices` (with length `N` ) indicates slices along the `N` th dimension of `input_x`.
  2717. `updates` is a tensor of rank `Q-1+P-N`. Its shape is:
  2718. :math:`(i_0, i_1, ..., i_{Q-2}, x\_shape_N, ..., x\_shape_{P-1})`.
  2719. Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
  2720. If they have different data types, lower priority data type will be converted to
  2721. relatively highest priority data type.
  2722. RuntimeError exception will be thrown when the data type conversion of Parameter is required.
  2723. Args:
  2724. use_locking (bool): Whether protect the assignment by a lock. Default: True.
  2725. Inputs:
  2726. - **input_x** (Parameter) - The target tensor, with data type of Parameter.
  2727. - **indices** (Tensor) - The index of input tensor, with int32 data type.
  2728. The rank of indices must be at least 2 and `indices_shape[-1] <= len(shape)`.
  2729. - **updates** (Tensor) - The tensor to be updated to the input tensor, has the same type as input.
  2730. the shape is `indices_shape[:-1] + x_shape[indices_shape[-1]:]`.
  2731. Outputs:
  2732. Tensor, has the same shape and type as `input_x`.
  2733. Supported Platforms:
  2734. ``Ascend`` ``CPU``
  2735. Examples:
  2736. >>> np_x = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]])
  2737. >>> input_x = mindspore.Parameter(Tensor(np_x, mindspore.float32), name="x")
  2738. >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
  2739. >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
  2740. >>> op = ops.ScatterNdUpdate()
  2741. >>> output = op(input_x, indices, updates)
  2742. >>> print(output)
  2743. [[ 1. 0.3 3.6]
  2744. [ 0.4 2.2 -3.2]]
  2745. """
  2746. @prim_attr_register
  2747. def __init__(self, use_locking=True):
  2748. """Initialize ScatterNdUpdate"""
  2749. validator.check_value_type('use_locking', use_locking, [bool], self.name)
  2750. self.init_prim_io_names(inputs=['x', 'indices', 'value'], outputs=['y'])
  2751. self.add_prim_attr('side_effect_mem', True)
  2752. def infer_dtype(self, x_dtype, indices_dtype, value_dtype):
  2753. validator.check_tensor_dtype_valid('indices', indices_dtype, [mstype.int32], self.name)
  2754. args = {"x": x_dtype, "value": value_dtype}
  2755. validator.check_tensors_dtypes_same_and_valid(args, (mstype.bool_,) + mstype.number_type, self.name)
  2756. return x_dtype
class ScatterMax(_ScatterOp):
    r"""
    Updates the value of the input tensor through the maximum operation.

    Using given values to update tensor value through the max operation, along with the input indices.
    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.

    for each `i, ..., j` in `indices.shape`:

    .. math::
        \text{input_x}[\text{indices}[i, ..., j], :]
        = max(\text{input_x}[\text{indices}[i, ..., j], :], \text{updates}[i, ..., j, :])

    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Args:
        use_locking (bool): Whether protect the assignment by a lock. Default: True.

    Inputs:
        - **input_x** (Parameter) - The target parameter.
        - **indices** (Tensor) - The index to do max operation whose data type must be mindspore.int32.
        - **updates** (Tensor) - The tensor that performs the maximum operation with `input_x`,
          the data type is the same as `input_x`, the shape is `indices_shape + x_shape[1:]`.

    Outputs:
        Parameter, the updated `input_x`.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> input_x = Parameter(Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32), name="input_x")
        >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
        >>> updates = Tensor(np.ones([2, 2, 3]) * 88, mindspore.float32)
        >>> scatter_max = ops.ScatterMax()
        >>> output = scatter_max(input_x, indices, updates)
        >>> print(output)
        [[88. 88. 88.]
         [88. 88. 88.]]
    """
    # NOTE(review): `__init__` is inherited from `_ScatterOp` (not visible in this
    # file chunk) — confirm the documented `use_locking` default (True) matches it.
class ScatterMin(_ScatterOp):
    r"""
    Updates the value of the input tensor through the minimum operation.

    Using given values to update tensor value through the min operation, along with the input indices.
    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.

    for each `i, ..., j` in `indices.shape`:

    .. math::
        \text{input_x}[\text{indices}[i, ..., j], :]
        = min(\text{input_x}[\text{indices}[i, ..., j], :], \text{updates}[i, ..., j, :])

    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Args:
        use_locking (bool): Whether protect the assignment by a lock. Default: False.

    Inputs:
        - **input_x** (Parameter) - The target parameter.
        - **indices** (Tensor) - The index to do min operation whose data type must be mindspore.int32.
        - **updates** (Tensor) - The tensor doing the min operation with `input_x`,
          the data type is the same as `input_x`, the shape is `indices_shape + x_shape[1:]`.

    Outputs:
        Parameter, the updated `input_x`.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> input_x = Parameter(Tensor(np.array([[0.0, 1.0, 2.0], [0.0, 0.0, 0.0]]), mindspore.float32), name="input_x")
        >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
        >>> update = Tensor(np.ones([2, 2, 3]), mindspore.float32)
        >>> scatter_min = ops.ScatterMin()
        >>> output = scatter_min(input_x, indices, update)
        >>> print(output)
        [[0. 1. 1.]
         [0. 0. 0.]]
    """
    # Behavior (min-combine kernel) is provided entirely by the `_ScatterOp` base.
class ScatterAdd(_ScatterOp_Dynamic):
    r"""
    Updates the value of the input tensor through the addition operation.

    Using given values to update tensor value through the add operation, along with the input indices.
    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.

    for each `i, ..., j` in `indices.shape`:

    .. math::
        \text{input_x}[\text{indices}[i, ..., j], :] \mathrel{+}= \text{updates}[i, ..., j, :]

    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Args:
        use_locking (bool): Whether protect the assignment by a lock. Default: False.

    Inputs:
        - **input_x** (Parameter) - The target parameter.
        - **indices** (Tensor) - The index to do add operation whose data type must be mindspore.int32.
        - **updates** (Tensor) - The tensor that performs the add operation with `input_x`,
          the data type is the same as `input_x`, the shape is `indices_shape + x_shape[1:]`.

    Outputs:
        Parameter, the updated `input_x`.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), mindspore.float32), name="x")
        >>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)
        >>> updates = Tensor(np.ones([2, 2, 3]), mindspore.float32)
        >>> scatter_add = ops.ScatterAdd()
        >>> output = scatter_add(input_x, indices, updates)
        >>> print(output)
        [[1. 1. 1.]
         [3. 3. 3.]]
    """

    @prim_attr_register
    def __init__(self, use_locking=False):
        """Initialize ScatterAdd"""
        validator.check_value_type('use_locking', use_locking, [bool], self.name)
        self.init_prim_io_names(inputs=['x', 'indices', 'updates'], outputs=['y'])
        # The op mutates its Parameter input in place, so flag the memory side effect.
        self.add_prim_attr('side_effect_mem', True)
class ScatterSub(_ScatterOp):
    r"""
    Updates the value of the input tensor through the subtraction operation.

    Using given values to update tensor value through the subtraction operation, along with the input indices.
    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.

    for each `i, ..., j` in `indices.shape`:

    .. math::
        \text{input_x}[\text{indices}[i, ..., j], :] \mathrel{-}= \text{updates}[i, ..., j, :]

    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Args:
        use_locking (bool): Whether protect the assignment by a lock. Default: False.

    Inputs:
        - **input_x** (Parameter) - The target parameter.
        - **indices** (Tensor) - The index to perform the subtraction operation
          whose data type must be mindspore.int32.
        - **updates** (Tensor) - The tensor that performs the subtraction operation with `input_x`,
          the data type is the same as `input_x`, the shape is `indices_shape + x_shape[1:]`.

    Outputs:
        Parameter, the updated `input_x`.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]), mindspore.float32), name="x")
        >>> indices = Tensor(np.array([[0, 1]]), mindspore.int32)
        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]]), mindspore.float32)
        >>> scatter_sub = ops.ScatterSub()
        >>> output = scatter_sub(input_x, indices, updates)
        >>> print(output)
        [[-1. -1. -1.]
         [-1. -1. -1.]]
    """
    # Behavior (subtract-combine kernel) is provided entirely by the `_ScatterOp` base.
class ScatterMul(_ScatterOp):
    r"""
    Updates the value of the input tensor through the multiply operation.

    Using given values to update tensor value through the mul operation, along with the input indices.
    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.

    for each `i, ..., j` in `indices.shape`:

    .. math::
        \text{input_x}[\text{indices}[i, ..., j], :] \mathrel{*}= \text{updates}[i, ..., j, :]

    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
    If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Args:
        use_locking (bool): Whether protect the assignment by a lock. Default: False.

    Inputs:
        - **input_x** (Parameter) - The target parameter.
        - **indices** (Tensor) - The index to do mul operation whose data type must be mindspore.int32.
        - **updates** (Tensor) - The tensor doing the mul operation with `input_x`,
          the data type is the same as `input_x`, the shape is `indices_shape + x_shape[1:]`.

    Outputs:
        Parameter, the updated `input_x`.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> input_x = Parameter(Tensor(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]), mindspore.float32), name="x")
        >>> indices = Tensor(np.array([0, 1]), mindspore.int32)
        >>> updates = Tensor(np.array([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]), mindspore.float32)
        >>> scatter_mul = ops.ScatterMul()
        >>> output = scatter_mul(input_x, indices, updates)
        >>> print(output)
        [[2. 2. 2.]
         [4. 4. 4.]]
    """
    # Behavior (multiply-combine kernel) is provided entirely by the `_ScatterOp` base.
class ScatterDiv(_ScatterOp):
    r"""
    Updates the value of the input tensor through the divide operation.

    Using given values to update tensor value through the div operation, along with the input indices.
    This operation outputs the `input_x` after the update is done, which makes it convenient to use the
    updated value.

    for each `i, ..., j` in `indices.shape`:

    .. math::

        \text{input_x}[\text{indices}[i, ..., j], :] \mathrel{/}= \text{updates}[i, ..., j, :]

    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types
    consistent. If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Args:
        use_locking (bool): Whether protect the assignment by a lock. Default: False.

    Inputs:
        - **input_x** (Parameter) - The target parameter.
        - **indices** (Tensor) - The index to do div operation whose data type must be mindspore.int32.
        - **updates** (Tensor) - The tensor that performs the div operation with `input_x`,
          the data type is the same as `input_x`, the shape is `indices_shape + x_shape[1:]`.

    Outputs:
        Parameter, the updated `input_x`.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> input_x = Parameter(Tensor(np.array([[6.0, 6.0, 6.0], [2.0, 2.0, 2.0]]), mindspore.float32), name="x")
        >>> indices = Tensor(np.array([0, 1]), mindspore.int32)
        >>> updates = Tensor(np.array([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]), mindspore.float32)
        >>> scatter_div = ops.ScatterDiv()
        >>> output = scatter_div(input_x, indices, updates)
        >>> print(output)
        [[3. 3. 3.]
         [1. 1. 1.]]
    """
class ScatterNdAdd(_ScatterNdOp):
    r"""
    Applies sparse addition to individual values or slices in a tensor.

    Using given values to update tensor value through the add operation, along with the input indices.
    This operation outputs the `input_x` after the update is done, which makes it convenient to use the
    updated value.

    `input_x` has rank P and `indices` has rank Q where `Q >= 2`.

    `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)` where `N <= P`.

    The last dimension of `indices` (with length `N` ) indicates slices along the `N` th dimension of
    `input_x`.

    `updates` is a tensor of rank `Q-1+P-N`. Its shape is:
    :math:`(i_0, i_1, ..., i_{Q-2}, x\_shape_N, ..., x\_shape_{P-1})`.

    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types
    consistent. If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Args:
        use_locking (bool): Whether protect the assignment by a lock. Default: False.

    Inputs:
        - **input_x** (Parameter) - The target parameter.
        - **indices** (Tensor) - The index to do add operation whose data type must be mindspore.int32.
          The rank of indices must be at least 2 and `indices_shape[-1] <= len(shape)`.
        - **updates** (Tensor) - The tensor doing the add operation with `input_x`,
          the data type is same as `input_x`, the shape is `indices_shape[:-1] + x_shape[indices_shape[-1]:]`.

    Outputs:
        Parameter, the updated `input_x`.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
        >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
        >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
        >>> scatter_nd_add = ops.ScatterNdAdd()
        >>> output = scatter_nd_add(input_x, indices, updates)
        >>> print(output)
        [ 1. 10.  9.  4. 12.  6.  7. 17.]
    """
class ScatterNdSub(_ScatterNdOp):
    r"""
    Applies sparse subtraction to individual values or slices in a tensor.

    Using given values to update tensor value through the subtraction operation, along with the input
    indices. This operation outputs the `input_x` after the update is done, which makes it convenient to
    use the updated value.

    `input_x` has rank P and `indices` has rank Q where `Q >= 2`.

    `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)` where `N <= P`.

    The last dimension of `indices` (with length `N` ) indicates slices along the `N` th dimension of
    `input_x`.

    `updates` is a tensor of rank `Q-1+P-N`. Its shape is:
    :math:`(i_0, i_1, ..., i_{Q-2}, x\_shape_N, ..., x\_shape_{P-1})`.

    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types
    consistent. If they have different data types, lower priority data type will be converted to
    relatively highest priority data type.
    RuntimeError exception will be thrown when the data type conversion of Parameter is required.

    Args:
        use_locking (bool): Whether protect the assignment by a lock. Default: False.

    Inputs:
        - **input_x** (Parameter) - The target parameter.
        - **indices** (Tensor) - The index to do add operation whose data type must be mindspore.int32.
          The rank of indices must be at least 2 and `indices_shape[-1] <= len(shape)`.
        - **updates** (Tensor) - The tensor that performs the subtraction operation with `input_x`,
          the data type is the same as `input_x`, the shape is
          `indices_shape[:-1] + x_shape[indices_shape[-1]:]`.

    Outputs:
        Parameter, the updated `input_x`.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
        >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
        >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
        >>> scatter_nd_sub = ops.ScatterNdSub()
        >>> output = scatter_nd_sub(input_x, indices, updates)
        >>> print(output)
        [ 1. -6. -3.  4. -2.  6.  7. -1.]
    """
  3034. class ScatterNonAliasingAdd(_ScatterNdOp):
  3035. """
  3036. Applies sparse addition to the input using individual values or slices.
  3037. Using given values to update tensor value through the add operation, along with the input indices.
  3038. This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.
  3039. Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.
  3040. If they have different data types, lower priority data type will be converted to
  3041. relatively highest priority data type.
  3042. RuntimeError exception will be thrown when the data type conversion of Parameter is required.
  3043. Inputs:
  3044. - **input_x** (Parameter) - The target parameter. The data type must be float16, float32 or int32.
  3045. - **indices** (Tensor) - The index to perform the addition operation whose data type must be mindspore.int32.
  3046. - **updates** (Tensor) - The tensor that performs the addition operation with `input_x`,
  3047. the data type is the same as `input_x`, the shape is `indices_shape[:-1] + x_shape[indices_shape[-1]:]`.
  3048. Outputs:
  3049. Parameter, the updated `input_x`.
  3050. Supported Platforms:
  3051. ``Ascend``
  3052. Examples:
  3053. >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
  3054. >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
  3055. >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
  3056. >>> scatter_non_aliasing_add = ops.ScatterNonAliasingAdd()
  3057. >>> output = scatter_non_aliasing_add(input_x, indices, updates)
  3058. >>> print(output)
  3059. [ 1. 10. 9. 4. 12. 6. 7. 17.]
  3060. """
  3061. @prim_attr_register
  3062. def __init__(self):
  3063. """Initialize ScatterNonAliasingAdd"""
  3064. self.init_prim_io_names(inputs=['x', 'indices', 'updates'], outputs=['y'])
  3065. self.add_prim_attr('side_effect_mem', True)
  3066. def infer_dtype(self, x_dtype, indices_dtype, updates_dtype):
  3067. validator.check_tensor_dtype_valid('indices', indices_dtype, [mstype.int32], self.name)
  3068. args = {"x": x_dtype, "updates": updates_dtype}
  3069. validator.check_tensors_dtypes_same_and_valid(args, [mstype.float16, mstype.float32, mstype.int32], self.name)
  3070. return x_dtype
  3071. class SpaceToDepth(PrimitiveWithInfer):
  3072. r"""
  3073. Rearranges blocks of spatial data into depth.
  3074. The output tensor's `height` dimension is :math:`height / block\_size`.
  3075. The output tensor's `weight` dimension is :math:`weight / block\_size`.
  3076. The depth of output tensor is :math:`block\_size * block\_size * input\_depth`.
  3077. The input tensor's height and width must be divisible by `block_size`.
  3078. The data format is "NCHW".
  3079. Args:
  3080. block_size (int): The block size used to divide spatial data. It must be >= 2.
  3081. Inputs:
  3082. - **x** (Tensor) - The target tensor.
  3083. Outputs:
  3084. Tensor, the same data type as `x`. It must be a 4-D tensor.
  3085. Supported Platforms:
  3086. ``Ascend``
  3087. Examples:
  3088. >>> x = Tensor(np.random.rand(1,3,2,2), mindspore.float32)
  3089. >>> block_size = 2
  3090. >>> space_to_depth = ops.SpaceToDepth(block_size)
  3091. >>> output = space_to_depth(x)
  3092. >>> print(output.shape)
  3093. (1, 12, 1, 1)
  3094. """
  3095. @prim_attr_register
  3096. def __init__(self, block_size):
  3097. """Initialize SpaceToDepth"""
  3098. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  3099. validator.check_value_type('block_size', block_size, [int], self.name)
  3100. validator.check('block_size', block_size, '', 2, Rel.GE)
  3101. self.block_size = block_size
  3102. self.add_prim_attr("data_format", "NCHW")
  3103. def infer_shape(self, x_shape):
  3104. validator.check('x dimension', len(x_shape), '', 4, Rel.EQ)
  3105. out_shape = copy.deepcopy(x_shape)
  3106. for i in range(2):
  3107. if out_shape[i + 2] % self.block_size != 0:
  3108. raise ValueError(f'For \'{self.name}\' input shape[{i + 2}] {out_shape[i + 2]} should be '
  3109. f'fully divided by block_size {self.block_size}')
  3110. out_shape[i + 2] //= self.block_size
  3111. out_shape[1] *= self.block_size * self.block_size
  3112. return out_shape
  3113. def infer_dtype(self, x_dtype):
  3114. validator.check_subclass("x_dtype", x_dtype, mstype.tensor, self.name)
  3115. return x_dtype
  3116. class DepthToSpace(PrimitiveWithInfer):
  3117. r"""
  3118. Rearranges blocks of depth data into spatial dimensions.
  3119. This is the reverse operation of SpaceToDepth.
  3120. The depth of output tensor is :math:`input\_depth / (block\_size * block\_size)`.
  3121. The output tensor's `height` dimension is :math:`height * block\_size`.
  3122. The output tensor's `weight` dimension is :math:`weight * block\_size`.
  3123. The input tensor's depth must be divisible by `block_size * block_size`.
  3124. The data format is "NCHW".
  3125. Args:
  3126. block_size (int): The block size used to divide depth data. It must be >= 2.
  3127. Inputs:
  3128. - **x** (Tensor) - The target tensor. It must be a 4-D tensor with shape :math:`(N, C_{in}, H_{in}, W_{in})`.
  3129. Outputs:
  3130. Tensor of shape :math:`(N, C_{in} / \text{block_size}, H_{in} * \text{block_size}, W_{in} * \text{block_size})`.
  3131. Supported Platforms:
  3132. ``Ascend``
  3133. Examples:
  3134. >>> x = Tensor(np.random.rand(1, 12, 1, 1), mindspore.float32)
  3135. >>> block_size = 2
  3136. >>> depth_to_space = ops.DepthToSpace(block_size)
  3137. >>> output = depth_to_space(x)
  3138. >>> print(output.shape)
  3139. (1, 3, 2, 2)
  3140. """
  3141. @prim_attr_register
  3142. def __init__(self, block_size):
  3143. """Initialize DepthToSpace"""
  3144. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  3145. validator.check_value_type('block_size', block_size, [int], self.name)
  3146. validator.check('block_size', block_size, '', 2, Rel.GE, self.name)
  3147. self.block_size = block_size
  3148. self.add_prim_attr("data_format", "NCHW")
  3149. def infer_shape(self, x_shape):
  3150. validator.check('x dimension', len(x_shape), '', 4, Rel.EQ)
  3151. out_shape = copy.deepcopy(x_shape)
  3152. for i in range(2):
  3153. out_shape[i + 2] *= self.block_size
  3154. validator.check_int(x_shape[1] % (self.block_size * self.block_size),
  3155. 0, Rel.EQ, 'x_shape[1] % (block_size*block_size)', self.name)
  3156. out_shape[1] //= self.block_size * self.block_size
  3157. return out_shape
  3158. def infer_dtype(self, x_dtype):
  3159. validator.check_subclass("x_dtype", x_dtype, mstype.tensor, self.name)
  3160. return x_dtype
  3161. class SpaceToBatch(PrimitiveWithInfer):
  3162. r"""
  3163. Divides spatial dimensions into blocks and combines the block size with the original batch.
  3164. This operation will divide spatial dimensions (H, W) into blocks with `block_size`, the output tensor's H and W
  3165. dimension is the corresponding number of blocks after division. The output tensor's batch dimension is the
  3166. product of the original batch and the square of block_size. Before division, the spatial dimensions
  3167. of the input are zero padded according to paddings if necessary.
  3168. Args:
  3169. block_size (int): The block size of dividing blocks with value greater than 2.
  3170. paddings (Union[tuple, list]): The padding values for H and W dimension, containing 2 subtraction lists.
  3171. Each subtraction list contains 2 integer value. All values must be greater than 0.
  3172. paddings[i] specifies the paddings for the spatial dimension i, which corresponds to the
  3173. input dimension i+2. It is required that input_shape[i+2]+paddings[i][0]+paddings[i][1]
  3174. is divisible by block_size.
  3175. Inputs:
  3176. - **input_x** (Tensor) - The input tensor. It must be a 4-D tensor.
  3177. Outputs:
  3178. Tensor, the output tensor with the same data type as input. Assume input shape is :math:`(n, c, h, w)` with
  3179. :math:`block\_size` and :math:`paddings`. The shape of the output tensor will be :math:`(n', c', h', w')`,
  3180. where
  3181. :math:`n' = n*(block\_size*block\_size)`
  3182. :math:`c' = c`
  3183. :math:`h' = (h+paddings[0][0]+paddings[0][1])//block\_size`
  3184. :math:`w' = (w+paddings[1][0]+paddings[1][1])//block\_size`
  3185. Supported Platforms:
  3186. ``Ascend``
  3187. Examples:
  3188. >>> block_size = 2
  3189. >>> paddings = [[0, 0], [0, 0]]
  3190. >>> space_to_batch = ops.SpaceToBatch(block_size, paddings)
  3191. >>> input_x = Tensor(np.array([[[[1, 2], [3, 4]]]]), mindspore.float32)
  3192. >>> output = space_to_batch(input_x)
  3193. >>> print(output)
  3194. [[[[1.]]]
  3195. [[[2.]]]
  3196. [[[3.]]]
  3197. [[[4.]]]]
  3198. """
  3199. @prim_attr_register
  3200. def __init__(self, block_size, paddings):
  3201. """Initialize SpaceToBatch"""
  3202. logger.warning("WARN_DEPRECATED: The usage of SpaceToBatch is deprecated."
  3203. " Please use SpaceToBatchND.")
  3204. validator.check_value_type('block_size', block_size, [int], self.name)
  3205. validator.check('block_size', block_size, '', 2, Rel.GE, self.name)
  3206. self.block_size = block_size
  3207. validator.check('paddings shape', np.array(paddings).shape, '', (2, 2), Rel.EQ, self.name)
  3208. for elem in itertools.chain(*paddings):
  3209. validator.check_non_negative_int(elem, 'paddings element', self.name)
  3210. validator.check_value_type('paddings element', elem, [int], self.name)
  3211. self.paddings = paddings
  3212. def infer_dtype(self, x_dtype):
  3213. validator.check_tensor_dtype_valid('input_x', x_dtype, mstype.number_type, self.name)
  3214. return x_dtype
  3215. def infer_shape(self, x_shape):
  3216. validator.check_equal_int(len(x_shape), 4, 'rank of input_x', self.name)
  3217. out_shape = copy.deepcopy(x_shape)
  3218. for i in range(2):
  3219. padded = out_shape[i + 2] + self.paddings[i][0] + self.paddings[i][1]
  3220. if padded % self.block_size != 0:
  3221. raise ValueError(f'For \'{self.name}\' padded[{i}] {padded} should be divisible by '
  3222. f'block_size {self.block_size}')
  3223. out_shape[i + 2] = padded // self.block_size
  3224. out_shape[0] *= self.block_size * self.block_size
  3225. return out_shape
  3226. class BatchToSpace(PrimitiveWithInfer):
  3227. r"""
  3228. Divides batch dimension with blocks and interleaves these blocks back into spatial dimensions.
  3229. This operation will divide batch dimension N into blocks with block_size, the output tensor's N dimension
  3230. is the corresponding number of blocks after division. The output tensor's H, W dimension is product of original H, W
  3231. dimension and block_size with given amount to crop from dimension, respectively.
  3232. Args:
  3233. block_size (int): The block size of division, has the value not less than 2.
  3234. crops (Union[list(int), tuple(int)]): The crop value for H and W dimension, containing 2 subtraction lists.
  3235. Each list contains 2 integers.
  3236. All values must be not less than 0. crops[i] specifies the crop values for the spatial dimension i, which
  3237. corresponds to the input dimension i+2. It is required that
  3238. input_shape[i+2]*block_size >= crops[i][0]+crops[i][1].
  3239. Inputs:
  3240. - **input_x** (Tensor) - The input tensor. It must be a 4-D tensor, dimension 0 must be divisible by
  3241. product of `block_shape`.
  3242. Outputs:
  3243. Tensor, the output tensor with the same type as input. Assume input shape is (n, c, h, w) with block_size
  3244. and crops. The output shape will be (n', c', h', w'), where
  3245. :math:`n' = n//(block\_size*block\_size)`
  3246. :math:`c' = c`
  3247. :math:`h' = h*block\_size-crops[0][0]-crops[0][1]`
  3248. :math:`w' = w*block\_size-crops[1][0]-crops[1][1]`
  3249. Supported Platforms:
  3250. ``Ascend``
  3251. Examples:
  3252. >>> block_size = 2
  3253. >>> crops = [[0, 0], [0, 0]]
  3254. >>> batch_to_space = ops.BatchToSpace(block_size, crops)
  3255. >>> input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32)
  3256. >>> output = batch_to_space(input_x)
  3257. >>> print(output)
  3258. [[[[1. 2.]
  3259. [3. 4.]]]]
  3260. """
  3261. @prim_attr_register
  3262. def __init__(self, block_size, crops):
  3263. """Initialize BatchToSpace"""
  3264. logger.warning("WARN_DEPRECATED: The usage of BatchToSpace is deprecated."
  3265. " Please use BatchToSpaceND.")
  3266. validator.check_value_type('block_size', block_size, [int], self.name)
  3267. validator.check('block_size', block_size, '', 2, Rel.GE, self.name)
  3268. self.block_size = block_size
  3269. validator.check_value_type('crops type', crops, [list, tuple], self.name)
  3270. validator.check('crops shape', np.array(crops).shape, '', (2, 2))
  3271. for elem in itertools.chain(*crops):
  3272. validator.check_non_negative_int(elem, 'crops element', self.name)
  3273. validator.check_value_type('crops element', elem, [int], self.name)
  3274. self.crops = crops
  3275. def infer_dtype(self, x_dtype):
  3276. validator.check_tensor_dtype_valid('input_x', x_dtype, mstype.number_type, self.name)
  3277. return x_dtype
  3278. def infer_shape(self, x_shape):
  3279. validator.check('rank of input_x', len(x_shape), '', 4)
  3280. out_shape = copy.deepcopy(x_shape)
  3281. for i in range(2):
  3282. x_block_prod = out_shape[i + 2] * self.block_size
  3283. crops_sum = self.crops[i][0] + self.crops[i][1]
  3284. validator.check("x block shape prod", x_block_prod, 'crops sum', crops_sum, Rel.GT, self.name)
  3285. out_shape[i + 2] = x_block_prod - crops_sum
  3286. block_size_prod = self.block_size * self.block_size
  3287. if out_shape[0] % block_size_prod != 0:
  3288. raise ValueError(f'For \'{self.name}\' input_x dimension 0 {out_shape[0]} should be divisible by '
  3289. f'block_size_prod {block_size_prod}')
  3290. out_shape[0] = out_shape[0] // block_size_prod
  3291. return out_shape
class SpaceToBatchND(PrimitiveWithInfer):
    r"""
    Divides spatial dimensions into blocks and combines the block size with the original batch.

    This operation will divide spatial dimensions (H, W) into blocks with block_shape, the output
    tensor's H and W dimension is the corresponding number of blocks after division. The output tensor's
    batch dimension is the product of the original batch and the product of `block_shape`. Before
    division, the spatial dimensions of the input are zero padded according to paddings if necessary.

    Args:
        block_shape (Union[list(int), tuple(int), int]): The block shape of dividing block with all value
            greater than 1. If `block_shape` is a tuple or list, the length of `block_shape` is M
            corresponding to the number of spatial dimensions. If `block_shape` is a int, the block size
            of M dimensions are the same, equal to `block_shape`. M must be 2.
        paddings (Union[tuple, list]): The padding values for H and W dimension, containing 2 subtraction
            list. Each contains 2 integer value. All values must be greater than 0.
            `paddings[i]` specifies the paddings for the spatial dimension i,
            which corresponds to the input dimension i+2.
            It is required that input_shape[i+2]+paddings[i][0]+paddings[i][1] is divisible by
            block_shape[i].

    Inputs:
        - **input_x** (Tensor) - The input tensor. It must be a 4-D tensor.

    Outputs:
        Tensor, the output tensor with the same data type as input. Assume input shape is
        :math:`(n, c, h, w)` with :math:`block\_shape` and :math:`paddings`. The shape of the output
        tensor will be :math:`(n', c', h', w')`, where

        :math:`n' = n*(block\_shape[0]*block\_shape[1])`

        :math:`c' = c`

        :math:`h' = (h+paddings[0][0]+paddings[0][1])//block\_shape[0]`

        :math:`w' = (w+paddings[1][0]+paddings[1][1])//block\_shape[1]`

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> block_shape = [2, 2]
        >>> paddings = [[0, 0], [0, 0]]
        >>> space_to_batch_nd = ops.SpaceToBatchND(block_shape, paddings)
        >>> input_x = Tensor(np.array([[[[1, 2], [3, 4]]]]), mindspore.float32)
        >>> output = space_to_batch_nd(input_x)
        >>> print(output)
        [[[[1.]]]
         [[[2.]]]
         [[[3.]]]
         [[[4.]]]]
    """

    @prim_attr_register
    def __init__(self, block_shape, paddings):
        """Initialize SpaceToBatchND"""
        # A scalar block size is broadcast to both spatial dimensions (M = 2).
        if isinstance(block_shape, int):
            block_shape = (block_shape,) * 2
        self.add_prim_attr("block_shape", block_shape)
        # block_shape must be a flat length-2 sequence of ints >= 1.
        validator.check_value_type('block_shape type', block_shape, [list, tuple], self.name)
        validator.check('block_shape shape', len(np.array(block_shape).shape), '', 1, Rel.EQ, self.name)
        block_rank = len(block_shape)
        validator.check('block_shape length', block_rank, '', 2, Rel.EQ, self.name)
        for elem in block_shape:
            validator.check('block_shape element', elem, '', 1, Rel.GE, self.name)
            validator.check_value_type('block_shape element', elem, [int], self.name)
        self.block_shape = block_shape
        # paddings must be a (block_rank, 2) structure of non-negative ints.
        validator.check_value_type('paddings type', paddings, [list, tuple], self.name)
        validator.check('paddings length', len(paddings), '', 2, Rel.EQ, self.name)
        validator.check('paddings shape', np.array(paddings).shape, '', (block_rank, 2), Rel.EQ, self.name)
        for elem in itertools.chain(*paddings):
            validator.check_non_negative_int(elem, 'paddings element', self.name)
            validator.check_value_type('paddings element', elem, [int], self.name)
        self.paddings = paddings

    def infer_dtype(self, x_dtype):
        validator.check_tensor_dtype_valid('input_x', x_dtype, mstype.number_type, self.name)
        return x_dtype

    def infer_shape(self, x_shape):
        x_rank = len(x_shape)
        validator.check_equal_int(x_rank, 4, 'x_shape rank', self.name)
        out_shape = copy.deepcopy(x_shape)
        block_shape_prod = 1
        # Spatial dims start at index 2 (NCHW layout).
        offset = 2
        for i in range(len(self.block_shape)):
            # Each padded spatial dim must split evenly into block_shape[i] pieces.
            padded = out_shape[i + offset] + self.paddings[i][0] + \
                     self.paddings[i][1]
            if padded % self.block_shape[i] != 0:
                raise ValueError(f'For \'{self.name}\' padded[{i}] {padded} should be divisible by '
                                 f'block_shape[{i}] {self.block_shape[i]}')
            out_shape[i + offset] = padded // self.block_shape[i]
            block_shape_prod = block_shape_prod * self.block_shape[i]
        # Batch grows by the product of all block sizes.
        out_shape[0] *= block_shape_prod
        return out_shape
class BatchToSpaceND(PrimitiveWithInfer):
    r"""
    Divides batch dimension with blocks and interleaves these blocks back into spatial dimensions.

    This operation will divide batch dimension N into blocks with block_shape, the output tensor's N
    dimension is the corresponding number of blocks after division. The output tensor's H, W dimension is
    product of original H, W dimension and block_shape with given amount to crop from dimension,
    respectively.

    Args:
        block_shape (Union[list(int), tuple(int), int]): The block shape of dividing block with all value
            greater than 1. If `block_shape` is a tuple or list, the length of `block_shape` is M
            corresponding to the number of spatial dimensions. If `block_shape` is a int, the block size
            of M dimensions are the same, equal to `block_shape`. M must be 2.
        crops (Union[list(int), tuple(int)]): The crop value for H and W dimension, containing 2
            subtraction list, each containing 2 int value.
            All values must be >= 0. crops[i] specifies the crop values for spatial dimension i, which
            corresponds to input dimension i+2. It is required that
            input_shape[i+2]*block_shape[i] > crops[i][0]+crops[i][1].

    Inputs:
        - **input_x** (Tensor) - The input tensor. It must be a 4-D tensor, dimension 0 must be divisible
          by product of `block_shape`.

    Outputs:
        Tensor, the output tensor with the same type as input. Assume input shape is (n, c, h, w) with
        block_shape and crops. The output shape will be (n', c', h', w'), where

        :math:`n' = n//(block\_shape[0]*block\_shape[1])`

        :math:`c' = c`

        :math:`h' = h*block\_shape[0]-crops[0][0]-crops[0][1]`

        :math:`w' = w*block\_shape[1]-crops[1][0]-crops[1][1]`

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> block_shape = [2, 2]
        >>> crops = [[0, 0], [0, 0]]
        >>> batch_to_space_nd = ops.BatchToSpaceND(block_shape, crops)
        >>> input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32)
        >>> output = batch_to_space_nd(input_x)
        >>> print(output)
        [[[[1. 2.]
           [3. 4.]]]]
    """

    @prim_attr_register
    def __init__(self, block_shape, crops):
        """Initialize BatchToSpaceND"""
        # A scalar block size is broadcast to both spatial dimensions (M = 2).
        if isinstance(block_shape, int):
            block_shape = (block_shape,) * 2
        self.add_prim_attr("block_shape", block_shape)
        # block_shape must be a flat length-2 sequence of ints >= 1.
        validator.check_value_type('block_shape type', block_shape, [list, tuple], self.name)
        validator.check('block_shape shape', len(np.array(block_shape).shape), '', 1, Rel.EQ, self.name)
        block_rank = len(block_shape)
        validator.check('block_shape length', block_rank, '', 2, Rel.EQ, self.name)
        for elem in block_shape:
            validator.check('block_shape element', elem, '', 1, Rel.GE, self.name)
            validator.check_value_type('block_shape element', elem, [int], self.name)
        self.block_shape = block_shape
        # crops must be a (block_rank, 2) structure of non-negative ints.
        validator.check_value_type('crops type', crops, [list, tuple], self.name)
        validator.check('crops length', len(crops), '', 2, Rel.EQ, self.name)
        validator.check('crops shape', np.array(crops).shape, '', (block_rank, 2), Rel.EQ, self.name)
        for elem in itertools.chain(*crops):
            validator.check_non_negative_int(elem, 'crops element', self.name)
            validator.check_value_type('crops element', elem, [int], self.name)
        self.crops = crops

    def infer_dtype(self, x_dtype):
        validator.check_tensor_dtype_valid('input_x', x_dtype, mstype.number_type, self.name)
        return x_dtype

    def infer_shape(self, x_shape):
        x_rank = len(x_shape)
        validator.check_int(x_rank, 4, Rel.EQ, 'x_shape rank', self.name)
        out_shape = copy.deepcopy(x_shape)
        block_shape_prod = 1
        # Spatial dims start at index 2 (NCHW layout).
        offset = 2
        for i in range(len(self.block_shape)):
            block_shape_prod = block_shape_prod * self.block_shape[i]
            # Each spatial dim expands by its block size, then the crop total
            # (which must be strictly smaller) is subtracted.
            x_block_prod = out_shape[i + offset] * self.block_shape[i]
            crops_sum = self.crops[i][0] + self.crops[i][1]
            validator.check("x block shape prod", x_block_prod, 'crops sum', crops_sum, Rel.GT, self.name)
            out_shape[i + offset] = x_block_prod - crops_sum
        # Batch shrinks by the product of all block sizes and must divide evenly.
        if out_shape[0] % block_shape_prod != 0:
            raise ValueError(f'For \'{self.name}\' input_x dimension 0 {out_shape[0]} should be divisible by '
                             f'block_shape_prod {block_shape_prod}')
        out_shape[0] = out_shape[0] // block_shape_prod
        return out_shape
class BroadcastTo(PrimitiveWithInfer):
    """
    Broadcasts input tensor to a given shape.

    Input shape can be broadcast to target shape if for each dimension pair they are either equal or
    input is one or the target dimension is -1. In case of -1 in target shape, it will be replaced by
    the input shape's value in that dimension.

    When input shape is broadcast to target shape, it starts with the trailing dimensions.

    Args:
        shape (tuple): The target shape to broadcast. Can be fully specified, or have -1 in one position
            where it will be substituted by the input tensor's shape in that position, see example.

    Inputs:
        - **input_x** (Tensor) - The input tensor. The data type should be one of the following types:
          float16, float32, int32, int8, uint8.

    Outputs:
        Tensor, with the given `shape` and the same data type as `input_x`.

    Raises:
        ValueError: Given a shape tuple, if it has several -1; or if the -1 is in an invalid position
            such as one that does not have a opposing dimension in an input tensor; or if the target and
            input shapes are incompatible.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> shape = (2, 3)
        >>> input_x = Tensor(np.array([1, 2, 3]).astype(np.float32))
        >>> broadcast_to = ops.BroadcastTo(shape)
        >>> output = broadcast_to(input_x)
        >>> print(output)
        [[1. 2. 3.]
         [1. 2. 3.]]
        >>> shape = (2, -1)
        >>> input_x = Tensor(np.array([1, 2, 3]).astype(np.float32))
        >>> broadcast_to = ops.BroadcastTo(shape)
        >>> output = broadcast_to(input_x)
        >>> print(output)
        [[1. 2. 3.]
         [1. 2. 3.]]
    """

    @prim_attr_register
    def __init__(self, shape):
        """Initialize BroadcastTo"""
        validator.check_value_type("shape", shape, (tuple), self.name)
        validator.check("shape length", len(shape), "", 0, Rel.GT, self.name)
        # Every dimension must be an int >= -1; -1 marks a dimension to be
        # filled in from the input shape at infer time.
        for ix, i in enumerate(shape):
            validator.check_value_type('target shape index -> ' + str(ix), i, [int], self.name)
            validator.check("shape element", i, "shape element min limit", -1, Rel.GE, self.name)
        self.shape = shape
        if -1 in self.shape:
            # At most one -1 is allowed; self.dyn records that the target shape
            # must be resolved against the input shape in infer_shape.
            undef_dims = self.shape.count(-1)
            if undef_dims > 1:
                raise ValueError(f'The shape can only has one -1 at most, but has {undef_dims}.')
            self.dyn = True
        else:
            self.dyn = False

    def infer_shape(self, x_shape):
        validator.check("input_x shape length", len(x_shape), "target shape", len(self.shape), Rel.LE, self.name)
        target_shape = list(self.shape)
        # Extra leading dims of the target that have no counterpart in x_shape.
        outer_dim_offset = len(target_shape) - len(x_shape)
        if self.dyn:
            # Resolve the single -1 from the aligned input dimension; a -1 in
            # the leading (outer) region has no input dimension to copy from.
            for i, v in enumerate(target_shape):
                if v == -1:
                    if i < outer_dim_offset:
                        raise ValueError(f" -1 in init shape is in an incompatible location"
                                         f" with given input tensor, -1 index in init shape: {i}"
                                         f" but -1 can only be in index {len(x_shape)} onwards for this input.")
                    target_shape[i] = x_shape[i - outer_dim_offset]
        # Validate broadcast compatibility from the trailing dimensions:
        # each input dim must equal the target dim or be 1.
        reversed_x_shape = tuple(reversed(x_shape))
        reversed_target = tuple(reversed(target_shape))
        for i, v in enumerate(reversed_x_shape):
            if v not in (reversed_target[i], 1):
                raise ValueError(f"Not supported shapes for broadcast, "
                                 f"x_shape: {tuple(x_shape)}, target shape {target_shape}.")
        # Persist the fully-resolved shape back onto the primitive so the
        # 'shape' attr seen by backends has no -1 left in it.
        self.shape = tuple(target_shape)
        self.add_prim_attr('shape', self.shape)
        return target_shape

    def infer_dtype(self, x_dtype):
        validator.check_subclass("input_x", x_dtype, mstype.tensor, self.name)
        return x_dtype
  3528. class Meshgrid(PrimitiveWithInfer):
  3529. """
  3530. Generates coordinate matrices from given coordinate tensors.
  3531. Given N one-dimensional coordinate tensors, returns a tuple outputs of N N-D
  3532. coordinate tensors for evaluating expressions on an N-D grid.
  3533. Args:
  3534. indexing (str): Either 'xy' or 'ij'. Default: 'xy'.
  3535. When the indexing argument is set to 'xy' (the default), the broadcasting
  3536. instructions for the first two dimensions are swapped.
  3537. Inputs:
  3538. - **input** (Union[tuple]) - A Tuple of N 1-D Tensor objects.
  3539. The length of input should be greater than 1
  3540. Outputs:
  3541. Tensors, A Tuple of N N-D Tensor objects.
  3542. Supported Platforms:
  3543. ``Ascend``
  3544. Examples:
  3545. >>> x = Tensor(np.array([1, 2, 3, 4]).astype(np.int32))
  3546. >>> y = Tensor(np.array([5, 6, 7]).astype(np.int32))
  3547. >>> z = Tensor(np.array([8, 9, 0, 1, 2]).astype(np.int32))
  3548. >>> inputs = (x, y, z)
  3549. >>> meshgrid = ops.Meshgrid(indexing="xy")
  3550. >>> output = meshgrid(inputs)
  3551. >>> print(output)
  3552. (Tensor(shape=[3, 4, 5], dtype=Int32, value=
  3553. [[[1, 1, 1, 1, 1],
  3554. [2, 2, 2, 2, 2],
  3555. [3, 3, 3, 3, 3],
  3556. [4, 4, 4, 4, 4]],
  3557. [[1, 1, 1, 1, 1],
  3558. [2, 2, 2, 2, 2],
  3559. [3, 3, 3, 3, 3],
  3560. [4, 4, 4, 4, 4]],
  3561. [[1, 1, 1, 1, 1],
  3562. [2, 2, 2, 2, 2],
  3563. [3, 3, 3, 3, 3],
  3564. [4, 4, 4, 4, 4]]]),
  3565. Tensor(shape=[3, 4, 5], dtype=Int32, value=
  3566. [[[5, 5, 5, 5, 5],
  3567. [5, 5, 5, 5, 5],
  3568. [5, 5, 5, 5, 5],
  3569. [5, 5, 5, 5, 5]],
  3570. [[6, 6, 6, 6, 6],
  3571. [6, 6, 6, 6, 6],
  3572. [6, 6, 6, 6, 6],
  3573. [6, 6, 6, 6, 6]],
  3574. [[7, 7, 7, 7, 7],
  3575. [7, 7, 7, 7, 7],
  3576. [7, 7, 7, 7, 7],
  3577. [7, 7, 7, 7, 7]]]),
  3578. Tensor(shape=[3, 4, 5], dtype=Int32, value=
  3579. [[[8, 9, 0, 1, 2],
  3580. [8, 9, 0, 1, 2],
  3581. [8, 9, 0, 1, 2],
  3582. [8, 9, 0, 1, 2]],
  3583. [[8, 9, 0, 1, 2],
  3584. [8, 9, 0, 1, 2],
  3585. [8, 9, 0, 1, 2],
  3586. [8, 9, 0, 1, 2]],
  3587. [[8, 9, 0, 1, 2],
  3588. [8, 9, 0, 1, 2],
  3589. [8, 9, 0, 1, 2],
  3590. [8, 9, 0, 1, 2]]]))
  3591. """
  3592. @prim_attr_register
  3593. def __init__(self, indexing="xy"):
  3594. """Init Meshgrid"""
  3595. validator.check_value_type("indexing", indexing, (str), self.name)
  3596. if indexing not in ("xy", "ij"):
  3597. raise ValueError("indexing parameter must be either 'xy' or 'ij'")
  3598. self.indexing = indexing
  3599. def infer_shape(self, x_shape):
  3600. validator.check_value_type("shape", x_shape, [tuple], self.name)
  3601. validator.check_int(len(x_shape), 2, Rel.GE, "len of input", self.name)
  3602. n = len(x_shape)
  3603. shape_0 = []
  3604. for s in x_shape:
  3605. validator.check_int(len(s), 1, Rel.EQ, 'each input rank', self.name)
  3606. shape_0.append(s[0])
  3607. if self.indexing == "xy":
  3608. shape_0[0], shape_0[1] = shape_0[1], shape_0[0]
  3609. out_shape = tuple(tuple(shape_0) for _ in range(n))
  3610. return out_shape
  3611. def infer_dtype(self, x_type):
  3612. validator.check_subclass("input[0]", x_type[0], mstype.tensor, self.name)
  3613. n = len(x_type)
  3614. for i in range(1, n):
  3615. validator.check('x_type[%d]' % i, x_type[i], 'base', x_type[0], Rel.EQ, self.name, TypeError)
  3616. return x_type
  3617. class InplaceUpdate(PrimitiveWithInfer):
  3618. r"""
  3619. Updates specified rows with values in `v`.
  3620. Args:
  3621. indices (Union[int, tuple]): Indices into the left-most dimension of `x`, and determines which rows of x
  3622. to update with v. It is a int or tuple, whose value is in [0, the first dimension size of x).
  3623. Inputs:
  3624. - **x** (Tensor) - A tensor which to be inplace updated. It can be one of the following data types:
  3625. float32, float16 and int32.
  3626. - **v** (Tensor) - A tensor with the same type as `x` and the same dimension size as `x` except
  3627. the first dimension, which must be the same as the size of `indices`.
  3628. Outputs:
  3629. Tensor, with the same type and shape as the input `x`.
  3630. Supported Platforms:
  3631. ``Ascend``
  3632. Examples:
  3633. >>> indices = (0, 1)
  3634. >>> x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
  3635. >>> v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
  3636. >>> inplace_update = ops.InplaceUpdate(indices)
  3637. >>> output = inplace_update(x, v)
  3638. >>> print(output)
  3639. [[0.5 1. ]
  3640. [1. 1.5]
  3641. [5. 6. ]]
  3642. """
  3643. @prim_attr_register
  3644. def __init__(self, indices):
  3645. """Initialize InplaceUpdate"""
  3646. self.init_prim_io_names(inputs=['x', 'v'], outputs=['y'])
  3647. self.indices = indices
  3648. validator.check_value_type("indices", indices, [int, tuple], self.name)
  3649. if isinstance(indices, int):
  3650. self.indices = (indices,)
  3651. for item in self.indices:
  3652. validator.check_value_type("item of indices", item, [int], self.name)
  3653. def infer_dtype(self, x_dtype, v_dtype):
  3654. args = {'x': x_dtype, 'v': v_dtype}
  3655. valid_type = [mstype.int32, mstype.float16, mstype.float32]
  3656. validator.check_tensors_dtypes_same_and_valid(args, valid_type, self.name)
  3657. return x_dtype
  3658. def infer_shape(self, x_shape, v_shape):
  3659. validator.check("x", len(x_shape), "v", len(v_shape), Rel.EQ, self.name)
  3660. validator.check("size of indices", len(self.indices), "v's first dimension", v_shape[0],
  3661. Rel.EQ, self.name)
  3662. for i in self.indices:
  3663. if i < 0 or i >= x_shape[0]:
  3664. raise ValueError(f'The value of indices must be in [0, {x_shape[0]}), but got {i}.')
  3665. x_rank = len(x_shape)
  3666. for idx in range(x_rank)[1:]:
  3667. validator.check('v dim %d' % idx, v_shape[idx], "x dim %d" % idx, x_shape[idx], Rel.EQ, self.name)
  3668. return x_shape
  3669. class ReverseSequence(PrimitiveWithInfer):
  3670. """
  3671. Reverses variable length slices.
  3672. Args:
  3673. seq_dim (int): The dimension where reversal is performed. Required.
  3674. batch_dim (int): The input is sliced in this dimension. Default: 0.
  3675. Inputs:
  3676. - **x** (Tensor) - The input to reverse, supporting all number types including bool.
  3677. - **seq_lengths** (Tensor) - Must be a 1-D vector with int32 or int64 types.
  3678. Outputs:
  3679. Reversed tensor with the same shape and data type as input.
  3680. Supported Platforms:
  3681. ``Ascend``
  3682. Examples:
  3683. >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
  3684. >>> seq_lengths = Tensor(np.array([1, 2, 3]))
  3685. >>> reverse_sequence = ops.ReverseSequence(seq_dim=1)
  3686. >>> output = reverse_sequence(x, seq_lengths)
  3687. >>> print(output)
  3688. [[1. 2. 3.]
  3689. [5. 4. 6.]
  3690. [9. 8. 7.]]
  3691. """
  3692. @prim_attr_register
  3693. def __init__(self, seq_dim, batch_dim=0):
  3694. """Initialize ReverseSequence"""
  3695. self.init_prim_io_names(inputs=['x', 'seq_lengths'], outputs=['y'])
  3696. validator.check_value_type("seq_dim", seq_dim, [int], self.name)
  3697. self.seq_dim_ = seq_dim
  3698. validator.check_value_type("batch_dim", batch_dim, [int], self.name)
  3699. self.batch_dim_ = batch_dim
  3700. def infer_shape(self, x, seq_lengths):
  3701. validator.check("seq_dim", self.seq_dim_, "x rank", len(x), Rel.LE, self.name)
  3702. validator.check("batch_dim", self.batch_dim_, "x rank", len(x), Rel.LE, self.name)
  3703. validator.check("batch_dim", self.batch_dim_, "seq_dim", self.seq_dim_, Rel.NE, self.name)
  3704. validator.check("seq_lengths rank", len(seq_lengths), "expected", 1, Rel.EQ, self.name)
  3705. validator.check("seq_lengths vector size", seq_lengths[0],
  3706. "input size along batch_dim", x[self.batch_dim_], Rel.EQ, self.name)
  3707. return x
  3708. def infer_dtype(self, x, seq_lengths):
  3709. validator.check_tensor_dtype_valid("x_dtype", x, mstype.number_type + (mstype.bool_,), self.name)
  3710. validator.check_tensor_dtype_valid("seq_lengths_dtype", seq_lengths, [mstype.int32, mstype.int64], self.name)
  3711. return x
class EditDistance(PrimitiveWithInfer):
    """
    Computes the Levenshtein Edit Distance. It is used to measure the similarity of two sequences. The inputs are
    variable-length sequences provided by SparseTensors (hypothesis_indices, hypothesis_values, hypothesis_shape)
    and (truth_indices, truth_values, truth_shape).

    Args:
        normalize (bool): If true, edit distances are normalized by length of truth. Default: True.

    Inputs:
        - **hypothesis_indices** (Tensor) - The indices of the hypothesis list SparseTensor. With int64 data type.
          The shape of tensor is :math:`(N, R)`.
        - **hypothesis_values** (Tensor) - The values of the hypothesis list SparseTensor.
          Must be 1-D vector with length of N.
        - **hypothesis_shape** (Tensor) - The shape of the hypothesis list SparseTensor.
          Must be R-length vector with int64 data type. Only constant value is allowed.
        - **truth_indices** (Tensor) - The indices of the truth list SparseTensor. With int64 data type.
          The shape of tensor is :math:`(M, R)`.
        - **truth_values** (Tensor) - The values of the truth list SparseTensor. Must be 1-D vector with length of M.
        - **truth_shape** (Tensor) - The shape of the truth list SparseTensor.
          Must be R-length vector with int64 data type. Only constant value is allowed.

    Outputs:
        Tensor, a dense tensor with rank `R-1` and float32 data type.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> import numpy as np
        >>> from mindspore import context
        >>> from mindspore import Tensor
        >>> import mindspore.nn as nn
        >>> import mindspore.ops.operations as ops
        >>> class EditDistance(nn.Cell):
        ...     def __init__(self, hypothesis_shape, truth_shape, normalize=True):
        ...         super(EditDistance, self).__init__()
        ...         self.edit_distance = ops.EditDistance(normalize)
        ...         self.hypothesis_shape = hypothesis_shape
        ...         self.truth_shape = truth_shape
        ...
        ...     def construct(self, hypothesis_indices, hypothesis_values, truth_indices, truth_values):
        ...         return self.edit_distance(hypothesis_indices, hypothesis_values, self.hypothesis_shape,
        ...                                   truth_indices, truth_values, self.truth_shape)
        ...
        >>> hypothesis_indices = Tensor(np.array([[0, 0, 0], [1, 0, 1], [1, 1, 1]]).astype(np.int64))
        >>> hypothesis_values = Tensor(np.array([1, 2, 3]).astype(np.float32))
        >>> hypothesis_shape = Tensor(np.array([1, 1, 2]).astype(np.int64))
        >>> truth_indices = Tensor(np.array([[0, 1, 0], [0, 0, 1], [1, 1, 0], [1, 0, 1]]).astype(np.int64))
        >>> truth_values = Tensor(np.array([1, 3, 2, 1]).astype(np.float32))
        >>> truth_shape = Tensor(np.array([2, 2, 2]).astype(np.int64))
        >>> edit_distance = EditDistance(hypothesis_shape, truth_shape)
        >>> output = edit_distance(hypothesis_indices, hypothesis_values, truth_indices, truth_values)
        >>> print(output)
        [[1. 1.]
         [1. 1.]]
    """

    @prim_attr_register
    def __init__(self, normalize=True):
        """Initialize EditDistance"""
        self.normalize = validator.check_value_type("normalize", normalize, [bool], self.name)
        # Inputs 2 and 5 (hypothesis_shape, truth_shape) must be compile-time constants:
        # the output shape below is computed from their concrete values.
        self.set_const_input_indexes([2, 5])

    def __infer__(self, h_indices, h_values, h_shape, truth_indices, truth_values, truth_shape):
        # Both shape inputs must carry a concrete value at infer time.
        validator.check_const_input('hypothesis_shape', h_shape['value'], self.name)
        validator.check_const_input('truth_shape', truth_shape['value'], self.name)
        # Index/shape components of both sparse tensors must all be int64.
        args_int = {"hypothesis_indices": h_indices['dtype'], "hypothesis_shape": h_shape['dtype'],
                    "truth_indices": truth_indices['dtype'], "truth_shape": truth_shape['dtype']}
        validator.check_tensors_dtypes_same_and_valid(args_int, [mstype.int64], self.name)
        # Value components must share one numeric dtype.
        args = {"hypothesis_values": h_values['dtype'], "truth_values": truth_values['dtype']}
        validator.check_tensors_dtypes_same_and_valid(args, mstype.number_type, self.name)
        # Rank checks: indices are (N, R) / (M, R), the rest are 1-D vectors.
        hypothesis_indices_shp, truth_indices_shp = h_indices['shape'], truth_indices['shape']
        validator.check("hypothesis_indices rank", len(hypothesis_indices_shp), "expected", 2, Rel.EQ, self.name)
        validator.check("truth_indices rank", len(truth_indices_shp), "expected", 2, Rel.EQ, self.name)
        validator.check("hypothesis_values rank", len(h_values['shape']), "expected", 1, Rel.EQ, self.name)
        validator.check("hypothesis_shape rank", len(h_shape['shape']), "expected", 1, Rel.EQ, self.name)
        validator.check("truth_values rank", len(truth_values['shape']), "expected", 1, Rel.EQ, self.name)
        validator.check("truth_shape rank", len(truth_shape['shape']), "expected", 1, Rel.EQ, self.name)
        # Cross-consistency: one value per index row (N and M), and both sparse
        # tensors share the same dense rank R.
        validator.check("hypothesis_values shape", h_values['shape'][0],
                        "hypothesis_indices shape[0]", hypothesis_indices_shp[0], Rel.EQ, self.name)
        validator.check("hypothesis_shape", h_shape['shape'][0],
                        "hypothesis_indices shape[1]", hypothesis_indices_shp[1], Rel.EQ, self.name)
        validator.check("truth_values shape", truth_values['shape'][0],
                        "truth_indices shape[0]", truth_indices_shp[0], Rel.EQ, self.name)
        validator.check("hypothesis_shape", h_shape['shape'][0],
                        "truth_shape", truth_shape['shape'][0], Rel.EQ, self.name)
        # Output is dense with rank R-1: elementwise max of the two dense shapes,
        # dropping the innermost (sequence) dimension.
        # NOTE(review): elements of out_shape are numpy int64 scalars here, since the
        # values come from Tensor.asnumpy() — presumably accepted downstream; confirm.
        hypothesis_shape_v = h_shape['value'].asnumpy()
        truth_shape_v = truth_shape['value'].asnumpy()
        out_shape_rank = len(hypothesis_shape_v) - 1
        out_shape = []
        for i in range(out_shape_rank):
            out_shape.append(max(hypothesis_shape_v[i], truth_shape_v[i]))
        return {'shape': tuple(out_shape),
                'dtype': mstype.tensor_type(mstype.float32),
                'value': None}
  3801. class TransShape(PrimitiveWithInfer):
  3802. """
  3803. Transforms the shape of input tensor to target shape.
  3804. Inputs:
  3805. - **input_x** (Tensor) - A input tensor.
  3806. - **out_shape** (tuple[int]) - The shape of output data.
  3807. Outputs:
  3808. Tensor, a tensor whose data type is same as 'input_x', and the shape is the same as the `out_shape`.
  3809. """
  3810. @prim_attr_register
  3811. def __init__(self):
  3812. self.__setattr_flag__ = True
  3813. def __infer__(self, x, shape):
  3814. shp = shape['value']
  3815. dtype = x['dtype']
  3816. validator.check_tensor_dtype_valid('x', dtype, mstype.number_type + (mstype.bool_,), self.name)
  3817. self.add_prim_attr('out_shape', tuple(shp))
  3818. return {'shape': shp,
  3819. 'dtype': dtype,
  3820. 'value': None}
  3821. class Sort(PrimitiveWithInfer):
  3822. """
  3823. Sorts the elements of the input tensor along a given dimension in ascending order by value.
  3824. Args:
  3825. axis (int): The dimension to sort along. Default: -1.
  3826. descending (bool): Controls the sorting order. If descending is True then the elements
  3827. are sorted in descending order by value. Default: False.
  3828. Inputs:
  3829. - **x** (Tensor) - The input to sort, with float16 or float32 data type.
  3830. Outputs:
  3831. - **y1** (Tensor) - A tensor whose values are the sorted values, with the same shape and data type as input.
  3832. - **y2** (Tensor) - The indices of the elements in the original input tensor. Data type is int32.
  3833. Supported Platforms:
  3834. ``Ascend``
  3835. Examples:
  3836. >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
  3837. >>> sort = ops.Sort()
  3838. >>> output = sort(x)
  3839. >>> print(output)
  3840. (Tensor(shape=[3, 3], dtype=Float16, value=
  3841. [[ 1.0000e+00, 2.0000e+00, 8.0000e+00],
  3842. [ 3.0000e+00, 5.0000e+00, 9.0000e+00],
  3843. [ 4.0000e+00, 6.0000e+00, 7.0000e+00]]), Tensor(shape=[3, 3], dtype=Int32, value=
  3844. [[2, 1, 0],
  3845. [2, 0, 1],
  3846. [0, 1, 2]]))
  3847. """
  3848. @prim_attr_register
  3849. def __init__(self, axis=-1, descending=False):
  3850. """Initialize Sort"""
  3851. self.axis = validator.check_value_type("axis", axis, [int], self.name)
  3852. self.descending = validator.check_value_type("descending", descending, [bool], self.name)
  3853. def infer_shape(self, x_shape):
  3854. return x_shape, x_shape
  3855. def infer_dtype(self, x_dtype):
  3856. validator.check_tensor_dtype_valid("x_dtype", x_dtype, [mstype.float32, mstype.float16], self.name)
  3857. return x_dtype, mstype.tensor_type(mstype.int32)
  3858. class EmbeddingLookup(PrimitiveWithInfer):
  3859. """
  3860. Returns a slice of input tensor based on the specified indices.
  3861. This Primitive has the similar functionality as GatherV2 operating on `axis = 0`, but has one more inputs:
  3862. `offset`.
  3863. Inputs:
  3864. - **input_params** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  3865. This represents a Tensor slice, instead of the entire Tensor. Currently, the dimension is restricted to be 2.
  3866. - **input_indices** (Tensor) - The shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
  3867. Specifies the indices of elements of the original Tensor. Values can be out of range of `input_params`,
  3868. and the exceeding part will be filled with 0 in the output. Values does not support negative and the result
  3869. is undefined if values are negative.
  3870. - **offset** (int) - Specifies the offset value of this `input_params` slice. Thus the real indices
  3871. are equal to `input_indices` minus `offset`.
  3872. Outputs:
  3873. Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`.
  3874. Supported Platforms:
  3875. ``Ascend`` ``CPU``
  3876. Examples:
  3877. >>> input_params = Tensor(np.array([[8, 9], [10, 11], [12, 13], [14, 15]]), mindspore.float32)
  3878. >>> input_indices = Tensor(np.array([[5, 2], [8, 5]]), mindspore.int32)
  3879. >>> offset = 4
  3880. >>> output = ops.EmbeddingLookup()(input_params, input_indices, offset)
  3881. >>> print(output)
  3882. [[[10. 11.]
  3883. [ 0. 0.]]
  3884. [[ 0. 0.]
  3885. [10. 11.]]]
  3886. """
  3887. @prim_attr_register
  3888. def __init__(self):
  3889. """Initialize index_select"""
  3890. self.__setattr_flag__ = True
  3891. self.init_prim_io_names(inputs=['params', 'indices', 'offset'],
  3892. outputs=['output'])
  3893. def __infer__(self, params, indices, offset):
  3894. validator.check_subclass("params", params['dtype'], mstype.tensor, self.name)
  3895. validator.check_tensor_dtype_valid("indices", indices['dtype'], mstype.int_type, self.name)
  3896. validator.check_subclass("offset", offset['dtype'], mstype.int_, self.name)
  3897. params_shp = params['shape']
  3898. if len(params_shp) > 2:
  3899. raise ValueError("The dimension of 'params' in EmbeddingLookup must <= 2, but got %d." % len(params_shp))
  3900. out_shape = indices['shape'] + params_shp[1:]
  3901. if 'max_shape' in indices:
  3902. out_max_shape = indices['max_shape'] + params_shp[1:]
  3903. else:
  3904. out_max_shape = out_shape
  3905. if 'min_shape' in indices:
  3906. out_min_shape = indices['min_shape'] + params_shp[1:]
  3907. else:
  3908. out_min_shape = out_shape
  3909. out = {'shape': out_shape,
  3910. 'dtype': params['dtype'],
  3911. 'value': None,
  3912. 'max_shape': out_max_shape,
  3913. 'min_shape': out_min_shape}
  3914. return out
  3915. class GatherD(PrimitiveWithInfer):
  3916. """
  3917. Gathers values along an axis specified by dim.
  3918. For a 3-D tensor, the output is:
  3919. output[i][j][k] = x[index[i][j][k]][j][k] # if dim == 0
  3920. output[i][j][k] = x[i][index[i][j][k]][k] # if dim == 1
  3921. output[i][j][k] = x[i][j][index[i][j][k]] # if dim == 2
  3922. If `x` is an n-D tensor with shape :math:`(z_0, z_1, ..., z_i, ..., z_{n-1})` and `dim` = i,
  3923. the `index` must be an n-D tensor with shape :math:`(z_0, z_1, ..., y, ..., z_{n-1})`
  3924. where `y`>=1 and the output will have the same shape as `index`.
  3925. Inputs:
  3926. - **x** (Tensor) - The source tensor.
  3927. - **dim** (int) - The axis along which to index. It must be int32. Only constant value is allowed.
  3928. - **index** (Tensor) - The indices of elements to gather. It can be one of the following data types:
  3929. int32, int64. The value range of each index element is [-x_rank[dim], x_rank[dim]).
  3930. Outputs:
  3931. Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`.
  3932. Supported Platforms:
  3933. ``Ascend`` ``GPU`` ``CPU``
  3934. Examples:
  3935. >>> x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32)
  3936. >>> index = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)
  3937. >>> dim = 1
  3938. >>> output = ops.GatherD()(x, dim, index)
  3939. >>> print(output)
  3940. [[1 1]
  3941. [4 3]]
  3942. """
  3943. @prim_attr_register
  3944. def __init__(self):
  3945. """Initialize GatherD"""
  3946. self.init_prim_io_names(inputs=['x', 'dim', 'index'], outputs=['output'])
  3947. def __infer__(self, x, dim, index):
  3948. validator.check_subclass("x", x['dtype'], mstype.tensor, self.name)
  3949. validator.check_tensor_dtype_valid("index", index['dtype'], [mstype.int32, mstype.int64], self.name)
  3950. validator.check_subclass("dim", dim['dtype'], [mstype.int32, mstype.int64], self.name)
  3951. x_shp = x['shape']
  3952. idx_shp = index['shape']
  3953. x_rank = len(x_shp)
  3954. idx_rank = len(idx_shp)
  3955. validator.check("x_rank, idx_rank", x_rank, "expected", idx_rank, Rel.EQ, self.name)
  3956. dim_v = dim['value']
  3957. validator.check("dim value", dim_v, "expected", -x_rank, Rel.GE, self.name)
  3958. validator.check("dim value", dim_v, "expected", x_rank, Rel.LT, self.name)
  3959. if dim_v < 0:
  3960. dim['value'] = dim_v + x_rank
  3961. for i in range(x_rank):
  3962. if i == dim['value']:
  3963. continue
  3964. validator.check("x_shp[{0}], idx_shp[{0}]".format(i), x_shp[i], "expected", idx_shp[i], Rel.EQ, self.name)
  3965. out = {'shape': index['shape'],
  3966. 'dtype': x['dtype'],
  3967. 'value': None}
  3968. return out
  3969. class Identity(PrimitiveWithInfer):
  3970. """
  3971. Returns a Tensor with the same shape and contents as input.
  3972. Inputs:
  3973. - **x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  3974. Outputs:
  3975. Tensor, the shape of tensor is the same as `input_x`, :math:`(x_1, x_2, ..., x_R)`.
  3976. Supported Platforms:
  3977. ``Ascend``
  3978. Examples:
  3979. >>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)
  3980. >>> output = ops.Identity()(x)
  3981. >>> print(output)
  3982. [1 2 3 4]
  3983. """
  3984. # Side effect is identity with input.
  3985. side_effect_propagate = 1
  3986. @prim_attr_register
  3987. def __init__(self):
  3988. """Initialize identity"""
  3989. self.add_prim_attr('side_effect_propagate', 1)
  3990. def __infer__(self, x):
  3991. validator.check_subclass("x", x['dtype'], mstype.tensor, self.name)
  3992. validator.check_tensor_dtype_valid('x', x['dtype'], mstype.number_type + (mstype.bool_,), self.name)
  3993. out = {'shape': x['shape'],
  3994. 'dtype': x['dtype'],
  3995. 'value': None}
  3996. return out
  3997. class Range(PrimitiveWithCheck):
  3998. r"""
  3999. Creates a sequence of numbers that begins at `start` and extends by increments of
  4000. `delta` up to but not including `limit`.
  4001. The types of all 3 inputs must be the same. The type of the resulting tensor is
  4002. the same as the type of the inputs.
  4003. Args:
  4004. maxlen (int): Memory that can fit `maxlen` many elements
  4005. will be allocated for the output. Optional, must be positive, defaults to 1000000.
  4006. If the output has more than `maxlen` elements, a runtime error
  4007. will occur.
  4008. Inputs:
  4009. - **start** (Tensor) - A scalar Tensor. The first number in the sequence. Must have
  4010. type: int32 or float32
  4011. - **limit** (Tensor) - A scalar Tensor. Upper limit of the sequence, exclusive. Must
  4012. have type: int32 or float32
  4013. - **delta** (Tensor) - A scalar Tensor. Number that increments `start`. Must have
  4014. type: int32 or float32
  4015. Outputs:
  4016. A 1-D Tensor, with the same type as the inputs.
  4017. Examples:
  4018. >>> start = Tensor(0, mstype.int32)
  4019. >>> limit = Tensor(10, mstype.int32)
  4020. >>> delta = Tensor(4, mstype.int32)
  4021. >>> output = ops.Range()(start, limit, delta)
  4022. >>> print(output)
  4023. [0, 4, 8]
  4024. Supported Platforms:
  4025. ``GPU``
  4026. """
  4027. @prim_attr_register
  4028. def __init__(self, maxlen=1000000):
  4029. self.init_prim_io_names(inputs=['start', 'limit', 'delta'], outputs=['output'])
  4030. validator.check_value_type("maxlen", maxlen, [int], self.name)
  4031. validator.check_positive_int(maxlen, "maxlen", self.name)
  4032. self.maxlen = maxlen
  4033. self.add_prim_attr('maxlen', maxlen)
  4034. self.add_prim_attr("dynamic_shape_depends", [0])
  4035. self.add_prim_attr("dynamic_shape_depends", [1])
  4036. self.add_prim_attr("dynamic_shape_depends", [2])
  4037. def check_shape(self, start_shape, limit_shape, delta_shape):
  4038. validator.check("start_shape", len(start_shape), "", 0, Rel.EQ, self.name)
  4039. validator.check("limit_shape", len(limit_shape), "", 0, Rel.EQ, self.name)
  4040. validator.check("delta_shape", len(delta_shape), "", 0, Rel.EQ, self.name)
  4041. def check_dtype(self, start_dtype, limit_dtype, delta_dtype):
  4042. valid_dtypes = [mstype.int32, mstype.float32]
  4043. inputs = {"start": start_dtype, "limit": limit_dtype, "delta": delta_dtype}
  4044. validator.check_tensors_dtypes_same_and_valid(inputs, valid_dtypes, self.name)