# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""Operators for math."""

import copy
import numpy as np
from ... import context
from .. import signature as sig
from ..._checkparam import Validator as validator
from ..._checkparam import Rel
from ...common import dtype as mstype
from ...common.tensor import Tensor, MetaTensor
from .._utils import get_broadcast_shape
from ..primitive import PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_register, _run_op


def _infer_shape_reduce(x, axis, keep_dims, prim_name):
    """Common infer for reduce operators."""

    def reduce_one_axis(one_axis):
        validator.check_int_range(one_axis, -dim, dim, Rel.INC_LEFT, 'axis', prim_name)
        if one_axis < 0:
            one_axis += dim
        axis_reduce.add(one_axis)

    validator.check_value_type('axis', axis, [int, tuple, list], prim_name)
    dim = len(x)
    axis_reduce = set()

    if isinstance(axis, int):
        reduce_one_axis(axis)
    else:
        if not axis:
            if keep_dims:
                return [1] * dim
            return []
        for index, one_axis in enumerate(axis):
            validator.check_value_type('axis[%d]' % index, one_axis, [int], prim_name)
            reduce_one_axis(one_axis)

    out_shape = []
    for i in range(dim):
        if i in axis_reduce:
            if keep_dims:
                out_shape.append(1)
        else:
            out_shape.append(x[i])
    return out_shape
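
# Illustrative note (not part of the original source): the helper above mirrors
# numpy's reduction shape rules. Assuming a rank-3 input shape [2, 3, 4], it
# would behave as follows:
#   _infer_shape_reduce([2, 3, 4], 1, False, "ReduceSum")      -> [2, 4]
#   _infer_shape_reduce([2, 3, 4], (0, 2), True, "ReduceSum")  -> [1, 3, 1]
#   _infer_shape_reduce([2, 3, 4], (), False, "ReduceSum")     -> []  (reduce all dims)
#   _infer_shape_reduce([2, 3, 4], -1, False, "ReduceSum")     -> [2, 3]  (negative axes wrap)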


class _BinaryOp(PrimitiveWithInfer):
    """
    Define binary operators.
    """

    __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)

    @prim_attr_register
    def __init__(self):
        """Initialize _BinaryOp"""
        self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])

    def infer_shape(self, x_shape, y_shape):
        return get_broadcast_shape(x_shape, y_shape, self.name)


class _MathBinaryOp(_BinaryOp):
    """
    Define math binary operators.
    """

    @staticmethod
    def do_infer_dtype(x_dtype, y_dtype, valid_dtype=mstype.number_type, prim_name=None):
        args_type = {"x": x_dtype, "y": y_dtype}
        validator.check_tensors_dtypes_same_and_valid(args_type, valid_dtype, prim_name)
        return x_dtype

    def infer_dtype(self, x_dtype, y_dtype):
        return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type, self.name)
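
# Illustrative note: subclasses such as TensorAdd, Sub and Mul reuse
# do_infer_dtype unchanged, so for two matching tensor dtypes the check passes
# and the left-hand dtype is returned. A sketch, using the tensor_type
# constructor from mstype (it is also used by MatMul.infer_dtype below):
#   _MathBinaryOp.do_infer_dtype(mstype.tensor_type(mstype.float32),
#                                mstype.tensor_type(mstype.float32),
#                                mstype.number_type, "Sub")  # -> the float32 tensor type
# Two bool tensors would fail check_tensors_dtypes_same_and_valid here, which
# is why bool-with-bool inputs are documented as unsupported for these ops.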


class _BitwiseBinaryOp(_MathBinaryOp):
    """
    Define bitwise binary operators.
    """

    @prim_attr_register
    def __init__(self):
        """Initialize _BitwiseBinaryOp"""
        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])

    @staticmethod
    def _check_bitwise_op_input_type(x1_type, x2_type, prim):
        args = {'x1': x1_type, 'x2': x2_type}
        valid_dtypes = mstype.int_type + mstype.uint_type
        validator.check_tensors_dtypes_same_and_valid(args, valid_dtypes, prim)
        return x1_type

    def infer_dtype(self, x1_type, x2_type):
        return _BitwiseBinaryOp._check_bitwise_op_input_type(x1_type, x2_type, self.name)


class TensorAdd(_MathBinaryOp):
    """
    Adds two input tensors element-wise.

    Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types
    consistent. The inputs must be two tensors or one tensor and one scalar. When the inputs are two tensors,
    dtypes of them cannot be both bool, and the shapes of them could be broadcast. When the inputs are one
    tensor and one scalar, the scalar could only be a constant.

    Inputs:
        - **input_x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
          or a tensor whose data type is number or bool.
        - **input_y** (Union[Tensor, Number, bool]) - The second input is a number, or a bool when the first input
          is a tensor, or a tensor whose data type is number or bool.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting,
        and the data type is the one with higher precision or higher digits among the two inputs.

    Examples:
        >>> add = P.TensorAdd()
        >>> input_x = Tensor(np.array([1, 2, 3]).astype(np.float32))
        >>> input_y = Tensor(np.array([4, 5, 6]).astype(np.float32))
        >>> output = add(input_x, input_y)
        >>> print(output)
        [5. 7. 9.]
    """

    def infer_value(self, x, y):
        if x is not None and y is not None:
            x = x.asnumpy()
            y = y.asnumpy()
            out = x + y
            out = np.array(out, x.dtype)
            return Tensor(out)
        return None
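
# Illustrative note: infer_value gives the compiler a chance to fold TensorAdd
# at graph-compile time. A rough sketch of the effect, assuming both operands
# are known constants:
#   TensorAdd().infer_value(Tensor(np.array([1.0, 2.0], np.float32)),
#                           Tensor(np.array([3.0, 4.0], np.float32)))
#   # -> Tensor([4. 6.]); if either operand is None (unknown at compile time),
#   # None is returned and the addition is left to execute at runtime.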


class AssignAdd(PrimitiveWithInfer):
    """
    Updates a `Parameter` by adding a value to it.

    Inputs of `variable` and `value` comply with the implicit type conversion rules to make the data types
    consistent. If they have different data types, the lower priority data type will be converted to the
    relatively highest priority data type. If `value` is a number, the number is automatically converted to
    a Tensor, and the data type is consistent with the Tensor data type involved in the operation. A
    RuntimeError exception will be thrown when data type conversion of the `Parameter` is required.

    Inputs:
        - **variable** (Parameter) - The `Parameter`.
        - **value** (Union[numbers.Number, Tensor]) - The value to be added to the `variable`.
          It must have the same shape as `variable` if it is a Tensor.

    Examples:
        >>> class Net(nn.Cell):
        ...     def __init__(self):
        ...         super(Net, self).__init__()
        ...         self.AssignAdd = P.AssignAdd()
        ...         self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int64), name="global_step")
        ...
        ...     def construct(self, x):
        ...         self.AssignAdd(self.variable, x)
        ...         return self.variable
        ...
        >>> net = Net()
        >>> value = Tensor(np.ones([1]).astype(np.int64)*100)
        >>> output = net(value)
        >>> print(output)
        Parameter (name=global_step)
    """

    __mindspore_signature__ = (
        sig.make_sig('x', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('value', dtype=sig.sig_dtype.T)
    )

    @prim_attr_register
    def __init__(self):
        """Initialize AssignAdd"""
        self.init_prim_io_names(inputs=['ref', 'value'], outputs=['output'])

    def infer_shape(self, variable, value):
        return value

    def infer_dtype(self, variable, value):
        args = {"variable": variable, "value": value}
        validator.check_scalar_or_tensor_types_same(args, mstype.number_type, self.name)
        return value


class AssignSub(PrimitiveWithInfer):
    """
    Updates a `Parameter` by subtracting a value from it.

    Inputs of `variable` and `value` comply with the implicit type conversion rules to make the data types
    consistent. If they have different data types, the lower priority data type will be converted to the
    relatively highest priority data type. If `value` is a number, the number is automatically converted to
    a Tensor, and the data type is consistent with the Tensor data type involved in the operation. A
    RuntimeError exception will be thrown when data type conversion of the `Parameter` is required.

    Inputs:
        - **variable** (Parameter) - The `Parameter`.
        - **value** (Union[numbers.Number, Tensor]) - The value to be subtracted from the `variable`.
          It must have the same shape as `variable` if it is a Tensor.

    Examples:
        >>> class Net(nn.Cell):
        ...     def __init__(self):
        ...         super(Net, self).__init__()
        ...         self.AssignSub = P.AssignSub()
        ...         self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int32), name="global_step")
        ...
        ...     def construct(self, x):
        ...         self.AssignSub(self.variable, x)
        ...         return self.variable
        ...
        >>> net = Net()
        >>> value = Tensor(np.ones([1]).astype(np.int32)*100)
        >>> output = net(value)
        >>> print(output)
        Parameter (name=global_step)
    """

    __mindspore_signature__ = (
        sig.make_sig('variable', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('value', dtype=sig.sig_dtype.T)
    )

    @prim_attr_register
    def __init__(self):
        """Initialize AssignSub"""

    def infer_shape(self, variable, value):
        return value

    def infer_dtype(self, variable, value):
        args = {"variable": variable, "value": value}
        validator.check_scalar_or_tensor_types_same(args, mstype.number_type, self.name)
        return value


class _Reduce(PrimitiveWithInfer):
    """
    Definition of base class of reduction class operators.

    Args:
        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
            If false, don't keep these dimensions.
    """

    __mindspore_signature__ = (
        sig.make_sig('input_x'),
        sig.make_sig('axis', default=())
    )

    @prim_attr_register
    def __init__(self, keep_dims=False):
        """Initialize Reduce"""
        validator.check_value_type('keep_dims', keep_dims, [bool], self.name)
        self.init_prim_io_names(inputs=['input_x', 'axis'], outputs=['y'])
        self.add_prim_attr("io_format", "ND")

    def __call__(self, x, axis=()):
        args = [x, axis]
        output = _run_op(self, self.name, args)
        return output

    def do_infer(self, input_x, axis, valid_dtype=mstype.number_type):
        """Return meta infos of the input parameters."""
        axis_v = axis['value']
        input_shp = input_x['shape']
        args = {'input_x': input_x['dtype']}
        validator.check_tensors_dtypes_same_and_valid(args, valid_dtype, self.name)
        if axis_v is None:
            raise ValueError(f"For {self.name}, axis must be const.")
        input_shp = _infer_shape_reduce(input_shp, axis_v, self.keep_dims, self.name)
        value = None
        if input_x['value'] is not None:
            prim_map = {
                'ReduceSum': np.sum,
                'ReduceMax': np.max,
                'ReduceMin': np.min,
            }
            np_reduce_func = prim_map.get(self.name, None)
            if np_reduce_func is not None:
                value = input_x['value'].asnumpy()
                if not axis_v:
                    axis_v = [i for i in range(len(input_x['shape']))]
                axis_v = tuple(axis_v)
                value = np_reduce_func(value, axis_v, keepdims=self.keep_dims)
                value = np.array(value)
                value = Tensor(value)
        return {'shape': input_shp,
                'dtype': input_x['dtype'],
                'value': value}

    def __infer__(self, input_x, axis):
        return self.do_infer(input_x, axis)
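
# Illustrative note: do_infer above both derives the output shape (via
# _infer_shape_reduce) and constant-folds ReduceSum/ReduceMax/ReduceMin through
# the prim_map lookup when the input value is already known at compile time.
# For example, a known float32 input of shape [2, 3] reduced over axis () with
# keep_dims=False is collapsed to a 0-D Tensor computed by np.sum/np.max/np.min;
# operators not in prim_map (e.g. ReduceMean here) only get shape and dtype
# inference, with no value folding.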


class ReduceMean(_Reduce):
    """
    Reduce a dimension of a tensor by averaging all elements in the dimension.

    The dtype of the tensor to be reduced is number.

    Args:
        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
            If false, don't keep these dimensions. Default: False.

    Inputs:
        - **input_x** (Tensor[Number]) - The input tensor.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed. Must be in the range [-rank(input_x), rank(input_x)).

    Outputs:
        Tensor, has the same dtype as the `input_x`.

        - If axis is (), and keep_dims is False,
          the output is a 0-D tensor representing the mean of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is False,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is False,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Examples:
        >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
        >>> op = P.ReduceMean(keep_dims=True)
        >>> output = op(input_x, 1)
        >>> result = output.shape
        >>> print(result)
        (3, 1, 5, 6)
    """


class ReduceSum(_Reduce):
    """
    Reduce a dimension of a tensor by summing all elements in the dimension.

    The dtype of the tensor to be reduced is number.

    Args:
        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
            If false, don't keep these dimensions. Default: False.

    Inputs:
        - **input_x** (Tensor[Number]) - The input tensor.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed. Must be in the range [-rank(input_x), rank(input_x)).

    Outputs:
        Tensor, has the same dtype as the `input_x`.

        - If axis is (), and keep_dims is False,
          the output is a 0-D tensor representing the sum of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is False,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is False,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Examples:
        >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
        >>> op = P.ReduceSum(keep_dims=True)
        >>> output = op(input_x, 1)
        >>> output.shape
        (3, 1, 5, 6)
    """

    @prim_attr_register
    def __init__(self, keep_dims=False):
        """Initialize ReduceSum"""
        super(ReduceSum, self).__init__(keep_dims)
        self.__setattr_flag__ = True


class ReduceAll(_Reduce):
    """
    Reduce a dimension of a tensor by the "logical and" of all elements in the dimension.

    The dtype of the tensor to be reduced is bool.

    Args:
        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
            If false, don't keep these dimensions. Default: False.

    Inputs:
        - **input_x** (Tensor[bool]) - The input tensor.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed. Must be in the range [-rank(input_x), rank(input_x)).

    Outputs:
        Tensor, the dtype is bool.

        - If axis is (), and keep_dims is False,
          the output is a 0-D tensor representing the "logical and" of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is False,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is False,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Examples:
        >>> input_x = Tensor(np.array([[True, False], [True, True]]))
        >>> op = P.ReduceAll(keep_dims=True)
        >>> output = op(input_x, 1)
        >>> print(output)
        [[False]
         [ True]]
    """

    def __infer__(self, input_x, axis):
        return self.do_infer(input_x, axis, (mstype.bool_,))


class ReduceAny(_Reduce):
    """
    Reduce a dimension of a tensor by the "logical or" of all elements in the dimension.

    The dtype of the tensor to be reduced is bool.

    Args:
        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
            If false, don't keep these dimensions. Default: False.

    Inputs:
        - **input_x** (Tensor[bool]) - The input tensor.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed. Must be in the range [-rank(input_x), rank(input_x)).

    Outputs:
        Tensor, the dtype is bool.

        - If axis is (), and keep_dims is False,
          the output is a 0-D tensor representing the "logical or" of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is False,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is False,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Examples:
        >>> input_x = Tensor(np.array([[True, False], [True, True]]))
        >>> op = P.ReduceAny(keep_dims=True)
        >>> output = op(input_x, 1)
        >>> print(output)
        [[ True]
         [ True]]
    """

    def __infer__(self, input_x, axis):
        return self.do_infer(input_x, axis, (mstype.bool_,))


class ReduceMax(_Reduce):
    """
    Reduce a dimension of a tensor by the maximum value in this dimension.

    The dtype of the tensor to be reduced is number.

    Args:
        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
            If false, don't keep these dimensions. Default: False.

    Inputs:
        - **input_x** (Tensor[Number]) - The input tensor.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed. Must be in the range [-rank(input_x), rank(input_x)).

    Outputs:
        Tensor, has the same dtype as the `input_x`.

        - If axis is (), and keep_dims is False,
          the output is a 0-D tensor representing the maximum of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is False,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is False,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Examples:
        >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
        >>> op = P.ReduceMax(keep_dims=True)
        >>> output = op(input_x, 1)
        >>> result = output.shape
        >>> print(result)
        (3, 1, 5, 6)
    """

    @prim_attr_register
    def __init__(self, keep_dims=False):
        """Initialize ReduceMax"""
        super(ReduceMax, self).__init__(keep_dims)
        self.__setattr_flag__ = True

    def __infer__(self, input_x, axis):
        return self.do_infer(input_x, axis, mstype.number_type + (mstype.bool_,))


class ReduceMin(_Reduce):
    """
    Reduce a dimension of a tensor by the minimum value in the dimension.

    The dtype of the tensor to be reduced is number.

    Args:
        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
            If false, don't keep these dimensions. Default: False.

    Inputs:
        - **input_x** (Tensor[Number]) - The input tensor.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed. Must be in the range [-rank(input_x), rank(input_x)).

    Outputs:
        Tensor, has the same dtype as the `input_x`.

        - If axis is (), and keep_dims is False,
          the output is a 0-D tensor representing the minimum of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is False,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is False,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Examples:
        >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
        >>> op = P.ReduceMin(keep_dims=True)
        >>> output = op(input_x, 1)
        >>> result = output.shape
        >>> print(result)
        (3, 1, 5, 6)
    """


class ReduceProd(_Reduce):
    """
    Reduce a dimension of a tensor by multiplying all elements in the dimension.

    The dtype of the tensor to be reduced is number.

    Args:
        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
            If false, don't keep these dimensions. Default: False.

    Inputs:
        - **input_x** (Tensor[Number]) - The input tensor.
        - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed. Must be in the range [-rank(input_x), rank(input_x)).

    Outputs:
        Tensor, has the same dtype as the `input_x`.

        - If axis is (), and keep_dims is False,
          the output is a 0-D tensor representing the product of all elements in the input tensor.
        - If axis is int, set as 2, and keep_dims is False,
          the shape of output is :math:`(x_1, x_3, ..., x_R)`.
        - If axis is tuple(int), set as (2, 3), and keep_dims is False,
          the shape of output is :math:`(x_1, x_4, ..., x_R)`.

    Examples:
        >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
        >>> op = P.ReduceProd(keep_dims=True)
        >>> output = op(input_x, 1)
        >>> result = output.shape
        >>> print(result)
        (3, 1, 5, 6)
    """


class CumProd(PrimitiveWithInfer):
    """
    Computes the cumulative product of the tensor `x` along `axis`.

    Args:
        exclusive (bool): If true, perform exclusive cumulative product. Default: False.
        reverse (bool): If true, reverse the result along axis. Default: False.

    Inputs:
        - **input_x** (Tensor[Number]) - The input tensor.
        - **axis** (int) - The dimension to compute the cumulative product.
          Only constant value is allowed.

    Outputs:
        Tensor, has the same shape and dtype as the `input_x`.

    Examples:
        >>> a, b, c, = 1, 2, 3
        >>> input_x = Tensor(np.array([a, b, c]).astype(np.float32))
        >>> op0 = P.CumProd()
        >>> output0 = op0(input_x, 0)  # output=[a, a * b, a * b * c]
        >>> op1 = P.CumProd(exclusive=True)
        >>> output1 = op1(input_x, 0)  # output=[1, a, a * b]
        >>> op2 = P.CumProd(reverse=True)
        >>> output2 = op2(input_x, 0)  # output=[a * b * c, b * c, c]
        >>> op3 = P.CumProd(exclusive=True, reverse=True)
        >>> output3 = op3(input_x, 0)  # output=[b * c, c, 1]
        >>> print(output0)
        [1. 2. 6.]
        >>> print(output1)
        [1. 1. 2.]
        >>> print(output2)
        [6. 6. 3.]
        >>> print(output3)
        [6. 3. 1.]
    """

    @prim_attr_register
    def __init__(self, exclusive=False, reverse=False):
        cls_name = self.name
        self.exclusive = validator.check_value_type("exclusive", exclusive, [bool], cls_name)
        self.reverse = validator.check_value_type("reverse", reverse, [bool], cls_name)
        self.init_prim_io_names(inputs=['x', 'axis'], outputs=['y'])

    def infer_shape(self, x_shape, axis_shape):
        return x_shape

    def infer_dtype(self, x_type, axis_type):
        cls_name = self.name
        validator.check_tensor_dtype_valid('x', x_type, mstype.number_type, cls_name)
        validator.check_subclass("axis", axis_type, mstype.int_, cls_name)
        return x_type

    def infer_value(self, x, axis):
        if axis is None:
            raise ValueError(f"For {self.name}, axis must be const.")


class MatMul(PrimitiveWithInfer):
    """
    Multiplies matrix `a` and matrix `b`.

    The rank of input tensors must be equal to `2`.

    Args:
        transpose_a (bool): If true, `a` is transposed before multiplication. Default: False.
        transpose_b (bool): If true, `b` is transposed before multiplication. Default: False.

    Inputs:
        - **input_x** (Tensor) - The first tensor to be multiplied. The shape of the tensor is :math:`(N, C)`. If
          `transpose_a` is True, its shape must be :math:`(N, C)` after transposing.
        - **input_y** (Tensor) - The second tensor to be multiplied. The shape of the tensor is :math:`(C, M)`. If
          `transpose_b` is True, its shape must be :math:`(C, M)` after transposing.

    Outputs:
        Tensor, the shape of the output tensor is :math:`(N, M)`.

    Examples:
        >>> input_x1 = Tensor(np.ones(shape=[1, 3]), mindspore.float32)
        >>> input_x2 = Tensor(np.ones(shape=[3, 4]), mindspore.float32)
        >>> matmul = P.MatMul()
        >>> output = matmul(input_x1, input_x2)
    """

    @prim_attr_register
    def __init__(self, transpose_a=False, transpose_b=False):
        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['output'])
        cls_name = self.name
        validator.check_value_type("transpose_a", transpose_a, [bool], cls_name)
        validator.check_value_type("transpose_b", transpose_b, [bool], cls_name)
        self.add_prim_attr("io_format", "ND")

    def check_shape_size(self, x1, x2):
        if len(x1) != 2 or len(x2) != 2:
            raise ValueError('P.MatMul inputs x1, x2 should have the same dimension size, '
                             + f'equal to 2, while x1 size is ({len(x1)}) and x2 size is ({len(x2)}).')

    def infer_shape(self, x1, x2):
        self.check_shape_size(x1, x2)
        cls_name = self.name
        # expected dimension of x, y, x:[...,a,b] y:[..., c,d], the dim size should be the same except the last two
        for i in range(len(x1) - 2):
            if x1[i] != x2[i]:
                raise ValueError(f'For \'{cls_name}\' shape in dim[{i}] is not the same, '
                                 + f'while x1 is {x1[i]}, x2 is {x2[i]}')
        # validate whether the last two dims satisfy matrix multiplication
        x1_last = x1[-2:]
        x2_last = x2[-2:]
        # x1_col = x1_last[1] if (not transpose_a) else x1_last[0]
        x1_col = x1_last[not self.transpose_a]
        # x2_row = x2_last[0] if (not transpose_b) else x2_last[1]
        x2_row = x2_last[self.transpose_b]
        if x1_col != x2_row:
            raise ValueError(f'For \'{cls_name}\' evaluator shapes of inputs can not do this operator,'
                             + f' got {x1_col} and {x2_row}, with x1 shape {x1}(transpose_a={self.transpose_a})'
                             + f', x2 shape {x2}(transpose_b={self.transpose_b}).')
        # set attribute
        self.add_prim_attr('transpose_x1', self.transpose_a)
        self.add_prim_attr('transpose_x2', self.transpose_b)
        ret_dims = x1[: -2] + [x1_last[self.transpose_a], x2_last[not self.transpose_b]]
        return ret_dims

    def infer_dtype(self, x1, x2):
        args = {"x1": x1, "x2": x2}
        validator.check_tensors_dtypes_same_and_valid(args, mstype.float_type + mstype.int_type, self.name)
        if x1.element_type() == mstype.int8:
            return mstype.tensor_type(mstype.int32)
        return x1
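
# Shape-inference sketch for MatMul (illustrative): x1_col and x2_row above pick
# the contracted dimensions by boolean indexing (False -> index 0, True -> index 1), so
#   transpose_a=False, transpose_b=False: requires x1 = (N, C), x2 = (C, M),
#       checks x1[-1] == x2[-2] and returns [N, M];
#   transpose_a=True: x1 is supplied as (C, N), the check becomes x1[-2] == x2[-2],
#       and the output is still [N, M].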


class BatchMatMul(MatMul):
    """
    Computes matrix multiplication between two tensors by batch
    `result[..., :, :] = tensor(a[..., :, :]) * tensor(b[..., :, :])`.

    The two input tensors must have the same rank, and the rank must not be less than `3`.

    Args:
        transpose_a (bool): If true, the last two dimensions of `a` are transposed before multiplication.
            Default: False.
        transpose_b (bool): If true, the last two dimensions of `b` are transposed before multiplication.
            Default: False.

    Inputs:
        - **input_x** (Tensor) - The first tensor to be multiplied. The shape of the tensor is :math:`(*B, N, C)`,
          where :math:`*B` represents the batch size which can be multidimensional, :math:`N` and :math:`C` are the
          size of the last two dimensions. If `transpose_a` is True, its shape must be :math:`(*B, C, N)`.
        - **input_y** (Tensor) - The second tensor to be multiplied. The shape of the tensor is :math:`(*B, C, M)`. If
          `transpose_b` is True, its shape must be :math:`(*B, M, C)`.

    Outputs:
        Tensor, the shape of the output tensor is :math:`(*B, N, M)`.

    Examples:
        >>> input_x = Tensor(np.ones(shape=[2, 4, 1, 3]), mindspore.float32)
        >>> input_y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
        >>> batmatmul = P.BatchMatMul()
        >>> output = batmatmul(input_x, input_y)
        >>> print(output)
        [[[[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]],
         [[[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]]]
        >>>
        >>> input_x = Tensor(np.ones(shape=[2, 4, 3, 1]), mindspore.float32)
        >>> input_y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
        >>> batmatmul = P.BatchMatMul(transpose_a=True)
        >>> output = batmatmul(input_x, input_y)
        >>> print(output)
        [[[[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]],
         [[[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]
          [[3. 3. 3. 3.]]]]
    """

    @prim_attr_register
    def __init__(self, transpose_a=False, transpose_b=False):
        self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['output'])
        cls_name = self.name
        validator.check_value_type("transpose_a", transpose_a, [bool], cls_name)
        validator.check_value_type("transpose_b", transpose_b, [bool], cls_name)

    def check_shape_size(self, x, y):
        if len(x) != len(y) or len(x) < 3:
            raise ValueError('For \'BatchMatMul\' inputs x, y should have the same dimension size, which should be '
                             'greater than or equal to 3,' + f' while x size = {len(x)}, y size = {len(y)}')


class CumSum(PrimitiveWithInfer):
    """
    Computes the cumulative sum of input tensor along axis.

    Args:
        exclusive (bool): If true, perform exclusive mode. Default: False.
        reverse (bool): If true, perform inverse cumulative sum. Default: False.

    Inputs:
        - **input** (Tensor) - The input tensor to accumulate.
        - **axis** (int) - The axis to accumulate the tensor's value. Only constant value is allowed.
          Must be in the range [-rank(input), rank(input)).

    Outputs:
        Tensor, the shape of the output tensor is consistent with the input tensor's.

    Examples:
        >>> input = Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))
        >>> cumsum = P.CumSum()
        >>> output = cumsum(input, 1)
        >>> print(output)
        [[ 3.  7. 13. 23.]
         [ 1.  7. 14. 23.]
         [ 4.  7. 15. 22.]
         [ 1.  4. 11. 20.]]
    """

    @prim_attr_register
    def __init__(self, exclusive=False, reverse=False):
        """Initialize CumSum"""
        cls_name = self.name
        validator.check_value_type('exclusive', exclusive, [bool], cls_name)
        validator.check_value_type('reverse', reverse, [bool], cls_name)
        self.init_prim_io_names(inputs=['x', 'axis'], outputs=['y'])

    def __infer__(self, x, axis):
        cls_name = self.name
        x_shp = x['shape']
        if axis['value'] is None:
            raise ValueError(f"For {self.name}, axis must be const.")
        validator.check_value_type('axis', axis['value'], [int], cls_name)
        valid_dtypes = [mstype.uint8, mstype.int8, mstype.int32, mstype.float16, mstype.float32]
        validator.check_tensor_dtype_valid('x', x['dtype'], valid_dtypes, cls_name)
        return {'shape': x_shp,
                'dtype': x['dtype'],
                'value': None}


class AddN(PrimitiveWithInfer):
    """
    Computes addition of all input tensors element-wise.

    All input tensors must have the same shape.

    Inputs:
        - **input_x** (Union(tuple[Tensor], list[Tensor])) - The input tuple or list
          is made up of multiple tensors whose dtype is number or bool to be added together.

    Outputs:
        Tensor, has the same shape and dtype as each entry of the `input_x`.

    Examples:
        >>> class NetAddN(nn.Cell):
        ...     def __init__(self):
        ...         super(NetAddN, self).__init__()
        ...         self.addN = P.AddN()
        ...
        ...     def construct(self, *z):
        ...         return self.addN(z)
        ...
        >>> net = NetAddN()
        >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32)
        >>> input_y = Tensor(np.array([4, 5, 6]), mindspore.float32)
        >>> output = net(input_x, input_y, input_x, input_y)
        >>> print(output)
        [10. 14. 18.]
    """

    @prim_attr_register
    def __init__(self):
        self.init_prim_io_names(inputs=["inputs"], outputs=["sum"])

    def check_elim(self, inputs):
        if len(inputs) != 1:
            return (False, None)
        if isinstance(inputs[0], Tensor):
            return (True, inputs[0])
        raise TypeError("Expecting Tensor, got : {}".format(type(inputs[0])))

    def infer_shape(self, inputs):
        cls_name = self.name
        validator.check_int(len(inputs), 1, Rel.GE, "inputs", cls_name)
        self.add_prim_attr('n', len(inputs))
        shp0 = inputs[0]
        for i, shp in enumerate(inputs):
            validator.check(f"shape of inputs[{i}]", shp, 'shape of inputs[0]', shp0, Rel.EQ, cls_name)
        return shp0

    def infer_dtype(self, inputs):
        cls_name = self.name
        validator.check_value_type("inputs", inputs, [tuple, list], cls_name)
        validator.check_int(len(inputs), 1, Rel.GE, "inputs", cls_name)
        args = {}
        contains_undetermined = False
        for i, dtype in enumerate(inputs):
            args[f"inputs[{i}]"] = dtype
            if dtype == mstype.undetermined:
                contains_undetermined = True
        if not contains_undetermined:
            validator.check_tensors_dtypes_same_and_valid(args, mstype.number_type + (mstype.bool_,), cls_name)
        return inputs[0]

    def infer_value(self, inputs):
        if inputs is None:
            return None
        for x in inputs:
            if x is None:
                return None
        added = copy.deepcopy(inputs[0].asnumpy())
        for x in inputs[1:]:
            added += x.asnumpy()
        out = np.array(added, inputs[0].asnumpy().dtype)
        return Tensor(out)
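
# Illustrative note: like TensorAdd.infer_value, this folds AddN when every
# input is a known constant. inputs[0] is deep-copied first so the in-place
# accumulation (`added += x.asnumpy()`) cannot mutate the numpy buffer backing
# the first input tensor; the result keeps the dtype of inputs[0].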


class AccumulateNV2(PrimitiveWithInfer):
    """
    Computes accumulation of all input tensors element-wise.

    AccumulateNV2 is similar to AddN, but there is a significant difference
    between them: AccumulateNV2 will not wait for all of its inputs to be ready
    before summing. That is to say, AccumulateNV2 is able to save
    memory when inputs are ready at different times, since the minimum temporary
    storage is proportional to the output size rather than the input size.

    Inputs:
        - **input_x** (Union(tuple[Tensor], list[Tensor])) - The input tuple or list
          is made up of multiple tensors whose dtype is number to be added together.

    Outputs:
        Tensor, has the same shape and dtype as each entry of the `input_x`.

    Examples:
        >>> class NetAccumulateNV2(nn.Cell):
        ...     def __init__(self):
        ...         super(NetAccumulateNV2, self).__init__()
        ...         self.accumulateNV2 = P.AccumulateNV2()
        ...
        ...     def construct(self, *z):
        ...         return self.accumulateNV2(z)
        ...
        >>> net = NetAccumulateNV2()
        >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32)
        >>> input_y = Tensor(np.array([4, 5, 6]), mindspore.float32)
        >>> output = net(input_x, input_y, input_x, input_y)
        >>> print(output)
        [10. 14. 18.]
    """

    @prim_attr_register
    def __init__(self):
        self.__setattr_flag__ = True
        self.init_prim_io_names(inputs=["inputs"], outputs=["sum"])

    def check_elim(self, inputs):
        if len(inputs) != 1:
            return (False, None)
        if isinstance(inputs[0], Tensor):
            return (True, inputs[0])
        raise TypeError("Expecting Tensor, got : {}".format(type(inputs[0])))

    def infer_shape(self, inputs):
        cls_name = self.name
        validator.check_int(len(inputs), 1, Rel.GE, "inputs", cls_name)
        self.add_prim_attr('n', len(inputs))
        shp0 = inputs[0]
        for i, shp in enumerate(inputs):
            validator.check(f"shape of inputs[{i}]", shp, 'shape of inputs[0]', shp0, Rel.EQ, cls_name)
        return shp0

    def infer_dtype(self, inputs):
        cls_name = self.name
        validator.check_value_type("inputs", inputs, [tuple, list], cls_name)
        validator.check_int(len(inputs), 1, Rel.GE, "inputs", cls_name)
        args = {}
        for i, dtype in enumerate(inputs):
            args[f"inputs[{i}]"] = dtype
        validator.check_tensors_dtypes_same_and_valid(args, mstype.number_type + (mstype.bool_,), cls_name)
        return inputs[0]


class Neg(PrimitiveWithInfer):
    """
    Returns a tensor with negative values of the input tensor element-wise.

    Inputs:
        - **input_x** (Tensor) - The input tensor whose dtype is number.

    Outputs:
        Tensor, has the same shape and dtype as input.

    Examples:
        >>> neg = P.Neg()
        >>> input_x = Tensor(np.array([1, 2, -1, 2, 0, -3.5]), mindspore.float32)
        >>> output = neg(input_x)
        >>> print(output)
        [-1.  -2.   1.  -2.   0.   3.5]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Neg"""
        self.init_prim_io_names(inputs=['x'], outputs=['y'])

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        validator.check_tensor_dtype_valid("x", x_dtype, mstype.number_type, self.name)
        return x_dtype

    def infer_value(self, input_x):
        if input_x is not None:
            input_x = input_x.asnumpy()
            out = np.array(-input_x, input_x.dtype)
            return Tensor(out)
        return None


class InplaceAdd(PrimitiveWithInfer):
    """
    Adds `v` into specified rows of `x`. Computes `y` = `x`; y[i, :] += v.

    Args:
        indices (Union[int, tuple]): Indices into the left-most dimension of `x`, which determine which rows of `x`
            to add with `v`. It is an integer or a tuple, whose values are in [0, the first dimension size of `x`).

    Inputs:
        - **input_x** (Tensor) - The first input is a tensor whose data type is float16, float32 or int32.
        - **input_v** (Tensor) - The second input is a tensor that has the same dimension sizes as `x` except
          the first dimension, which must be the same as the size of `indices`. It has the same data type as
          `input_x`.

    Outputs:
        Tensor, has the same shape and dtype as `input_x`.

    Examples:
        >>> indices = (0, 1)
        >>> input_x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
        >>> input_v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
        >>> inplaceAdd = P.InplaceAdd(indices)
        >>> output = inplaceAdd(input_x, input_v)
        >>> print(output)
        [[1.5 3. ]
         [4.  5.5]
         [5.  6. ]]
    """

    @prim_attr_register
    def __init__(self, indices):
        """Initialize InplaceAdd"""
        self.init_prim_io_names(inputs=['x', 'v'], outputs=['y'])
        self.indices = indices
        validator.check_value_type('indices', indices, [tuple, int], self.name)
        if isinstance(indices, int):
            self.indices = (indices,)
        for item in self.indices:
            validator.check_value_type("item of indices", item, [int], self.name)

    def infer_dtype(self, x_dtype, v_dtype):
        args = {'x': x_dtype, 'v': v_dtype}
        valid_type = [mstype.int32, mstype.float16, mstype.float32]
        validator.check_tensors_dtypes_same_and_valid(args, valid_type, self.name)
        return x_dtype

    def infer_shape(self, x_shape, v_shape):
        validator.check("x", len(x_shape), "v", len(v_shape), Rel.EQ, self.name)
        validator.check("size of indices", len(self.indices), "v's first dimension", v_shape[0],
                        Rel.EQ, self.name)
        for i in self.indices:
            if i < 0 or i >= x_shape[0]:
                raise ValueError(f'The value of indices must be in [0, {x_shape[0]}), but got {i}.')
        x_rank = len(x_shape)
        for idx in range(x_rank)[1:]:
            validator.check('v dim %d' % idx, v_shape[idx], "x dim %d" % idx, x_shape[idx], Rel.EQ, self.name)
        return x_shape
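
# Illustrative note: infer_shape enforces that v lines up row-wise with x. For
# example, with x of shape (3, 2) and indices=(0, 1), v must have shape (2, 2):
# its first dimension equals len(indices) and every trailing dimension matches x.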
  878. class InplaceSub(PrimitiveWithInfer):
  879. """
  880. Subtracts v into specified rows of x. Computes y = x; y[i, :] -= v.
  881. Args:
  882. indices (Union[int, tuple]): Indices into the left-most dimension of x, and determines which rows of x
  883. to subtract with v. It is a int or tuple, whose value is in [0, the first dimension size of x).
  884. Inputs:
  885. - **input_x** (Tensor) - The first input is a tensor whose data type is float16, float32 or int32.
  886. - **input_v** (Tensor) - The second input is a tensor who has the same dimension sizes as x except
  887. the first dimension, which must be the same as indices's size. It has the same data type with `input_x`.
  888. Outputs:
  889. Tensor, has the same shape and dtype as input_x.
  890. Examples:
  891. >>> indices = (0, 1)
  892. >>> input_x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
  893. >>> input_v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
  894. >>> inplaceSub = P.InplaceSub(indices)
  895. >>> output = inplaceSub(input_x, input_v)
  896. >>> print(output)
  897. [[0.5 1. ]
  898. [2. 2.5]
  899. [5. 6. ]]
  900. """
  901. @prim_attr_register
  902. def __init__(self, indices):
  903. """Initialize InplaceSub"""
  904. self.init_prim_io_names(inputs=['x', 'v'], outputs=['y'])
  905. self.indices = indices
  906. validator.check_value_type('indices', indices, [tuple, int], self.name)
  907. if isinstance(indices, int):
  908. self.indices = (indices,)
  909. for item in self.indices:
  910. validator.check_value_type("item of indices", item, [int], self.name)
  911. def infer_dtype(self, x_dtype, v_dtype):
  912. args = {'x': x_dtype, 'v': v_dtype}
  913. valid_type = [mstype.int32, mstype.float16, mstype.float32]
  914. validator.check_tensors_dtypes_same_and_valid(args, valid_type, self.name)
  915. return x_dtype
  916. def infer_shape(self, x_shape, v_shape):
  917. validator.check("x", len(x_shape), "v", len(v_shape), Rel.EQ, self.name)
  918. validator.check("size of indices", len(self.indices), "v's first dimension", v_shape[0],
  919. Rel.EQ, self.name)
  920. for i in self.indices:
  921. if i < 0 or i >= x_shape[0]:
  922. raise ValueError(f'The value of indices must be in [0, {x_shape[0]}), but got {i}.')
  923. x_rank = len(x_shape)
924. for idx in range(1, x_rank):
  925. validator.check('v dim %d' % idx, v_shape[idx], "x dim %d" % idx, x_shape[idx], Rel.EQ, self.name)
  926. return x_shape
  927. class Sub(_MathBinaryOp):
  928. """
  929. Subtracts the second input tensor from the first input tensor element-wise.
  930. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  931. The inputs must be two tensors or one tensor and one scalar.
  932. When the inputs are two tensors,
  933. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  934. When the inputs are one tensor and one scalar,
  935. the scalar could only be a constant.
  936. Inputs:
  937. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
  938. or a tensor whose data type is number or bool.
  939. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number, or a bool when the first input
  940. is a tensor, or a tensor whose data type is number or bool.
  941. Outputs:
  942. Tensor, the shape is the same as the one after broadcasting,
  943. and the data type is the one with higher precision or higher digits among the two inputs.
  944. Examples:
  945. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  946. >>> input_y = Tensor(np.array([4, 5, 6]), mindspore.int32)
  947. >>> sub = P.Sub()
  948. >>> output = sub(input_x, input_y)
  949. >>> print(output)
  950. [-3 -3 -3]
  951. """
  952. def infer_value(self, x, y):
  953. if x is not None and y is not None:
  954. x = x.asnumpy()
  955. y = y.asnumpy()
  956. out = x - y
  957. out = np.array(out, x.dtype)
  958. return Tensor(out)
  959. return None
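# Hedged usage sketch of the tensor/scalar form described in the docstring
# (assumed names, mirroring the example above):
#
#     sub = P.Sub()
#     x = Tensor(np.array([4, 5, 6]), mindspore.int32)
#     output = sub(x, 1)   # the scalar second input must be a constant
#     # output: [3 4 5]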
  960. class Mul(_MathBinaryOp):
  961. """
  962. Multiplies two tensors element-wise.
  963. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  964. The inputs must be two tensors or one tensor and one scalar.
  965. When the inputs are two tensors,
  966. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  967. When the inputs are one tensor and one scalar,
  968. the scalar could only be a constant.
  969. Inputs:
  970. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  971. a bool or a tensor whose data type is number or bool.
  972. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  973. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  974. Outputs:
  975. Tensor, the shape is the same as the one after broadcasting,
  976. and the data type is the one with higher precision or higher digits among the two inputs.
  977. Examples:
  978. >>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
  979. >>> input_y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
  980. >>> mul = P.Mul()
  981. >>> output = mul(input_x, input_y)
  982. >>> print(output)
  983. [ 4. 10. 18.]
  984. """
  985. def infer_value(self, x, y):
  986. if x is not None and y is not None:
  987. x = x.asnumpy()
  988. y = y.asnumpy()
  989. out = x * y
  990. out = np.array(out, x.dtype)
  991. return Tensor(out)
  992. return None
  993. class SquaredDifference(_MathBinaryOp):
  994. """
995. Subtracts the second input tensor from the first input tensor element-wise and returns the square of the difference.
  996. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  997. The inputs must be two tensors or one tensor and one scalar.
  998. When the inputs are two tensors,
  999. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1000. When the inputs are one tensor and one scalar,
  1001. the scalar could only be a constant.
  1002. Inputs:
  1003. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
  1004. or a tensor whose data type is float16, float32, int32 or bool.
  1005. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number, or a bool when the first input
1006. is a tensor, or a tensor whose data type is float16, float32, int32 or bool.
  1007. Outputs:
  1008. Tensor, the shape is the same as the one after broadcasting,
  1009. and the data type is the one with higher precision or higher digits among the two inputs.
  1010. Examples:
  1011. >>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
  1012. >>> input_y = Tensor(np.array([2.0, 4.0, 6.0]), mindspore.float32)
  1013. >>> squared_difference = P.SquaredDifference()
  1014. >>> output = squared_difference(input_x, input_y)
  1015. >>> print(output)
  1016. [1. 4. 9.]
  1017. """
  1018. def infer_dtype(self, x_dtype, y_dtype):
  1019. valid_type = [mstype.float16, mstype.float32, mstype.int32]
  1020. return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, valid_type, self.name)
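# NumPy check of the semantics above (an illustration, not the implementation):
# SquaredDifference computes (x - y) ** 2 element-wise.
#
#     np.square(np.array([1., 2., 3.]) - np.array([2., 4., 6.]))   # -> [1. 4. 9.]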
  1021. class Square(PrimitiveWithInfer):
  1022. """
  1023. Returns square of a tensor element-wise.
  1024. Inputs:
  1025. - **input_x** (Tensor) - The input tensor whose dtype is number.
  1026. Outputs:
  1027. Tensor, has the same shape and dtype as the `input_x`.
  1028. Examples:
  1029. >>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
  1030. >>> square = P.Square()
  1031. >>> output = square(input_x)
  1032. >>> print(output)
  1033. [1. 4. 9.]
  1034. """
  1035. @prim_attr_register
  1036. def __init__(self):
  1037. """Initialize Square"""
  1038. self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
  1039. def infer_shape(self, x_shape):
  1040. return x_shape
  1041. def infer_dtype(self, x_dtype):
  1042. validator.check_tensor_dtype_valid("x", x_dtype, mstype.number_type, self.name)
  1043. return x_dtype
  1044. def infer_value(self, x):
  1045. if x is not None:
  1046. x = x.asnumpy()
  1047. out = x * x
  1048. out = np.array(out, x.dtype)
  1049. return Tensor(out)
  1050. return None
  1051. class Rsqrt(PrimitiveWithInfer):
  1052. """
1053. Computes the reciprocal of the square root of the input tensor element-wise.
  1054. Inputs:
  1055. - **input_x** (Tensor) - The input of Rsqrt. Each element must be a non-negative number.
  1056. Outputs:
  1057. Tensor, has the same type and shape as `input_x`.
  1058. Examples:
  1059. >>> input_tensor = Tensor([[4, 4], [9, 9]], mindspore.float32)
  1060. >>> rsqrt = P.Rsqrt()
  1061. >>> output = rsqrt(input_tensor)
  1062. >>> print(output)
  1063. [[0.5 0.5 ]
  1064. [0.333334 0.333334]]
  1065. """
  1066. @prim_attr_register
  1067. def __init__(self):
  1068. """Initialize Rsqrt"""
  1069. self.init_prim_io_names(inputs=['x'], outputs=['output'])
  1070. def infer_shape(self, x_shape):
  1071. return x_shape
  1072. def infer_dtype(self, x_dtype):
  1073. validator.check_tensor_dtype_valid("x", x_dtype, mstype.number_type, self.name)
  1074. return x_dtype
  1075. def infer_value(self, x):
  1076. if x is not None:
  1077. x = x.asnumpy()
  1078. out = 1.0 / np.sqrt(x)
  1079. out = np.array(out, x.dtype)
  1080. return Tensor(out)
  1081. return None
  1082. class Sqrt(PrimitiveWithCheck):
  1083. """
  1084. Returns square root of a tensor element-wise.
  1085. Inputs:
  1086. - **input_x** (Tensor) - The input tensor whose dtype is number.
  1087. Outputs:
  1088. Tensor, has the same shape as the `input_x`.
  1089. Examples:
  1090. >>> input_x = Tensor(np.array([1.0, 4.0, 9.0]), mindspore.float32)
  1091. >>> sqrt = P.Sqrt()
  1092. >>> output = sqrt(input_x)
  1093. >>> print(output)
  1094. [1. 2. 3.]
  1095. """
  1096. @prim_attr_register
  1097. def __init__(self):
  1098. """Initialize Sqrt"""
  1099. self.init_prim_io_names(inputs=['x'], outputs=['output'])
  1100. def check_dtype(self, x_type):
  1101. validator.check_tensor_dtype_valid("x", x_type, mstype.number_type, self.name)
  1102. def infer_value(self, x):
  1103. if x is not None:
  1104. x = x.asnumpy()
  1105. out = np.sqrt(x)
  1106. out = np.array(out, x.dtype)
  1107. return Tensor(out)
  1108. return None
  1109. class Reciprocal(PrimitiveWithInfer):
  1110. """
  1111. Returns reciprocal of a tensor element-wise.
  1112. Inputs:
  1113. - **input_x** (Tensor) - The input tensor.
  1114. Outputs:
  1115. Tensor, has the same shape as the `input_x`.
  1116. Examples:
  1117. >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  1118. >>> reciprocal = P.Reciprocal()
  1119. >>> output = reciprocal(input_x)
  1120. >>> print(output)
  1121. [1. 0.5 0.25]
  1122. """
  1123. @prim_attr_register
  1124. def __init__(self):
  1125. """Initialize Reciprocal"""
  1126. if context.get_context("device_target") == "GPU":
  1127. self.target = "GPU"
  1128. else:
  1129. self.target = "OTHER"
  1130. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1131. def infer_shape(self, x):
  1132. return x
  1133. def infer_dtype(self, x):
  1134. validator.check_subclass("x", x, mstype.tensor, self.name)
  1135. return x
  1136. def infer_value(self, x):
  1137. if x is not None:
  1138. x = x.asnumpy()
  1139. out = 1.0 / x
  1140. out = np.array(out, x.dtype)
  1141. return Tensor(out)
  1142. return None
  1143. class Pow(_MathBinaryOp):
  1144. """
1145. Computes the first input tensor to the power of the second input.
  1146. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1147. The inputs must be two tensors or one tensor and one scalar.
  1148. When the inputs are two tensors,
  1149. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1150. When the inputs are one tensor and one scalar,
  1151. the scalar could only be a constant.
  1152. Inputs:
  1153. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1154. a bool or a tensor whose data type is number or bool.
  1155. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1156. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1157. Outputs:
  1158. Tensor, the shape is the same as the one after broadcasting,
  1159. and the data type is the one with higher precision or higher digits among the two inputs.
  1160. Examples:
  1161. >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  1162. >>> input_y = 3.0
  1163. >>> pow = P.Pow()
  1164. >>> output = pow(input_x, input_y)
  1165. >>> print(output)
  1166. [ 1. 8. 64.]
  1167. >>>
  1168. >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  1169. >>> input_y = Tensor(np.array([2.0, 4.0, 3.0]), mindspore.float32)
  1170. >>> pow = P.Pow()
  1171. >>> output = pow(input_x, input_y)
  1172. >>> print(output)
  1173. [ 1. 16. 64.]
  1174. """
  1175. def infer_value(self, x, power):
  1176. if x is not None and power is not None:
  1177. x = x.asnumpy()
  1178. power = power.asnumpy()
  1179. out = np.power(x, power)
  1180. out = np.array(out, x.dtype)
  1181. return Tensor(out)
  1182. return None
  1183. class Exp(PrimitiveWithInfer):
  1184. """
  1185. Returns exponential of a tensor element-wise.
  1186. Inputs:
1187. - **input_x** (Tensor) - The input tensor. The data type must be float16 or float32.
  1188. Outputs:
  1189. Tensor, has the same shape and dtype as the `input_x`.
  1190. Examples:
  1191. >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  1192. >>> exp = P.Exp()
  1193. >>> output = exp(input_x)
  1194. >>> print(output)
  1195. [ 2.718282 7.389056 54.598152]
  1196. """
  1197. @prim_attr_register
  1198. def __init__(self):
  1199. """Initialize Exp"""
  1200. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1201. def infer_shape(self, x_shape):
  1202. return x_shape
  1203. def infer_dtype(self, x_type):
  1204. validator.check_subclass("x", x_type, mstype.tensor, self.name)
  1205. return x_type
  1206. def infer_value(self, x):
  1207. if x is not None:
  1208. x = x.asnumpy()
  1209. out = np.exp(x)
  1210. out = np.array(out, x.dtype)
  1211. return Tensor(out)
  1212. return None
  1213. class Expm1(PrimitiveWithInfer):
  1214. """
1215. Returns the exponential of a tensor minus 1 element-wise, i.e. exp(x) - 1.
  1216. Inputs:
  1217. - **input_x** (Tensor) - The input tensor. With float16 or float32 data type.
  1218. Outputs:
  1219. Tensor, has the same shape as the `input_x`.
  1220. Examples:
  1221. >>> input_x = Tensor(np.array([0.0, 1.0, 2.0, 4.0]), mindspore.float32)
  1222. >>> expm1 = P.Expm1()
  1223. >>> output = expm1(input_x)
  1224. >>> print(output)
  1225. [ 0. 1.718282 6.389056 53.598152]
  1226. """
  1227. @prim_attr_register
  1228. def __init__(self):
  1229. """Initialize Exp"""
  1230. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1231. def infer_shape(self, x_shape):
  1232. return x_shape
  1233. def infer_dtype(self, x_type):
  1234. validator.check_tensor_dtype_valid("x", x_type, [mstype.float16, mstype.float32], self.name)
  1235. return x_type
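# Why Expm1 exists as a separate primitive (a general numerical-accuracy fact,
# not MindSpore-specific): for small x, exp(x) - 1 suffers catastrophic
# cancellation, while a fused expm1 stays accurate.
#
#     import numpy as np
#     x = np.float32(1e-7)
#     np.exp(x) - 1.0   # ~1.19e-07 in float32: dominated by rounding error
#     np.expm1(x)       # ~1.00e-07: accurate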
  1236. class HistogramFixedWidth(PrimitiveWithInfer):
  1237. """
1238. Returns a rank-1 histogram counting the number of entries in `x` that fall into every bin. The bins are of equal
1239. width and are determined by the arguments `range` and `nbins`.
  1240. Args:
  1241. dtype (str): An optional attribute. Must be one of the following types: "int32", "int64". Default: "int32".
  1242. nbins (int): The number of histogram bins, the type is a positive integer.
  1243. Inputs:
  1244. - **x** (Tensor) - Numeric Tensor. Must be one of the following types: int32, float32, float16.
1245. - **range** (Tensor) - Must have the same data type as `x`, and the shape is [2].
1246. x <= range[0] will be mapped to hist[0]; x >= range[1] will be mapped to hist[-1].
  1247. Outputs:
  1248. Tensor, the type is int32.
  1249. Examples:
  1250. >>> x = Tensor([-1.0, 0.0, 1.5, 2.0, 5.0, 15], mindspore.float16)
  1251. >>> range = Tensor([0.0, 5.0], mindspore.float16)
  1252. >>> hist = P.HistogramFixedWidth(5)
  1253. >>> output = hist(x, range)
  1254. >>> print(output)
  1255. [2 1 1 0 2]
  1256. """
  1257. @prim_attr_register
  1258. def __init__(self, nbins, dtype='int32'):
  1259. self.nbins = validator.check_value_type("nbins", nbins, [int], self.name)
  1260. validator.check_int(nbins, 1, Rel.GE, "nbins", self.name)
  1261. valid_values = ['int32', 'int64']
  1262. self.dtype = validator.check_string(dtype, valid_values, "dtype", self.name)
  1263. self.init_prim_io_names(inputs=['x', 'range'], outputs=['y'])
  1264. def infer_shape(self, x_shape, range_shape):
  1265. return (self.nbins,)
  1266. def infer_dtype(self, x_dtype, range_dtype):
  1267. valid_dtypes = (mstype.float16, mstype.float32, mstype.int32)
  1268. validator.check_tensor_dtype_valid("x", x_dtype, valid_dtypes, self.name)
  1269. validator.check_tensor_dtype_valid("range", range_dtype, valid_dtypes, self.name)
  1270. y_dtype = mstype.int32
  1271. return y_dtype
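# Hedged NumPy equivalent of the docstring example (assumes clipping to `range`
# reproduces the rule that out-of-range values land in the edge bins):
#
#     import numpy as np
#     x = np.array([-1.0, 0.0, 1.5, 2.0, 5.0, 15.0])
#     hist, _ = np.histogram(np.clip(x, 0.0, 5.0), bins=5, range=(0.0, 5.0))
#     # hist -> [2 1 1 0 2], matching the example output above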
  1272. class Log(PrimitiveWithInfer):
  1273. """
  1274. Returns the natural logarithm of a tensor element-wise.
  1275. Inputs:
  1276. - **input_x** (Tensor) - The input tensor. With float16 or float32 data type. The value must be greater than 0.
  1277. Outputs:
  1278. Tensor, has the same shape as the `input_x`.
  1279. Examples:
  1280. >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  1281. >>> log = P.Log()
  1282. >>> output = log(input_x)
  1283. >>> print(output)
1284. [0. 0.6931472 1.3862944]
  1285. """
  1286. @prim_attr_register
  1287. def __init__(self):
  1288. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1289. def infer_shape(self, x):
  1290. return x
  1291. def infer_dtype(self, x):
  1292. validator.check_subclass("x", x, mstype.tensor, self.name)
  1293. return x
  1294. def infer_value(self, x):
  1295. if x is not None:
  1296. x = x.asnumpy()
  1297. out = np.log(x)
  1298. out = np.array(out, x.dtype)
  1299. return Tensor(out)
  1300. return None
  1301. class Log1p(PrimitiveWithInfer):
  1302. """
  1303. Returns the natural logarithm of one plus the input tensor element-wise.
  1304. Inputs:
  1305. - **input_x** (Tensor) - The input tensor. With float16 or float32 data type. The value must be greater than -1.
  1306. Outputs:
  1307. Tensor, has the same shape as the `input_x`.
  1308. Examples:
  1309. >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
  1310. >>> log1p = P.Log1p()
  1311. >>> output = log1p(input_x)
  1312. >>> print(output)
  1313. [0.6931472 1.0986123 1.609438 ]
  1314. """
  1315. @prim_attr_register
  1316. def __init__(self):
  1317. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1318. def infer_shape(self, x_shape):
  1319. return x_shape
  1320. def infer_dtype(self, x_dtype):
  1321. validator.check_subclass("x", x_dtype, mstype.tensor, self.name)
  1322. validator.check_tensor_dtype_valid("x", x_dtype, [mstype.float16, mstype.float32], self.name)
  1323. return x_dtype
  1324. class Erf(PrimitiveWithInfer):
  1325. r"""
  1326. Computes the Gauss error function of `input_x` element-wise.
  1327. Inputs:
  1328. - **input_x** (Tensor) - The input tensor. The data type must be float16 or float32.
  1329. Outputs:
  1330. Tensor, has the same shape and dtype as the `input_x`.
  1331. Examples:
  1332. >>> input_x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
  1333. >>> erf = P.Erf()
  1334. >>> output = erf(input_x)
  1335. >>> print(output)
  1336. [-0.8427168 0. 0.8427168 0.99530876 0.99997765]
  1337. """
  1338. @prim_attr_register
  1339. def __init__(self):
  1340. """Initialize Erf"""
  1341. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1342. def infer_shape(self, x_shape):
  1343. return x_shape
  1344. def infer_dtype(self, x_dtype):
  1345. validator.check_tensor_dtype_valid("x", x_dtype, [mstype.float16, mstype.float32], self.name)
  1346. return x_dtype
  1347. class Erfc(PrimitiveWithInfer):
  1348. r"""
  1349. Computes the complementary error function of `input_x` element-wise.
  1350. Inputs:
  1351. - **input_x** (Tensor) - The input tensor. The data type must be float16 or float32.
  1352. Outputs:
  1353. Tensor, has the same shape and dtype as the `input_x`.
  1354. Examples:
  1355. >>> input_x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
  1356. >>> erfc = P.Erfc()
  1357. >>> output = erfc(input_x)
  1358. >>> print(output)
  1359. [1.8427168e+00 1.0000000e+00 1.5728319e-01 4.6912432e-03 2.2351742e-05]
  1360. """
  1361. @prim_attr_register
  1362. def __init__(self):
  1363. """Initialize Erfc"""
  1364. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1365. def infer_shape(self, x_shape):
  1366. return x_shape
  1367. def infer_dtype(self, x_type):
  1368. validator.check_tensor_dtype_valid("x", x_type, [mstype.float16, mstype.float32], self.name)
  1369. return x_type
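# Consistency check between the Erf and Erfc examples above (a mathematical
# identity, not an implementation detail): erfc(x) = 1 - erf(x); e.g. at x = 1,
# 1 - 0.8427168 = 0.1572832 ~= 1.5728319e-01.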
  1370. class Minimum(_MathBinaryOp):
  1371. """
  1372. Computes the minimum of input tensors element-wise.
  1373. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1374. The inputs must be two tensors or one tensor and one scalar.
  1375. When the inputs are two tensors,
  1376. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1377. When the inputs are one tensor and one scalar,
  1378. the scalar could only be a constant.
  1379. Inputs:
  1380. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1381. a bool or a tensor whose data type is number or bool.
  1382. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1383. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1384. Outputs:
  1385. Tensor, the shape is the same as the one after broadcasting,
  1386. and the data type is the one with higher precision or higher digits among the two inputs.
  1387. Examples:
  1388. >>> input_x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
  1389. >>> input_y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
  1390. >>> minimum = P.Minimum()
  1391. >>> output = minimum(input_x, input_y)
  1392. >>> print(output)
  1393. [1. 2. 3.]
  1394. """
  1395. def infer_value(self, x, y):
  1396. if x is not None and y is not None:
  1397. x = x.asnumpy()
  1398. y = y.asnumpy()
  1399. out = np.minimum(x, y)
  1400. out = np.array(out, x.dtype)
  1401. return Tensor(out)
  1402. return None
  1403. class Maximum(_MathBinaryOp):
  1404. """
  1405. Computes the maximum of input tensors element-wise.
  1406. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1407. The inputs must be two tensors or one tensor and one scalar.
  1408. When the inputs are two tensors,
  1409. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1410. When the inputs are one tensor and one scalar,
  1411. the scalar could only be a constant.
  1412. Inputs:
  1413. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1414. a bool or a tensor whose data type is number or bool.
  1415. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1416. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1417. Outputs:
  1418. Tensor, the shape is the same as the one after broadcasting,
  1419. and the data type is the one with higher precision or higher digits among the two inputs.
  1420. Examples:
  1421. >>> input_x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
  1422. >>> input_y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
  1423. >>> maximum = P.Maximum()
  1424. >>> output = maximum(input_x, input_y)
  1425. >>> print(output)
  1426. [4. 5. 6.]
  1427. """
  1428. def infer_value(self, x, y):
  1429. if x is not None and y is not None:
  1430. x = x.asnumpy()
  1431. y = y.asnumpy()
  1432. out = np.maximum(x, y)
  1433. out = np.array(out, x.dtype)
  1434. return Tensor(out)
  1435. return None
  1436. class RealDiv(_MathBinaryOp):
  1437. """
1438. Divides the first input tensor by the second input tensor element-wise in floating-point type.
  1439. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1440. The inputs must be two tensors or one tensor and one scalar.
  1441. When the inputs are two tensors,
  1442. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1443. When the inputs are one tensor and one scalar,
  1444. the scalar could only be a constant.
  1445. Inputs:
  1446. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1447. a bool or a tensor whose data type is number or bool.
  1448. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1449. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1450. Outputs:
  1451. Tensor, the shape is the same as the one after broadcasting,
  1452. and the data type is the one with higher precision or higher digits among the two inputs.
  1453. Examples:
  1454. >>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
  1455. >>> input_y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
  1456. >>> realdiv = P.RealDiv()
  1457. >>> output = realdiv(input_x, input_y)
  1458. >>> print(output)
  1459. [0.25 0.4 0.5 ]
  1460. """
  1461. def infer_value(self, x, y):
  1462. if x is not None and y is not None:
  1463. x = x.asnumpy()
  1464. y = y.asnumpy()
  1465. out = x / y
  1466. out = np.array(out, x.dtype)
  1467. return Tensor(out)
  1468. return None
  1469. class Div(_MathBinaryOp):
  1470. """
  1471. Computes the quotient of dividing the first input tensor by the second input tensor element-wise.
  1472. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1473. The inputs must be two tensors or one tensor and one scalar.
  1474. When the inputs are two tensors,
  1475. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1476. When the inputs are one tensor and one scalar,
  1477. the scalar could only be a constant.
  1478. Inputs:
  1479. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1480. a bool or a tensor whose data type is number or bool.
1481. - **input_y** (Union[Tensor, Number, bool]) - When the first input is a tensor, the second input
  1482. could be a number, a bool, or a tensor whose data type is number or bool. When the first input
  1483. is a number or a bool, the second input must be a tensor whose data type is number or bool.
  1484. Outputs:
  1485. Tensor, the shape is the same as the one after broadcasting,
  1486. and the data type is the one with higher precision or higher digits among the two inputs.
  1487. Examples:
  1488. >>> input_x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
  1489. >>> input_y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
  1490. >>> div = P.Div()
  1491. >>> output = div(input_x, input_y)
  1492. >>> print(output)
  1493. [-1.3333334 2.5 2. ]
  1494. """
  1495. def infer_value(self, x, y):
  1496. if x is not None and y is not None:
  1497. x = x.asnumpy()
  1498. y = y.asnumpy()
  1499. out = np.array(x / y, x.dtype)
  1500. return Tensor(out)
  1501. return None
  1502. class DivNoNan(_MathBinaryOp):
  1503. """
1504. Computes a safe divide, which returns 0 if `input_y` is zero.
  1505. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1506. The inputs must be two tensors or one tensor and one scalar.
  1507. When the inputs are two tensors,
  1508. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1509. When the inputs are one tensor and one scalar,
  1510. the scalar could only be a constant.
  1511. Inputs:
  1512. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1513. a bool or a tensor whose data type is number or bool.
  1514. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1515. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1516. Outputs:
  1517. Tensor, the shape is the same as the one after broadcasting,
  1518. and the data type is the one with higher precision or higher digits among the two inputs.
  1519. Examples:
  1520. >>> input_x = Tensor(np.array([-1.0, 0., 1.0, 5.0, 6.0]), mindspore.float32)
  1521. >>> input_y = Tensor(np.array([0., 0., 0., 2.0, 3.0]), mindspore.float32)
  1522. >>> div_no_nan = P.DivNoNan()
  1523. >>> output = div_no_nan(input_x, input_y)
  1524. >>> print(output)
  1525. [0. 0. 0. 2.5 2. ]
  1526. """
  1527. @prim_attr_register
  1528. def __init__(self):
  1529. """Initialize _BinaryOp"""
  1530. self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
  1531. def infer_value(self, x, y):
  1532. if x is not None and y is not None:
  1533. x = x.asnumpy()
  1534. y = y.asnumpy()
  1535. with np.errstate(divide='ignore', invalid='ignore'):
  1536. out = np.true_divide(x, y)
  1537. out[~np.isfinite(out)] = 0
1538. return Tensor(out)
  1539. return None
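# NumPy sketch of the safe-divide semantics (an illustration; assumes division
# by zero is the only source of non-finite quotients in the example):
#
#     x = np.array([-1.0, 0.0, 1.0, 5.0, 6.0], np.float32)
#     y = np.array([0.0, 0.0, 0.0, 2.0, 3.0], np.float32)
#     out = np.zeros_like(x)
#     np.divide(x, y, out=out, where=(y != 0))   # -> [0. 0. 0. 2.5 2.]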
  1540. class FloorDiv(_MathBinaryOp):
  1541. """
1542. Divides the first input tensor by the second input tensor element-wise and rounds down to the closest integer.
  1543. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1544. The inputs must be two tensors or one tensor and one scalar.
  1545. When the inputs are two tensors,
  1546. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1547. When the inputs are one tensor and one scalar,
  1548. the scalar could only be a constant.
  1549. Inputs:
  1550. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1551. a bool or a tensor whose data type is number or bool.
  1552. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1553. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1554. Outputs:
  1555. Tensor, the shape is the same as the one after broadcasting,
  1556. and the data type is the one with higher precision or higher digits among the two inputs.
  1557. Examples:
  1558. >>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
  1559. >>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
  1560. >>> floor_div = P.FloorDiv()
  1561. >>> output = floor_div(input_x, input_y)
  1562. >>> print(output)
  1563. [ 0 1 -1]
  1564. """
  1565. class TruncateDiv(_MathBinaryOp):
  1566. """
1567. Divides the first input tensor by the second input tensor element-wise. For integer types, negative numbers
1568. round fractional quantities towards zero.
  1569. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1570. The inputs must be two tensors or one tensor and one scalar.
  1571. When the inputs are two tensors,
  1572. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1573. When the inputs are one tensor and one scalar,
  1574. the scalar could only be a constant.
  1575. Inputs:
  1576. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
  1577. or a tensor whose data type is number or bool.
  1578. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number, or a bool when the first input
  1579. is a tensor, or a tensor whose data type is number or bool.
  1580. Outputs:
  1581. Tensor, the shape is the same as the one after broadcasting,
  1582. and the data type is the one with higher precision or higher digits among the two inputs.
  1583. Examples:
  1584. >>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
  1585. >>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
  1586. >>> truncate_div = P.TruncateDiv()
  1587. >>> output = truncate_div(input_x, input_y)
  1588. >>> print(output)
  1589. [0 1 0]
  1590. """
  1591. class TruncateMod(_MathBinaryOp):
  1592. """
  1593. Returns element-wise remainder of division.
  1594. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1595. The inputs must be two tensors or one tensor and one scalar.
  1596. When the inputs are two tensors,
  1597. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1598. When the inputs are one tensor and one scalar,
  1599. the scalar could only be a constant.
  1600. Inputs:
  1601. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
  1602. or a tensor whose data type is number or bool.
  1603. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number, or a bool when the first input
  1604. is a tensor, or a tensor whose data type is number or bool.
  1605. Outputs:
  1606. Tensor, the shape is the same as the one after broadcasting,
  1607. and the data type is the one with higher precision or higher digits among the two inputs.
  1608. Examples:
  1609. >>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
  1610. >>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
  1611. >>> truncate_mod = P.TruncateMod()
  1612. >>> output = truncate_mod(input_x, input_y)
  1613. >>> print(output)
  1614. [ 2 1 -1]
  1615. """
  1616. class Mod(_MathBinaryOp):
  1617. """
  1618. Computes the remainder of dividing the first input tensor by the second input tensor element-wise.
  1619. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1620. The inputs must be two tensors or one tensor and one scalar. When the inputs are two tensors,
  1621. both dtypes cannot be bool, and the shapes of them could be broadcast. When the inputs are one tensor
  1622. and one scalar, the scalar could only be a constant.
  1623. Inputs:
  1624. - **input_x** (Union[Tensor, Number]) - The first input is a number or a tensor whose data type is number.
1625. - **input_y** (Union[Tensor, Number]) - When the first input is a tensor, the second input
  1626. could be a number or a tensor whose data type is number. When the first input is a number,
  1627. the second input must be a tensor whose data type is number.
  1628. Outputs:
  1629. Tensor, the shape is the same as the one after broadcasting,
  1630. and the data type is the one with higher precision or higher digits among the two inputs.
  1631. Raises:
  1632. ValueError: When `input_x` and `input_y` are not the same dtype.
  1633. Examples:
  1634. >>> input_x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
  1635. >>> input_y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
  1636. >>> mod = P.Mod()
  1637. >>> output = mod(input_x, input_y)
  1638. >>> print(output)
  1639. [-1. 1. 0.]
  1640. """
  1641. def infer_value(self, x, y):
  1642. if x is not None and y is not None:
  1643. x = x.asnumpy()
  1644. y = y.asnumpy()
  1645. return Tensor(np.fmod(x, y))
  1646. return None
  1647. class Floor(PrimitiveWithInfer):
  1648. """
1649. Rounds a tensor down to the closest integer element-wise.
  1650. Inputs:
  1651. - **input_x** (Tensor) - The input tensor. Its element data type must be float.
  1652. Outputs:
  1653. Tensor, has the same shape as `input_x`.
  1654. Examples:
  1655. >>> input_x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
  1656. >>> floor = P.Floor()
  1657. >>> output = floor(input_x)
  1658. >>> print(output)
  1659. [ 1. 2. -2.]
  1660. """
  1661. @prim_attr_register
  1662. def __init__(self):
  1663. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1664. def infer_shape(self, x_shape):
  1665. return x_shape
  1666. def infer_dtype(self, x_dtype):
  1667. validator.check_tensor_dtype_valid("x", x_dtype, mstype.float_type, self.name)
  1668. return x_dtype
  1669. class FloorMod(_MathBinaryOp):
  1670. """
1671. Computes the remainder of division element-wise.
  1672. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1673. The inputs must be two tensors or one tensor and one scalar.
  1674. When the inputs are two tensors,
1675. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1676. When the inputs are one tensor and one scalar,
  1677. the scalar could only be a constant.
  1678. Inputs:
  1679. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1680. a bool or a tensor whose data type is number or bool.
  1681. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1682. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1683. Outputs:
  1684. Tensor, the shape is the same as the one after broadcasting,
  1685. and the data type is the one with higher precision or higher digits among the two inputs.
  1686. Examples:
  1687. >>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
  1688. >>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
  1689. >>> floor_mod = P.FloorMod()
  1690. >>> output = floor_mod(input_x, input_y)
  1691. >>> print(output)
  1692. [2 1 2]
  1693. """
  1694. class Ceil(PrimitiveWithInfer):
  1695. """
1696. Rounds a tensor up to the closest integer element-wise.
  1697. Inputs:
1698. - **input_x** (Tensor) - The input tensor. Its element data type must be float16 or float32.
  1699. Outputs:
  1700. Tensor, has the same shape as `input_x`.
  1701. Examples:
  1702. >>> input_x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
  1703. >>> ceil_op = P.Ceil()
  1704. >>> output = ceil_op(input_x)
  1705. >>> print(output)
  1706. [ 2. 3. -1.]
  1707. """
  1708. @prim_attr_register
  1709. def __init__(self):
  1710. self.init_prim_io_names(inputs=['x'], outputs=['y'])
  1711. def infer_shape(self, x_shape):
  1712. return x_shape
  1713. def infer_dtype(self, x_dtype):
  1714. validator.check_tensor_dtype_valid("x", x_dtype, [mstype.float16, mstype.float32], self.name)
  1715. return x_dtype
  1716. class Xdivy(_MathBinaryOp):
  1717. """
1718. Divides the first input tensor by the second input tensor element-wise. Returns zero when `x` is zero.
  1719. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1720. The inputs must be two tensors or one tensor and one scalar.
  1721. When the inputs are two tensors,
  1722. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1723. When the inputs are one tensor and one scalar,
  1724. the scalar could only be a constant.
  1725. Inputs:
  1726. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number, or a bool,
  1727. or a tensor whose data type is float16, float32 or bool.
  1728. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number,
  1729. or a bool when the first input is a tensor, or a tensor whose data type is float16, float32 or bool.
  1730. Outputs:
  1731. Tensor, the shape is the same as the one after broadcasting,
  1732. and the data type is the one with higher precision or higher digits among the two inputs.
  1733. Examples:
  1734. >>> input_x = Tensor(np.array([2, 4, -1]), mindspore.float32)
  1735. >>> input_y = Tensor(np.array([2, 2, 2]), mindspore.float32)
  1736. >>> xdivy = P.Xdivy()
  1737. >>> output = xdivy(input_x, input_y)
  1738. >>> print(output)
  1739. [ 1. 2. -0.5]
  1740. """
  1741. def infer_dtype(self, x_dtype, y_dtype):
  1742. return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, [mstype.float16, mstype.float32], self.name)
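# Hedged NumPy sketch of the "returns zero when `x` is zero" rule (NumPy still
# evaluates x / y eagerly, so this only shows the semantics, not a warning-free
# implementation):
#
#     np.where(x == 0.0, 0.0, x / y)   # 0/0 yields 0 instead of NaN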
  1743. class Xlogy(_MathBinaryOp):
  1744. """
1745. Computes the first input tensor multiplied by the logarithm of the second input tensor element-wise.
  1746. Returns zero when `x` is zero.
  1747. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1748. The inputs must be two tensors or one tensor and one scalar.
  1749. When the inputs are two tensors,
  1750. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  1751. When the inputs are one tensor and one scalar,
  1752. the scalar could only be a constant.
  1753. Inputs:
  1754. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1755. a bool or a tensor whose data type is float16, float32 or bool.
  1756. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1757. a bool when the first input is a tensor or a tensor whose data type is float16, float32 or bool.
  1758. The value must be positive.
  1759. Outputs:
  1760. Tensor, the shape is the same as the one after broadcasting,
  1761. and the data type is the one with higher precision or higher digits among the two inputs.
  1762. Examples:
  1763. >>> input_x = Tensor(np.array([-5, 0, 4]), mindspore.float32)
  1764. >>> input_y = Tensor(np.array([2, 2, 2]), mindspore.float32)
  1765. >>> xlogy = P.Xlogy()
  1766. >>> output = xlogy(input_x, input_y)
  1767. >>> print(output)
  1768. [-3.465736 0. 2.7725887]
  1769. """
  1770. def infer_dtype(self, x_dtype, y_dtype):
  1771. return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, [mstype.float16, mstype.float32], self.name)
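# Equivalent closed form (an illustration; SciPy users may recognize this as
# scipy.special.xlogy): out = x * log(y), except 0 wherever x == 0.
#
#     x = np.array([-5.0, 0.0, 4.0], np.float32)
#     y = np.array([2.0, 2.0, 2.0], np.float32)
#     np.where(x == 0.0, 0.0, x * np.log(y))   # -> [-3.465736  0.  2.7725887]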
  1772. class Acosh(PrimitiveWithInfer):
  1773. """
1774. Computes the inverse hyperbolic cosine of the input element-wise.
  1775. Inputs:
  1776. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  1777. Outputs:
  1778. Tensor, has the same shape as `input_x`.
  1779. Examples:
  1780. >>> acosh = P.Acosh()
  1781. >>> input_x = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32)
  1782. >>> output = acosh(input_x)
  1783. """
  1784. @prim_attr_register
  1785. def __init__(self):
  1786. """Initialize Acosh"""
  1787. def infer_shape(self, x_shape):
  1788. return x_shape
  1789. def infer_dtype(self, x_dtype):
  1790. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
  1791. return x_dtype
  1792. class Cosh(PrimitiveWithInfer):
  1793. """
1794. Computes the hyperbolic cosine of the input element-wise.
  1795. Inputs:
  1796. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  1797. Outputs:
  1798. Tensor, has the same shape as `input_x`.
  1799. Examples:
  1800. >>> cosh = P.Cosh()
  1801. >>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
  1802. >>> output = cosh(input_x)
  1803. >>> print(output)
  1804. [1.0289385 1.364684 1.048436 1.0040528]
  1805. """
  1806. @prim_attr_register
  1807. def __init__(self):
  1808. """Initialize Cosh"""
  1809. def infer_shape(self, x_shape):
  1810. return x_shape
  1811. def infer_dtype(self, x_dtype):
  1812. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
  1813. return x_dtype
  1814. class Asinh(PrimitiveWithInfer):
  1815. """
1816. Computes the inverse hyperbolic sine of the input element-wise.
  1817. Inputs:
  1818. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  1819. Outputs:
  1820. Tensor, has the same shape as `input_x`.
  1821. Examples:
  1822. >>> asinh = P.Asinh()
  1823. >>> input_x = Tensor(np.array([-5.0, 1.5, 3.0, 100.0]), mindspore.float32)
  1824. >>> output = asinh(input_x)
  1825. >>> print(output)
  1826. [-2.3124385 1.1947632 1.8184465 5.298342 ]
  1827. """
  1828. @prim_attr_register
  1829. def __init__(self):
  1830. """Initialize Asinh"""
  1831. def infer_shape(self, x_shape):
  1832. return x_shape
  1833. def infer_dtype(self, x_dtype):
  1834. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
  1835. return x_dtype
  1836. class Sinh(PrimitiveWithInfer):
  1837. """
1838. Computes the hyperbolic sine of the input element-wise.
  1839. Inputs:
  1840. - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
  1841. Outputs:
  1842. Tensor, has the same shape as `input_x`.
  1843. Examples:
  1844. >>> sinh = P.Sinh()
  1845. >>> input_x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
  1846. >>> output = sinh(input_x)
  1847. >>> print(output)
  1848. [0.6604918 0.28367308 0.44337422 0.6604918 ]
  1849. """
  1850. @prim_attr_register
  1851. def __init__(self):
  1852. """Initialize Sinh"""
  1853. def infer_shape(self, x_shape):
  1854. return x_shape
  1855. def infer_dtype(self, x_dtype):
  1856. validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
  1857. return x_dtype
  1858. class _LogicBinaryOp(_BinaryOp):
  1859. """
  1860. Define logic binary operators.
  1861. """
  1862. @staticmethod
  1863. def do_infer_dtype(x_dtype, y_dtype, valid_type=mstype.number_type, prim_name=None):
  1864. args_dtype = {"x": x_dtype, "y": y_dtype}
  1865. validator.check_tensors_dtypes_same_and_valid(args_dtype, valid_type, prim_name)
  1866. return mstype.tensor_type(mstype.bool_)
  1867. def infer_dtype(self, x_dtype, y_dtype):
  1868. return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, prim_name=self.name)
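# Note, inferred from do_infer_dtype above: every comparison primitive derived
# from _LogicBinaryOp returns a bool tensor regardless of the input dtypes.
# Hedged sketch:
#
#     greater = P.Greater()
#     out = greater(Tensor(np.array([1, 2])), Tensor(np.array([2, 1])))
#     # out.dtype -> mindspore.bool_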
  1869. class Equal(_LogicBinaryOp):
  1870. """
  1871. Computes the equivalence between two tensors element-wise.
  1872. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1873. The inputs must be two tensors or one tensor and one scalar.
  1874. When the inputs are two tensors, the shapes of them could be broadcast.
  1875. When the inputs are one tensor and one scalar, the scalar could only be a constant.
  1876. Inputs:
  1877. - **input_x** (Union[Tensor, Number]) - The first input is a number or
  1878. a tensor whose data type is number.
  1879. - **input_y** (Union[Tensor, Number]) - The second input is a number
1880. when the first input is a tensor, or a tensor whose data type is number.
  1881. Outputs:
1882. Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
  1883. Examples:
  1884. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32)
  1885. >>> equal = P.Equal()
1886. >>> print(equal(input_x, 2.0))
1887. [False True False]
  1888. >>>
  1889. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  1890. >>> input_y = Tensor(np.array([1, 2, 4]), mindspore.int32)
  1891. >>> equal = P.Equal()
  1892. >>> output = equal(input_x, input_y)
  1893. >>> print(output)
  1894. [ True True False]
  1895. """
  1896. def infer_dtype(self, x_dtype, y_dtype):
  1897. return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type + (mstype.bool_,), self.name)
  1898. def infer_value(self, x, y):
  1899. if x is None or y is None:
  1900. return None
  1901. if isinstance(x, MetaTensor):
  1902. x = x.to_tensor()
  1903. if isinstance(y, MetaTensor):
  1904. y = y.to_tensor()
  1905. return Tensor(x.asnumpy() == y.asnumpy())
  1906. class ApproximateEqual(_LogicBinaryOp):
  1907. """
1908. Returns True if abs(x1 - x2) is smaller than tolerance element-wise, otherwise False.
  1909. Inputs of `x1` and `x2` comply with the implicit type conversion rules to make the data types consistent.
  1910. If they have different data types, lower priority data type will be converted to
  1911. relatively highest priority data type.
  1912. RuntimeError exception will be thrown when the data type conversion of Parameter is required.
  1913. Args:
  1914. tolerance (float): The maximum deviation that two elements can be considered equal. Default: 1e-05.
  1915. Inputs:
  1916. - **x1** (Tensor) - A tensor. Must be one of the following types: float32, float16.
1917. - **x2** (Tensor) - A tensor of the same type and shape as `x1`.
1918. Outputs:
1919. Tensor, the shape is the same as the shape of `x1`, and the data type is bool.
  1920. Examples:
  1921. >>> x1 = Tensor(np.array([1, 2, 3]), mindspore.float32)
  1922. >>> x2 = Tensor(np.array([2, 4, 6]), mindspore.float32)
  1923. >>> approximate_equal = P.ApproximateEqual(2.)
  1924. >>> output = approximate_equal(x1, x2)
  1925. >>> print(output)
  1926. [ True True False]
  1927. """
  1928. @prim_attr_register
  1929. def __init__(self, tolerance=1e-05):
  1930. """Initialize ApproximateEqual"""
  1931. validator.check_value_type("tolerance", tolerance, [float], self.name)
  1932. def infer_shape(self, x_shape, y_shape):
  1933. validator.check("x_shape", x_shape, "y_shape", y_shape, Rel.EQ, self.name)
  1934. return x_shape
  1935. def infer_dtype(self, x_dtype, y_dtype):
  1936. args_dtype = {"x": x_dtype, "y": y_dtype}
  1937. valid_type = [mstype.float32, mstype.float16]
  1938. validator.check_tensors_dtypes_same_and_valid(args_dtype, valid_type, prim_name=self.name)
  1939. return mstype.tensor_type(mstype.bool_)
  1940. class EqualCount(PrimitiveWithInfer):
  1941. """
1942. Computes the number of equal elements in two tensors.
  1943. The two input tensors must have the same data type and shape.
  1944. Inputs:
  1945. - **input_x** (Tensor) - The first input tensor.
  1946. - **input_y** (Tensor) - The second input tensor.
  1947. Outputs:
1948. Tensor, with the same type as the input tensor, and the shape is (1,).
  1949. Examples:
  1950. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  1951. >>> input_y = Tensor(np.array([1, 2, 4]), mindspore.int32)
  1952. >>> equal_count = P.EqualCount()
  1953. >>> output = equal_count(input_x, input_y)
  1954. >>> print(output)
  1955. [2]
  1956. """
  1957. @prim_attr_register
  1958. def __init__(self):
  1959. """Initialize EqualCount"""
  1960. self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
  1961. def infer_shape(self, x_shape, y_shape):
  1962. validator.check("x_shape", x_shape, "y_shape", y_shape, Rel.EQ, self.name)
  1963. output_shape = (1,)
  1964. return output_shape
  1965. def infer_dtype(self, x_dtype, y_dtype):
  1966. args = {'x': x_dtype, 'y': y_dtype}
  1967. validator.check_tensors_dtypes_same_and_valid(args, mstype.number_type + (mstype.bool_,), self.name)
  1968. return x_dtype
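# One-line NumPy analogue of EqualCount (an illustrative assumption):
#
#     x = np.array([1, 2, 3], np.int32)
#     y = np.array([1, 2, 4], np.int32)
#     np.array([np.count_nonzero(x == y)])   # -> [2]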
  1969. class NotEqual(_LogicBinaryOp):
  1970. """
  1971. Computes the non-equivalence of two tensors element-wise.
  1972. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  1973. The inputs must be two tensors or one tensor and one scalar.
  1974. When the inputs are two tensors, the shapes of them could be broadcast.
  1975. When the inputs are one tensor and one scalar, the scalar could only be a constant.
  1976. Inputs:
  1977. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  1978. a bool or a tensor whose data type is number or bool.
  1979. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  1980. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  1981. Outputs:
1982. Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
  1983. Examples:
  1984. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32)
  1985. >>> not_equal = P.NotEqual()
  1986. >>> output = not_equal(input_x, 2.0)
  1987. >>> print(output)
  1988. [ True False True]
  1989. >>>
  1990. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  1991. >>> input_y = Tensor(np.array([1, 2, 4]), mindspore.int32)
  1992. >>> not_equal = P.NotEqual()
  1993. >>> output = not_equal(input_x, input_y)
  1994. >>> print(output)
  1995. [False False True]
  1996. """
  1997. def infer_dtype(self, x_dtype, y_dtype):
  1998. return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type + (mstype.bool_,), self.name)
  1999. class Greater(_LogicBinaryOp):
  2000. """
  2001. Computes the boolean value of :math:`x > y` element-wise.
  2002. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  2003. The inputs must be two tensors or one tensor and one scalar.
  2004. When the inputs are two tensors,
  2005. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  2006. When the inputs are one tensor and one scalar,
  2007. the scalar could only be a constant.
  2008. Inputs:
  2009. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  2010. a bool or a tensor whose data type is number or bool.
  2011. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  2012. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  2013. Outputs:
2014. Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
  2015. Examples:
  2016. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  2017. >>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
  2018. >>> greater = P.Greater()
  2019. >>> output = greater(input_x, input_y)
  2020. >>> print(output)
  2021. [False True False]
  2022. """
  2023. def infer_value(self, x, y):
  2024. if x is not None and y is not None:
  2025. x = x.asnumpy()
  2026. y = y.asnumpy()
  2027. out = np.array(np.greater(x, y))
  2028. return Tensor(out)
  2029. return None
  2030. class GreaterEqual(_LogicBinaryOp):
  2031. """
  2032. Computes the boolean value of :math:`x >= y` element-wise.
  2033. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  2034. The inputs must be two tensors or one tensor and one scalar.
  2035. When the inputs are two tensors,
  2036. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  2037. When the inputs are one tensor and one scalar,
  2038. the scalar could only be a constant.
  2039. Inputs:
  2040. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  2041. a bool or a tensor whose data type is number or bool.
  2042. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  2043. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  2044. Outputs:
2045. Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
  2046. Examples:
  2047. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  2048. >>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
  2049. >>> greater_equal = P.GreaterEqual()
  2050. >>> output = greater_equal(input_x, input_y)
  2051. >>> print(output)
  2052. [ True True False]
  2053. """
  2054. def infer_value(self, x, y):
  2055. if x is not None and y is not None:
  2056. x = x.asnumpy()
  2057. y = y.asnumpy()
  2058. out = np.array(np.greater_equal(x, y))
  2059. return Tensor(out)
  2060. return None
  2061. class Less(_LogicBinaryOp):
  2062. """
  2063. Computes the boolean value of :math:`x < y` element-wise.
  2064. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  2065. The inputs must be two tensors or one tensor and one scalar.
  2066. When the inputs are two tensors,
  2067. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  2068. When the inputs are one tensor and one scalar,
  2069. the scalar could only be a constant.
  2070. Inputs:
  2071. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  2072. a bool or a tensor whose data type is number or bool.
  2073. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  2074. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  2075. Outputs:
2076. Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
  2077. Examples:
  2078. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  2079. >>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
  2080. >>> less = P.Less()
  2081. >>> output = less(input_x, input_y)
  2082. >>> print(output)
  2083. [False False True]
  2084. """
  2085. def infer_value(self, x, y):
  2086. if x is not None and y is not None:
  2087. x = x.asnumpy()
  2088. y = y.asnumpy()
  2089. out = np.array(np.less(x, y))
  2090. return Tensor(out)
  2091. return None
  2092. class LessEqual(_LogicBinaryOp):
  2093. """
  2094. Computes the boolean value of :math:`x <= y` element-wise.
  2095. Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
  2096. The inputs must be two tensors or one tensor and one scalar.
  2097. When the inputs are two tensors,
2098. dtypes of them cannot be both bool, and the shapes of them could be broadcast.
  2099. When the inputs are one tensor and one scalar,
  2100. the scalar could only be a constant.
  2101. Inputs:
  2102. - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
  2103. a bool or a tensor whose data type is number or bool.
  2104. - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
  2105. a bool when the first input is a tensor or a tensor whose data type is number or bool.
  2106. Outputs:
2107. Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
  2108. Examples:
  2109. >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
  2110. >>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
  2111. >>> less_equal = P.LessEqual()
  2112. >>> output = less_equal(input_x, input_y)
  2113. >>> print(output)
  2114. [ True False True]
  2115. """
  2116. def infer_value(self, x, y):
  2117. if x is not None and y is not None:
  2118. x = x.asnumpy()
  2119. y = y.asnumpy()
  2120. out = np.array(np.less_equal(x, y))
  2121. return Tensor(out)
  2122. return None
class LogicalNot(PrimitiveWithInfer):
    """
    Computes the "logical NOT" of a tensor element-wise.

    Inputs:
        - **input_x** (Tensor) - The input tensor whose dtype is bool.

    Outputs:
        Tensor, the shape is the same as the `input_x`, and the dtype is bool.

    Examples:
        >>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_)
        >>> logical_not = P.LogicalNot()
        >>> output = logical_not(input_x)
        >>> print(output)
        [False True False]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize LogicalNot"""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        validator.check_tensor_dtype_valid("x", x_dtype, [mstype.bool_], self.name)
        return mstype.tensor_type(mstype.bool_)
class LogicalAnd(_LogicBinaryOp):
    """
    Computes the "logical AND" of two tensors element-wise.

    Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types
    consistent.
    The inputs must be two tensors or one tensor and one bool.
    When the inputs are two tensors, their shapes can be broadcast and their data types must be bool.
    When the inputs are one tensor and one bool, the bool object can only be a constant,
    and the data type of the tensor must be bool.

    Inputs:
        - **input_x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type is bool.
        - **input_y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor, or
          a tensor whose data type is bool.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.

    Examples:
        >>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_)
        >>> input_y = Tensor(np.array([True, True, False]), mindspore.bool_)
        >>> logical_and = P.LogicalAnd()
        >>> output = logical_and(input_x, input_y)
        >>> print(output)
        [ True False False]
    """

    def infer_dtype(self, x_dtype, y_dtype):
        return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, (mstype.bool_,), self.name)
class LogicalOr(_LogicBinaryOp):
    """
    Computes the "logical OR" of two tensors element-wise.

    Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types
    consistent.
    The inputs must be two tensors or one tensor and one bool.
    When the inputs are two tensors, their shapes can be broadcast and their data types must be bool.
    When the inputs are one tensor and one bool, the bool object can only be a constant,
    and the data type of the tensor must be bool.

    Inputs:
        - **input_x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type is bool.
        - **input_y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor, or
          a tensor whose data type is bool.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.

    Examples:
        >>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_)
        >>> input_y = Tensor(np.array([True, True, False]), mindspore.bool_)
        >>> logical_or = P.LogicalOr()
        >>> output = logical_or(input_x, input_y)
        >>> print(output)
        [ True True True]
    """

    def infer_dtype(self, x_dtype, y_dtype):
        return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, (mstype.bool_,), self.name)
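# A minimal sketch, assuming PyNative mode and the imports used in the examples
# above (this module imported as `mindspore.ops.operations as P`): logical XOR is
# not a primitive here, but it can be composed from the three logical ops.
# `logical_xor` is an illustrative helper name, not part of this module.
#
#     import numpy as np
#     import mindspore
#     from mindspore import Tensor
#     import mindspore.ops.operations as P
#
#     logical_and, logical_or, logical_not = P.LogicalAnd(), P.LogicalOr(), P.LogicalNot()
#
#     def logical_xor(a, b):
#         # a XOR b == (a OR b) AND NOT (a AND b)
#         return logical_and(logical_or(a, b), logical_not(logical_and(a, b)))
#
#     x = Tensor(np.array([True, False, True]), mindspore.bool_)
#     y = Tensor(np.array([True, True, False]), mindspore.bool_)
#     print(logical_xor(x, y))  # [False  True  True]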
class IsNan(PrimitiveWithInfer):
    """
    Determines which elements are NaN for each position.

    Inputs:
        - **input_x** (Tensor) - The input tensor.

    Outputs:
        Tensor, has the same shape as the input, and the dtype is bool.

    Examples:
        >>> is_nan = P.IsNan()
        >>> input_x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
        >>> result = is_nan(input_x)
    """

    @prim_attr_register
    def __init__(self):
        """Initialize IsNan"""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        return mstype.bool_
class IsInf(PrimitiveWithInfer):
    """
    Determines which elements are inf or -inf for each position.

    Inputs:
        - **input_x** (Tensor) - The input tensor.

    Outputs:
        Tensor, has the same shape as the input, and the dtype is bool.

    Examples:
        >>> is_inf = P.IsInf()
        >>> input_x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
        >>> result = is_inf(input_x)
    """

    @prim_attr_register
    def __init__(self):
        """Initialize IsInf"""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        return mstype.bool_
class IsFinite(PrimitiveWithInfer):
    """
    Determines which elements are finite for each position.

    Inputs:
        - **input_x** (Tensor) - The input tensor.

    Outputs:
        Tensor, has the same shape as the input, and the dtype is bool.

    Examples:
        >>> is_finite = P.IsFinite()
        >>> input_x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
        >>> output = is_finite(input_x)
        >>> print(output)
        [False True False]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize IsFinite"""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type + (mstype.bool_,), self.name)
        return mstype.bool_
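# A minimal sketch, assuming PyNative mode and that `P.Select` (a primitive from the
# array operations) is available alongside the usual example imports: it uses
# IsFinite as the condition to replace non-finite entries with zeros. The variable
# names are illustrative.
#
#     import numpy as np
#     import mindspore
#     from mindspore import Tensor
#     import mindspore.ops.operations as P
#
#     x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
#     zeros = Tensor(np.zeros(3), mindspore.float32)
#     cleaned = P.Select()(P.IsFinite()(x), x, zeros)  # keep finite values, zero the rest
#     print(cleaned)  # [0. 1. 0.]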
class FloatStatus(PrimitiveWithInfer):
    """
    Determines whether the elements contain Not a Number (NaN), positive infinity or negative infinity:
    0 for normal, 1 for overflow.

    Inputs:
        - **input_x** (Tensor) - The input tensor. The data type must be float16 or float32.

    Outputs:
        Tensor, has the shape of `(1,)`, and the same dtype as the input, either `mindspore.dtype.float32`
        or `mindspore.dtype.float16`.

    Examples:
        >>> float_status = P.FloatStatus()
        >>> input_x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
        >>> result = float_status(input_x)
        >>> print(result)
        [1.]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize FloatStatus"""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, x_shape):
        return [1]

    def infer_dtype(self, x_dtype):
        validator.check_tensor_dtype_valid('x', x_dtype, [mstype.float32, mstype.float16], self.name)
        return x_dtype
class NPUAllocFloatStatus(PrimitiveWithInfer):
    """
    Allocates a flag to store the overflow status.

    The flag is a tensor whose shape is `(8,)` and data type is `mindspore.dtype.float32`.

    Note:
        For a combined usage example, see `NPUGetFloatStatus`.

    Outputs:
        Tensor, has the shape of `(8,)`.

    Examples:
        >>> alloc_status = P.NPUAllocFloatStatus()
        >>> output = alloc_status()
        >>> print(output)
        [0. 0. 0. 0. 0. 0. 0. 0.]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize NPUAllocFloatStatus"""
        self.add_prim_attr("_side_effect_flag", True)

    def infer_shape(self):
        return [8]

    def infer_dtype(self):
        return mstype.float32
class NPUGetFloatStatus(PrimitiveWithInfer):
    """
    Updates the flag which is the output tensor of `NPUAllocFloatStatus` with the latest overflow status.

    The flag is a tensor whose shape is `(8,)` and data type is `mindspore.dtype.float32`.
    If the sum of the flag equals 0, no overflow has happened. If the sum of the flag is greater than 0,
    an overflow has happened.

    Inputs:
        - **input_x** (Tensor) - The output tensor of `NPUAllocFloatStatus`.
          The data type must be float16 or float32.

    Outputs:
        Tensor, has the same shape as `input_x`. All the elements in the tensor will be zero.

    Examples:
        >>> alloc_status = P.NPUAllocFloatStatus()
        >>> get_status = P.NPUGetFloatStatus()
        >>> init = alloc_status()
        >>> output = get_status(init)
        >>> print(output)
        [0. 0. 0. 0. 0. 0. 0. 0.]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize NPUGetFloatStatus"""
        self.add_prim_attr("_side_effect_flag", True)

    def infer_shape(self, x_shape):
        cls_name = self.name
        validator.check_equal_int(len(x_shape), 1, "len(x_shape)", cls_name)
        validator.check_equal_int(x_shape[0], 8, "x_shape[0]", cls_name)
        return [8]

    def infer_dtype(self, x_dtype):
        validator.check_tensor_dtype_valid('x', x_dtype, [mstype.float16, mstype.float32], self.name)
        return mstype.float32
class NPUClearFloatStatus(PrimitiveWithInfer):
    """
    Clears the flag which stores the overflow status.

    Note:
        The flag is in a register on the `Ascend` device. It will be reset and cannot be reused after
        `NPUClearFloatStatus` is called.
        For a combined usage example, see `NPUGetFloatStatus`.

    Inputs:
        - **input_x** (Tensor) - The output tensor of `NPUAllocFloatStatus`.
          The data type must be float16 or float32.

    Outputs:
        Tensor, has the same shape as `input_x`. All the elements in the tensor will be zero.

    Examples:
        >>> alloc_status = P.NPUAllocFloatStatus()
        >>> get_status = P.NPUGetFloatStatus()
        >>> clear_status = P.NPUClearFloatStatus()
        >>> init = alloc_status()
        >>> flag = get_status(init)
        >>> output = clear_status(init)
        >>> print(output)
        [0. 0. 0. 0. 0. 0. 0. 0.]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize NPUClearFloatStatus"""
        self.add_prim_attr("_side_effect_flag", True)

    def infer_shape(self, x_shape):
        cls_name = self.name
        validator.check_equal_int(len(x_shape), 1, "len(x_shape)", cls_name)
        validator.check_equal_int(x_shape[0], 8, "x_shape[0]", cls_name)
        return [8]

    def infer_dtype(self, x_dtype):
        validator.check_tensor_dtype_valid('x', x_dtype, [mstype.float16, mstype.float32], self.name)
        return mstype.float32
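# A minimal sketch, assuming an Ascend backend, of the usual overflow-check pattern
# built from the three NPU*FloatStatus primitives above. `ReduceSum` and `Greater`
# are other primitives from this module; the computation placeholder and variable
# names are illustrative.
#
#     alloc_status = P.NPUAllocFloatStatus()
#     get_status = P.NPUGetFloatStatus()
#     clear_status = P.NPUClearFloatStatus()
#     reduce_sum = P.ReduceSum(keep_dims=False)
#     greater = P.Greater()
#
#     init = alloc_status()              # flag tensor of shape (8,)
#     # ... run the float16/float32 computation to be monitored here ...
#     _ = get_status(init)               # writes the latest status into `init` as a side effect
#     flag_sum = reduce_sum(init, (0,))  # sum over the 8 status slots
#     overflow = greater(flag_sum, Tensor(0.0, mindspore.float32))  # True if an overflow happened
#     _ = clear_status(init)             # reset the register before the next step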
class Cos(PrimitiveWithInfer):
    """
    Computes cosine of input element-wise.

    Inputs:
        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.

    Outputs:
        Tensor, has the same shape as `input_x`.

    Examples:
        >>> cos = P.Cos()
        >>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
        >>> output = cos(input_x)
        >>> print(output)
        [0.971338 0.67487574 0.95233357 0.9959527 ]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Cos"""

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
        return x_dtype
class ACos(PrimitiveWithInfer):
    """
    Computes arccosine of input element-wise.

    Inputs:
        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.

    Outputs:
        Tensor, has the same shape as `input_x`.

    Examples:
        >>> acos = P.ACos()
        >>> input_x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
        >>> output = acos(input_x)
    """

    @prim_attr_register
    def __init__(self):
        """Initialize ACos"""

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
        return x_dtype
class Sin(PrimitiveWithInfer):
    """
    Computes sine of input element-wise.

    Inputs:
        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.

    Outputs:
        Tensor, has the same shape as `input_x`.

    Examples:
        >>> sin = P.Sin()
        >>> input_x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
        >>> output = sin(input_x)
        >>> print(output)
        [0.5810352 0.27635565 0.41687083 0.5810352 ]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Sin."""

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
        return x_dtype
class Asin(PrimitiveWithInfer):
    """
    Computes arcsine of input element-wise.

    Inputs:
        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.

    Outputs:
        Tensor, has the same shape as `input_x`.

    Examples:
        >>> asin = P.Asin()
        >>> input_x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
        >>> output = asin(input_x)
        >>> print(output)
        [0.8330927 0.04001068 0.30469266 0.59438497]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Asin"""

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
        return x_dtype
class NMSWithMask(PrimitiveWithInfer):
    """
    Selects some bounding boxes in descending order of score.

    Args:
        iou_threshold (float): Specifies the threshold of overlap boxes with respect to
            IOU. Default: 0.5.

    Raises:
        ValueError: If the iou_threshold is not a float number, or if the first dimension
            of the input Tensor is less than or equal to 0, or if the data type of the input
            Tensor is not float16 or float32.

    Inputs:
        - **bboxes** (Tensor) - The shape of tensor is :math:`(N, 5)`. Input bounding boxes.
          `N` is the number of input bounding boxes. Every bounding box
          contains 5 values, the first 4 values are the coordinates of the bounding
          box, and the last value is the score of this bounding box.
          The data type must be float16 or float32.

    Outputs:
        tuple[Tensor], tuple of three tensors, they are selected_boxes, selected_idx and selected_mask.

        - **selected_boxes** (Tensor) - The shape of tensor is :math:`(N, 5)`. The list of bounding boxes
          after non-max suppression calculation.
        - **selected_idx** (Tensor) - The shape of tensor is :math:`(N,)`. The index list of
          valid input bounding boxes.
        - **selected_mask** (Tensor) - The shape of tensor is :math:`(N,)`. A mask list of
          valid output bounding boxes.

    Examples:
        >>> bbox = np.random.rand(128, 5)
        >>> bbox[:, 2] += bbox[:, 0]
        >>> bbox[:, 3] += bbox[:, 1]
        >>> inputs = Tensor(bbox, mindspore.float32)
        >>> nms = P.NMSWithMask(0.5)
        >>> output_boxes, indices, mask = nms(inputs)
    """

    @prim_attr_register
    def __init__(self, iou_threshold=0.5):
        """Initialize NMSWithMask"""
        validator.check_value_type("iou_threshold", iou_threshold, [float], self.name)
        self.init_prim_io_names(inputs=['bboxes'], outputs=['selected_boxes', 'selected_idx', 'selected_mask'])
        self.is_ge = context.get_context("enable_ge")

    def infer_shape(self, bboxes_shape):
        cls_name = self.name
        validator.check_equal_int(len(bboxes_shape), 2, "bboxes rank", cls_name)
        validator.check_positive_int(bboxes_shape[0], "bboxes.shape[0]", cls_name)
        validator.check_equal_int(bboxes_shape[1], 5, "bboxes.shape[1]", cls_name)
        num = bboxes_shape[0]
        return (bboxes_shape, (num,), (num,))

    def infer_dtype(self, bboxes_dtype):
        validator.check_tensor_dtype_valid("bboxes", bboxes_dtype, [mstype.float16, mstype.float32], self.name)
        return (bboxes_dtype, mstype.int32, mstype.bool_)
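# A minimal sketch of keeping only the surviving boxes on the host side; boolean
# indexing through `asnumpy()` is an assumption about the caller's workflow, not
# part of the operator itself.
#
#     import numpy as np
#     import mindspore
#     from mindspore import Tensor
#     import mindspore.ops.operations as P
#
#     bbox = np.random.rand(128, 5).astype(np.float32)
#     bbox[:, 2] += bbox[:, 0]
#     bbox[:, 3] += bbox[:, 1]
#     boxes, idx, mask = P.NMSWithMask(0.5)(Tensor(bbox))
#     kept = boxes.asnumpy()[mask.asnumpy()]  # rows where selected_mask is True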
class Abs(PrimitiveWithInfer):
    """
    Returns the absolute value of a tensor element-wise.

    Inputs:
        - **input_x** (Tensor) - The input tensor. The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.

    Outputs:
        Tensor, has the same shape as the `input_x`.

    Examples:
        >>> input_x = Tensor(np.array([-1.0, 1.0, 0.0]), mindspore.float32)
        >>> abs = P.Abs()
        >>> output = abs(input_x)
        >>> print(output)
        [1. 1. 0.]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Abs"""
        self.init_prim_io_names(inputs=['input_x'], outputs=['output'])

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_type):
        validator.check_tensor_dtype_valid('x', x_type, mstype.number_type, self.name)
        return x_type

    def infer_value(self, x):
        # Constant-fold when the operand is known at compile time.
        if x is not None:
            x = x.asnumpy()
            out = np.array(np.abs(x, dtype=x.dtype))
            return Tensor(out)
        return None
class Sign(PrimitiveWithInfer):
    r"""
    Performs :math:`sign` on the tensor element-wise.

    Note:
        .. math::
            sign(x) = \begin{cases} -1, &if\ x < 0 \cr
            0, &if\ x = 0 \cr
            1, &if\ x > 0\end{cases}

    Inputs:
        - **input_x** (Tensor) - The input tensor.

    Outputs:
        Tensor, has the same shape and type as the `input_x`.

    Examples:
        >>> input_x = Tensor(np.array([[2.0, 0.0, -1.0]]), mindspore.float32)
        >>> sign = P.Sign()
        >>> output = sign(input_x)
        >>> print(output)
        [[ 1. 0. -1.]]
    """

    @prim_attr_register
    def __init__(self):
        pass

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
        return x_dtype
class Round(PrimitiveWithInfer):
    """
    Rounds each element of the tensor to the nearest integer, rounding halfway cases to the nearest
    even integer (half to even).

    Inputs:
        - **input_x** (Tensor) - The input tensor.

    Outputs:
        Tensor, has the same shape and type as the `input_x`.

    Examples:
        >>> input_x = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]), mindspore.float32)
        >>> round = P.Round()
        >>> output = round(input_x)
        >>> print(output)
        [ 1. 2. 2. 2. -4.]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Round"""
        self.init_prim_io_names(inputs=['input_x'], outputs=['output'])

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)
        return x_dtype
class Tan(PrimitiveWithInfer):
    """
    Computes tangent of `input_x` element-wise.

    Inputs:
        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. Data type must be
          float16, float32 or int32.

    Outputs:
        Tensor, has the same shape as `input_x`.

    Examples:
        >>> tan = P.Tan()
        >>> input_x = Tensor(np.array([-1.0, 0.0, 1.0]), mindspore.float32)
        >>> output = tan(input_x)
        >>> print(output)
        [-1.5574081 0. 1.5574081]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Tan"""

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_type):
        valid_dtypes = [mstype.float16, mstype.float32, mstype.int32]
        validator.check_tensor_dtype_valid('x', x_type, valid_dtypes, self.name)
        return x_type
class Atan(PrimitiveWithInfer):
    """
    Computes the trigonometric inverse tangent of the input element-wise.

    Inputs:
        - **input_x** (Tensor) - The input tensor.

    Outputs:
        A Tensor, has the same type as the input.

    Examples:
        >>> input_x = Tensor(np.array([1.047, 0.785]), mindspore.float32)
        >>> tan = P.Tan()
        >>> output_y = tan(input_x)
        >>> atan = P.Atan()
        >>> output = atan(output_y)
        >>> print(output)
        [1.047 0.7850001]
    """

    @prim_attr_register
    def __init__(self):
        pass

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_type):
        validator.check_tensor_dtype_valid('x', x_type, mstype.number_type, self.name)
        return x_type
class Atanh(PrimitiveWithInfer):
    """
    Computes inverse hyperbolic tangent of the input element-wise.

    Inputs:
        - **input_x** (Tensor) - The input tensor.

    Outputs:
        A Tensor, has the same type as the input.

    Examples:
        >>> input_x = Tensor(np.array([1.047, 0.785]), mindspore.float32)
        >>> atanh = P.Atanh()
        >>> output = atanh(input_x)
        >>> print(output)
        [1.8869909 1.058268 ]
    """

    @prim_attr_register
    def __init__(self):
        pass

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_type):
        validator.check_tensor_dtype_valid('x', x_type, mstype.number_type, self.name)
        return x_type
class Atan2(_MathBinaryOp):
    r"""
    Returns the arctangent of input_x/input_y element-wise.

    It returns :math:`\theta\ \in\ [-\pi, \pi]`
    such that :math:`x = r*\sin(\theta), y = r*\cos(\theta)`, where :math:`r = \sqrt{x^2 + y^2}`.

    Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types
    consistent.
    If they have different data types, the lower-priority data type will be converted to the relatively
    highest-priority data type.
    A RuntimeError exception will be thrown when data type conversion of Parameter is required.

    Inputs:
        - **input_x** (Tensor) - The input tensor.
        - **input_y** (Tensor) - The input tensor.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting, and the data type is the same as `input_x`.

    Examples:
        >>> input_x = Tensor(np.array([0, 1]), mindspore.float32)
        >>> input_y = Tensor(np.array([1, 1]), mindspore.float32)
        >>> atan2 = P.Atan2()
        >>> output = atan2(input_x, input_y)
        >>> print(output)
        [0. 0.7853982]
    """
class SquareSumAll(PrimitiveWithInfer):
    """
    Returns the sum of squares of all elements in each input tensor.

    Inputs:
        - **input_x1** (Tensor) - The input tensor. The data type must be float16 or float32.
        - **input_x2** (Tensor) - The input tensor with the same type and shape as `input_x1`.

    Note:
        SquareSumAll only supports float16 and float32 data types.

    Outputs:
        - **output_y1** (Tensor) - The same type as the `input_x1`.
        - **output_y2** (Tensor) - The same type as the `input_x1`.

    Examples:
        >>> input_x1 = Tensor(np.array([0, 0, 2, 0]), mindspore.float32)
        >>> input_x2 = Tensor(np.array([0, 0, 2, 4]), mindspore.float32)
        >>> square_sum_all = P.SquareSumAll()
        >>> output = square_sum_all(input_x1, input_x2)
        >>> print(output)
        (Tensor(shape=[], dtype=Float32, value= 4),
        Tensor(shape=[], dtype=Float32, value= 20))
    """

    @prim_attr_register
    def __init__(self):
        """Initialize SquareSumAll"""

    def infer_shape(self, x_shape, y_shape):
        validator.check("x1_shape", x_shape, "x2_shape", y_shape, Rel.EQ, self.name)
        return [], []

    def infer_dtype(self, x_type, y_type):
        valid_types = (mstype.float16, mstype.float32)
        validator.check_tensor_dtype_valid('x1_type', x_type, valid_types, self.name)
        validator.check_tensor_dtype_valid('x2_type', y_type, valid_types, self.name)
        return x_type, y_type
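# A quick NumPy cross-check of the example above: each output is the sum of
# squares of all elements of one input.
#
#     import numpy as np
#     a = np.array([0., 0., 2., 0.], np.float32)
#     b = np.array([0., 0., 2., 4.], np.float32)
#     (a ** 2).sum(), (b ** 2).sum()  # -> (4.0, 20.0)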
class BitwiseAnd(_BitwiseBinaryOp):
    """
    Returns bitwise `and` of two tensors element-wise.

    Inputs of `input_x1` and `input_x2` comply with the implicit type conversion rules to
    make the data types consistent.
    If they have different data types, the lower-priority data type will be converted to the relatively
    highest-priority data type.
    A RuntimeError exception will be thrown when data type conversion of Parameter is required.

    Inputs:
        - **input_x1** (Tensor) - The input tensor with int16, int32 or uint16 data type.
        - **input_x2** (Tensor) - The input tensor with the same type as `input_x1`.

    Outputs:
        Tensor, has the same type as the `input_x1`.

    Examples:
        >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
        >>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
        >>> bitwise_and = P.BitwiseAnd()
        >>> output = bitwise_and(input_x1, input_x2)
        >>> print(output)
        [ 0 0 1 -1 1 0 1]
    """
class BitwiseOr(_BitwiseBinaryOp):
    """
    Returns bitwise `or` of two tensors element-wise.

    Inputs of `input_x1` and `input_x2` comply with the implicit type conversion rules to
    make the data types consistent.
    If they have different data types, the lower-priority data type will be converted to the relatively
    highest-priority data type.
    A RuntimeError exception will be thrown when data type conversion of Parameter is required.

    Inputs:
        - **input_x1** (Tensor) - The input tensor with int16, int32 or uint16 data type.
        - **input_x2** (Tensor) - The input tensor with the same type as `input_x1`.

    Outputs:
        Tensor, has the same type as the `input_x1`.

    Examples:
        >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
        >>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
        >>> bitwise_or = P.BitwiseOr()
        >>> output = bitwise_or(input_x1, input_x2)
        >>> print(output)
        [ 0 1 1 -1 -1 3 3]
    """
class BitwiseXor(_BitwiseBinaryOp):
    """
    Returns bitwise `xor` of two tensors element-wise.

    Inputs of `input_x1` and `input_x2` comply with the implicit type conversion rules to
    make the data types consistent.
    If they have different data types, the lower-priority data type will be converted to the relatively
    highest-priority data type.
    A RuntimeError exception will be thrown when data type conversion of Parameter is required.

    Inputs:
        - **input_x1** (Tensor) - The input tensor with int16, int32 or uint16 data type.
        - **input_x2** (Tensor) - The input tensor with the same type as `input_x1`.

    Outputs:
        Tensor, has the same type as the `input_x1`.

    Examples:
        >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
        >>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
        >>> bitwise_xor = P.BitwiseXor()
        >>> output = bitwise_xor(input_x1, input_x2)
        >>> print(output)
        [ 0 1 0 0 -2 3 2]
    """
class BesselI0e(PrimitiveWithInfer):
    """
    Computes BesselI0e of input element-wise.

    Inputs:
        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. Data type must be float16 or
          float32.

    Outputs:
        Tensor, has the same shape as `input_x`.

    Examples:
        >>> bessel_i0e = P.BesselI0e()
        >>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
        >>> output = bessel_i0e(input_x)
        >>> print(output)
        [0.7979961 0.5144438 0.75117415 0.9157829 ]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize BesselI0e"""

    def infer_shape(self, x):
        return x

    def infer_dtype(self, x):
        validator.check_tensor_dtype_valid('x', x, mstype.number_type, self.name)
        return x
class BesselI1e(PrimitiveWithInfer):
    """
    Computes BesselI1e of input element-wise.

    Inputs:
        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. Data type must be float16 or
          float32.

    Outputs:
        Tensor, has the same shape as `input_x`.

    Examples:
        >>> bessel_i1e = P.BesselI1e()
        >>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
        >>> output = bessel_i1e(input_x)
        >>> print(output)
        [0.09507662 0.19699717 0.11505538 0.04116856]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize BesselI1e"""

    def infer_shape(self, x):
        return x

    def infer_dtype(self, x):
        validator.check_tensor_dtype_valid('x', x, mstype.number_type, self.name)
        return x
class Inv(PrimitiveWithInfer):
    """
    Computes Inv (the reciprocal) of the input tensor element-wise.

    Inputs:
        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
          Must be one of the following types: float16, float32, int32.

    Outputs:
        Tensor, has the same shape and data type as `input_x`.

    Examples:
        >>> inv = P.Inv()
        >>> input_x = Tensor(np.array([0.25, 0.4, 0.31, 0.52]), mindspore.float32)
        >>> output = inv(input_x)
        >>> print(output)
        [4. 2.5 3.2258065 1.923077 ]
    """

    @prim_attr_register
    def __init__(self):
        pass

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        validator.check_tensor_dtype_valid('x_dtype', x_dtype,
                                           [mstype.float16, mstype.float32, mstype.int32], self.name)
        return x_dtype
class Invert(PrimitiveWithInfer):
    """
    Flips all bits of the input tensor element-wise.

    Inputs:
        - **input_x** (Tensor[int16], Tensor[uint16]) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.

    Outputs:
        Tensor, has the same shape as `input_x`.

    Examples:
        >>> invert = P.Invert()
        >>> input_x = Tensor(np.array([25, 4, 13, 9]), mindspore.int16)
        >>> output = invert(input_x)
        >>> print(output)
        [-26 -5 -14 -10]
    """

    @prim_attr_register
    def __init__(self):
        pass

    def infer_shape(self, x_shape):
        return x_shape

    def infer_dtype(self, x_dtype):
        validator.check_tensor_dtype_valid('x_dtype', x_dtype, [mstype.int16, mstype.uint16], self.name)
        return x_dtype
class Eps(PrimitiveWithInfer):
    """
    Creates a tensor of the same shape as `input_x`, filled with a small positive value determined by its
    dtype (:math:`2^{-14}` for float16, :math:`2^{-16}` for float32).

    Inputs:
        - **input_x** (Tensor) - Input tensor. The data type must be float16 or float32.

    Outputs:
        Tensor, has the same type and shape as `input_x`, but filled with the dtype-dependent minimum value.

    Examples:
        >>> input_x = Tensor([4, 1, 2, 3], mindspore.float32)
        >>> output = P.Eps()(input_x)
        >>> print(output)
        [1.5258789e-05 1.5258789e-05 1.5258789e-05 1.5258789e-05]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Eps"""
        self.init_prim_io_names(inputs=['input_x'], outputs=['y'])

    def __infer__(self, input_x):
        valid_dtypes = [mstype.float16, mstype.float32]
        validator.check_tensor_dtype_valid('input_x', input_x['dtype'], valid_dtypes, self.name)
        x_nptype = mstype.dtype_to_nptype(input_x['dtype'].element_type())
        # Pick the fill value according to the element dtype.
        if x_nptype == np.float16:
            min_val = 2 ** (-14)
        else:
            min_val = 2 ** (-16)
        res = np.full(input_x['shape'], min_val, x_nptype)
        out = {
            'value': Tensor(res),
            'shape': input_x['shape'],
            'dtype': input_x['dtype'],
        }
        return out
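# A minimal sketch of a common use of Eps: guarding a division against zero
# denominators. `TensorAdd` and `RealDiv` are other primitives from this module;
# the variable names are illustrative.
#
#     import numpy as np
#     import mindspore
#     from mindspore import Tensor
#     import mindspore.ops.operations as P
#
#     numer = Tensor(np.array([1.0, 1.0]), mindspore.float32)
#     denom = Tensor(np.array([0.0, 2.0]), mindspore.float32)
#     safe_denom = P.TensorAdd()(denom, P.Eps()(denom))  # add the dtype's minimum value
#     print(P.RealDiv()(numer, safe_denom))              # finite everywhere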