
math_ops.py 128 kB

# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""math operations, the function docs are adapted from Numpy API."""
import operator
import functools

from ..ops import operations as P
from ..ops import functional as F
from ..ops import composite as C
from ..ops.primitive import constexpr
from ..common import dtype as mstype
from ..common import Tensor

from .dtypes import nan, pi
from .array_creations import asarray_const, ones, zeros, empty, full, full_like
from .array_ops import where as where_
from .array_ops import ravel, expand_dims, moveaxis, concatenate
from .utils_const import _infer_out_shape, _check_axis_valid, _get_device, \
    _check_shape_aligned, _raise_type_error, _check_same_type, _check_is_float, \
    _raise_value_error, _promote, _check_axis_type, _canonicalize_axis, \
    _is_shape_empty, _check_is_int, _expanded_shape, _check_axis_in_range, \
    _check_dtype, _list_comprehensions, _tuple_setitem, _add_unit_axes, _seq_prod, \
    _make_tensor, _promote_for_trigonometric, _raise_runtime_error, _max
from .utils import _expand, _broadcast_to, _broadcast_to_shape, _get_size, \
    _check_input_tensor, _to_tensor, _isnan
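
# Module-level constants and primitive instances, created once at import time
# and reused by the functions below.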
ZERO_TENSOR = asarray_const(0)

_mean_keepdims = P.ReduceMean(True)
_matmul = P.MatMul(False, False)
_matmul_T = P.MatMul(False, True)
_reduce_sum_default = P.ReduceSum()
_reduce_sum_keepdims = P.ReduceSum(True)
_reduce_min_default = P.ReduceMin()
_reduce_min_keepdims = P.ReduceMin(True)
_reduce_max_default = P.ReduceMax()
_reduce_max_keepdims = P.ReduceMax(True)
_cumsum_default = P.CumSum()
_concat = P.Concat(-1)


def absolute(x, dtype=None):
    """
    Calculates the absolute value element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        Currently the backend kernel only supports float calculation; if the input
        is not a `float`, it will be cast to :class:`mstype.float32` and cast back.

    Args:
        x (Tensor): Tensor to be used for calculation.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor.

    Raises:
        TypeError: If input arguments have types not specified above.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.asarray([1, 2, 3, -4, -5], np.float32)
        >>> output = np.absolute(x)
        >>> print(output)
        [1. 2. 3. 4. 5.]
    """
    original_dtype = x.dtype
    if not _check_is_float(original_dtype) and dtype is None:
        x = x.astype(mstype.float32)
        return _apply_tensor_op(F.absolute, x, dtype=dtype).astype(original_dtype)
    return _apply_tensor_op(F.absolute, x, dtype=dtype)


def count_nonzero(x, axis=None, keepdims=False):
    """
    Counts the number of non-zero values in the tensor `x`.

    Args:
        x (Tensor): The tensor for which to count non-zeros.
        axis (Union[int, tuple], optional): Axis or tuple of axes along which to
            count non-zeros. Default is None, meaning that non-zeros will be counted
            along a flattened version of `x`.
        keepdims (bool, optional): If this is set to True, the axes that are counted
            are left in the result as dimensions with size one. With this option,
            the result will broadcast correctly against `x`.

    Returns:
        Tensor, indicating number of non-zero values in the `x` along a given axis.
        Otherwise, the total number of non-zero values in `x` is returned.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.asarray([1, 2, 3, -4, 0, 3, 2, 0])
        >>> output = np.count_nonzero(x)
        >>> print(output)
        6
    """
    if _is_shape_empty(x.shape):
        return ZERO_TENSOR
    if axis is None:
        axis = ()
    return C.count_nonzero(x=x, axis=axis, keep_dims=keepdims)


def clip(x, xmin, xmax, dtype=None):
    """
    Clips (limits) the values in an array.

    Given an interval, values outside the interval are clipped to the interval edges.
    For example, if an interval of :math:`[0, 1]` is specified, values smaller than 0 become 0,
    and values larger than 1 become 1.

    Args:
        x (Tensor): Tensor containing elements to clip.
        xmin (Tensor, scalar, None): Minimum value. If None, clipping is not performed
            on the lower interval edge. Not more than one of `xmin` and `xmax` may be None.
        xmax (Tensor, scalar, None): Maximum value. If None, clipping is not performed
            on the upper interval edge. Not more than one of `xmin` and `xmax` may be None.
            If `xmin` or `xmax` are tensors, then the three tensors will be broadcast
            to match their shapes.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor, a tensor with the elements of `x`, but where values
        < `xmin` are replaced with `xmin`, and those > `xmax` with `xmax`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.asarray([1, 2, 3, -4, 0, 3, 2, 0])
        >>> output = np.clip(x, 0, 2)
        >>> print(output)
        [1 2 2 0 0 2 2 0]
    """
    if xmin is None and xmax is None:
        _raise_value_error("One of max or min must be given.")
    if xmin is not None:
        x = maximum(x, xmin, dtype=dtype)
    if xmax is not None:
        x = minimum(x, xmax, dtype=dtype)
    return x


def deg2rad(x, dtype=None):
    """
    Converts angles from degrees to radians.

    Args:
        x (Tensor): Angles in degrees.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor, the corresponding angle in radians. This is a tensor scalar if `x`
        is a tensor scalar.

    Raises:
        TypeError: if `x` is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.asarray([1, 2, 3, -4, -5])
        >>> output = np.deg2rad(x)
        >>> print(output)
        [ 0.01745329 0.03490658 0.05235988 -0.06981317 -0.08726647]
    """
    _check_input_tensor(x)

    def convert(a):
        return a * pi / 180.0

    return _apply_tensor_op(convert, x, dtype=dtype)


def rad2deg(x, dtype=None):
    """
    Converts angles from radians to degrees.

    Args:
        x (Tensor): Angles in radians.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor, the corresponding angle in degrees. This is a tensor scalar if `x`
        is a tensor scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.asarray([1, 2, 3, -4, -5])
        >>> output = np.rad2deg(x)
        >>> print(output)
        [ 57.295776 114.59155 171.88733 -229.1831 -286.47888 ]
    """
    _check_input_tensor(x)

    def convert(a):
        return a * 180.0 / pi

    return _apply_tensor_op(convert, x, dtype=dtype)


def add(x1, x2, dtype=None):
    """
    Adds arguments element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): input to be added.
        x2 (Tensor): input to be added.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the sum of `x1` and `x2`, element-wise. This is a scalar
        if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.full((3, 2), [1, 2])
        >>> x2 = np.full((3, 2), [3, 4])
        >>> output = np.add(x1, x2)
        >>> print(output)
        [[4, 6],
        [4, 6],
        [4, 6]]
    """
    # broadcast is not fully supported in tensor_add on CPU,
    # so we use tensor_sub as a substitute solution
    if _get_device() == 'CPU':
        _check_input_tensor(x1, x2)
        return subtract(x1, F.neg_tensor(x2), dtype=dtype)
    return _apply_tensor_op(F.tensor_add, x1, x2, dtype=dtype)


def subtract(x1, x2, dtype=None):
    """
    Subtracts arguments, element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): the input to be subtracted from.
        x2 (Tensor): the input to be subtracted by.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the difference of `x1` and `x2`, element-wise. This is a
        scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.full((3, 2), [1, 2])
        >>> x2 = np.full((3, 2), [3, 4])
        >>> output = np.subtract(x1, x2)
        >>> print(output)
        [[-2, -2],
        [-2, -2],
        [-2, -2]]
    """
    return _apply_tensor_op(F.tensor_sub, x1, x2, dtype=dtype)


def multiply(x1, x2, dtype=None):
    """
    Multiplies arguments element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): input tensor to be multiplied.
        x2 (Tensor): input tensor to be multiplied.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the product of `x1` and `x2`, element-wise. This is a scalar
        if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.full((3, 2), [1, 2])
        >>> x2 = np.full((3, 2), [3, 4])
        >>> output = np.multiply(x1, x2)
        >>> print(output)
        [[3, 8],
        [3, 8],
        [3, 8]]
    """
    if _get_device() == 'CPU':
        _check_input_tensor(x1, x2)
        # broadcast is not fully supported on CPU backend,
        # and explicit broadcasting is performed
        shape_out = _infer_out_shape(F.shape(x1), F.shape(x2))
        x1 = _broadcast_to_shape(x1, shape_out)
        x2 = _broadcast_to_shape(x2, shape_out)
    return _apply_tensor_op(F.tensor_mul, x1, x2, dtype=dtype)


def divide(x1, x2, dtype=None):
    """
    Returns a true division of the inputs, element-wise.

    Instead of the Python traditional 'floor division', this returns a true
    division.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): the dividend.
        x2 (Tensor): the divisor.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, this is a scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.full((3, 2), [1, 2])
        >>> x2 = np.full((3, 2), [3, 4])
        >>> output = np.divide(x1, x2)
        >>> print(output)
        [[0.33333333, 0.5],
        [0.33333333, 0.5],
        [0.33333333, 0.5]]
    """
    if not _check_is_float(F.dtype(x1)) and not _check_is_float(F.dtype(x2)):
        x1 = F.cast(x1, mstype.float32)
        x2 = F.cast(x2, mstype.float32)
    return _apply_tensor_op(F.tensor_div, x1, x2, dtype=dtype)


def true_divide(x1, x2, dtype=None):
    """
    Returns a true division of the inputs, element-wise.

    Instead of the Python traditional 'floor division', this returns a true
    division.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): the dividend.
        x2 (Tensor): the divisor.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, this is a scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.full((3, 2), [1, 2])
        >>> x2 = np.full((3, 2), [3, 4])
        >>> output = np.true_divide(x1, x2)
        >>> print(output)
        [[0.33333333, 0.5],
        [0.33333333, 0.5],
        [0.33333333, 0.5]]
    """
    return divide(x1, x2, dtype=dtype)


def power(x1, x2, dtype=None):
    """
    First array elements raised to powers from second array, element-wise.

    Raises each base in `x1` to the positionally-corresponding power in `x2`.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.

    Args:
        x1 (Tensor): the bases.
        x2 (Tensor): the exponents.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the bases in `x1` raised to the exponents in `x2`. This
        is a scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.full((3, 2), [1, 2]).astype('float32')
        >>> x2 = np.full((3, 2), [3, 4]).astype('float32')
        >>> output = np.power(x1, x2)
        >>> print(output)
        [[ 1, 16],
        [ 1, 16],
        [ 1, 16]]
    """
    return _apply_tensor_op(F.tensor_pow, x1, x2, dtype=dtype)


def float_power(x1, x2, dtype=None):
    """
    First array elements raised to powers from second array, element-wise.

    Raise each base in `x1` to the positionally-corresponding power in `x2`. `x1` and
    `x2` must be broadcastable to the same shape. This differs from the power
    function in that integers, float16, and float64 are promoted to floats with
    a minimum precision of float32 so that the result is always inexact. The
    intent is that the function will return a usable result for negative powers
    and seldom overflow for positive powers.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        Integers and floats are promoted to float32 instead of float64.

    Args:
        x1 (Tensor): the bases.
        x2 (Tensor): the exponents.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the bases in `x1` raised to the exponents in `x2`. This
        is a scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.arange(6)
        >>> x2 = np.array(3)
        >>> output = np.float_power(x1, x2)
        >>> print(output)
        [ 0. 1. 8. 27. 64. 125.]
    """
    if not _check_same_type(F.dtype(x1), mstype.float32):
        x1 = F.cast(x1, mstype.float32)
    if not _check_same_type(F.dtype(x2), mstype.float32):
        x2 = F.cast(x2, mstype.float32)
    return _apply_tensor_op(F.tensor_pow, x1, x2, dtype=dtype)


def minimum(x1, x2, dtype=None):
    """
    Element-wise minimum of tensor elements.

    Compares two tensors and returns a new tensor containing the element-wise minima.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On Ascend, input arrays containing inf or NaN are not supported.

    Args:
        x1 (Tensor): first input tensor to be compared.
        x2 (Tensor): second input tensor to be compared.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor, element-wise minimum of `x1` and `x2`.

    Raises:
        TypeError: If inputs have types not specified above.
        ValueError: If the shapes of `x1` and `x2` cannot be broadcast.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.asarray([1, 2])
        >>> b = np.asarray([[1, 3],[1, 4]])
        >>> print(np.minimum(a, b))
        [[1 2]
         [1 2]]
    """
    if isinstance(x1, (int, float, bool, list, tuple)):
        x1 = asarray_const(x1)
    elif not isinstance(x1, Tensor):
        _raise_type_error("Input x1 is expected to be array_like")
    if isinstance(x2, (int, float, bool, list, tuple)):
        x2 = asarray_const(x2)
    elif not isinstance(x2, Tensor):
        _raise_type_error("Input x2 is expected to be array_like")
    # if both are scalars, expand x1 to 1d tensor, since cpu kernel doesn't support
    # comparisons with 2 scalars
    if x1.ndim == 0 and x2.ndim == 0:
        x1 = expand_dims(x1, 0)
        return _apply_tensor_op(functools.partial(_prop_nan, F.minimum), x1, x2, dtype=dtype).squeeze()
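    # When exactly one operand is a 0-d scalar, keep the dtype of the tensor
    # operand for the result.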
    if x1.ndim == 0:
        dtype = x2.dtype
    elif x2.ndim == 0:
        dtype = x1.dtype
    return _apply_tensor_op(functools.partial(_prop_nan, F.minimum), x1, x2, dtype=dtype)


def mean(a, axis=None, keepdims=False, dtype=None):
    """
    Computes the arithmetic mean along the specified axis.

    Returns the average of the array elements. The average is taken
    over the flattened array by default, otherwise over the specified
    axis.

    Note:
        Numpy argument `out` is not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.

    Args:
        a (Tensor): input tensor containing numbers whose mean is desired.
            If `a` is not an array, a conversion is attempted.
        axis (None or int or tuple of ints, optional): Axis or axes along
            which the means are computed. The default is to compute
            the mean of the flattened array. If this is a tuple of
            ints, a mean is performed over multiple axes.
        keepdims (bool, optional): If this is set to True, the axes which
            are reduced are left in the result as dimensions with
            size one. With this option, the result will broadcast
            correctly against the input tensor.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, an array containing the mean values.

    Raises:
        ValueError: if axes are out of the range of ``[-a.ndim, a.ndim)``, or
            if the axes contain duplicates.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.arange(6, dtype='float32')
        >>> output = np.mean(a, 0)
        >>> print(output)
        2.5
    """
    return _reduce(a, P.ReduceMean(keepdims), axis=axis, keepdims=keepdims, dtype=dtype)


def inner(a, b):
    """
    Returns the inner product of two tensors.

    Ordinary inner product of vectors for 1-D tensors (without complex
    conjugation), in higher dimensions a sum product over the last
    axes.

    Note:
        Numpy argument `out` is not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.
        On CPU, the supported dtypes are np.float16, np.float32, and
        np.float64.

    Args:
        a (Tensor): input tensor. If `a` and `b` are nonscalar, their last
            dimensions must match.
        b (Tensor): input tensor. If `a` and `b` are nonscalar, their last
            dimensions must match.

    Returns:
        Tensor or scalar.

    Raises:
        ValueError: if ``x1.shape[-1] != x2.shape[-1]``.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.ones((5, 3))
        >>> b = np.ones((2, 7, 3))
        >>> output = np.inner(a, b)
        >>> print(output)
        [[[3. 3. 3. 3. 3. 3. 3.]
          [3. 3. 3. 3. 3. 3. 3.]]
         [[3. 3. 3. 3. 3. 3. 3.]
          [3. 3. 3. 3. 3. 3. 3.]]
         [[3. 3. 3. 3. 3. 3. 3.]
          [3. 3. 3. 3. 3. 3. 3.]]
         [[3. 3. 3. 3. 3. 3. 3.]
          [3. 3. 3. 3. 3. 3. 3.]]
         [[3. 3. 3. 3. 3. 3. 3.]
          [3. 3. 3. 3. 3. 3. 3.]]]
    """
    if F.rank(a) == 0 or F.rank(b) == 0:
        return F.tensor_mul(a, b)

    _check_shape_aligned(F.shape(a), F.shape(b))
    aligned_shape_a = (F.shape_mul(F.shape(a)[:-1]), F.shape(a)[-1])
    aligned_shape_b = (F.shape_mul(F.shape(b)[:-1]), F.shape(a)[-1])
    a_aligned = F.reshape(a, aligned_shape_a)
    b_aligned = F.reshape(b, aligned_shape_b)

    res = _matmul_T(a_aligned, b_aligned)
    res = F.reshape(res, F.shape(a)[:-1] + F.shape(b)[:-1])
    return res


def dot(a, b):
    """
    Returns the dot product of two arrays.

    Specifically,
    If both `a` and `b` are 1-D arrays, it is inner product of vectors
    (without complex conjugation).
    If both `a` and `b` are 2-D arrays, it is matrix multiplication.
    If either `a` or `b` is 0-D (scalar), it is equivalent to multiply.
    If `a` is an `N-D` array and `b` is a 1-D array, it is a sum product
    over the last axis of `a` and `b`.
    If `a` is an `N-D` array and `b` is an `M-D` array (where ``M>=2``), it is a
    sum product over the last axis of `a` and the second-to-last axis of `b`:
    ``dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])``

    Note:
        Numpy argument `out` is not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.
        On CPU, the supported dtypes are np.float16, np.float32, and
        np.float64.

    Args:
        a (Tensor): input tensor.
        b (Tensor): input tensor.

    Returns:
        Tensor or scalar, the dot product of `a` and `b`. If `a` and `b` are
        both scalars or both 1-D arrays then a scalar is returned;
        otherwise an array is returned.

    Raises:
        ValueError: If the last dimension of `a` is not the same size
            as the second-to-last dimension of `b`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.full((1, 3), 7).astype('float32')
        >>> b = np.full((2, 3, 4), 5).astype('float32')
        >>> output = np.dot(a, b)
        >>> print(output)
        [[[105, 105, 105, 105],
        [105, 105, 105, 105]]]
    """
    ndim_a, ndim_b = F.rank(a), F.rank(b)
    if ndim_a > 0 and ndim_b >= 2:
        perm = F.make_range(ndim_b)
        perm = perm[:-2] + (perm[-1],) + (perm[-2],)
        b = F.transpose(b, perm)
    return inner(a, b)


def outer(a, b):
    """
    Computes the outer product of two vectors.

    Given two vectors, ``a = [a0, a1, ..., aM]`` and ``b = [b0, b1, ..., bN]``,
    the outer product is:

    ``[[a0*b0  a0*b1 ... a0*bN ]``

    ``[a1*b0    .             ]``

    ``[ ...          .        ]``

    ``[aM*b0            aM*bN ]]``

    Note:
        Numpy argument ``out`` is not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.
        On CPU, the supported dtypes are np.float16, np.float32, and
        np.float64.

    Args:
        a (Tensor): first input vector. Input is flattened if not
            already 1-dimensional.
        b (Tensor): second input vector. Input is flattened if not
            already 1-dimensional.

    Returns:
        Tensor or scalar, ``out[i, j] = a[i] * b[j]``.

    Raises:
        TypeError: if the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.full(7, 2).astype('float32')
        >>> b = np.full(4, 3).astype('float32')
        >>> output = np.outer(a, b)
        >>> print(output)
        [[6, 6, 6, 6],
        [6, 6, 6, 6],
        [6, 6, 6, 6],
        [6, 6, 6, 6],
        [6, 6, 6, 6],
        [6, 6, 6, 6],
        [6, 6, 6, 6]]
    """
    _check_input_tensor(a, b)
    if F.rank(a) != 1:
        a = ravel(a)
    if F.rank(b) != 1:
        b = ravel(b)
    a = F.reshape(a, (F.shape(a)[0], 1))
    b = _expand(b, 2)
    return _matmul(a, b)


def tensordot(a, b, axes=2):
    """
    Computes tensor dot product along specified axes.

    Given two tensors, `a` and `b`, and an array_like object containing two array_like
    objects, ``(a_axes, b_axes)``, sums the products of `a`'s and `b`'s elements (components)
    over the axes specified by `a_axes` and `b_axes`. The third argument can be a single
    non-negative integer_like scalar, `N`; if it is such, then the last `N` dimensions of
    `a` and the first `N` dimensions of `b` are summed over.
    Three common use cases are:

    - ``axes = 0`` : tensor product
    - ``axes = 1`` : tensor dot product
    - ``axes = 2`` : (default) tensor double contraction

    When `axes` is integer_like, the sequence for evaluation will be: first the `-Nth`
    axis in `a` and 0th axis in `b`, and the -1th axis in `a` and `Nth` axis in `b` last.
    When there is more than one axis to sum over - and they are not the last (first)
    axes of `a` (`b`) - the argument `axes` should consist of two sequences of the same
    length, with the first axis to sum over given first in both sequences, the second
    axis second, and so forth.
    The shape of the result consists of the non-contracted axes of the first tensor,
    followed by the non-contracted axes of the second.

    Note:
        On CPU, the supported dtypes are np.float16 and np.float32.
        On GPU, the supported dtypes are np.float16 and np.float32.

    Args:
        a (Tensor): Tensor to "dot".
        b (Tensor): Tensor to "dot".
        axes (int or sequence of ints):
            integer_like: If an int `N`, sum over the last `N` axes of `a` and the first `N`
            axes of `b` in order. The sizes of the corresponding axes must match.
            sequence of ints: Or, a list of axes to be summed over, first sequence
            applying to `a`, second to `b`. Both elements `array_like` must be of the same
            length.

    Returns:
        Tensor, or list of tensors, the tensor dot product of the input.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.ones((3, 4, 5))
        >>> b = np.ones((4, 3, 2))
        >>> output = np.tensordot(a, b, axes=([1,0],[0,1]))
        >>> print(output.shape)
        (5, 2)
    """
    if F.rank(a) * F.rank(b) == 0 and axes == 0:
        return F.tensor_mul(a, b)
    return C.tensor_dot(a, b, axes)


def std(x, axis=None, ddof=0, keepdims=False):
    """
    Computes the standard deviation along the specified axis.

    The standard deviation is the square root of the average of the squared deviations
    from the mean, i.e., :math:`std = sqrt(mean(abs(x - x.mean())**2))`.

    Returns the standard deviation, which is computed for the flattened array by default,
    otherwise over the specified axis.

    Note:
        Numpy arguments `dtype` and `out` are not supported.

    Args:
        x (Tensor): A Tensor to be calculated.
        axis (Union[None, int, tuple(int)]): Axis or axes along which the standard
            deviation is computed. Default: `None`.
            If `None`, compute the standard deviation of the flattened array.
        ddof (int): Means Delta Degrees of Freedom. The divisor used in calculations is :math:`N - ddof`,
            where :math:`N` represents the number of elements. Default: 0.
        keepdims (bool): Default: `False`.

    Returns:
        Standard deviation tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> input_x = np.array([1., 2., 3., 4.])
        >>> output = np.std(input_x)
        >>> print(output)
        1.118034
    """
    if _is_shape_empty(x.shape):
        return full((), nan, F.dtype(x))

    if not isinstance(ddof, int):
        _raise_type_error("integer argument expected, but got ", ddof)
    if not isinstance(keepdims, int):
        _raise_type_error("integer argument expected, but got ", keepdims)
    if axis is None:
        axis = ()
    else:
        _check_axis_type(axis, True, True, False)
        axis = _canonicalize_axis(axis, x.ndim)
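    # Two-pass computation: subtract the mean (kept-dims so it broadcasts),
    # square the deviations, reduce-sum them, then divide by N - ddof and take
    # the square root.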
    x_mean = _mean_keepdims(x, axis)
    x_sub = F.tensor_sub(x, x_mean)
    x_pow = F.tensor_pow(x_sub, 2)
    if keepdims:
        x_sum = _reduce_sum_keepdims(x_pow, axis)
    else:
        x_sum = _reduce_sum_default(x_pow, axis)

    if isinstance(axis, int):
        nums = x.shape[axis]
    else:
        nums = _get_size(x, axis)

    x_std = F.tensor_pow(F.tensor_div(x_sum, nums - ddof), 0.5)
    return x_std


def var(x, axis=None, ddof=0, keepdims=False):
    """
    Computes the variance along the specified axis.

    The variance is the average of the squared deviations from the mean, i.e.,
    :math:`var = mean(abs(x - x.mean())**2)`.

    Returns the variance, which is computed for the flattened array by default,
    otherwise over the specified axis.

    Note:
        Numpy arguments `dtype` and `out` are not supported.

    Args:
        x (Tensor): A Tensor to be calculated.
        axis (Union[None, int, tuple(int)]): Axis or axes along which the variance is computed.
            The default is to compute the variance of the flattened array. Default: `None`.
        ddof (int): Means Delta Degrees of Freedom. Default: 0.
            The divisor used in calculations is :math:`N - ddof`, where :math:`N` represents the
            number of elements.
        keepdims (bool): Default: `False`.

    Returns:
        Variance tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> input_x = np.array([1., 2., 3., 4.])
        >>> output = np.var(input_x)
        >>> print(output)
        1.25
    """
    if _is_shape_empty(x.shape):
        return full((), nan, F.dtype(x))
    x_std = std(x, axis, ddof, keepdims)
    return F.tensor_pow(x_std, 2)


def ptp(x, axis=None, keepdims=False):
    """
    Range of values (maximum - minimum) along an axis.

    The name of the function comes from the acronym for 'peak to peak'.

    Note:
        Numpy arguments `dtype` and `out` are not supported.

    Args:
        x (Tensor): Input tensor.
        axis (Union[None, int, tuple(int)]): Axis or axes along which the range is computed.
            The default is to compute the range of the flattened array. Default: None.
        keepdims (bool): Default is False.

    Returns:
        Tensor.

    Raises:
        TypeError: if inputs have types not specified above.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([[4.0, 9.0, 2.0, 10.0], [6.0, 9.0, 7.0, 12.0]])
        >>> print(np.ptp(x, axis=1))
        [8. 6.]
        >>> print(np.ptp(x, axis=0))
        [2. 0. 5. 2.]
    """
    _check_input_tensor(x)
    if not isinstance(keepdims, bool):
        _raise_type_error('keepdims should be boolean')
    if axis is None:
        axis = ()
    else:
        _check_axis_type(axis, True, True, False)
        axis = _check_axis_valid(axis, x.ndim)

    if keepdims:
        x_min = _reduce_min_keepdims(x, axis)
        x_max = _reduce_max_keepdims(x, axis)
    else:
        x_min = _reduce_min_default(x, axis)
        x_max = _reduce_max_default(x, axis)
    return F.tensor_sub(x_max, x_min)


def average(x, axis=None, weights=None, returned=False):
    """
    Computes the weighted average along the specified axis.

    Args:
        x (Tensor): A Tensor to be averaged.
        axis (Union[None, int, tuple(int)]): Axis along which to average `x`. Default: `None`.
            If the axis is `None`, it will average over all of the elements of the tensor `x`.
            If the axis is negative, it counts from the last to the first axis.
        weights (Union[None, Tensor]): Weights associated with the values in `x`. Default: `None`.
            If `weights` is `None`, all the data in `x` are assumed to have a weight equal to one.
            If `weights` is 1-D tensor, the length must be the same as the given axis.
            Otherwise, `weights` should have the same shape as `x`.
        returned (bool): Default: `False`.
            If `True`, the tuple (average, sum_of_weights) is returned.
            If `False`, only the average is returned.

    Returns:
        Averaged Tensor. If `returned` is `True`, return tuple.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> input_x = np.array([[1., 2.], [3., 4.]])
        >>> output = np.average(input_x, axis=0, weights=input_x, returned=True)
        >>> print(output)
        (Tensor(shape=[2], dtype=Float32, value= [ 2.50000000e+00, 3.33333325e+00]),
        Tensor(shape=[2], dtype=Float32, value= [ 4.00000000e+00, 6.00000000e+00]))
    """
    _check_input_tensor(x)
    if axis is not None:
        _check_axis_type(axis, True, True, False)
        axis = _canonicalize_axis(axis, x.ndim)

    x_avg = full((), nan, F.dtype(x))
    sum_of_weights = None

    if weights is None:
        x_avg = mean(x, axis)
        if axis is None:
            sum_of_weights = full((), x.size, F.dtype(x))
        else:
            fill_value = 1
            if isinstance(axis, int) or (isinstance(axis, tuple) and F.tuple_len(axis) == 1):
                fill_value = x.shape[axis] if isinstance(axis, int) else x.shape[axis[0]]
            elif axis is None:
                for sh in x.shape:
                    fill_value *= sh
            else:
                for ax in axis:
                    fill_value *= x.shape[ax]
            sum_of_weights = full_like(x_avg, fill_value, F.dtype(x))
    else:
        _check_input_tensor(weights)
        if x.shape == weights.shape:
            x_avg, sum_of_weights = comput_avg(x, axis, weights)
        elif F.rank(weights) == 1:
            if not isinstance(axis, int):
                _raise_type_error("Axis must be specified when shapes of x and weights differ.")
            perm = _expanded_shape(x.ndim, weights.shape[0], axis)
            weights = weights.reshape(perm)
            x_avg, sum_of_weights = comput_avg(x, axis, weights)
        else:
            _raise_type_error("Weights should be None, 1-D or the same shape as input x.")

    if returned:
        if x_avg.shape != sum_of_weights.shape:
            sum_of_weights = _broadcast_to(sum_of_weights, sum_of_weights.shape, x_avg.shape, x_avg.ndim)
        return (x_avg, sum_of_weights)
    return x_avg


def comput_avg(x, axis, weights):
    """Computes average value of input x with given parameters."""
    axis = () if axis is None else axis
    x_mul = F.tensor_mul(x, weights)
    x_sum = _reduce_sum_default(x_mul, axis)
    sum_of_weights = _reduce_sum_default(weights, axis)
    x_avg = F.tensor_div(x_sum, sum_of_weights)
    return x_avg, sum_of_weights


def matmul(x1, x2, dtype=None):
    """
    Returns the matrix product of two arrays.

    Note:
        Numpy arguments `out`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On GPU, the supported dtypes are np.float16 and np.float32.
        On CPU, the supported dtypes are np.float16 and np.float32.

    Args:
        x1 (Tensor): Input tensor, scalar not allowed.
        x2 (Tensor): Input tensor, scalar not allowed.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the matrix product of the inputs. This is a scalar only
        when both `x1`, `x2` are 1-d vectors.

    Raises:
        ValueError: If the last dimension of `x1` is not the same size as the
            second-to-last dimension of `x2`, or if a scalar value is passed in.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.arange(2*3*4).reshape(2, 3, 4).astype('float32')
        >>> x2 = np.arange(4*5).reshape(4, 5).astype('float32')
        >>> output = np.matmul(x1, x2)
        >>> print(output)
        [[[ 70. 76. 82. 88. 94.]
          [ 190. 212. 234. 256. 278.]
          [ 310. 348. 386. 424. 462.]]
         [[ 430. 484. 538. 592. 646.]
          [ 550. 620. 690. 760. 830.]
          [ 670. 756. 842. 928. 1014.]]]
    """
    return C.matmul(x1, x2, dtype=dtype)


def square(x, dtype=None):
    """
    Returns the element-wise square of the input.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On GPU, the supported dtypes are np.float16 and np.float32.

    Args:
        x (Tensor): Input data.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, element-wise ``x*x``, of the same shape and dtype as `x`.
        This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.square(np.arange(6).reshape(2, 3).astype('float32'))
        >>> print(x)
        [[ 0. 1. 4.]
         [ 9. 16. 25.]]
    """
    return _apply_tensor_op(F.square, x, dtype=dtype)


def sqrt(x, dtype=None):
    """
    Returns the non-negative square-root of an array, element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On GPU, the supported dtypes are np.float16 and np.float32.

    Args:
        x (Tensor): The values whose square-roots are required.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, an array of the same shape as `x`, containing the positive
        square-root of each element in `x`. For negative elements, nan is returned.
        This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(6).reshape(2, 3).astype('float32')
        >>> x_squared = np.square(x)
        >>> output = np.sqrt(x_squared)
        >>> print(output)
        [[ 0. 1. 2.]
         [ 3. 4. 5.]]
    """
    return _apply_tensor_op(F.sqrt, x, dtype=dtype)


def reciprocal(x, dtype=None):
    """
    Returns the reciprocal of the argument, element-wise.

    Calculates ``1/x``.

    Note:
        Numpy arguments `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        When `where` is provided, `out` must have a tensor value. `out` is not supported
        for storing the result, however it can be used in combination with `where` to set
        the value at indices for which `where` is set to False.

    Args:
        x (Tensor): Input array. For integer arguments with absolute value larger
            than 1 the result is always zero because of the way Python handles
            integer division. For integer zero the result is an overflow.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, this is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(1, 7).reshape(2, 3).astype('float32')
        >>> output = np.reciprocal(x)
        >>> print(output)
        [[1. 0.5 0.33333334]
         [0.25 0.2 0.16666667]]
    """
    return _apply_tensor_op(lambda x: F.tensor_div(1, x), x, dtype=dtype)


def log(x, dtype=None):
    """
    Returns the natural logarithm, element-wise.

    The natural logarithm log is the inverse of the exponential function, so that
    ``log(exp(x)) = x``. The natural logarithm is logarithm in base e.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.
        On CPU, the supported dtypes are np.float16, np.float32, and np.float64.

    Args:
        x (Tensor): Input array.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the natural logarithm of `x`, element-wise. This is a
        scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([2, 3, 4]).astype('float32')
        >>> output = np.log(x)
        >>> print(output)
        [0.69314575 1.09861 1.3862929 ]
    """
    return _apply_tensor_op(F.log, x, dtype=dtype)


def _prop_nan(fn, x1, x2):
    """Selects NaN if either element is NaN"""
    has_nan = F.logical_or(_isnan(x1), _isnan(x2))
    nan_tensor = F.fill(_promote(F.dtype(x1), F.dtype(x2)), F.shape(has_nan), nan)
    res = fn(x1, x2)
    return F.select(has_nan, nan_tensor, res)


def maximum(x1, x2, dtype=None):
    """
    Returns the element-wise maximum of array elements.

    Compares two arrays and returns a new array containing the element-wise maxima.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On Ascend, input arrays containing inf or NaN are not supported.

    Args:
        x1 (Tensor): Input array.
        x2 (Tensor): The array holding the elements to be compared. If
            ``x1.shape != x2.shape``, they must be broadcastable to a common shape
            (which becomes the shape of the output).
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the maximum of `x1` and `x2`, element-wise. This is a scalar
        if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.maximum(np.array([2, 3, 4]), np.array([1, 5, 2]))
        >>> print(output)
        [2 5 4]
    """
    if isinstance(x1, (int, float, bool, list, tuple)):
        x1 = asarray_const(x1)
    elif not isinstance(x1, Tensor):
        _raise_type_error("Input x1 is expected to be array_like")
    if isinstance(x2, (int, float, bool, list, tuple)):
        x2 = asarray_const(x2)
    elif not isinstance(x2, Tensor):
        _raise_type_error("Input x2 is expected to be array_like")
    # F.maximum does not support the case when both operands are scalars
    if x1.ndim == 0 and x2.ndim == 0:
        x1 = expand_dims(x1, 0)
        return _apply_tensor_op(functools.partial(_prop_nan, F.maximum), x1, x2, dtype=dtype).squeeze()
    if x1.ndim == 0:
        dtype = x2.dtype
    elif x2.ndim == 0:
        dtype = x1.dtype
    return _apply_tensor_op(functools.partial(_prop_nan, F.maximum), x1, x2, dtype=dtype)


def heaviside(x1, x2, dtype=None):
    """
    Computes the Heaviside step function.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): Input values.
        x2 (Tensor): The value of the function when `x1` is 0. If
            ``x1.shape != x2.shape``, they must be broadcastable to a common shape
            (which becomes the shape of the output).
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the output array, element-wise Heaviside step function
        of `x1`. This is a scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.heaviside(np.array([-1.5, 0, 2.0]), np.array(0.5))
        >>> print(output)
        [0. 0.5 1. ]
        >>> output = np.heaviside(np.array([-1.5, 0, 2.0]), np.array(1))
        >>> print(output)
        [0. 1. 1.]
    """
    def _heaviside(x1, x2):
        """Computes heaviside without passing keyword arguments"""
        # performs type promotion
        dtype1 = F.dtype(x1)
        dtype2 = F.dtype(x2)
        dtype_out = _promote(dtype1, dtype2)
        if not _check_same_type(dtype1, dtype_out):
            x1 = F.cast(x1, dtype_out)
        if not _check_same_type(dtype2, dtype_out):
            x2 = F.cast(x2, dtype_out)
        # performs broadcast
        shape_out = _infer_out_shape(F.shape(x1), F.shape(x2))
        x1 = _broadcast_to_shape(x1, shape_out)
        x2 = _broadcast_to_shape(x2, shape_out)
        x2 = F.select(x1 < 0, zeros(shape_out, dtype_out), x2)
        x2 = F.select(x1 > 0, ones(shape_out, dtype_out), x2)
        return x2

    return _apply_tensor_op(_heaviside, x1, x2, dtype=dtype)


def amax(a, axis=None, keepdims=False, initial=None, where=True):
    """
    Returns the maximum of an array or maximum along an axis.

    Note:
        Numpy argument `out` is not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.

    Args:
        a (Tensor): Input data.
        axis (None or int or tuple of ints, optional): defaults to None. Axis or
            axes along which to operate. By default, flattened input is used. If
            this is a tuple of ints, the maximum is selected over multiple axes,
            instead of a single axis or all the axes as before.
        keepdims (boolean, optional): defaults to False.
            If this is set to True, the axes which are reduced are left in the
            result as dimensions with size one. With this option, the result will
            broadcast correctly against the input array.
        initial (scalar, optional):
            The minimum value of an output element. Must be present to allow
            computation on empty slice.
        where (boolean Tensor, optional): defaults to True.
            A boolean array which is broadcasted to match the dimensions of array,
            and selects elements to include in the reduction. If non-default value
            is passed, initial must also be provided.

    Returns:
        Tensor or scalar, maximum of `a`. If `axis` is None, the result is a scalar
        value. If `axis` is given, the result is an array of dimension ``a.ndim - 1``.

    Raises:
        TypeError: if the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.arange(4).reshape((2,2)).astype('float32')
        >>> output = np.amax(a)
        >>> print(output)
        3.0
        >>> output = np.amax(a, axis=0)
        >>> print(output)
        [2. 3.]
        >>> output = np.amax(a, axis=1)
        >>> print(output)
        [1. 3.]
        >>> output = np.amax(a, where=np.array([False, True]), initial=-1, axis=0)
        >>> print(output)
        [-1. 3.]
    """
    return _reduce(a, P.ReduceMax(keepdims), cmp_fn=F.maximum, axis=axis, keepdims=keepdims,
                   initial=initial, where=where)


def amin(a, axis=None, keepdims=False, initial=None, where=True):
    """
    Returns the minimum of an array or minimum along an axis.

    Note:
        Numpy argument `out` is not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.

    Args:
        a (Tensor): Input data.
        axis (None or int or tuple of ints, optional): defaults to None. Axis or
            axes along which to operate. By default, flattened input is used. If
            this is a tuple of ints, the minimum is selected over multiple axes,
            instead of a single axis or all the axes as before.
        keepdims (boolean, optional): defaults to False.
            If this is set to True, the axes which are reduced are left in the
            result as dimensions with size one. With this option, the result will
            broadcast correctly against the input array.
        initial (scalar, optional):
            The maximum value of an output element. Must be present to allow
            computation on empty slice.
        where (boolean Tensor, optional): defaults to True.
            A boolean array which is broadcasted to match the dimensions of array,
            and selects elements to include in the reduction. If non-default value
            is passed, initial must also be provided.

    Returns:
        Tensor or scalar, minimum of `a`. If `axis` is None, the result is a scalar
        value. If `axis` is given, the result is an array of dimension ``a.ndim - 1``.

    Raises:
        TypeError: if the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.arange(4).reshape((2,2)).astype('float32')
        >>> output = np.amin(a)
        >>> print(output)
        0.0
        >>> output = np.amin(a, axis=0)
        >>> print(output)
        [0. 1.]
        >>> output = np.amin(a, axis=1)
        >>> print(output)
        [0. 2.]
        >>> output = np.amin(a, where=np.array([False, True]), initial=10, axis=0)
        >>> print(output)
        [10. 1.]
    """
    return _reduce(a, P.ReduceMin(keepdims), cmp_fn=F.minimum, axis=axis, keepdims=keepdims,
                   initial=initial, where=where)


def hypot(x1, x2, dtype=None):
    """
    Given the “legs” of a right triangle, returns its hypotenuse.

    Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or `x2` is scalar_like
    (i.e., unambiguously cast-able to a scalar type), it is broadcast for use
    with each element of the other argument. (See Examples)

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On GPU, the supported dtypes are np.float16 and np.float32.
        On CPU, the supported dtypes are np.float16, np.float32, and np.float64.

    Args:
        x1 (Tensor): Leg of the triangle(s).
        x2 (Tensor): Leg of the triangle(s). If ``x1.shape != x2.shape``, they
            must be broadcastable to a common shape (which becomes the shape of
            the output).
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the hypotenuse of the triangle(s). This is a scalar if
        both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
        >>> print(output)
        [[5. 5. 5.]
        [5. 5. 5.]
        [5. 5. 5.]]
        >>> output = np.hypot(3*np.ones((3, 3)), np.array([4.0]))
        >>> print(output)
        [[5. 5. 5.]
        [5. 5. 5.]
        [5. 5. 5.]]
    """
    def _hypot(x1, x2):
        """Computes hypotenuse without passing keyword arguments"""
        if _get_device() == 'CPU':
            # broadcast is not fully supported in tensor_add on CPU,
            # so we compute x1**2 - (-(x2**2)) with tensor_sub as a substitute
            return F.sqrt(F.tensor_sub(F.square(x1), F.neg_tensor(F.square(x2))))
        return F.sqrt(F.tensor_add(F.square(x1), F.square(x2)))

    return _apply_tensor_op(_hypot, x1, x2, dtype=dtype)


def floor(x, dtype=None):
    """
    Returns the floor of the input, element-wise.

    The floor of the scalar `x` is the largest integer `i`, such that ``i <= x``.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On GPU, the supported dtypes are np.float16 and np.float32.
        On CPU, the supported dtypes are np.float16, np.float32, and np.float64.

    Args:
        x (Tensor): input data.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the floor of each element in `x`. This is a scalar if `x`
        is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.floor(np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]))
        >>> print(output)
        [-2. -2. -1. 0. 1. 1. 2.]
    """
    return _apply_tensor_op(F.floor, x, dtype=dtype)


def floor_divide(x1, x2, dtype=None):
    """
    Returns the largest integer smaller or equal to the division of the inputs.

    It is equivalent to the Python // operator and pairs with the
    Python % (remainder) function, so that ``a = a % b + b * (a // b)`` up to roundoff.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): Input array.
        x2 (Tensor): Input array.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.floor_divide(np.array([1., 2., 3., 4.]), np.array(2.5))
        >>> print(output)
        [0. 0. 1. 1.]
    """
    return _apply_tensor_op(F.tensor_floordiv, x1, x2, dtype=dtype)


def _remainder(x1, x2, C_style=False):
    """Computes remainder without applying keyword arguments."""
    dtype = _promote(F.dtype(x1), F.dtype(x2))
    if not _check_is_float(dtype):
        x1 = F.cast(x1, mstype.float32)
        x2 = F.cast(x2, mstype.float32)
    quotient = F.tensor_div(x1, x2)
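    # C-style (fmod) truncates the quotient toward zero, so the remainder keeps
    # the sign of the dividend; Python-style floors it, so the remainder keeps
    # the sign of the divisor.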
    if C_style:
        quotient = fix(quotient)
    else:
        quotient = F.floor(quotient)
    prod = F.tensor_mul(x2, quotient)
    res = F.tensor_sub(x1, prod)
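    # For integer dtypes the arithmetic above ran in float32; map x % 0 back to
    # 0 instead of the NaN produced by the float division.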
    if _check_is_int(dtype):
        zeros_tensor = zeros(F.shape(quotient), F.dtype(quotient))
        x2_zeros = F.equal(x2, zeros_tensor)
        res = F.select(x2_zeros, zeros_tensor, res)
    if not _check_same_type(F.dtype(res), dtype):
        res = F.cast(res, dtype)
    return res
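

# A worked sketch of the two conventions above (illustrative values only): for
# x1 = -5 and x2 = 3, the floored quotient is -2, giving remainder
# -5 - 3*(-2) = 1, while the C-style (truncated) quotient is -1, giving
# remainder -5 - 3*(-1) = -2.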


def remainder(x1, x2, dtype=None):
    """
    Returns element-wise remainder of division.

    Computes the remainder complementary to the floor_divide function. It is
    equivalent to the Python modulus operator ``x1 % x2`` and has the same sign
    as the divisor `x2`. The MATLAB function equivalent to np.remainder is mod.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): input array.
        x2 (Tensor): input array.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the element-wise remainder of the quotient
        ``floor_divide(x1, x2)``. This is a scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.remainder(np.array([4, 7]), np.array([2, 3]))
        >>> print(output)
        [0 1]
        >>> output = np.remainder(np.arange(7), np.array(5))
        >>> print(output)
        [0 1 2 3 4 0 1]
    """
    return _apply_tensor_op(_remainder, x1, x2, dtype=dtype)


def fix(x):
    """
    Rounds to nearest integer towards zero.

    Rounds an array of floats element-wise to nearest integer towards zero. The
    rounded values are returned as floats.

    Note:
        Numpy argument `out` is not supported.

    Args:
        x (Tensor): An array of floats to be rounded.

    Returns:
        Tensor.

    Raises:
        TypeError: if the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.fix(np.array([2.1, 2.9, -2.1, -2.9]))
        >>> print(output)
        [ 2. 2. -2. -2.]
    """
    _check_input_tensor(x)
    if not _check_is_float(F.dtype(x)):
        x = F.cast(x, mstype.float32)
    floored = F.floor(x)
    # TODO change to F.ceil once supported on CPU.
    ceiled = F.neg_tensor(F.floor(F.neg_tensor(x)))
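    # Round toward zero: use the (emulated) ceil for negative inputs and the
    # floor for non-negative ones.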
    is_neg = F.tensor_lt(x, zeros(F.shape(x), F.dtype(x)))
    return F.select(is_neg, ceiled, floored)


def fmod(x1, x2, dtype=None):
    """
    Returns the element-wise remainder of division.

    This is the NumPy implementation of the C library function fmod; the remainder
    has the same sign as the dividend `x1`. It is equivalent to the Matlab(TM) rem
    function and should not be confused with the Python modulus operator ``x1 % x2``.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): input array.
        x2 (Tensor): input array.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the remainder of the division of `x1` by `x2`. This is a
        scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.fmod(np.array([-3, -2, -1, 1, 2, 3]), np.array(2))
        >>> print(output)
        [-1 0 -1 1 0 1]
    """
    return _apply_tensor_op(lambda x1, x2: _remainder(x1, x2, C_style=True), x1, x2, dtype=dtype)


def trunc(x, dtype=None):
    """
    Returns the truncated value of the input, element-wise.

    The truncated value of the scalar `x` is the nearest integer `i` which is closer to zero
    than `x` is. In short, the fractional part of the signed number `x` is discarded.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): input data.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the truncated value of each element in `x`. This is a scalar if `x` is
        a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.trunc(np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]))
        >>> print(output)
        [-1. -1. -0. 0. 1. 1. 2.]
    """
    return _apply_tensor_op(fix, x, dtype=dtype)


def exp(x, dtype=None):
    """
    Calculates the exponential of all elements in the input array.

    Note:
        Numpy arguments `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        When `where` is provided, `out` must have a tensor value. `out` is not supported
        for storing the result; however, it can be used in combination with `where` to set
        the value at indices for which `where` is set to False.
        On GPU, the supported dtypes are np.float16, and np.float32.
        On CPU, the supported dtypes are np.float16, np.float32, and np.float64.

    Args:
        x (Tensor): input data.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, element-wise exponential of `x`. This is a scalar if `x`
        is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.exp(np.arange(5).astype(np.float32))
        >>> print(output)
        [ 1. 2.718282 7.3890557 20.085537 54.598145 ]
    """
    return _apply_tensor_op(F.tensor_exp, x, dtype=dtype)


def expm1(x, dtype=None):
    """
    Calculates ``exp(x) - 1`` for all elements in the array.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.
        On CPU, the supported dtypes are np.float16, and np.float32.

    Args:
        x (Tensor): input data.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, element-wise exponential minus one, ``out = exp(x) - 1``.
        This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.expm1(np.arange(5).astype(np.float32))
        >>> print(output)
        [ 0. 1.7182819 6.389056 19.085537 53.59815 ]
    """
    return _apply_tensor_op(F.tensor_expm1, x, dtype=dtype)


def divmod_(x1, x2, dtype=None):
    """
    Returns element-wise quotient and remainder simultaneously.

    Args:
        x1 (Union[Tensor]): Dividend tensor.
        x2 (Union[Tensor, int, float, bool]): Divisor. If ``x1.shape != x2.shape``,
            they must be broadcastable to a common shape.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Element-wise quotient and remainder from floor division, as a
        (quotient, remainder) pair.

    Raises:
        TypeError: if `x1` and `x2` are not Tensor or scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.array([1, 2, 3, 4, 5])
        >>> print(np.divmod(a, 1.5))
        (Tensor(shape=[5], dtype=Float32,
        value= [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00, 2.00000000e+00, 3.00000000e+00]),
        Tensor(shape=[5], dtype=Float32,
        value= [ 1.00000000e+00, 5.00000000e-01, 0.00000000e+00, 1.00000000e+00, 5.00000000e-01]))
    """
    q = F.tensor_floordiv(x1, x2)
    r = remainder(x1, x2)
    if dtype is not None:
        q = q.astype(dtype)
        r = r.astype(dtype)
    return (q, r)


def diff(a, n=1, axis=-1, prepend=None, append=None):
    """
    Calculates the n-th discrete difference along the given axis.

    The first difference is given by :math:`out[i] = a[i+1] - a[i]` along the given axis,
    higher differences are calculated by using `diff` iteratively.

    Args:
        a (Tensor): Input tensor.
        n (int, optional): The number of times values are differenced. If zero,
            the input is returned as-is.
        axis (int, optional): The axis along which the difference is taken, default
            is the last axis.
        prepend/append (Tensor, optional): Values to prepend or append to `a` along
            `axis` prior to performing the difference. Scalar values are expanded to
            arrays with length 1 in the direction of `axis` and the shape of the input
            array along all other axes. Otherwise the dimension and shape must
            match `a` except along `axis`.

    Returns:
        The n-th differences. The shape of the output is the same as `a` except along
        `axis` where the dimension is smaller by `n`. The type of the output is the same
        as the type of the difference between any two elements of `a`. This is the same
        as the type of `a` in most cases.

    Raises:
        TypeError: If inputs have types not specified above.
        ValueError: If ``n < 0``.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> arr = np.array([1, 3, -1, 0, 4])
        >>> print(np.diff(arr, n=2))
        [-6 5 3]
    """
    # This implementation is inspired by jax.numpy
    _check_input_tensor(a)
    axis = _canonicalize_axis(axis, a.ndim)
    if not isinstance(n, int):
        _raise_type_error("Input n should be int, but got ", n)
    if n < 0:
        _raise_value_error("Input n should not be less than 0.")
    if n == 0:
        return a
    combined = ()
    if prepend is not None:
        if isinstance(prepend, (int, float, bool)):
            prepend = asarray_const(prepend)
            prepend_shape = a.shape
            prepend_shape = _tuple_setitem(prepend_shape, axis, 1)
            prepend = _broadcast_to_shape(prepend, prepend_shape)
        elif not isinstance(prepend, Tensor):
            _raise_type_error("prepend must be scalar or Tensor, but got ", prepend)
        combined += (prepend,)
    combined += (a,)
    if append is not None:
        if isinstance(append, (int, float, bool)):
            append = asarray_const(append)
            append_shape = a.shape
            append_shape = _tuple_setitem(append_shape, axis, 1)
            append = _broadcast_to_shape(append, append_shape)
        elif not isinstance(append, Tensor):
            _raise_type_error("append must be scalar or Tensor, but got ", append)
        combined += (append,)
    if combined:
        a = concatenate(combined, axis)
    # if n > maximum length allowed, returns empty tensor, with shape matched with
    # the original tensor
    if n > a.shape[axis]:
        empty_shape = a.shape
        empty_shape = _tuple_setitem(empty_shape, axis, 0)
        return empty(empty_shape, a.dtype)
    original_dtype = a.dtype
    # will change once F.tensor_slice supports types other than float32
    if not _check_is_float(original_dtype):
        a = a.astype(mstype.float32)
    a = moveaxis(a, axis, -1)
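    # Take the difference n times; each pass subtracts adjacent elements along
    # the last axis: out[..., i] = a[..., i + 1] - a[..., i].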
    for _ in F.make_range(n):
        slice_start = _list_comprehensions(F.rank(a) - 1, 0, True)
        slice_size = F.shape(a)[:-1] + (F.shape(a)[-1] - 1,)
        minuend = F.tensor_slice(a, slice_start + (1,), slice_size)
        subtrahend = F.tensor_slice(a, slice_start + (0,), slice_size)
        a = F.tensor_sub(minuend, subtrahend)
    if not _check_is_float(original_dtype):
        a = a.astype(original_dtype)
    return moveaxis(a, -1, axis)


def ediff1d(ary, to_end=None, to_begin=None):
    """
    The differences between consecutive elements of a tensor.

    Args:
        ary (Tensor): If necessary, will be flattened before the differences are taken.
        to_end (Tensor or scalar, optional): Number(s) to append at the end of the
            returned differences.
        to_begin (Tensor or scalar, optional): Number(s) to prepend at the beginning
            of the returned differences.

    Returns:
        The differences.

    Raises:
        TypeError: If inputs have types not specified above.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> arr = np.array([1, 3, -1, 0, 4])
        >>> print(np.ediff1d(arr))
        [ 2 -4 1 4]
    """
    _check_input_tensor(ary)
    combined = ()
    if to_begin is not None:
        if isinstance(to_begin, Tensor):
            to_begin = to_begin.ravel()
        else:
            to_begin = _to_tensor(to_begin).ravel()
        to_begin = to_begin.astype(ary.dtype)
        combined += (to_begin,)
    combined += (diff(ary.ravel()),)
    if to_end is not None:
        if isinstance(to_end, Tensor):
            to_end = to_end.ravel()
        else:
            to_end = _to_tensor(to_end).ravel()
        to_end = to_end.astype(ary.dtype)
        combined += (to_end,)
    return P.Concat(0)(combined)


def trapz(y, x=None, dx=1.0, axis=-1):
    """
    Integrates along the given axis using the composite trapezoidal rule.

    Integrates `y` (x) along given axis.

    Args:
        y (Tensor): Input array to integrate.
        x (Union[int, float, bool, list, tuple, Tensor], optional): The sample points
            corresponding to the `y` values. If `x` is None, the sample points are
            assumed to be evenly spaced `dx` apart. The default is None.
        dx (scalar, optional): The spacing between sample points when `x` is None. The
            default is 1.
        axis (int, optional): The axis along which to integrate.

    Returns:
        Tensor of float, definite integral as approximated by trapezoidal rule.

    Raises:
        ValueError: If axis is out of range of ``[-y.ndim, y.ndim)``.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.arange(6).reshape(2, 3)
        >>> output = np.trapz(a, x=[-2, 1, 2], axis=1)
        >>> print(output)
        [ 3. 15.]
        >>> output = np.trapz(a, dx=3, axis=0)
        >>> print(output)
        [ 4.5 7.5 10.5]
    """
    y = _to_tensor(y)
    ndim = F.rank(y)
    _check_axis_in_range(axis, ndim)
    axis = axis + ndim if axis < 0 else axis
    y_start_axis_left = _list_comprehensions(axis, 0, True)
    y_start_axis_right = _list_comprehensions(ndim - axis - 1, 0, True)
    shape = F.shape(y)
    y_slice_size = _tuple_setitem(shape, axis, shape[axis] - 1)
    if x is not None:
        x = _to_tensor(x)
        dx = diff(x)
    else:
        dx = _to_tensor(dx)
    dx = _expand(dx, ndim - axis, axis=-1)
    dx = _broadcast_to_shape(dx, y_slice_size)
    if not _check_is_float(F.dtype(y)):
        # trapz returns float
        y = F.cast(y, mstype.float32)
    dx = F.cast(dx, F.dtype(y))
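    # Composite trapezoidal rule: sum dx * (y[i] + y[i + 1]) / 2 along `axis`.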
    # product of dx and y with the last column removed
    y_slice_left = F.tensor_slice(y, y_start_axis_left + (0,) + y_start_axis_right, y_slice_size)
    prod_left = F.tensor_mul(y_slice_left, dx)
    # product of dx and y with the first column removed
    y_slice_right = F.tensor_slice(y, y_start_axis_left + (1,) + y_start_axis_right, y_slice_size)
    prod_right = F.tensor_mul(y_slice_right, dx)
    prod_sum = F.tensor_div(F.tensor_add(prod_left, prod_right), _to_tensor(2.0).astype(F.dtype(y)))
    return F.reduce_sum(prod_sum, axis)


def _gcd(x1, x2):
    """Calculates gcd without applying keyword arguments."""
    dtype = _promote(F.dtype(x1), F.dtype(x2))
    if _get_device() == 'CPU' and not _check_is_float(dtype):
        # F.reduce_sum only supports float
        x1 = F.cast(x1, mstype.float32)
        x2 = F.cast(x2, mstype.float32)
    x1 = F.absolute(x1)
    x2 = F.absolute(x2)
    cond_ge = F.tensor_ge(x1, x2)
    a = where_(cond_ge, x1, x2)
    b = where_(cond_ge, x2, x1)
    b = where_(F.equal(b, ZERO_TENSOR), a, b)
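    # Element-wise Euclidean algorithm: iterate (a, b) <- (b, a % b) until every
    # element's remainder reaches zero; the line above replaces zero divisors
    # with `a` so that gcd(x, 0) = x, and lanes that have already terminated are
    # frozen in place by the where_ masks below.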
    r = _remainder(a, b)
    while F.tensor_gt(F.reduce_sum(r), ZERO_TENSOR):
        r = _remainder(a, b)
        has_terminated = F.equal(r, ZERO_TENSOR)
        a = where_(has_terminated, a, b)
        b = where_(has_terminated, b, r)
    if not _check_same_type(F.dtype(b), dtype):
        b = F.cast(b, dtype)
    return b


def gcd(x1, x2, dtype=None):
    """
    Returns the greatest common divisor of ``|x1|`` and ``|x2|``.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): input data.
        x2 (Tensor): input data.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the greatest common divisor of the absolute value of the inputs.
        This is a scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.gcd(np.arange(6), np.array(20))
        >>> print(output)
        [20 1 2 1 4 5]
    """
    return _apply_tensor_op(_gcd, x1, x2, dtype=dtype)


def lcm(x1, x2, dtype=None):
    """
    Returns the lowest common multiple of ``|x1|`` and ``|x2|``.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): input data.
        x2 (Tensor): input data.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the lowest common multiple of the absolute value of the inputs.
        This is a scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.lcm(np.arange(6), np.array(20))
        >>> print(output)
        [ 0 20 20 60 20 20]
    """
    def _lcm(x1, x2):
        """Calculates lcm without applying keyword arguments"""
        common_divisor = _gcd(x1, x2)
        q1 = F.tensor_div(x1, common_divisor)
        q2 = F.tensor_div(x2, common_divisor)
        res = F.tensor_mul(F.tensor_mul(q1, q2), common_divisor)
        dtype = F.dtype(res)
        if _get_device() == 'CPU' and not _check_is_float(dtype):
            # F.absolute only supports float
            res = F.cast(res, mstype.float32)
        return F.absolute(res).astype(dtype)

    return _apply_tensor_op(_lcm, x1, x2, dtype=dtype)


def convolve(a, v, mode='full'):
    """
    Returns the discrete, linear convolution of two one-dimensional sequences.

    Note:
        If `v` is longer than `a`, the tensors are swapped before computation.

    Args:
        a (Union[list, tuple, Tensor]): First one-dimensional input tensor.
        v (Union[list, tuple, Tensor]): Second one-dimensional input tensor.
        mode (str, optional): By default, mode is 'full'. This returns the
            convolution at each point of overlap, with an output shape of :math:`(N+M-1,)`.
            At the end-points of the convolution, the signals do not overlap completely,
            and boundary effects may be seen.
            If `mode` is 'same', it returns output of length :math:`max(M, N)`. Boundary
            effects are still visible.
            If `mode` is 'valid', it returns output of length :math:`max(M, N) - min(M, N) + 1`.
            The convolution product is only given for points where the signals overlap
            completely. Values outside the signal boundary have no effect.

    Returns:
        Tensor, discrete, linear convolution of `a` and `v`.

    Raises:
        TypeError: if the inputs have types not specified above.
        ValueError: if `a` and `v` are empty or have wrong dimensions.

    Supported Platforms:
        ``GPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.convolve([1., 2., 3., 4., 5.], [2., 3.], mode="valid")
        >>> print(output)
        [ 3. 6. 9. 12.]
    """
    if not isinstance(a, Tensor):
        a = asarray_const(a)
    if not isinstance(v, Tensor):
        v = asarray_const(v)
    if a.size == 0 or v.size == 0:
        _raise_value_error("Inputs cannot be empty.")
    a = _expand(a, 1)
    v = _expand(v, 1)
    final_dtype = _promote(a.dtype, v.dtype)
    a = a.astype("float32")
    v = v.astype("float32")
    if a.ndim != 1 or v.ndim != 1:
        _raise_value_error("a and v must be 1-D tensor.")
    if a.size < v.size:
        a, v = v, a
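    # P.Conv2D computes a correlation, so reverse the kernel to obtain a true
    # convolution.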
    v = v[::-1]
    if mode not in ('same', 'full', 'valid'):
        _raise_value_error("mode must be one of ['full', 'same', 'valid']")
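    # Zero-pad `a` so that the 'valid' convolution computed below produces the
    # output length required by 'same' or 'full'.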
    if v.size > 1:
        if mode == 'same':
            pad_left = _to_tensor(_list_comprehensions(v.size // 2, 0.0, True))
            pad_right = _to_tensor(_list_comprehensions(v.size - v.size // 2 - 1, 0.0, True))
            a = P.Concat(axis=0)((pad_left, a, pad_right))
        elif mode == 'full':
            pad = _to_tensor(_list_comprehensions(v.size - 1, 0.0, True))
            a = P.Concat(axis=0)((pad, a, pad))
    a = a.reshape(1, 1, 1, a.size)
    v = v.reshape(1, 1, 1, v.size)
    _conv = P.Conv2D(out_channel=1, kernel_size=(1, v.size), pad_mode="valid")
    return _conv(a, v).reshape(-1).astype(final_dtype)


def _handle_weights(weights, num_samples):
    """Checks fweight and aweight in np.cov."""
    weights = asarray_const(weights)
    if not _check_is_int(weights.dtype):
        _raise_type_error("weights must be integer")
    weights = weights.astype("float32")
    if weights.ndim > 1:
        _raise_runtime_error("cannot handle multidimensional weights")
    if weights.shape[0] != num_samples:
        _raise_runtime_error("incompatible numbers of samples and weights")
    return absolute(weights)


def _handle_inputs(cov_input, rowvar):
    """Checks input arrays for np.cov."""
    if not isinstance(cov_input, Tensor):
        cov_input = asarray_const(cov_input)
    if cov_input.ndim > 2:
        _raise_value_error("input array has dimension more than 2.")
    cov_input = cov_input.astype("float32")
    cov_input = _expand(cov_input, 2)
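    # With rowvar False the variables are stored in columns, so transpose to the
    # row-per-variable layout used internally (1-D inputs stay as a single row).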
    if not rowvar and cov_input.shape[0] != 1:
        cov_input = cov_input.T
    return cov_input


def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights=None, dtype=None):
    """
    Estimates a covariance matrix, given data and weights.

    Covariance indicates the level to which two variables vary together. If we examine
    N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, then the covariance matrix
    element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`. The element
    :math:`C_{ii}` is the variance of :math:`x_i`.

    Note:
        `fweights` and `aweights` must all be positive. In NumPy, a ValueError is
        raised if negative values are detected; in MindSpore, all values are
        converted to positive instead.

    Args:
        m (Union[Tensor, list, tuple]): A 1-D or 2-D tensor containing multiple variables
            and observations. Each row of `m` represents a variable, and each column
            represents a single observation of all those variables. Also see `rowvar` below.
        y (Union[Tensor, list, tuple], optional): An additional set of variables
            and observations. `y` has the same form as that of `m`.
        rowvar (bool, optional): If `rowvar` is ``True`` (default), then each row represents
            a variable, with observations in the columns. Otherwise, the relationship
            is transposed: each column represents a variable, while the rows contain
            observations.
        bias (bool, optional): Default normalization (``False``) is by :math:`(N - 1)`, where
            :math:`N` is the number of observations given (unbiased estimate). If bias is
            ``True``, then normalization is by `N`. These values can be overridden by
            using the keyword `ddof`.
        ddof (int, optional): If not ``None``, the default value implied by `bias` is
            overridden. Note that :math:`ddof=1` will return the unbiased estimate, even
            if both fweights and aweights are specified, and :math:`ddof=0` will return
            the simple average. See the notes for the details. The default value
            is ``None``.
        fweights (Union[Tensor, list, tuple], optional): 1-D tensor of integer
            frequency weights; the number of times each observation vector should
            be repeated.
        aweights (Union[Tensor, list, tuple], optional): 1-D tensor of observation
            vector weights. These relative weights are typically larger for observations
            considered more important and smaller for observations considered less
            important. If :math:`ddof=0` the tensor of weights can be used to assign probabilities
            to observation vectors.
        dtype (Union[:class:`mindspore.dtype`, str], optional): Data-type of the
            result. By default, the return data-type will have mstype.float32 precision.

    Returns:
        Tensor, the covariance matrix of the variables.

    Raises:
        TypeError: if the inputs have types not specified above.
        ValueError: if `m` and `y` have wrong dimensions.
        RuntimeError: if `aweights` and `fweights` have dimensions > 2.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.cov([[2., 3., 4., 5.], [0., 2., 3., 4.], [7., 8., 9., 10.]])
        >>> print(output)
        [[1.6666666 2.1666667 1.6666666]
        [2.1666667 2.9166667 2.1666667]
        [1.6666666 2.1666667 1.6666666]]
    """
    # This implementation was inspired by original numpy implementation.
    m = _handle_inputs(m, rowvar)
    if m.shape[0] == 0:
        return empty((0, 0), dtype="float32")
    if y is not None:
        y = _handle_inputs(y, rowvar)
        m = concatenate((m, y), axis=0)
    if ddof is None:
        if not bias:
            ddof = 1
        else:
            ddof = 0
    # Handle fweights and aweights
    w = _handle_weights(fweights, m.shape[1]) if fweights is not None else None
    if aweights is not None:
        aweights = _handle_weights(aweights, m.shape[1])
        w = aweights if w is None else w * aweights
    avg = average(m, axis=1, weights=w)
    # Determine the normalization
    if w is None:
        fact = m.shape[1] - ddof
    else:
        w_sum = _reduce_sum_default(w, -1)
        if ddof == 0:
            fact = w_sum
        elif aweights is None:
            fact = w_sum - ddof
        else:
            fact = w_sum - ddof * F.reduce_sum(w * aweights) / w_sum
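    # Center the observations about the (weighted) mean, then form the weighted
    # scatter matrix and divide by the normalization factor.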
    m = m - F.expand_dims(avg, -1)
    if w is None:
        m_T = m.T
    else:
        m_T = (m * w).T
    res = true_divide(dot(m, m_T), fact).squeeze()
    if dtype is not None:
        return res.astype(dtype)
    return res


@constexpr
def _real_axes(ndim_orig, ndim_out, axes_orig):
    """Returns the real axes to be reduced after performing broadcast"""
    _diff = ndim_out - ndim_orig
    axes = F.make_range(_diff)
    axes_orig = map(functools.partial(operator.add, _diff), axes_orig)
    return axes + tuple(axes_orig)


@constexpr
def _shape_reduced_keepdims(shape, axes):
    """
    Reduces dimensions corresponding to argument axes while
    keeping the number of dimensions unchanged.
    """
    ndim_out = F.tuple_len(shape)
    shape_out = [1]*ndim_out
    for i in range(ndim_out):
        if i not in axes:
            shape_out[i] = shape[i]
    return tuple(shape_out)


@constexpr
def _shape_reduced(shape, axes):
    """Removes dimensions corresponding to argument axes"""
    ndim_orig = F.tuple_len(shape)
    ndim_out = ndim_orig - F.tuple_len(axes)
    shape_out = [0]*ndim_out
    idx_out = 0
    for i in range(ndim_orig):
        if i not in axes:
            shape_out[idx_out] = shape[i]
            idx_out += 1
    return tuple(shape_out)


def _reduce(a, reduce_fn, cmp_fn=None, axis=None, keepdims=False, initial=None, where=True, dtype=None):
    """
    Applies comparison based on cmp_fn and reduction based on reduce_fn.
    If cmp_fn is None, only reduction is performed.
    """
    _check_input_tensor(a)
    shape = F.shape(a)
    ndim = F.rank(a)
    if dtype is None:
        dtype = F.dtype(a)
    axes = _check_axis_valid(axis, ndim)
    if initial is not None:
        if ((isinstance(initial, Tensor) and F.rank(initial) > 0) or
                not isinstance(initial, (int, float, bool, Tensor))):
            _raise_type_error('initial should be scalar')
    if _is_shape_empty(shape):
        if not axes:
            return a
        if keepdims:
            shape_out = _shape_reduced_keepdims(shape, axes)
        else:
            shape_out = _shape_reduced(shape, axes)
        if _is_shape_empty(shape_out):
            return empty(shape_out, dtype)
        if initial is None:
            if cmp_fn is None:
                initial = nan
            else:
                return _raise_value_error('initial value must be provided for zero-size arrays')
        return full(shape_out, initial, dtype)
    if initial is not None:
        initial = full(shape, initial, dtype)
        a = cmp_fn(a, initial)
    if not axes:
        return a.astype(dtype)
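    # With a `where` mask, masked-out elements are first replaced by `initial`
    # so they cannot influence the reduction.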
    if isinstance(where, Tensor):
        if initial is None:
            return _raise_value_error('initial value must be provided for where masks')
        ndim_orig = F.rank(a)
        a = where_(where, a, initial)
        axes = _real_axes(ndim_orig, F.rank(a), axes)
    return reduce_fn(a, axes).astype(dtype)


def _reduce_nansum(x, axis, keepdims=False):
    """Computes reduce sum treating NaNs as zeros."""
    x = F.select(_isnan(x), zeros(F.shape(x), F.dtype(x)), x)
    if keepdims:
        return _reduce_sum_keepdims(x, axis)
    return _reduce_sum_default(x, axis)


def nansum(a, axis=None, dtype=None, keepdims=False):
    """
    Returns the sum of array elements over a given axis treating Not a Numbers (NaNs) as zero.

    Note:
        Numpy argument `out` is not supported.

    Args:
        a (Union[int, float, bool, list, tuple, Tensor]): Array containing numbers
            whose sum is desired. If `a` is not an array, a conversion is attempted.
        axis (Union[int, tuple of int, None], optional): Axis or axes along which the sum is
            computed. The default is to compute the sum of the flattened array.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.
        keepdims (boolean, optional): defaults to False. If this is set to True, the axes which
            are reduced are left in the result as dimensions with size one. With this option,
            the result will broadcast correctly against the original `a`.

    Returns:
        Tensor.

    Raises:
        ValueError: if axes are out of the range of ``[-a.ndim, a.ndim)``, or
            if the axes contain duplicates.

    Supported Platforms:
        ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.array([[1, 1], [1, np.nan]])
        >>> output = np.nansum(a)
        >>> print(output)
        3.0
        >>> output = np.nansum(a, axis=0)
        >>> print(output)
        [2. 1.]
    """
    a = _to_tensor(a)
    nan_mask = _isnan(a)
    a = F.select(nan_mask, zeros(F.shape(a), F.dtype(a)), a)
    if dtype is None and _get_device() == 'CPU' and not _check_is_float(F.dtype(a)):
        # F.reduce_sum only supports float on CPU
        dtype = F.dtype(a)
        a = F.cast(a, mstype.float32)
    return _reduce(a, functools.partial(_reduce_nansum, keepdims=keepdims), axis=axis,
                   keepdims=keepdims, dtype=dtype)


def _count_nonnan(a, axis, keepdims=False):
    """Counts the number of elements excluding NaNs."""
    nonnan_mask = F.select(_isnan(a), zeros(F.shape(a), F.dtype(a)), ones(F.shape(a), F.dtype(a)))
    if keepdims:
        return _reduce_sum_keepdims(nonnan_mask, axis)
    return _reduce_sum_default(nonnan_mask, axis)


def nanmean(a, axis=None, dtype=None, keepdims=False):
    """
    Computes the arithmetic mean along the specified axis, ignoring NaNs.

    Returns the average of the array elements. The average is taken over the flattened
    array by default, otherwise over the specified axis. float32 intermediate and
    return values are used for integer inputs.

    Note:
        Numpy argument `out` is not supported.

    Args:
        a (Union[int, float, bool, list, tuple, Tensor]): Array containing numbers
            whose mean is desired. If `a` is not an array, a conversion is attempted.
        axis (Union[int, tuple of int, None], optional): Axis or axes along which the mean is
            computed. The default is to compute the mean of the flattened array.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.
        keepdims (boolean, optional): defaults to False. If this is set to True, the axes which
            are reduced are left in the result as dimensions with size one. With this option,
            the result will broadcast correctly against the original `a`.

    Returns:
        Tensor.

    Raises:
        ValueError: if axes are out of the range of ``[-a.ndim, a.ndim)``, or
            if the axes contain duplicates.

    Supported Platforms:
        ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.array([[1, np.nan], [3, 4]])
        >>> output = np.nanmean(a)
        >>> print(output)
        2.6666667
        >>> output = np.nanmean(a, axis=0)
        >>> print(output)
        [2. 4.]
        >>> output = np.nanmean(a, axis=1)
        >>> print(output)
        [1. 3.5]
    """
    a = _to_tensor(a)
    axis = _check_axis_valid(axis, F.rank(a))
    sum_a = nansum(a, axis=axis, dtype=dtype, keepdims=keepdims)
    return F.tensor_div(sum_a, _count_nonnan(a, axis, keepdims))


def _nanvar(a, axis, ddof=0, keepdims=False):
    """Computes nanvar without applying keyword arguments."""
    mean_a = nanmean(a, axis=axis, keepdims=True)
    pow_a = F.tensor_pow(F.tensor_sub(a, mean_a), 2)
    sum_a = _reduce_nansum(pow_a, axis, keepdims)
    count = _count_nonnan(a, axis, keepdims)
    return F.tensor_div(sum_a, F.tensor_sub(count, ddof))


def nanvar(a, axis=None, dtype=None, ddof=0, keepdims=False):
    """
    Computes the variance along the specified axis, while ignoring NaNs.

    Returns the variance of the array elements, a measure of the spread of a distribution. The
    variance is computed for the flattened array by default, otherwise over the specified axis.

    Note:
        Numpy argument `out` is not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.

    Args:
        a (Union[int, float, bool, list, tuple, Tensor]): Array containing numbers
            whose variance is desired. If `a` is not an array, a conversion is attempted.
        axis (Union[int, tuple of int, None], optional): Axis or axes along which the variance is
            computed. The default is to compute the variance of the flattened array.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.
        ddof (int, optional): “Delta Degrees of Freedom”: the divisor used in the calculation is
            ``N - ddof``, where `N` represents the number of non-NaN elements. By default `ddof`
            is zero.
        keepdims (boolean, optional): defaults to False. If this is set to True, the axes which
            are reduced are left in the result as dimensions with size one. With this option,
            the result will broadcast correctly against the original `a`.

    Returns:
        Tensor.

    Raises:
        ValueError: if axes are out of the range of ``[-a.ndim, a.ndim)``, or
            if the axes contain duplicates.

    Supported Platforms:
        ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.array([[1, np.nan], [3, 4]])
        >>> output = np.nanvar(a)
        >>> print(output)
        1.5555557
        >>> output = np.nanvar(a, axis=0)
        >>> print(output)
        [1. 0.]
        >>> output = np.nanvar(a, axis=1)
        >>> print(output)
        [0. 0.25]
    """
    return _reduce(a, functools.partial(_nanvar, ddof=ddof, keepdims=keepdims), axis=axis,
                   keepdims=keepdims, dtype=dtype)


def nanstd(a, axis=None, dtype=None, ddof=0, keepdims=False):
    """
    Computes the standard deviation along the specified axis, while ignoring NaNs.

    Returns the standard deviation, a measure of the spread of a distribution, of the non-NaN
    array elements. The standard deviation is computed for the flattened array by default,
    otherwise over the specified axis.

    Note:
        Numpy argument `out` is not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.

    Args:
        a (Union[int, float, bool, list, tuple, Tensor]): Calculates the standard deviation of
            the non-NaN values.
        axis (Union[int, tuple of int, None], optional): Axis or axes along which the standard
            deviation is computed. The default is to compute the standard deviation of the
            flattened array.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.
        ddof (int, optional): “Delta Degrees of Freedom”: the divisor used in the calculation is
            ``N - ddof``, where `N` represents the number of non-NaN elements. By default `ddof`
            is zero.
        keepdims (boolean, optional): defaults to False. If this is set to True, the axes which
            are reduced are left in the result as dimensions with size one. With this option,
            the result will broadcast correctly against the original `a`.

    Returns:
        Tensor.

    Raises:
        ValueError: if axes are out of the range of ``[-a.ndim, a.ndim)``, or
            if the axes contain duplicates.

    Supported Platforms:
        ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.array([[1, np.nan], [3, 4]])
        >>> output = np.nanstd(a)
        >>> print(output)
        1.2472192
        >>> output = np.nanstd(a, axis=0)
        >>> print(output)
        [1. 0.]
        >>> output = np.nanstd(a, axis=1)
        >>> print(output)
        [0. 0.5]
    """
    return _reduce(a, lambda a, axis: F.sqrt(_nanvar(a, axis, ddof=ddof, keepdims=keepdims)),
                   axis=axis, keepdims=keepdims, dtype=dtype)


def exp2(x, dtype=None):
    """
    Calculates ``2**p`` for all p in the input array.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.

    Args:
        x (Tensor): input values.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, element-wise 2 to the power `x`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([2, 3]).astype(np.float32)
        >>> output = np.exp2(x)
        >>> print(output)
        [4. 8.]
    """
    return _apply_tensor_op(lambda x: F.tensor_pow(2, x), x, dtype=dtype)


def kron(a, b):
    """
    Kronecker product of two arrays.

    Computes the Kronecker product, a composite array made of blocks of the second
    array scaled by the first.

    Args:
        a (Union[int, float, bool, list, tuple, Tensor]): input values.
        b (Union[int, float, bool, list, tuple, Tensor]): input values.

    Returns:
        Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.kron([1,10,100], [5,6,7])
        >>> print(output)
        [ 5 6 7 50 60 70 500 600 700]
        >>> output = np.kron([5,6,7], [1,10,100])
        >>> print(output)
        [ 5 50 500 6 60 600 7 70 700]
        >>> output = np.kron(np.eye(2), np.ones((2,2)))
        >>> print(output)
        [[1. 1. 0. 0.]
        [1. 1. 0. 0.]
        [0. 0. 1. 1.]
        [0. 0. 1. 1.]]
    """
    a, b = _to_tensor(a, b)
    ndim = _max(F.rank(a), F.rank(b))
    if ndim == 0:
        return F.tensor_mul(a, b)
    a = _expand(a, ndim)
    b = _expand(b, ndim)
    shape_a = F.shape(a)
    shape_b = F.shape(b)
    # scales a by the shape of b
    kron_shape = _seq_prod(shape_a, shape_b)
    a = F.reshape(a, _add_unit_axes(shape_a, 2*ndim, True))
    a = F.tile(a, _add_unit_axes(shape_b, 2*ndim, False))
    a = moveaxis(a, F.make_range(ndim, 2*ndim), F.make_range(1, 2*ndim, 2))
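    # Interleaving the tiled axes expands every element of `a` into a
    # contiguous block of shape `shape_b` in the result.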
    a = F.reshape(a, kron_shape)
    # scales b by the shape of a
    b = F.tile(b, shape_a)
    return F.tensor_mul(a, b)


def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
    """
    Returns the cross product of two (arrays of) vectors.

    The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular to both
    `a` and `b`. If `a` and `b` are arrays of vectors, the vectors are defined by the
    last axis of `a` and `b` by default, and these axes can have dimensions 2 or 3.
    Where the dimension of either `a` or `b` is 2, the third component of the input
    vector is assumed to be zero and the cross product calculated accordingly. In cases
    where both input vectors have dimension 2, the z-component of the cross product is
    returned.

    Args:
        a (Union[int, float, bool, list, tuple, Tensor]): Components of the first vector(s).
        b (Union[int, float, bool, list, tuple, Tensor]): Components of the second vector(s).
        axisa (int, optional): Axis of `a` that defines the vector(s). By default, the last
            axis.
        axisb (int, optional): Axis of `b` that defines the vector(s). By default, the last
            axis.
        axisc (int, optional): Axis of `c` containing the cross product vector(s). Ignored
            if both input vectors have dimension 2, as the return is scalar. By default,
            the last axis.
        axis (int, optional): If defined, the axis of `a`, `b` and `c` that defines the
            vector(s) and cross product(s). Overrides `axisa`, `axisb` and `axisc`.

    Returns:
        Tensor, vector cross product(s).

    Raises:
        ValueError: when the dimensions of the vector(s) in `a` and/or `b` do not equal
            2 or 3.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([[1,2,3], [4,5,6]])
        >>> y = np.array([[4,5,6], [1,2,3]])
        >>> output = np.cross(x, y)
        >>> print(output)
        [[-3  6 -3]
         [ 3 -6  3]]
        >>> output = np.cross(x, y, axisc=0)
        >>> print(output)
        [[-3  3]
         [ 6 -6]
         [-3  3]]
    """
    a, b = _to_tensor(a, b)
    if axis is not None:
        axisa, axisb, axisc = axis, axis, axis

    _check_axis_in_range(axisa, F.rank(a))
    _check_axis_in_range(axisb, F.rank(b))
    a = moveaxis(a, axisa, -1)
    b = moveaxis(b, axisb, -1)
    shape_a = F.shape(a)
    shape_b = F.shape(b)
    if F.shape(a)[-1] not in (2, 3) or F.shape(b)[-1] not in (2, 3):
        _raise_value_error('incompatible dimensions for cross product (dimension must be 2 or 3)')
    a_has_z = shape_a[-1] == 3
    b_has_z = shape_b[-1] == 3
    shape_out = _infer_out_shape(shape_a[:-1], shape_b[:-1])
    if a_has_z or b_has_z:
        shape_out += (3,)
    _check_axis_in_range(axisc, len(shape_out))

    dtype = _promote(F.dtype(a), F.dtype(b))
    if _get_device() == 'CPU':
        # F.tensor_slice only supports float on CPU
        if not _check_is_float(F.dtype(a)):
            a = F.cast(a, mstype.float32)
        if not _check_is_float(F.dtype(b)):
            b = F.cast(b, mstype.float32)

    a_slice_start = _list_comprehensions(F.rank(a) - 1, 0, True)
    a_slice_size = shape_a[:-1] + (1,)
    b_slice_start = _list_comprehensions(F.rank(b) - 1, 0, True)
    b_slice_size = shape_b[:-1] + (1,)

    def _get_slice_product(idx_a, idx_b):
        return multiply(F.tensor_slice(a, a_slice_start + (idx_a,), a_slice_size),
                        F.tensor_slice(b, b_slice_start + (idx_b,), b_slice_size))

    cz = F.tensor_sub(_get_slice_product(0, 1), _get_slice_product(1, 0))  # ax*by - ay*bx
    if not a_has_z and not b_has_z:
        return F.reshape(cz, shape_out).astype(dtype)

    if a_has_z and b_has_z:
        cx = F.tensor_sub(_get_slice_product(1, 2), _get_slice_product(2, 1))  # ay*bz - az*by
        cy = F.tensor_sub(_get_slice_product(2, 0), _get_slice_product(0, 2))  # az*bx - ax*bz
    elif a_has_z:
        # b has no z component, so only indices 0 and 1 are valid for b here.
        cx = F.neg_tensor(_get_slice_product(2, 1))  # -az*by
        cy = _get_slice_product(2, 0)                # az*bx
    else:  # b_has_z
        cx = _get_slice_product(1, 2)                # ay*bz
        cy = F.neg_tensor(_get_slice_product(0, 2))  # -ax*bz
    res = _concat((cx, cy, cz)).reshape(shape_out)
    return moveaxis(res, -1, axisc).astype(dtype)
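
# Sanity sketch for the component formulas used above, with plain Python floats
# (matches the first row of the docstring example; illustrative only):
#
#     >>> ax, ay, az = 1., 2., 3.
#     >>> bx, by, bz = 4., 5., 6.
#     >>> (ay*bz - az*by, az*bx - ax*bz, ax*by - ay*bx)
#     (-3.0, 6.0, -3.0)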


def ceil(x, dtype=None):
    """
    Returns the ceiling of the input, element-wise.

    The ceil of the scalar `x` is the smallest integer `i`, such that ``i >= x``.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On GPU, the supported dtypes are np.float16 and np.float32.

    Args:
        x (Tensor): input values.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the ceiling of each element in `x`. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
        >>> output = np.ceil(a)
        >>> print(output)
        [-1. -1. -0.  1.  2.  2.  2.]
    """
    return _apply_tensor_op(lambda x: F.neg_tensor(F.floor(F.neg_tensor(x.astype(mstype.float32)))),
                            x, dtype=dtype)
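
# `ceil` is built from `floor` via the identity ceil(x) == -floor(-x); a quick
# check with Python's math module (illustrative only):
#
#     >>> import math
#     >>> all(math.ceil(v) == -math.floor(-v) for v in (-1.7, -0.2, 0.2, 1.5, 2.0))
#     True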


def _infer_shape_rem(shape1, shape2, ndim1, ndim2, transpose_b):
    """Infers the shape of the last two dimensions after performing matmul."""
    shape_rem = ()
    if ndim1 >= 2:
        shape_rem += (shape1[-2],)
    if transpose_b:
        if ndim2 >= 2:
            shape_rem += (shape2[-2],)
    else:
        # the guard must consult ndim2, since it protects indexing into shape2
        if ndim2 >= 1:
            shape_rem += (shape2[-1],)
    return shape_rem
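
# Hypothetical traces of the helper above under the matmul shape rules (not
# doctests; shown only to illustrate the expected remainders):
#
#     _infer_shape_rem((4, 5), (5, 6), 2, 2, transpose_b=False)  # -> (4, 6)
#     _infer_shape_rem((4, 5), (6, 5), 2, 2, transpose_b=True)   # -> (4, 6)
#     _infer_shape_rem((4, 5), (5,),   2, 1, transpose_b=False)  # -> (4, 5)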


def positive(a, dtype=None):
    """
    Numerical positive, element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        a (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.asarray([1, -1]).astype('float32')
        >>> output = np.positive(a)
        >>> print(output)
        [ 1. -1.]
    """
    _check_input_tensor(a)
    neg_tensor = F.neg_tensor(a)
    return _apply_tensor_op(F.neg_tensor, neg_tensor, dtype=dtype)


def negative(a, dtype=None):
    """
    Numerical negative, element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        a (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.asarray([1, -1]).astype('float32')
        >>> output = np.negative(a)
        >>> print(output)
        [-1.  1.]
    """
    return _apply_tensor_op(F.neg_tensor, a, dtype=dtype)


def cumsum(a, axis=None, dtype=None):
    """
    Returns the cumulative sum of the elements along a given axis.

    Note:
        If ``a.dtype`` is :class:`int8`, :class:`int16` or :class:`bool`, the result
        `dtype` will be elevated to :class:`int32`.

    Args:
        a (Tensor): Input tensor.
        axis (int, optional): Axis along which the cumulative sum is computed. The
            default (None) is to compute the cumsum over the flattened array.
        dtype (:class:`mindspore.dtype`, optional): If not specified, stay the same as `a`,
            unless `a` has an integer dtype with a precision less than that of the
            default platform integer. In that case, the default platform integer
            is used.

    Returns:
        Tensor.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If axis is out of range.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.cumsum(np.ones((3,3)), axis=0)
        >>> print(output)
        [[1. 1. 1.]
         [2. 2. 2.]
         [3. 3. 3.]]
    """
    _check_input_tensor(a)
    original_dtype = F.dtype(a)
    # If the original tensor is int and has precision less than int32, elevate to int32
    if _check_same_type(original_dtype, mstype.bool_) or \
            _check_same_type(original_dtype, mstype.int8) or \
            _check_same_type(original_dtype, mstype.int16):
        original_dtype = mstype.int32
    # the accumulation runs in float32 and the result is cast back below
    a = a.astype(mstype.float32)
    if axis is None:
        a = a.ravel()
        axis = 0
    _check_axis_in_range(axis, a.ndim)
    if dtype is not None and not _check_same_type(original_dtype, dtype):
        return _cumsum_default(a, axis).astype(dtype, copy=False)
    return _cumsum_default(a, axis).astype(original_dtype, copy=False)
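
# Dtype elevation sketch (a hypothetical session; since the accumulation runs
# in float32 and is cast back, low-precision integer inputs come out as int32):
#
#     >>> import mindspore.numpy as np
#     >>> out = np.cumsum(np.ones(3).astype(np.int8))
#     >>> print(out)
#     [1 2 3]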


def nancumsum(a, axis=None, dtype=None):
    """
    Return the cumulative sum of array elements over a given axis treating Not a Numbers (NaNs)
    as zero. The cumulative sum does not change when NaNs are encountered and leading NaNs are
    replaced by zeros.

    Zeros are returned for slices that are all-NaN or empty.

    Note:
        If ``a.dtype`` is :class:`int8`, :class:`int16` or :class:`bool`, the result
        `dtype` will be elevated to :class:`int32`.

    Args:
        a (Tensor): Input tensor.
        axis (int, optional): Axis along which the cumulative sum is computed. The
            default (None) is to compute the cumsum over the flattened array.
        dtype (:class:`mindspore.dtype`, optional): If not specified, stay the same as `a`,
            unless `a` has an integer dtype with a precision less than that of the
            default platform integer. In that case, the default platform integer
            is used.

    Returns:
        Tensor.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If axis is out of range.

    Supported Platforms:
        ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.array([[1, 2], [3, np.nan]])
        >>> output = np.nancumsum(a)
        >>> print(output)
        [1. 3. 6. 6.]
        >>> output = np.nancumsum(a, axis=0)
        >>> print(output)
        [[1. 2.]
         [4. 2.]]
        >>> output = np.nancumsum(a, axis=1)
        >>> print(output)
        [[1. 3.]
         [3. 3.]]
    """
    a = F.select(_isnan(a), zeros(F.shape(a), F.dtype(a)), a)
    return cumsum(a, axis=axis, dtype=dtype)
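
# The zero-fill above mirrors the plain NumPy recipe below (`onp` denotes
# standard NumPy, shown only to illustrate the equivalence):
#
#     >>> import numpy as onp
#     >>> a = onp.array([[1, 2], [3, onp.nan]])
#     >>> print(onp.where(onp.isnan(a), 0, a).cumsum())
#     [1. 3. 6. 6.]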


def cbrt(x, dtype=None):
    """
    Returns the cube-root of a tensor, element-wise.

    Note:
        Numpy arguments `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.asarray([1, -1, 3, -8, 64])
        >>> output = np.cbrt(a)
        >>> print(output)
        [ 1.        -1.         1.4422495 -2.         4.       ]
    """
    def _cbrt(x):
        compute_type = promote_types(x.dtype, "float32")
        x = x.astype(compute_type)
        # TODO: use P.Sign() once gpu support is added
        abs_x = F.absolute(x)
        sign_x = abs_x / x
        return sign_x * F.tensor_pow(abs_x, 1. / 3.)
    return _apply_tensor_op(_cbrt, x, dtype=dtype)
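
# `abs_x / x` reproduces sign(x) for nonzero x (it is NaN at x == 0, which the
# TODO above would fix). The identity behind `_cbrt`, in plain Python:
#
#     >>> x = -8.0
#     >>> round((abs(x) / x) * abs(x) ** (1. / 3.), 6)
#     -2.0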


def log1p(x, dtype=None):
    """
    Returns the natural logarithm of one plus the input array, element-wise.

    Calculates ``log(1 + x)``.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input array.
        dtype (:class:`mindspore.dtype`): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([1, 2, 3]).astype('float16')
        >>> output = np.log1p(x)
        >>> print(output)
        [0.6934 1.099 1.387 ]
    """
    return _apply_tensor_op(lambda x: F.log(x + 1), x, dtype=dtype)


def logaddexp(x1, x2, dtype=None):
    """
    Logarithm of the sum of exponentiations of the inputs.

    Calculates ``log(exp(x1) + exp(x2))``. This function is useful in statistics where the
    calculated probabilities of events may be so small as to exceed the range of normal
    floating point numbers. In such cases the logarithm of the calculated probability is
    stored. This function allows adding probabilities stored in such a fashion.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): Input array.
        x2 (Tensor): Input array. If ``x1.shape != x2.shape``, they must be broadcastable to
            a common shape (which becomes the shape of the output).
        dtype (:class:`mindspore.dtype`): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.array([1, 2, 3]).astype('float16')
        >>> x2 = np.array(2).astype('float16')
        >>> output = np.logaddexp(x1, x2)
        >>> print(output)
        [2.312 2.693 3.312]
    """
    def _logaddexp(x1, x2):
        return F.log(F.tensor_add(F.tensor_exp(x1), F.tensor_exp(x2)))
    return _apply_tensor_op(_logaddexp, x1, x2, dtype=dtype)
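
# Note that the direct form above can overflow for large inputs; the customary
# stable rewrite is max(x1, x2) + log1p(exp(-|x1 - x2|)). A plain-Python check
# that the two forms agree (illustrative only, not the implementation):
#
#     >>> import math
#     >>> a, b = 3.0, 2.0
#     >>> m = max(a, b)
#     >>> round(m + math.log1p(math.exp(-abs(a - b))), 6)
#     3.313262
#     >>> round(math.log(math.exp(a) + math.exp(b)), 6)
#     3.313262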


def log2(x, dtype=None):
    """
    Base-2 logarithm of `x`.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([2, 4, 8]).astype('float16')
        >>> output = np.log2(x)
        >>> print(output)
        [1. 2. 3.]
    """
    tensor_2 = _make_tensor(2, x.dtype)

    def _log2(x):
        # change of base: log2(x) = log(x) / log(2)
        return F.log(x) / F.log(tensor_2)

    return _apply_tensor_op(_log2, x, dtype=dtype)


def logaddexp2(x1, x2, dtype=None):
    """
    Logarithm of the sum of exponentiations of the inputs in base of 2.

    Calculates ``log2(2**x1 + 2**x2)``.
    This function is useful in machine learning when the calculated probabilities of events
    may be so small as to exceed the range of normal floating point numbers.
    In such cases the base-2 logarithm of the calculated probability can be used instead.
    This function allows adding probabilities stored in such a fashion.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): Input tensor.
        x2 (Tensor): Input tensor. If ``x1.shape != x2.shape``, they must be broadcastable to
            a common shape (which becomes the shape of the output).
        dtype (:class:`mindspore.dtype`): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.array([2, 4, 8]).astype('float16')
        >>> x2 = np.array(2).astype('float16')
        >>> output = np.logaddexp2(x1, x2)
        >>> print(output)
        [3. 4.32 8.02]
    """
    _check_input_tensor(x1, x2)
    add_exp = F.tensor_add(F.tensor_pow(2, x1), F.tensor_pow(2, x2))
    return log2(add_exp, dtype=dtype)


def log10(x, dtype=None):
    """
    Base-10 logarithm of `x`.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([10, 100, 1000]).astype('float16')
        >>> output = np.log10(x)
        >>> print(output)
        [1. 2. 3.]
    """
    tensor_10 = _make_tensor(10, x.dtype)

    def _log10(x):
        # change of base: log10(x) = log(x) / log(10)
        return F.log(x) / F.log(tensor_10)

    return _apply_tensor_op(_log10, x, dtype=dtype)


def _cast_type_for_trigonometric(x):
    _check_input_tensor(x)
    if x.dtype != mstype.float16 and x.dtype != mstype.float32 and x.dtype != mstype.float64:
        # Promote non-float inputs to a float type before the trigonometric op.
        dtype = _promote_for_trigonometric(x.dtype)
        x = F.cast(x, dtype)
    return x
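
# Why `and` rather than `or` in the guard above: with `or` the test is true for
# every dtype (a float32 input still differs from float16), so every input
# would be recast. Truth-table sketch in plain Python:
#
#     >>> d = 'float32'
#     >>> (d != 'float16') or (d != 'float32')    # always True
#     True
#     >>> (d != 'float16') and (d != 'float32')   # False for float inputs
#     False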


def sin(x, dtype=None):
    """
    Trigonometric sine, element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([-5, -1, 0, 2, 4, 100]).astype('float32')
        >>> output = np.sin(x)
        >>> print(output)
        [ 0.9589243 -0.84147096 0. 0.9092974 -0.7568025 -0.50636566]
    """
    x = _cast_type_for_trigonometric(x)
    return _apply_tensor_op(F.sin, x, dtype=dtype)


def cos(x, dtype=None):
    """
    Cosine element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(5).astype('float32')
        >>> print(np.cos(x))
        [ 1. 0.5403023 -0.41614684 -0.9899925 -0.6536436 ]
    """
    x = _cast_type_for_trigonometric(x)
    return _apply_tensor_op(F.cos, x, dtype=dtype)


def tan(x, dtype=None):
    """
    Computes tangent element-wise.

    Equivalent to :math:`np.sin(x)/np.cos(x)` element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.

    Raises:
        TypeError: If the input is not a tensor or its :class:`tensor.dtype` is
            :class:`mindspore.float64`.

    Supported Platforms:
        ``Ascend`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([-5, -1, 0, 2, 4, 100]).astype('float32')
        >>> print(np.tan(x))
        [ 3.380515 -1.5574077 0. -2.1850398 1.1578213 -0.58721393]
    """
    x = _cast_type_for_trigonometric(x)
    return _apply_tensor_op(F.tan, x, dtype=dtype)


def arcsin(x, dtype=None):
    """
    Inverse sine, element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor. y-coordinate on the unit circle.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor.

    Raises:
        TypeError: If the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.asarray([1, -1], np.float32)
        >>> output = np.arcsin(x)
        >>> print(output)
        [ 1.5707964 -1.5707964]
    """
    x = _cast_type_for_trigonometric(x)
    return _apply_tensor_op(F.asin, x, dtype=dtype)


def arccos(x, dtype=None):
    """
    Trigonometric inverse cosine, element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor. x-coordinate on the unit circle.
            For real arguments, the domain is :math:`[-1, 1]`.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor.

    Raises:
        TypeError: If the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.asarray([1, -1], np.float32)
        >>> output = np.arccos(x)
        >>> print(output)
        [0. 3.1415927]
    """
    x = _cast_type_for_trigonometric(x)
    return _apply_tensor_op(F.acos, x, dtype=dtype)


def arctan(x, dtype=None):
    """
    Trigonometric inverse tangent, element-wise.

    The inverse of tan, so that if :math:`y = tan(x)` then :math:`x = arctan(y)`.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(5).astype('float32')
        >>> print(np.arctan(x))
        [0.        0.7853982 1.1071488 1.2490457 1.3258177]
    """
    x = _cast_type_for_trigonometric(x)
    return _apply_tensor_op(F.atan, x, dtype=dtype)


def sinh(x, dtype=None):
    """
    Hyperbolic sine, element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(5).astype('float32')
        >>> print(np.sinh(x))
        [ 0. 1.1752012 3.6268604 10.017875 27.289917 ]
    """
    x = _cast_type_for_trigonometric(x)
    return _apply_tensor_op(F.sinh, x, dtype=dtype)


def cosh(x, dtype=None):
    """
    Hyperbolic cosine, element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(5).astype('float32')
        >>> print(np.cosh(x))
        [ 1. 1.5430807 3.7621956 10.067662 27.308233 ]
    """
    x = _cast_type_for_trigonometric(x)
    return _apply_tensor_op(F.cosh, x, dtype=dtype)


def tanh(x, dtype=None):
    """
    Computes hyperbolic tangent element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(5).astype('float32')
        >>> print(np.tanh(x))
        [0. 0.7615942 0.9640276 0.9950548 0.9993293]
    """
    x = _cast_type_for_trigonometric(x)
    return _apply_tensor_op(F.tanh, x, dtype=dtype)


def arcsinh(x, dtype=None):
    """
    Inverse hyperbolic sine element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(5).astype('float32')
        >>> print(np.arcsinh(x))
        [0. 0.8813736 1.4436355 1.8184465 2.0947125]
    """
    x = _cast_type_for_trigonometric(x)
    return _apply_tensor_op(F.asinh, x, dtype=dtype)


def arccosh(x, dtype=None):
    """
    Inverse hyperbolic cosine, element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(1, 5).astype('float32')
        >>> print(np.arccosh(x))
        [0. 1.316958 1.7627472 2.063437 ]
    """
    x = _cast_type_for_trigonometric(x)
    return _apply_tensor_op(F.acosh, x, dtype=dtype)


def arctanh(x, dtype=None):
    """
    Inverse hyperbolic tangent element-wise.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x (Tensor): Input tensor.
        dtype (:class:`mindspore.dtype`, optional): Default: :class:`None`. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar. This is a scalar if `x` is a scalar.

    Supported Platforms:
        ``Ascend`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([-0.99, -0.75, -0.5, 0, 0.5]).astype('float32')
        >>> print(np.arctanh(x))
        [-2.646653 -0.97295505 -0.54930615 0. 0.54930615]
    """
    x = _cast_type_for_trigonometric(x)
    return _apply_tensor_op(F.atanh, x, dtype=dtype)


def arctan2(x1, x2, dtype=None):
    """
    Element-wise arc tangent of :math:`x1/x2` choosing the quadrant correctly.

    Note:
        Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.

    Args:
        x1 (Tensor): input tensor.
        x2 (Tensor): input tensor.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the element-wise arc tangent of :math:`x1/x2`. This is a scalar
        if both `x1` and `x2` are scalars.

    Supported Platforms:
        ``Ascend`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.array([-1, +1, +1, -1])
        >>> x2 = np.array([-1, -1, +1, +1])
        >>> output = np.arctan2(x1, x2)
        >>> print(output)
        [-2.3561945 2.3561945 0.78539819 -0.78539819]
    """
    x1 = _cast_type_for_trigonometric(x1)
    x2 = _cast_type_for_trigonometric(x2)
    return _apply_tensor_op(F.atan2, x1, x2, dtype=dtype)


def promote_types(type1, type2):
    """
    Returns the data type with the smallest size and smallest scalar kind to which
    both `type1` and `type2` may be safely cast.

    Note:
        The promotion rule is slightly different from original Numpy, but more like
        jax, due to the preference on ``32-bit`` over ``64-bit`` data types.

    Args:
        type1 (Union[:class:`mindspore.dtype`, str]): First data type.
        type2 (Union[:class:`mindspore.dtype`, str]): Second data type.

    Returns:
        The promoted data type.

    Raises:
        TypeError: if the inputs are not valid :class:`mindspore.dtype` input.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.promote_types(np.float32, np.float64)
        >>> print(output)
        np.float64
    """
    type1 = _check_dtype(type1)
    type2 = _check_dtype(type2)
    return _promote(type1, type2)


def _apply_tensor_op(fn, *args, dtype=None):
    """Applies tensor operations based on fn"""
    args = _to_tensor(*args)
    if isinstance(args, Tensor):
        res = fn(args)
    else:
        res = fn(*args)
    if dtype is not None and not _check_same_type(F.dtype(res), dtype):
        res = F.cast(res, dtype)
    return res
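
# Behavior sketch for the `dtype` override threaded through the wrappers above
# (a hypothetical session; `F` and `mstype` are this module's imports):
#
#     >>> out = _apply_tensor_op(F.neg_tensor, [1.0, -1.0], dtype=mstype.float16)
#     >>> print(out)
#     [-1.  1.]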