- /*******************************************************************************
- Copyright (c) 2021, The OpenBLAS Project
- All rights reserved.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- 3. Neither the name of the OpenBLAS project nor the names of
- its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *******************************************************************************/
- #define ASSEMBLER
-
- #include "common.h"
-
- /* Function parameters */
- #define M $r4 // param 1: bm
- #define N $r5 // param 2: bn
- #define K $r6 // param 3: bk
- #define ALPHA $f0 // param 4: alpha
- #define A $r7 // param 5: ba
- #define B $r8 // param 6: bb
- #define C $r9 // param 7: bc
- #define LDC $r10 // param 8: ldc
-
- #ifdef TRMMKERNEL
- #define OFFSET $r11 // param 9: offset
- #endif
- #define OFF $r12
-
- /* Loop control parameters */
- #define I $r13
- #define J $r14
- #define L $r15
- #define TL $r16
- /* Matrix address */
- #define A0 $r17
- #define B0 $r18
- #define C0 $r19
- #define C1 $r20
- #define C2 $r23
- #define C3 $r24
- #define T0 $r25 /* !! DO NOT USE $r21 and $r22 !! */
- #define T1 $r26
- #define T2 $r27
- #define ZERO $r0
-
- /* LSX vectors */
- #define U0 $vr0
- #define U1 $vr1
- #define U2 $vr2
- #define U3 $vr3
- #define U4 $vr4
- #define U5 $vr5
- #define U6 $vr6
- #define U7 $vr7
- #define U8 $vr8
- #define U9 $vr9
- #define U10 $vr10
- #define U11 $vr11
- #define U12 $vr12
- #define U13 $vr13
- #define U14 $vr14
- #define U15 $vr15
- #define D0 $vr16
- #define D1 $vr17
- #define D2 $vr18
- #define D3 $vr19
- #define D4 $vr20
- #define D5 $vr21
- #define D6 $vr22
- #define D7 $vr23
- #define D8 $vr24
- #define D9 $vr25
- #define D10 $vr26
- #define D11 $vr27
- #define D12 $vr28
- #define D13 $vr29
- #define D14 $vr30
- #define D15 $vr31
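- /* Note: VALPHA shares $vr15 with U15, so alpha is re-broadcast from the stack (sp+104) before scaling C */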
- #define VALPHA $vr15
-
- /* Prefetch interval */
- #define A_PRE 0x200
- #define B_PRE 0x100
-
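- /* Macro naming: KERNEL<k>x<m>x<n> computes <k> K-iterations of an <m>x<n> tile of C.
- * The 2x macros are split into two half-steps that ping-pong between U0~U7 and
- * U8~U15, so the loads of one half-step overlap with the FMAs of the other.
- */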
- .macro KERNEL2x8x4
- vld U0, A0, 0x00
- vfmadd.d D0, U8, U12, D0
- vfmadd.d D1, U9, U12, D1
-
- vld U1, A0, 0x10
- vfmadd.d D2, U10, U12, D2
- vfmadd.d D3, U11, U12, D3
-
- vld U2, A0, 0x20
- vfmadd.d D4, U8, U13, D4
- vfmadd.d D5, U9, U13, D5
-
- vld U3, A0, 0x30
- vfmadd.d D6, U10, U13, D6
- vfmadd.d D7, U11, U13, D7
-
- vldrepl.d U4, B0, 0x00
- vfmadd.d D8, U8, U14, D8
- vfmadd.d D9, U9, U14, D9
-
- preld 0, B0, B_PRE
- vldrepl.d U5, B0, 0x08
- vfmadd.d D10, U10, U14, D10
- vfmadd.d D11, U11, U14, D11
-
- preld 0, A0, A_PRE
- vldrepl.d U6, B0, 0x10
- vfmadd.d D12, U8, U15, D12
- vfmadd.d D13, U9, U15, D13
-
- preld 0, A0, A_PRE + 0x40
- vldrepl.d U7, B0, 0x18
- vfmadd.d D14, U10, U15, D14
- vfmadd.d D15, U11, U15, D15
-
- addi.d A0, A0, 0x40
- addi.d B0, B0, 0x20
-
- vld U8, A0, 0x00
- vfmadd.d D0, U0, U4, D0
- vfmadd.d D1, U1, U4, D1
-
- vld U9, A0, 0x10
- vfmadd.d D2, U2, U4, D2
- vfmadd.d D3, U3, U4, D3
-
- vld U10, A0, 0x20
- vfmadd.d D4, U0, U5, D4
- vfmadd.d D5, U1, U5, D5
-
- vld U11, A0, 0x30
- vfmadd.d D6, U2, U5, D6
- vfmadd.d D7, U3, U5, D7
-
- vldrepl.d U12, B0, 0x00
- vfmadd.d D8, U0, U6, D8
- vfmadd.d D9, U1, U6, D9
-
- preld 0, B0, B_PRE
- vldrepl.d U13, B0, 0x08
- vfmadd.d D10, U2, U6, D10
- vfmadd.d D11, U3, U6, D11
-
- preld 0, A0, A_PRE
- vldrepl.d U14, B0, 0x10
- vfmadd.d D12, U0, U7, D12
- vfmadd.d D13, U1, U7, D13
-
- preld 0, A0, A_PRE + 0x40
- vldrepl.d U15, B0, 0x18
- vfmadd.d D14, U2, U7, D14
- vfmadd.d D15, U3, U7, D15
-
- addi.d A0, A0, 0x40
- addi.d B0, B0, 0x20
- .endm
-
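- /* Same as KERNEL2x8x4, except the second half-step only consumes the data already
- * in U0~U7 and issues no further preloads, draining the pipeline at loop exit.
- */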
- .macro KERNEL2x8x4_END
- vld U0, A0, 0x00
- vfmadd.d D0, U8, U12, D0
- vfmadd.d D1, U9, U12, D1
-
- vld U1, A0, 0x10
- vfmadd.d D2, U10, U12, D2
- vfmadd.d D3, U11, U12, D3
-
- vld U2, A0, 0x20
- vfmadd.d D4, U8, U13, D4
- vfmadd.d D5, U9, U13, D5
-
- vld U3, A0, 0x30
- vfmadd.d D6, U10, U13, D6
- vfmadd.d D7, U11, U13, D7
-
- vldrepl.d U4, B0, 0x00
- vfmadd.d D8, U8, U14, D8
- vfmadd.d D9, U9, U14, D9
-
- preld 0, B0, B_PRE
- vldrepl.d U5, B0, 0x08
- vfmadd.d D10, U10, U14, D10
- vfmadd.d D11, U11, U14, D11
-
- preld 0, A0, A_PRE
- vldrepl.d U6, B0, 0x10
- vfmadd.d D12, U8, U15, D12
- vfmadd.d D13, U9, U15, D13
-
- preld 0, A0, A_PRE + 0x40
- vldrepl.d U7, B0, 0x18
- vfmadd.d D14, U10, U15, D14
- vfmadd.d D15, U11, U15, D15
-
- addi.d A0, A0, 0x40
- addi.d B0, B0, 0x20
-
- vfmadd.d D0, U0, U4, D0
- vfmadd.d D1, U1, U4, D1
-
- vfmadd.d D2, U2, U4, D2
- vfmadd.d D3, U3, U4, D3
-
- vfmadd.d D4, U0, U5, D4
- vfmadd.d D5, U1, U5, D5
-
- vfmadd.d D6, U2, U5, D6
- vfmadd.d D7, U3, U5, D7
-
- vfmadd.d D8, U0, U6, D8
- vfmadd.d D9, U1, U6, D9
-
- preld 0, B0, B_PRE
- vfmadd.d D10, U2, U6, D10
- vfmadd.d D11, U3, U6, D11
-
- preld 0, A0, A_PRE
- vfmadd.d D12, U0, U7, D12
- vfmadd.d D13, U1, U7, D13
-
- preld 0, A0, A_PRE + 0x40
- vfmadd.d D14, U2, U7, D14
- vfmadd.d D15, U3, U7, D15
- .endm
-
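- /* KERNEL8xMxN unrolls 8 K-iterations (four copies of the 2x macro); the _END form
- * uses three copies plus the draining variant for the final 8 iterations.
- */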
- .macro KERNEL8x8x4
- .rept 4
- KERNEL2x8x4
- .endr
- .endm
-
- .macro KERNEL8x8x4_END
- .rept 3
- KERNEL2x8x4
- .endr
- KERNEL2x8x4_END
- .endm
-
- .macro KERNEL2x4x4
- vld U0, A0, 0x00
- vld U1, A0, 0x10
-
- vldrepl.d U4, B0, 0x00
- vfmadd.d D0, U8, U12, D0
- vfmadd.d D1, U9, U12, D1
-
- vldrepl.d U5, B0, 0x08
- vfmadd.d D4, U8, U13, D4
- vfmadd.d D5, U9, U13, D5
-
- vldrepl.d U6, B0, 0x10
- vfmadd.d D8, U8, U14, D8
- vfmadd.d D9, U9, U14, D9
-
- vldrepl.d U7, B0, 0x18
- vfmadd.d D12, U8, U15, D12
- vfmadd.d D13, U9, U15, D13
-
- addi.d A0, A0, 0x20
- addi.d B0, B0, 0x20
-
- vld U8, A0, 0x00
- vld U9, A0, 0x10
-
- vldrepl.d U12, B0, 0x00
- vfmadd.d D0, U0, U4, D0
- vfmadd.d D1, U1, U4, D1
-
- vldrepl.d U13, B0, 0x08
- vfmadd.d D4, U0, U5, D4
- vfmadd.d D5, U1, U5, D5
-
- vldrepl.d U14, B0, 0x10
- vfmadd.d D8, U0, U6, D8
- vfmadd.d D9, U1, U6, D9
-
- vldrepl.d U15, B0, 0x18
- vfmadd.d D12, U0, U7, D12
- vfmadd.d D13, U1, U7, D13
-
- addi.d A0, A0, 0x20
- addi.d B0, B0, 0x20
- .endm
-
- .macro KERNEL2x4x4_END
- vld U0, A0, 0x00
- vld U1, A0, 0x10
-
- vldrepl.d U4, B0, 0x00
- vfmadd.d D0, U8, U12, D0
- vfmadd.d D1, U9, U12, D1
-
- vldrepl.d U5, B0, 0x08
- vfmadd.d D4, U8, U13, D4
- vfmadd.d D5, U9, U13, D5
-
- vldrepl.d U6, B0, 0x10
- vfmadd.d D8, U8, U14, D8
- vfmadd.d D9, U9, U14, D9
-
- vldrepl.d U7, B0, 0x18
- vfmadd.d D12, U8, U15, D12
- vfmadd.d D13, U9, U15, D13
-
- addi.d A0, A0, 0x20
- addi.d B0, B0, 0x20
-
- vfmadd.d D0, U0, U4, D0
- vfmadd.d D1, U1, U4, D1
- vfmadd.d D4, U0, U5, D4
- vfmadd.d D5, U1, U5, D5
- vfmadd.d D8, U0, U6, D8
- vfmadd.d D9, U1, U6, D9
- vfmadd.d D12, U0, U7, D12
- vfmadd.d D13, U1, U7, D13
- .endm
-
- .macro KERNEL8x4x4
- .rept 4
- KERNEL2x4x4
- .endr
- .endm
-
- .macro KERNEL8x4x4_END
- .rept 3
- KERNEL2x4x4
- .endr
- KERNEL2x4x4_END
- .endm
-
- .macro KERNEL2x2x4
- vldrepl.d U0, A0, 0x00
- vldrepl.d U1, A0, 0x08
-
- vfmadd.d D0, U8, U12, D0
- vfmadd.d D1, U8, U13, D1
- vfmadd.d D2, U9, U12, D2
- vfmadd.d D3, U9, U13, D3
-
- vld U4, B0, 0x00
- vld U5, B0, 0x10
- addi.d A0, A0, 0x10
- addi.d B0, B0, 0x20
-
- vldrepl.d U8, A0, 0x00
- vldrepl.d U9, A0, 0x08
-
- vfmadd.d D0, U0, U4, D0
- vfmadd.d D1, U0, U5, D1
- vfmadd.d D2, U1, U4, D2
- vfmadd.d D3, U1, U5, D3
-
- vld U12, B0, 0x00
- vld U13, B0, 0x10
- addi.d A0, A0, 0x10
- addi.d B0, B0, 0x20
- .endm
-
- .macro KERNEL2x2x4_END
- vldrepl.d U0, A0, 0x00
- vldrepl.d U1, A0, 0x08
-
- vfmadd.d D0, U8, U12, D0
- vfmadd.d D1, U8, U13, D1
- vfmadd.d D2, U9, U12, D2
- vfmadd.d D3, U9, U13, D3
-
- vld U4, B0, 0x00
- vld U5, B0, 0x10
- addi.d A0, A0, 0x10
- addi.d B0, B0, 0x20
-
- vfmadd.d D0, U0, U4, D0
- vfmadd.d D1, U0, U5, D1
- vfmadd.d D2, U1, U4, D2
- vfmadd.d D3, U1, U5, D3
- .endm
-
- .macro KERNEL8x2x4
- .rept 4
- KERNEL2x2x4
- .endr
- .endm
-
- .macro KERNEL8x2x4_END
- .rept 3
- KERNEL2x2x4
- .endr
- KERNEL2x2x4_END
- .endm
-
- .macro KERNEL2x1x4
- vldrepl.d U0, A0, 0x00
- vfmadd.d D0, U8, U12, D0
- vfmadd.d D1, U8, U13, D1
- vld U4, B0, 0x00
- vld U5, B0, 0x10
-
- vldrepl.d U8, A0, 0x08
- vfmadd.d D0, U0, U4, D0
- vfmadd.d D1, U0, U5, D1
- vld U12, B0, 0x20
- vld U13, B0, 0x30
-
- addi.d A0, A0, 0x10
- addi.d B0, B0, 0x40
- .endm
-
- .macro KERNEL2x1x4_END
- vldrepl.d U0, A0, 0x00
- vfmadd.d D0, U8, U12, D0
- vfmadd.d D1, U8, U13, D1
- vld U4, B0, 0x00
- vld U5, B0, 0x10
-
- addi.d A0, A0, 0x08
- addi.d B0, B0, 0x20
-
- vfmadd.d D0, U0, U4, D0
- vfmadd.d D1, U0, U5, D1
- .endm
-
- .macro KERNEL8x1x4
- .rept 4
- KERNEL2x1x4
- .endr
- .endm
-
- .macro KERNEL8x1x4_END
- .rept 3
- KERNEL2x1x4
- .endr
- KERNEL2x1x4_END
- .endm
-
- .macro KERNEL2x8x2
- vld U0, A0, 0x00
- vfmadd.d D0, U8, U12, D0
- vfmadd.d D1, U9, U12, D1
-
- vld U1, A0, 0x10
- vfmadd.d D2, U10, U12, D2
- vfmadd.d D3, U11, U12, D3
-
- vld U2, A0, 0x20
- vfmadd.d D4, U8, U13, D4
- vfmadd.d D5, U9, U13, D5
-
- vld U3, A0, 0x30
- vfmadd.d D6, U10, U13, D6
- vfmadd.d D7, U11, U13, D7
-
- vldrepl.d U4, B0, 0x00
- vldrepl.d U5, B0, 0x08
-
- addi.d A0, A0, 0x40
- addi.d B0, B0, 0x10
-
- vld U8, A0, 0x00
- vfmadd.d D0, U0, U4, D0
- vfmadd.d D1, U1, U4, D1
-
- vld U9, A0, 0x10
- vfmadd.d D2, U2, U4, D2
- vfmadd.d D3, U3, U4, D3
-
- vld U10, A0, 0x20
- vfmadd.d D4, U0, U5, D4
- vfmadd.d D5, U1, U5, D5
-
- vld U11, A0, 0x30
- vfmadd.d D6, U2, U5, D6
- vfmadd.d D7, U3, U5, D7
-
- vldrepl.d U12, B0, 0x00
- vldrepl.d U13, B0, 0x08
-
- addi.d A0, A0, 0x40
- addi.d B0, B0, 0x10
- .endm
-
- .macro KERNEL2x8x2_END
- vld U0, A0, 0x00
- vfmadd.d D0, U8, U12, D0
- vfmadd.d D1, U9, U12, D1
-
- vld U1, A0, 0x10
- vfmadd.d D2, U10, U12, D2
- vfmadd.d D3, U11, U12, D3
-
- vld U2, A0, 0x20
- vfmadd.d D4, U8, U13, D4
- vfmadd.d D5, U9, U13, D5
-
- vld U3, A0, 0x30
- vfmadd.d D6, U10, U13, D6
- vfmadd.d D7, U11, U13, D7
-
- vldrepl.d U4, B0, 0x00
- vldrepl.d U5, B0, 0x08
-
- addi.d A0, A0, 0x40
- addi.d B0, B0, 0x10
-
- vfmadd.d D0, U0, U4, D0
- vfmadd.d D1, U1, U4, D1
-
- vfmadd.d D2, U2, U4, D2
- vfmadd.d D3, U3, U4, D3
-
- vfmadd.d D4, U0, U5, D4
- vfmadd.d D5, U1, U5, D5
-
- vfmadd.d D6, U2, U5, D6
- vfmadd.d D7, U3, U5, D7
- .endm
-
- .macro KERNEL8x8x2
- .rept 4
- KERNEL2x8x2
- .endr
- .endm
-
- .macro KERNEL8x8x2_END
- .rept 3
- KERNEL2x8x2
- .endr
- KERNEL2x8x2_END
- .endm
-
- .macro KERNEL2x4x2
- vld U0, A0, 0x00
- vld U1, A0, 0x10
- vfmadd.d D0, U8, U12, D0
- vfmadd.d D1, U9, U12, D1
- vfmadd.d D4, U8, U13, D4
- vfmadd.d D5, U9, U13, D5
-
- vldrepl.d U4, B0, 0x00
- vldrepl.d U5, B0, 0x08
-
- vld U8, A0, 0x20
- vld U9, A0, 0x30
- vfmadd.d D0, U0, U4, D0
- vfmadd.d D1, U1, U4, D1
- vfmadd.d D4, U0, U5, D4
- vfmadd.d D5, U1, U5, D5
-
- vldrepl.d U12, B0, 0x10
- vldrepl.d U13, B0, 0x18
-
- addi.d A0, A0, 0x40
- addi.d B0, B0, 0x20
- .endm
-
- .macro KERNEL2x4x2_END
- vld U0, A0, 0x00
- vld U1, A0, 0x10
- vfmadd.d D0, U8, U12, D0
- vfmadd.d D1, U9, U12, D1
- vfmadd.d D4, U8, U13, D4
- vfmadd.d D5, U9, U13, D5
-
- vldrepl.d U4, B0, 0x00
- vldrepl.d U5, B0, 0x08
-
- addi.d A0, A0, 0x20
- addi.d B0, B0, 0x10
-
- vfmadd.d D0, U0, U4, D0
- vfmadd.d D1, U1, U4, D1
- vfmadd.d D4, U0, U5, D4
- vfmadd.d D5, U1, U5, D5
- .endm
-
- .macro KERNEL8x4x2
- .rept 4
- KERNEL2x4x2
- .endr
- .endm
-
- .macro KERNEL8x4x2_END
- .rept 3
- KERNEL2x4x2
- .endr
- KERNEL2x4x2_END
- .endm
-
- .macro KERNEL2x2x2
- vld U0, A0, 0x00
- vfmadd.d D0, U8, U12, D0
- vfmadd.d D4, U8, U13, D4
-
- vldrepl.d U4, B0, 0x00
- vldrepl.d U5, B0, 0x08
-
- vld U8, A0, 0x10
- vldrepl.d U12, B0, 0x10
- vldrepl.d U13, B0, 0x18
-
- vfmadd.d D0, U0, U4, D0
- vfmadd.d D4, U0, U5, D4
-
- addi.d A0, A0, 0x20
- addi.d B0, B0, 0x20
- .endm
-
- .macro KERNEL2x2x2_END
- vld U0, A0, 0x00
- vfmadd.d D0, U8, U12, D0
- vfmadd.d D4, U8, U13, D4
-
- vldrepl.d U4, B0, 0x00
- vldrepl.d U5, B0, 0x08
-
- addi.d A0, A0, 0x10
- addi.d B0, B0, 0x10
-
- vfmadd.d D0, U0, U4, D0
- vfmadd.d D4, U0, U5, D4
- .endm
-
- .macro KERNEL8x2x2
- .rept 4
- KERNEL2x2x2
- .endr
- .endm
-
- .macro KERNEL8x2x2_END
- .rept 3
- KERNEL2x2x2
- .endr
- KERNEL2x2x2_END
- .endm
-
- .macro KERNEL2x1x2
- vldrepl.d U0, A0, 0x00
- vfmadd.d D0, U8, U12, D0
-
- vld U4, B0, 0x00
- vldrepl.d U8, A0, 0x08
- vld U12, B0, 0x10
- vfmadd.d D0, U0, U4, D0
-
- addi.d A0, A0, 0x10
- addi.d B0, B0, 0x20
- .endm
-
- .macro KERNEL2x1x2_END
- vldrepl.d U0, A0, 0x00
- vfmadd.d D0, U8, U12, D0
-
- vld U4, B0, 0x00
- addi.d A0, A0, 0x08
- addi.d B0, B0, 0x10
-
- vfmadd.d D0, U0, U4, D0
- .endm
-
- .macro KERNEL8x1x2
- .rept 4
- KERNEL2x1x2
- .endr
- .endm
-
- .macro KERNEL8x1x2_END
- .rept 3
- KERNEL2x1x2
- .endr
- KERNEL2x1x2_END
- .endm
-
- .macro KERNEL2x8x1
- vld U0, A0, 0x00
- vfmadd.d D0, U8, U12, D0
- vfmadd.d D1, U9, U12, D1
-
- vld U1, A0, 0x10
- vfmadd.d D2, U10, U12, D2
- vfmadd.d D3, U11, U12, D3
-
- vldrepl.d U4, B0, 0x00
- vld U2, A0, 0x20
- vld U3, A0, 0x30
-
- vld U8, A0, 0x40
-
- vfmadd.d D0, U0, U4, D0
- vfmadd.d D1, U1, U4, D1
-
- vld U9, A0, 0x50
- vfmadd.d D2, U2, U4, D2
- vfmadd.d D3, U3, U4, D3
-
- vld U10, A0, 0x60
- vld U11, A0, 0x70
-
- vldrepl.d U12, B0, 0x08
-
- addi.d A0, A0, 0x80
- addi.d B0, B0, 0x10
- .endm
-
- .macro KERNEL2x8x1_END
- vld U0, A0, 0x00
- vfmadd.d D0, U8, U12, D0
- vfmadd.d D1, U9, U12, D1
-
- vld U1, A0, 0x10
- vfmadd.d D2, U10, U12, D2
- vfmadd.d D3, U11, U12, D3
-
- vld U2, A0, 0x20
- vld U3, A0, 0x30
-
- vldrepl.d U4, B0, 0x00
-
- addi.d A0, A0, 0x40
- addi.d B0, B0, 0x08
-
- vfmadd.d D0, U0, U4, D0
- vfmadd.d D1, U1, U4, D1
-
- vfmadd.d D2, U2, U4, D2
- vfmadd.d D3, U3, U4, D3
- .endm
-
- .macro KERNEL8x8x1
- .rept 4
- KERNEL2x8x1
- .endr
- .endm
-
- .macro KERNEL8x8x1_END
- .rept 3
- KERNEL2x8x1
- .endr
- KERNEL2x8x1_END
- .endm
-
- .macro KERNEL2x4x1
- vld U0, A0, 0x00
- vld U1, A0, 0x10
- vfmadd.d D0, U8, U12, D0
- vfmadd.d D1, U9, U12, D1
- vldrepl.d U4, B0, 0x00
-
- vld U8, A0, 0x20
- vld U9, A0, 0x30
- vfmadd.d D0, U0, U4, D0
- vfmadd.d D1, U1, U4, D1
- vldrepl.d U12, B0, 0x08
-
- addi.d A0, A0, 0x40
- addi.d B0, B0, 0x10
- .endm
-
- .macro KERNEL2x4x1_END
- vld U0, A0, 0x00
- vld U1, A0, 0x10
- vfmadd.d D0, U8, U12, D0
- vfmadd.d D1, U9, U12, D1
- vldrepl.d U4, B0, 0x00
-
- addi.d A0, A0, 0x20
- addi.d B0, B0, 0x08
-
- vfmadd.d D0, U0, U4, D0
- vfmadd.d D1, U1, U4, D1
- .endm
-
- .macro KERNEL8x4x1
- .rept 4
- KERNEL2x4x1
- .endr
- .endm
-
- .macro KERNEL8x4x1_END
- .rept 3
- KERNEL2x4x1
- .endr
- KERNEL2x4x1_END
- .endm
-
- .macro KERNEL2x2x1
- vld U0, A0, 0x00
- vfmadd.d D0, U8, U12, D0
- vldrepl.d U4, B0, 0x00
-
- addi.d A0, A0, 0x10
- addi.d B0, B0, 0x08
-
- vld U8, A0, 0x00
- vfmadd.d D0, U0, U4, D0
- vldrepl.d U12, B0, 0x00
-
- addi.d A0, A0, 0x10
- addi.d B0, B0, 0x08
- .endm
-
- .macro KERNEL2x2x1_END
- vld U0, A0, 0x00
- vfmadd.d D0, U8, U12, D0
- vldrepl.d U4, B0, 0x00
-
- addi.d A0, A0, 0x10
- addi.d B0, B0, 0x08
-
- vfmadd.d D0, U0, U4, D0
- .endm
-
- .macro KERNEL8x2x1
- .rept 4
- KERNEL2x2x1
- .endr
- .endm
-
- .macro KERNEL8x2x1_END
- .rept 3
- KERNEL2x2x1
- .endr
- KERNEL2x2x1_END
- .endm
-
- .macro KERNEL2x1x1
- vldrepl.d U0, A0, 0x00
- vfmadd.d D0, U8, U12, D0
- vldrepl.d U4, B0, 0x00
-
- addi.d A0, A0, 0x08
- addi.d B0, B0, 0x08
-
- vldrepl.d U8, A0, 0x00
- vfmadd.d D0, U0, U4, D0
- vldrepl.d U12, B0, 0x00
-
- addi.d A0, A0, 0x08
- addi.d B0, B0, 0x08
- .endm
-
- .macro KERNEL2x1x1_END
- vldrepl.d U0, A0, 0x00
- vfmadd.d D0, U8, U12, D0
- vldrepl.d U4, B0, 0x00
-
- addi.d A0, A0, 0x08
- addi.d B0, B0, 0x08
-
- vfmadd.d D0, U0, U4, D0
- .endm
-
- .macro KERNEL8x1x1
- .rept 4
- KERNEL2x1x1
- .endr
- .endm
-
- .macro KERNEL8x1x1_END
- .rept 3
- KERNEL2x1x1
- .endr
- KERNEL2x1x1_END
- .endm
-
-
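- /* Prologue: reserve 112 bytes of stack, save the callee-saved registers used here
- * ($r23~$r27, $f24~$f31) and spill ALPHA to sp+104 so it can be re-broadcast into
- * VALPHA after the main loops have clobbered $vr15.
- */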
- PROLOGUE
-
- addi.d $sp, $sp, -112
- /* Store regs */
- SDARG $r23, $sp, 0
- SDARG $r24, $sp, 8
- SDARG $r25, $sp, 16
- SDARG $r26, $sp, 24
- SDARG $r27, $sp, 32
- ST $f24, $sp, 40
- ST $f25, $sp, 48
- ST $f26, $sp, 56
- ST $f27, $sp, 64
- ST $f28, $sp, 72
- ST $f29, $sp, 80
- ST $f30, $sp, 88
- ST $f31, $sp, 96
- ST ALPHA, $sp, 104
-
- #if defined (TRMMKERNEL) && !defined(LEFT)
- sub.d OFF, ZERO, OFFSET
- #else
- xor OFF, OFF, OFF
- #endif
-
- /* if (!(N >> 2)) goto L_N3 */
- srai.d J, N, 2 /* J = bn >> 2 */
- andi N, N, 0x03
- vldrepl.d VALPHA, $sp, 104 /* When N < 4, VALPHA will not be changed */
- beq ZERO, J, .L_N3
-
- .L_J1: /* J-- && This loop include Condition 1 */
-
- /************************* Condition 1 if((N >> 2) && (M >> 3)) START !!! *************************
- * dgemm_core_8x4 */
- move C0, C
- move A0, A
- slli.d T0, LDC, 3
- add.d C1, C0, T0
- addi.d J, J, -1 /* J-- */
- add.d C2, C1, T0
- add.d C3, C2, T0
-
- #if defined(TRMMKERNEL) && defined(LEFT)
- move OFF, OFFSET
- #endif
-
- /* if (!(M >> 3)) goto L_M8 */
- srai.d I, M, 3 /* I = bm >> 3 */
- beq ZERO, I, .L_M8
-
- .L_I1: /* I-- */
- #if defined(TRMMKERNEL)
- #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
- move B0, B
- #else
- slli.d T0, OFF, 0x06
- add.d A0, A0, T0
- slli.d T0, OFF, 0x05
- add.d B0, B, T0
- #endif
-
- #if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
- sub.d L, K, OFF
- #elif defined(LEFT)
- /* number of values in A */
- addi.d L, OFF, 8
- #else
- /* number of values in B */
- addi.d L, OFF, 4
- #endif
- #else // #if !defined(TRMMKERNEL)
- move B0, B
- move L, K /* L = bk */
- #endif
- /* Calculate the first set of D0~D15,
- * avoiding a separate set-to-zero pass
- * Load 8 * 64 from A0
- * U0 = {a1, a0}
- * U1 = {a3, a2}
- * U2 = {a5, a4}
- * U3 = {a7, a6}
- */
- vld U0, A0, 0x00
- vld U1, A0, 0x10
- vld U2, A0, 0x20
- vld U3, A0, 0x30
-
- vldrepl.d U4, B0, 0x00
- preld 0, C0, 0x00
- /* line 1 */
- vfmul.d D0, U0, U4
- vfmul.d D1, U1, U4
- preld 0, C0, 0x20
- vfmul.d D2, U2, U4
- vfmul.d D3, U3, U4
-
- vldrepl.d U5, B0, 0x08
- preld 0, C1, 0x00
- /* line 2 */
- vfmul.d D4, U0, U5
- vfmul.d D5, U1, U5
- preld 0, C1, 0x20
- vfmul.d D6, U2, U5
- vfmul.d D7, U3, U5
-
- vldrepl.d U6, B0, 0x10
- preld 0, C2, 0x00
- /* line 3 */
- vfmul.d D8, U0, U6
- vfmul.d D9, U1, U6
- preld 0, C2, 0x20
- vfmul.d D10, U2, U6
- vfmul.d D11, U3, U6
-
- vldrepl.d U7, B0, 0x18
- preld 0, C3, 0x00
- /* line 4 */
- vfmul.d D12, U0, U7
- vfmul.d D13, U1, U7
- preld 0, C3, 0x20
- vfmul.d D14, U2, U7
- vfmul.d D15, U3, U7
-
- /* Add stride for A0 and B0 */
- addi.d A0, A0, 0x40
- addi.d B0, B0, 0x20
- /* Reduce L */
- addi.d L, L, -1
- srai.d TL, L, 3 /* TL = (L-1) >> 3 */
- /* if (TL < 1) goto L_L7 */
- beq ZERO,TL, .L_L7
-
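- /* Preload the next A/B panel into U8~U15 so the unrolled loop starts with its
- * second buffer already filled */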
- vld U8, A0, 0x00
- vld U9, A0, 0x10
- vld U10, A0, 0x20
- vld U11, A0, 0x30
-
- addi.d TL, TL, -1
-
- vldrepl.d U12, B0, 0x00
- vldrepl.d U13, B0, 0x08
- vldrepl.d U14, B0, 0x10
- vldrepl.d U15, B0, 0x18
- addi.d A0, A0, 0x40
- addi.d B0, B0, 0x20
-
- beq ZERO, TL, .L_TL1_END
- .L_TL1: /* TL-- */
- KERNEL8x8x4
- addi.d TL, TL, -1 /* TL-- */
- blt ZERO,TL, .L_TL1
-
- .L_TL1_END:
- KERNEL8x8x4_END
-
- /* Compute the remaining (up to 7)
- * K iterations, accumulating into D0~D15
- */
- .L_L7:
- /* if (!(L & 7)) goto L_L0 */
- andi TL, L, 7
- beq TL, ZERO,.L_L0
-
- .L_L71:
- /* Load 8 * 64 from A0 */
- vld U0, A0, 0x00
- vld U1, A0, 0x10
- vld U2, A0, 0x20
- vld U3, A0, 0x30
-
- /* Accumulate into D0~D15 */
- vldrepl.d U4, B0, 0x00
- vfmadd.d D0, U0, U4, D0
- vfmadd.d D1, U1, U4, D1
- vfmadd.d D2, U2, U4, D2
- vfmadd.d D3, U3, U4, D3
-
- vldrepl.d U5, B0, 0x08
- vfmadd.d D4, U0, U5, D4
- vfmadd.d D5, U1, U5, D5
- vfmadd.d D6, U2, U5, D6
- vfmadd.d D7, U3, U5, D7
-
- vldrepl.d U6, B0, 0x10
- vfmadd.d D8, U0, U6, D8
- vfmadd.d D9, U1, U6, D9
- vfmadd.d D10, U2, U6, D10
- vfmadd.d D11, U3, U6, D11
-
- vldrepl.d U7, B0, 0x18
- vfmadd.d D12, U0, U7, D12
- vfmadd.d D13, U1, U7, D13
- vfmadd.d D14, U2, U7, D14
- vfmadd.d D15, U3, U7, D15
-
- /* Add stride for A0, B0 */
- addi.d A0, A0, 0x40
- addi.d B0, B0, 0x20
-
- addi.d TL, TL, -1
- blt ZERO,TL, .L_L71
-
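- /* Scale by alpha and write back the 8x4 tile: for TRMM C = alpha*A*B,
- * otherwise C += alpha*A*B via vfmadd with the loaded C values */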
- .L_L0:
- vldrepl.d VALPHA, $sp, 104
- #if defined(TRMMKERNEL)
- vfmul.d D0, D0, VALPHA
- vfmul.d D1, D1, VALPHA
- vfmul.d D2, D2, VALPHA
- vfmul.d D3, D3, VALPHA
- vfmul.d D4, D4, VALPHA
- vfmul.d D5, D5, VALPHA
- vfmul.d D6, D6, VALPHA
- vfmul.d D7, D7, VALPHA
- vfmul.d D8, D8, VALPHA
- vfmul.d D9, D9, VALPHA
- vfmul.d D10, D10, VALPHA
- vfmul.d D11, D11, VALPHA
- vfmul.d D12, D12, VALPHA
- vfmul.d D13, D13, VALPHA
- vfmul.d D14, D14, VALPHA
- vfmul.d D15, D15, VALPHA
- #else
- /* Load C0 */
- vld U0, C0, 0x00
- vld U1, C0, 0x10
- vld U2, C0, 0x20
- vld U3, C0, 0x30
- vfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */
- vfmadd.d D1, D1, VALPHA, U1
- vfmadd.d D2, D2, VALPHA, U2
- vfmadd.d D3, D3, VALPHA, U3
-
- /* Load C1 */
- vld U4, C1, 0x00
- vld U5, C1, 0x10
- vld U6, C1, 0x20
- vld U7, C1, 0x30
- vfmadd.d D4, D4, VALPHA, U4
- vfmadd.d D5, D5, VALPHA, U5
- vfmadd.d D6, D6, VALPHA, U6
- vfmadd.d D7, D7, VALPHA, U7
-
- /* Load C2 */
- vld U8, C2, 0x00
- vld U9, C2, 0x10
- vld U10, C2, 0x20
- vld U11, C2, 0x30
- vfmadd.d D8, D8, VALPHA, U8
- vfmadd.d D9, D9, VALPHA, U9
- vfmadd.d D10, D10, VALPHA, U10
- vfmadd.d D11, D11, VALPHA, U11
-
- /* Load C3 */
- vld U0, C3, 0x00
- vld U1, C3, 0x10
- vld U2, C3, 0x20
- vld U3, C3, 0x30
- vfmadd.d D12, D12, VALPHA, U0
- vfmadd.d D13, D13, VALPHA, U1
- vfmadd.d D14, D14, VALPHA, U2
- vfmadd.d D15, D15, VALPHA, U3
- #endif // #if defined(TRMMKERNEL)
-
- /* Store C0 */
- vst D0, C0, 0x00
- vst D1, C0, 0x10
- vst D2, C0, 0x20
- vst D3, C0, 0x30
- /* Store C1 */
- vst D4, C1, 0x00
- vst D5, C1, 0x10
- vst D6, C1, 0x20
- vst D7, C1, 0x30
- /* Store C2 */
- vst D8, C2, 0x00
- vst D9, C2, 0x10
- vst D10, C2, 0x20
- vst D11, C2, 0x30
- /* Store C3 */
- vst D12, C3, 0x00
- vst D13, C3, 0x10
- vst D14, C3, 0x20
- vst D15, C3, 0x30
-
- /* Add stride for C */
- addi.d C0, C0, 0x40
- addi.d C1, C1, 0x40
- addi.d C2, C2, 0x40
- addi.d C3, C3, 0x40
-
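- /* TRMM bookkeeping: advance A0/B0 past the unused tail of the K panel and
- * update OFF for the next block of rows */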
- #if defined(TRMMKERNEL)
- #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
- sub.d L, K, OFF
- #ifdef LEFT
- /* number of values in A */
- addi.d L, L, -8
- #else
- /* number of values in B */
- addi.d L, L, -4
- #endif
- slli.d T0, L, 0x06
- add.d A0, A0, T0
- slli.d T0, L, 0x05
- add.d B0, B0, T0
- #endif
-
- #ifdef LEFT
- addi.d OFF, OFF, 0x08
- #endif
- #endif // #if defined(TRMMKERNEL)
-
- addi.d I, I, -1 /* I-- */
- blt ZERO,I, .L_I1
-
- .L_M8:
- /* We have done the blocks of M=8, considering the remaining M=4/2/1 */
- andi I, M, 7
- beq ZERO,I, .L_M0
-
- andi I, M, 4
- beq ZERO,I, .L_M2
-
- #if defined(TRMMKERNEL)
- #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
- move B0, B
- #else
- slli.d T0, OFF, 0x05
- add.d A0, A0, T0
- add.d B0, B, T0
- #endif
-
- #if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
- sub.d L, K, OFF
- #elif defined(LEFT)
- /* number of values in A */
- addi.d L, OFF, 4
- #else
- /* number of values in B */
- addi.d L, OFF, 4
- #endif
- #else // #if !defined(TRMMKERNEL)
- move B0, B
- move L, K /* L = bk */
- #endif
-
- /* Load 4 * 64 from A0 */
- vld U0, A0, 0x00
- vld U1, A0, 0x10
-
- vldrepl.d U4, B0, 0x00
- /* line 1 */
- vfmul.d D0, U0, U4
- vfmul.d D1, U1, U4
-
- vldrepl.d U5, B0, 0x08
- /* line 2 */
- vfmul.d D4, U0, U5
- vfmul.d D5, U1, U5
-
- vldrepl.d U6, B0, 0x10
- /* line 3 */
- vfmul.d D8, U0, U6
- vfmul.d D9, U1, U6
-
- vldrepl.d U7, B0, 0x18
- /* line 4 */
- vfmul.d D12, U0, U7
- vfmul.d D13, U1, U7
-
- /* Add stride for A0 and B0 */
- addi.d A0, A0, 0x20
- addi.d B0, B0, 0x20
- /* Reduce L */
- addi.d L, L, -1
- srai.d TL, L, 3 /* TL = (L-1) >> 3 */
- /* if (TL < 1) goto L_M4_L7 */
- beq ZERO,TL, .L_M4_L7
-
- vld U8, A0, 0x00
- vld U9, A0, 0x10
-
- addi.d TL, TL, -1
-
- vldrepl.d U12, B0, 0x00
- vldrepl.d U13, B0, 0x08
- vldrepl.d U14, B0, 0x10
- vldrepl.d U15, B0, 0x18
- addi.d A0, A0, 0x20
- addi.d B0, B0, 0x20
-
- beq ZERO, TL, .L_M4_TL1_END
-
- .L_M4_TL1: /* TL-- */
- KERNEL8x4x4
-
- addi.d TL, TL, -1
- blt ZERO,TL, .L_M4_TL1
-
- .L_M4_TL1_END:
- KERNEL8x4x4_END
-
- .L_M4_L7:
- /* if (!(L & 7)) goto L_M4_L0 */
- andi TL, L, 7
- beq TL, ZERO,.L_M4_L0
-
- .L_M4_L71:
- vld U0, A0, 0x00
- vld U1, A0, 0x10
-
- vldrepl.d U4, B0, 0x00
- vfmadd.d D0, U0, U4, D0
- vfmadd.d D1, U1, U4, D1
-
- vldrepl.d U5, B0, 0x08
- vfmadd.d D4, U0, U5, D4
- vfmadd.d D5, U1, U5, D5
-
- vldrepl.d U6, B0, 0x10
- vfmadd.d D8, U0, U6, D8
- vfmadd.d D9, U1, U6, D9
-
- vldrepl.d U7, B0, 0x18
- vfmadd.d D12, U0, U7, D12
- vfmadd.d D13, U1, U7, D13
-
- /* Add stride for A0, B0 */
- addi.d A0, A0, 0x20
- addi.d B0, B0, 0x20
-
- addi.d TL, TL, -1
- blt ZERO,TL, .L_M4_L71
-
- .L_M4_L0:
- vldrepl.d VALPHA, $sp, 104
- #if defined(TRMMKERNEL)
- vfmul.d D0, D0, VALPHA
- vfmul.d D1, D1, VALPHA
- vfmul.d D4, D4, VALPHA
- vfmul.d D5, D5, VALPHA
- vfmul.d D8, D8, VALPHA
- vfmul.d D9, D9, VALPHA
- vfmul.d D12, D12, VALPHA
- vfmul.d D13, D13, VALPHA
- #else
- /* Load C0 */
- vld U0, C0, 0x00
- vld U1, C0, 0x10
- vfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */
- vfmadd.d D1, D1, VALPHA, U1
-
- /* Load C1 */
- vld U2, C1, 0x00
- vld U3, C1, 0x10
- vfmadd.d D4, D4, VALPHA, U2
- vfmadd.d D5, D5, VALPHA, U3
-
- /* Load C2 */
- vld U4, C2, 0x00
- vld U5, C2, 0x10
- vfmadd.d D8, D8, VALPHA, U4
- vfmadd.d D9, D9, VALPHA, U5
-
- /* Load C3 */
- vld U6, C3, 0x00
- vld U7, C3, 0x10
- vfmadd.d D12, D12, VALPHA, U6
- vfmadd.d D13, D13, VALPHA, U7
- #endif // #if defined(TRMMKERNEL)
-
- /* Store C0 */
- vst D0, C0, 0x00
- vst D1, C0, 0x10
- /* Store C1 */
- vst D4, C1, 0x00
- vst D5, C1, 0x10
- /* Store C2 */
- vst D8, C2, 0x00
- vst D9, C2, 0x10
- /* Store C3 */
- vst D12, C3, 0x00
- vst D13, C3, 0x10
-
- /* Add stride for C */
- addi.d C0, C0, 0x20
- addi.d C1, C1, 0x20
- addi.d C2, C2, 0x20
- addi.d C3, C3, 0x20
-
- #if defined(TRMMKERNEL)
- #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
- sub.d L, K, OFF
- #ifdef LEFT
- /* number of values in A */
- addi.d L, L, -4
- #else
- /* number of values in B */
- addi.d L, L, -4
- #endif
- slli.d T0, L, 0x05
- add.d A0, A0, T0
- add.d B0, B0, T0
- #endif
-
- #ifdef LEFT
- /* number of values in A */
- addi.d OFF, OFF, 0x04
- #endif
- #endif // #if defined(TRMMKERNEL)
-
- /********LOOP (if(N >> 2 ) && (M & 4) ) End************/
-
- .L_M2:
- andi I, M, 2
- beq ZERO,I, .L_M1
-
- #if defined(TRMMKERNEL)
- #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
- move B0, B
- #else
- slli.d T0, OFF, 0x04
- add.d A0, A0, T0
- slli.d T0, OFF, 0x05
- add.d B0, B, T0
- #endif
-
- #if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
- sub.d L, K, OFF
- #elif defined(LEFT)
- /* number of values in A */
- addi.d L, OFF, 2
- #else
- /* number of values in B */
- addi.d L, OFF, 4
- #endif
- #else // #if !defined(TRMMKERNEL)
- move B0, B
- move L, K /* L = bk */
- #endif
-
- /* Load 2 * 64 from A0 */
- vldrepl.d U0, A0, 0x00
- vldrepl.d U1, A0, 0x08
-
- vld U4, B0, 0x00
- vld U5, B0, 0x10
-
- vfmul.d D0, U0, U4
- vfmul.d D1, U0, U5
- vfmul.d D2, U1, U4
- vfmul.d D3, U1, U5
-
- /* Add stride for A0 and B0 */
- addi.d A0, A0, 0x10
- addi.d B0, B0, 0x20
- /* Reduce L */
- addi.d L, L, -1
- srai.d TL, L, 3 /* TL = (L-1) >> 3 */
- /* if (TL < 1) goto L_M2_L7 */
- beq ZERO,TL, .L_M2_L7
-
- vldrepl.d U8, A0, 0x00
- vldrepl.d U9, A0, 0x08
-
- addi.d TL, TL, -1
-
- vld U12, B0, 0x00
- vld U13, B0, 0x10
- addi.d A0, A0, 0x10
- addi.d B0, B0, 0x20
-
- beq ZERO, TL, .L_M2_TL1_END
- .L_M2_TL1: /* TL-- */
- KERNEL8x2x4
-
- addi.d TL, TL, -1 /* TL-- */
- blt ZERO,TL, .L_M2_TL1
- .L_M2_TL1_END:
- KERNEL8x2x4_END
-
- .L_M2_L7:
- /* if (!(L & 7)) goto L_M2_L0 */
- andi TL, L, 7
- beq TL, ZERO,.L_M2_L0
-
- .L_M2_L71:
- vldrepl.d U0, A0, 0x00
- vldrepl.d U1, A0, 0x08
-
- vld U4, B0, 0x00
- vld U5, B0, 0x10
-
- vfmadd.d D0, U0, U4, D0
- vfmadd.d D1, U0, U5, D1
- vfmadd.d D2, U1, U4, D2
- vfmadd.d D3, U1, U5, D3
- /* Add stride for A0, B0 */
- addi.d A0, A0, 0x10
- addi.d B0, B0, 0x20
-
- addi.d TL, TL, -1
- blt ZERO,TL, .L_M2_L71
-
- .L_M2_L0:
- vldrepl.d VALPHA, $sp, 104
- #if defined(TRMMKERNEL)
- vfmul.d D0, D0, VALPHA
- vfmul.d D1, D1, VALPHA
- vfmul.d D2, D2, VALPHA
- vfmul.d D3, D3, VALPHA
-
- vstelm.d D0, C0, 0x00, 0x00
- vstelm.d D0, C1, 0x00, 0x01
- vstelm.d D1, C2, 0x00, 0x00
- vstelm.d D1, C3, 0x00, 0x01
- vstelm.d D2, C0, 0x08, 0x00
- vstelm.d D2, C1, 0x08, 0x01
- vstelm.d D3, C2, 0x08, 0x00
- vstelm.d D3, C3, 0x08, 0x01
- #else
- /* Load C0 */
- vld U0, C0, 0x00
- /* Load C1 */
- vld U1, C1, 0x00
- /* Load C2 */
- vld U2, C2, 0x00
- /* Load C3 */
- vld U3, C3, 0x00
-
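- /* D0~D3 hold the 2x4 tile by rows; interleave low/high doubles to form the
- * per-column pairs {c0j, c1j} before the fmadd with the loaded C columns */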
- vilvl.d D4, D2, D0 //C0
- vilvh.d D5, D2, D0 //C1
- vilvl.d D6, D3, D1 //C2
- vilvh.d D7, D3, D1 //C3
-
- vfmadd.d D0, D4, VALPHA, U0
- vfmadd.d D2, D5, VALPHA, U1
- vfmadd.d D1, D6, VALPHA, U2
- vfmadd.d D3, D7, VALPHA, U3
-
- vst D0, C0, 0x00
- vst D2, C1, 0x00
- vst D1, C2, 0x00
- vst D3, C3, 0x00
- #endif // #if defined(TRMMKERNEL)
-
- /* Add stride for C */
- addi.d C0, C0, 0x10
- addi.d C1, C1, 0x10
- addi.d C2, C2, 0x10
- addi.d C3, C3, 0x10
-
- #if defined(TRMMKERNEL)
- #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
- sub.d L, K, OFF
- #ifdef LEFT
- /* number of values in A */
- addi.d L, L, -2
- #else
- /* number of values in B */
- addi.d L, L, -4
- #endif
- slli.d T0, L, 0x04
- add.d A0, A0, T0
- slli.d T0, L, 0x05
- add.d B0, B0, T0
- #endif
-
- #ifdef LEFT
- /* number of values in A */
- addi.d OFF, OFF, 0x02
- #endif
- #endif // #if defined(TRMMKERNEL)
-
- /********LOOP (if(N >> 2 ) && (M & 2) ) End************/
-
- .L_M1:
- andi I, M, 1
- beq ZERO,I, .L_M0
-
- #if defined(TRMMKERNEL)
- #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
- move B0, B
- #else
- slli.d T0, OFF, 0x03
- add.d A0, A0, T0
- slli.d T0, OFF, 0x05
- add.d B0, B, T0
- #endif
-
- #if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
- sub.d L, K, OFF
- #elif defined(LEFT)
- /* number of values in A */
- addi.d L, OFF, 1
- #else
- /* number of values in B */
- addi.d L, OFF, 4
- #endif
- #else // #if !defined(TRMMKERNEL)
- move B0, B
- move L, K /* L = bk */
- #endif
-
- vldrepl.d U0, A0, 0x00
- vld U4, B0, 0x00
- vld U5, B0, 0x10
- vfmul.d D0, U0, U4
- vfmul.d D1, U0, U5
-
- /* Add stride for A0 and B0 */
- addi.d A0, A0, 0x08
- addi.d B0, B0, 0x20
- /* Reduce L */
- addi.d L, L, -1
- srai.d TL, L, 3 /* TL = (L-1) >> 3 */
- /* if (TL < 1) goto L_M1_L7 */
- beq ZERO,TL, .L_M1_L7
-
- vldrepl.d U8, A0, 0x00
-
- addi.d TL, TL, -1
- vld U12, B0, 0x00
- vld U13, B0, 0x10
- addi.d A0, A0, 0x08
- addi.d B0, B0, 0x20
-
- beq ZERO, TL, .L_M1_TL1_END
-
- .L_M1_TL1: /* TL-- */
- KERNEL8x1x4
-
- addi.d TL, TL, -1 /* TL-- */
- blt ZERO,TL, .L_M1_TL1
- .L_M1_TL1_END:
- KERNEL8x1x4_END
-
- .L_M1_L7:
- /* if (!(L & 7)) goto L_M1_L0 */
- andi TL, L, 7
- beq TL, ZERO,.L_M1_L0
-
- .L_M1_L71:
- vldrepl.d U0, A0, 0x00
- vld U4, B0, 0x00
- vld U5, B0, 0x10
- vfmadd.d D0, U0, U4, D0
- vfmadd.d D1, U0, U5, D1
-
- /* Add stride for A0, B0 */
- addi.d A0, A0, 0x08
- addi.d B0, B0, 0x20
-
- addi.d TL, TL, -1
- blt ZERO,TL, .L_M1_L71
-
- .L_M1_L0:
- vldrepl.d VALPHA, $sp, 104
- #if defined(TRMMKERNEL)
- vfmul.d D0, D0, VALPHA
- vfmul.d D1, D1, VALPHA
-
- vstelm.d D0, C0, 0x00, 0x00
- vstelm.d D0, C1, 0x00, 0x01
- vstelm.d D1, C2, 0x00, 0x00
- vstelm.d D1, C3, 0x00, 0x01
- #else
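- /* D0 = {c00, c01}, D1 = {c02, c03}; gather the four C scalars into pairs,
- * fmadd with alpha, then scatter the results back element by element */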
- /* Load C0 */
- vldrepl.d U0, C0, 0x00
- vldrepl.d U1, C1, 0x00
- vilvl.d D4, U1, U0
- vfmadd.d D6, D0, VALPHA, D4
-
- vldrepl.d U2, C2, 0x00
- vldrepl.d U3, C3, 0x00
- vilvl.d D5, U3, U2
- vfmadd.d D7, D1, VALPHA, D5
-
- vstelm.d D6, C0, 0x00, 0x00
- vstelm.d D6, C1, 0x00, 0x01
- vstelm.d D7, C2, 0x00, 0x00
- vstelm.d D7, C3, 0x00, 0x01
- #endif // #if defined(TRMMKERNEL)
-
- /* Add stride for C */
- addi.d C0, C0, 0x08
- addi.d C1, C1, 0x08
- addi.d C2, C2, 0x08
- addi.d C3, C3, 0x08
-
- #if defined(TRMMKERNEL)
- #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
- sub.d L, K, OFF
- #ifdef LEFT
- /* number of values in A */
- addi.d L, L, -1
- #else
- /* number of values in B */
- addi.d L, L, -4
- #endif
- slli.d T0, L, 0x03
- add.d A0, A0, T0
- slli.d T0, L, 0x05
- add.d B0, B0, T0
- #endif
-
- #ifdef LEFT
- /* number of values in A */
- addi.d OFF, OFF, 0x01
- #endif
- #endif // #if defined(TRMMKERNEL)
-
- /********LOOP (if(N >> 2 ) && (M & 1) ) End************/
-
- .L_M0:
- /* Add stride for B and C
- * B += (K * 32)
- * C += (LDC * 32)
- */
- /* since the element type is double (8 bytes)
- * and we advance 4 columns, we multiply by 32
- */
- slli.d T0, K, 5
- slli.d T1, LDC, 5
- add.d B, B, T0
- add.d C, C, T1
-
- #if defined(TRMMKERNEL) && !defined(LEFT)
- addi.d OFF, OFF, 0x04
- #endif
-
- blt ZERO, J, .L_J1
-
- //////////////// go back to L_J1 /////////////////
- /////////////////////////////////////////////////
- /************************ Condition 1 if((N >> 2) && (M >> 3)) END !!! ************************/
-
- vldrepl.d VALPHA, $sp, 104
-
- .L_N3:
- andi J, N, 2
- beq ZERO, J, .L_N1
-
- /************************* Condition 2 if((N & 2) && (M >> 3)) START !!! *************************
- * dgemm_core_8x2 */
-
- move C0, C
- move A0, A
- slli.d T0, LDC, 3
- add.d C1, C0, T0
-
- #if defined(TRMMKERNEL) && defined(LEFT)
- move OFF, OFFSET
- #endif
-
- /* if (!(M >> 3)) goto L_N3_M8 */
- srai.d I, M, 3 /* I = bm >> 3 */
- beq ZERO, I, .L_N3_M8
-
- .L_N3_I1:
- #if defined(TRMMKERNEL)
- #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
- move B0, B
- #else
- slli.d T0, OFF, 0x06
- add.d A0, A0, T0
- slli.d T0, OFF, 0x04
- add.d B0, B, T0
- #endif
-
- #if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
- sub.d L, K, OFF
- #elif defined(LEFT)
- /* number of values in A */
- addi.d L, OFF, 8
- #else
- /* number of values in B */
- addi.d L, OFF, 2
- #endif
- #else // #if !defined(TRMMKERNEL)
- move B0, B
- move L, K /* L = bk */
- #endif
-
- /* Load 8 * 64 from A0
- * U0 = {a1, a0}
- * U1 = {a3, a2}
- * U2 = {a5, a4}
- * U3 = {a7, a6}
- */
- vld U0, A0, 0x00
- vld U1, A0, 0x10
- vld U2, A0, 0x20
- vld U3, A0, 0x30
-
- vldrepl.d U4, B0, 0x00
- /* line 1 */
- vfmul.d D0, U0, U4
- vfmul.d D1, U1, U4
- vfmul.d D2, U2, U4
- vfmul.d D3, U3, U4
-
- vldrepl.d U5, B0, 0x08
- /* line 2 */
- vfmul.d D4, U0, U5
- vfmul.d D5, U1, U5
- vfmul.d D6, U2, U5
- vfmul.d D7, U3, U5
-
- /* Add stride for A0 and B0 */
- addi.d A0, A0, 0x40
- addi.d B0, B0, 0x10
- /* Reduce L */
- addi.d L, L, -1
- srai.d TL, L, 3 /* TL = (L-1) >> 3 */
- /* if (TL < 1) goto L_N3_L7 */
- beq ZERO,TL, .L_N3_L7
-
- vld U8, A0, 0x00
- vld U9, A0, 0x10
- vld U10, A0, 0x20
- vld U11, A0, 0x30
-
- addi.d TL, TL, -1
-
- vldrepl.d U12, B0, 0x00
- vldrepl.d U13, B0, 0x08
- addi.d A0, A0, 0x40
- addi.d B0, B0, 0x10
-
- beq ZERO, TL, .L_N3_TL1_END
-
- .L_N3_TL1: /* TL-- */
- KERNEL8x8x2
-
- addi.d TL, TL, -1 /* TL-- */
- blt ZERO,TL, .L_N3_TL1
- .L_N3_TL1_END:
- KERNEL8x8x2_END
-
- .L_N3_L7:
- /* if (!(L & 7)) goto L_N3_L0 */
- andi TL, L, 7
- beq TL, ZERO,.L_N3_L0
-
- .L_N3_L71:
- /* Load 8 * 64 from A0 */
- vld U0, A0, 0x00
- vld U1, A0, 0x10
- vld U2, A0, 0x20
- vld U3, A0, 0x30
-
- vldrepl.d U4, B0, 0x00
- vfmadd.d D0, U0, U4, D0
- vfmadd.d D1, U1, U4, D1
- vfmadd.d D2, U2, U4, D2
- vfmadd.d D3, U3, U4, D3
-
- vldrepl.d U5, B0, 0x08
- vfmadd.d D4, U0, U5, D4
- vfmadd.d D5, U1, U5, D5
- vfmadd.d D6, U2, U5, D6
- vfmadd.d D7, U3, U5, D7
- /* Add stride for A0, B0 */
- addi.d A0, A0, 0x40
- addi.d B0, B0, 0x10
-
- addi.d TL, TL, -1
- blt ZERO,TL, .L_N3_L71
-
- .L_N3_L0:
- #if defined(TRMMKERNEL)
- vfmul.d D0, D0, VALPHA
- vfmul.d D1, D1, VALPHA
- vfmul.d D2, D2, VALPHA
- vfmul.d D3, D3, VALPHA
- vfmul.d D4, D4, VALPHA
- vfmul.d D5, D5, VALPHA
- vfmul.d D6, D6, VALPHA
- vfmul.d D7, D7, VALPHA
- #else
- /* Load C0 */
- vld U0, C0, 0x00
- vld U1, C0, 0x10
- vld U2, C0, 0x20
- vld U3, C0, 0x30
- vfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */
- vfmadd.d D1, D1, VALPHA, U1
- vfmadd.d D2, D2, VALPHA, U2
- vfmadd.d D3, D3, VALPHA, U3
-
- /* Load C1 */
- vld U4, C1, 0x00
- vld U5, C1, 0x10
- vld U6, C1, 0x20
- vld U7, C1, 0x30
- vfmadd.d D4, D4, VALPHA, U4
- vfmadd.d D5, D5, VALPHA, U5
- vfmadd.d D6, D6, VALPHA, U6
- vfmadd.d D7, D7, VALPHA, U7
- #endif // #if defined(TRMMKERNEL)
-
- /* Store C0 */
- vst D0, C0, 0x00
- vst D1, C0, 0x10
- vst D2, C0, 0x20
- vst D3, C0, 0x30
- /* Store C1 */
- vst D4, C1, 0x00
- vst D5, C1, 0x10
- vst D6, C1, 0x20
- vst D7, C1, 0x30
-
- /* Add stride for C */
- addi.d C0, C0, 0x40
- addi.d C1, C1, 0x40
-
- #if defined(TRMMKERNEL)
- #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
- sub.d L, K, OFF
- #ifdef LEFT
- addi.d L, L, -8
- #else
- addi.d L, L, -2
- #endif
- slli.d T0, L, 0x06
- add.d A0, A0, T0
- slli.d T0, L, 0x04
- add.d B0, B0, T0
- #endif
-
- #ifdef LEFT
- addi.d OFF, OFF, 0x8
- #endif
- #endif // #if defined(TRMMKERNEL)
-
- addi.d I, I, -1 /* I-- */
- blt ZERO,I, .L_N3_I1
-
- .L_N3_M8:
- /* We have done the blocks of M=8, considering the remaining M=4/2/1 */
- andi I, M, 7
- beq ZERO,I, .L_N3_M0
-
- andi I, M, 4
- beq ZERO,I, .L_N3_M2
-
- #if defined(TRMMKERNEL)
- #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
- move B0, B
- #else
- slli.d T0, OFF, 0x05
- add.d A0, A0, T0
- slli.d T0, OFF, 0x04
- add.d B0, B, T0
- #endif
-
- #if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
- sub.d L, K, OFF
- #elif defined(LEFT)
- /* number of values in A */
- addi.d L, OFF, 4
- #else
- /* number of values in B */
- addi.d L, OFF, 2
- #endif
- #else // #if !defined(TRMMKERNEL)
- move B0, B
- move L, K /* L = bk */
- #endif
-
- /* Load 4 * 64 from A0 */
- vld U0, A0, 0x00
- vld U1, A0, 0x10
-
- vldrepl.d U4, B0, 0x00
- /* line 1 */
- vfmul.d D0, U0, U4
- vfmul.d D1, U1, U4
-
- vldrepl.d U5, B0, 0x08
- /* line 2 */
- vfmul.d D4, U0, U5
- vfmul.d D5, U1, U5
-
- /* Add stride for A0 and B0 */
- addi.d A0, A0, 0x20
- addi.d B0, B0, 0x10
- /* Reduce L */
- addi.d L, L, -1
- srai.d TL, L, 3 /* TL = (L-1) >> 3 */
- /* if (TL < 1) goto L_N3_M4_L7 */
- beq ZERO,TL, .L_N3_M4_L7
-
- vld U8, A0, 0x00
- vld U9, A0, 0x10
-
- addi.d TL, TL, -1
-
- vldrepl.d U12, B0, 0x00
- vldrepl.d U13, B0, 0x08
- addi.d A0, A0, 0x20
- addi.d B0, B0, 0x10
-
- beq ZERO, TL, .L_N3_M4_TL1_END
-
- .L_N3_M4_TL1: /* TL-- */
- KERNEL8x4x2
-
- addi.d TL, TL, -1 /* TL-- */
- blt ZERO,TL, .L_N3_M4_TL1
- .L_N3_M4_TL1_END:
- KERNEL8x4x2_END
-
- .L_N3_M4_L7:
- /* if (!(L & 7)) goto L_N3_M4_L0 */
- andi TL, L, 7
- beq TL, ZERO,.L_N3_M4_L0
-
- .L_N3_M4_L71:
- vld U0, A0, 0x00
- vld U1, A0, 0x10
-
- vldrepl.d U4, B0, 0x00
- vfmadd.d D0, U0, U4, D0
- vfmadd.d D1, U1, U4, D1
-
- vldrepl.d U5, B0, 0x08
- vfmadd.d D4, U0, U5, D4
- vfmadd.d D5, U1, U5, D5
-
- /* Add stride for A0, B0 */
- addi.d A0, A0, 0x20
- addi.d B0, B0, 0x10
-
- addi.d TL, TL, -1
- blt ZERO,TL, .L_N3_M4_L71
-
- .L_N3_M4_L0:
- #if defined(TRMMKERNEL)
- vfmul.d D0, D0, VALPHA
- vfmul.d D1, D1, VALPHA
- vfmul.d D4, D4, VALPHA
- vfmul.d D5, D5, VALPHA
- #else
- /* Load C0 */
- vld U0, C0, 0x00
- vld U1, C0, 0x10
- vfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */
- vfmadd.d D1, D1, VALPHA, U1
-
- /* Load C1 */
- vld U2, C1, 0x00
- vld U3, C1, 0x10
- vfmadd.d D4, D4, VALPHA, U2
- vfmadd.d D5, D5, VALPHA, U3
- #endif // #if defined(TRMMKERNEL)
-
- /* Store C0 */
- vst D0, C0, 0x00
- vst D1, C0, 0x10
- /* Store C1 */
- vst D4, C1, 0x00
- vst D5, C1, 0x10
-
- /* Add stride for C */
- addi.d C0, C0, 0x20
- addi.d C1, C1, 0x20
-
- #if defined(TRMMKERNEL)
- #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
- sub.d L, K, OFF
- #ifdef LEFT
- addi.d L, L, -4
- #else
- addi.d L, L, -2
- #endif
- slli.d T0, L, 0x05
- add.d A0, A0, T0
- slli.d T0, L, 0x04
- add.d B0, B0, T0
- #endif
-
- #ifdef LEFT
- addi.d OFF, OFF, 0x04
- #endif
- #endif // #if defined(TRMMKERNEL)
-
- /********LOOP (if(N & 2 ) && (M & 4) ) End************/
-
- .L_N3_M2:
- andi I, M, 2
- beq ZERO,I, .L_N3_M1
-
- #if defined(TRMMKERNEL)
- #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
- move B0, B
- #else
- slli.d T0, OFF, 0x04
- add.d A0, A0, T0
- add.d B0, B, T0
- #endif
-
- #if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
- sub.d L, K, OFF
- #elif defined(LEFT)
- /* number of values in A */
- addi.d L, OFF, 2
- #else
- /* number of values in B */
- addi.d L, OFF, 2
- #endif
- #else // #if !defined(TRMMKERNEL)
- move B0, B
- move L, K /* L = bk */
- #endif
-
- /* Load 2 * 64 from A0 */
- vld U0, A0, 0x00
-
- vldrepl.d U4, B0, 0x00
- /* line 1 */
- vfmul.d D0, U0, U4
-
- vldrepl.d U4, B0, 0x08
- /* line 2 */
- vfmul.d D4, U0, U4
-
- /* Add stride for A0 and B0 */
- addi.d A0, A0, 0x10
- addi.d B0, B0, 0x10
- /* Reduce L */
- addi.d L, L, -1
- srai.d TL, L, 3 /* TL = (L-1) >> 3 */
- /* if (TL < 1) goto L_N3_M2_L7 */
- beq ZERO,TL, .L_N3_M2_L7
-
- vld U8, A0, 0x00
-
- addi.d TL, TL, -1
-
- vldrepl.d U12, B0, 0x00
- vldrepl.d U13, B0, 0x08
- addi.d A0, A0, 0x10
- addi.d B0, B0, 0x10
-
- beq ZERO, TL, .L_N3_M2_TL1_END
-
- .L_N3_M2_TL1: /* TL-- */
- KERNEL8x2x2
-
- addi.d TL, TL, -1 /* TL-- */
- blt ZERO,TL, .L_N3_M2_TL1
- .L_N3_M2_TL1_END:
- KERNEL8x2x2_END
-
- .L_N3_M2_L7:
- /* if (!(L & 7)) goto L_N3_M2_L0 */
- andi TL, L, 7
- beq TL, ZERO,.L_N3_M2_L0
-
- .L_N3_M2_L71:
- vld U0, A0, 0x00
-
- vldrepl.d U4, B0, 0x00
- vldrepl.d U5, B0, 0x08
- vfmadd.d D0, U0, U4, D0
-
- vfmadd.d D4, U0, U5, D4
-
- /* Add stride for A0, B0 */
- addi.d A0, A0, 0x10
- addi.d B0, B0, 0x10
-
- addi.d TL, TL, -1
- blt ZERO,TL, .L_N3_M2_L71
-
- .L_N3_M2_L0:
- #if defined(TRMMKERNEL)
- vfmul.d D0, D0, VALPHA
- vfmul.d D4, D4, VALPHA
- #else
- /* Load C0 */
- vld U0, C0, 0x00
- vfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */
-
- /* Load C1 */
- vld U1, C1, 0x00
- vfmadd.d D4, D4, VALPHA, U1
- #endif // #if defined(TRMMKERNEL)
-
- vst D0, C0, 0x00
- vst D4, C1, 0x00
-
- /* Add stride for C */
- addi.d C0, C0, 0x10
- addi.d C1, C1, 0x10
-
- #if defined(TRMMKERNEL)
- #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
- sub.d L, K, OFF
- #ifdef LEFT
- addi.d L, L, -2
- #else
- addi.d L, L, -2
- #endif
- slli.d T0, L, 0x04
- add.d A0, A0, T0
- add.d B0, B0, T0
- #endif
-
- #ifdef LEFT
- addi.d OFF, OFF, 0x02
- #endif
- #endif // #if defined(TRMMKERNEL)
-
- /********LOOP (if(N & 2 ) && (M & 2) ) End************/
-
- .L_N3_M1:
- andi I, M, 1
- beq ZERO,I, .L_N3_M0
-
- #if defined(TRMMKERNEL)
- #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
- move B0, B
- #else
- slli.d T0, OFF, 0x03
- add.d A0, A0, T0
- slli.d T0, OFF, 0x04
- add.d B0, B, T0
- #endif
-
- #if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
- sub.d L, K, OFF
- #elif defined(LEFT)
- /* number of values in A */
- addi.d L, OFF, 1
- #else
- /* number of values in B */
- addi.d L, OFF, 2
- #endif
- #else // #if !defined(TRMMKERNEL)
- move B0, B
- move L, K /* L = bk */
- #endif
-
- /* Load 1 * 64 from A0 */
- vldrepl.d U0, A0, 0x00
-
- vld U4, B0, 0x00
- /* line 1 */
- vfmul.d D0, U0, U4
-
- /* Add stride for A0 and B0 */
- addi.d A0, A0, 0x08
- addi.d B0, B0, 0x10
- /* Reduce L */
- addi.d L, L, -1
- srai.d TL, L, 3 /* TL = (L-1) >> 3 */
- /* if (TL < 1) goto L_N3_M1_L7 */
- beq ZERO,TL, .L_N3_M1_L7
-
- vldrepl.d U8, A0, 0x00
-
- addi.d TL, TL, -1
-
- vld U12, B0, 0x00
- addi.d A0, A0, 0x08
- addi.d B0, B0, 0x10
-
- beq ZERO, TL, .L_N3_M1_TL1_END
-
- .L_N3_M1_TL1: /* TL-- */
- KERNEL8x1x2
-
- addi.d TL, TL, -1 /* TL-- */
- blt ZERO,TL, .L_N3_M1_TL1
- .L_N3_M1_TL1_END:
- KERNEL8x1x2_END
-
- .L_N3_M1_L7:
- /* if (!(L & 7)) goto L_N3_M1_L0 */
- andi TL, L, 7
- beq TL, ZERO,.L_N3_M1_L0
-
- .L_N3_M1_L71:
- vldrepl.d U0, A0, 0x00
-
- vld U4, B0, 0x00
- vfmadd.d D0, U0, U4, D0
-
- /* Add stride for A0, B0 */
- addi.d A0, A0, 0x08
- addi.d B0, B0, 0x10
-
- addi.d TL, TL, -1
- blt ZERO,TL, .L_N3_M1_L71
-
- .L_N3_M1_L0:
- #if defined(TRMMKERNEL)
- vfmul.d D0, D0, VALPHA
- #else
- /* Load C0 */
- vld U0, C0, 0x00
- vld U1, C1, 0x00
- vilvl.d U2, U1, U0
- vfmadd.d D0, D0, VALPHA, U2
- #endif // #if defined(TRMMKERNEL)
-
- vstelm.d D0, C0, 0x00, 0x00
- vstelm.d D0, C1, 0x00, 0x01
-
- /* Add stride for C */
- addi.d C0, C0, 0x08
- addi.d C1, C1, 0x08
-
- #if defined(TRMMKERNEL)
- #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
- sub.d L, K, OFF
- #ifdef LEFT
- addi.d L, L, -1
- #else
- addi.d L, L, -2
- #endif
- slli.d T0, L, 0x03
- add.d A0, A0, T0
- slli.d T0, L, 0x04
- add.d B0, B0, T0
- #endif
-
- #ifdef LEFT
- addi.d OFF, OFF, 0x01
- #endif
- #endif // #if defined(TRMMKERNEL)
-
- /********LOOP (if(N & 2 ) && (M & 1) ) End************/
-
- .L_N3_M0:
- /* Add stride for B and C
- * B += (K * 16)
- * C += (LDC * 16)
- */
- /* since the element type is double (8 bytes)
- * and we advance 2 columns, we multiply by 16
- */
- slli.d T0, K, 4
- slli.d T1, LDC, 4
- add.d B, B, T0
- add.d C, C, T1
-
- #if defined(TRMMKERNEL) && !defined(LEFT)
- addi.d OFF, OFF, 0x02
- #endif
-
- /* We must reinit I */
- srai.d I, M, 4 /* I = bm >> 4 */
-
- /************************* Condition 2 if((N & 2) && (M >> 3)) End !!! *************************
- * dgemm_core_8x2 */
-
- .L_N1:
- andi J, N, 1
- beq ZERO, J, .L_N0
-
- /************************* Condition 3 if((N & 1) && (M >> 3)) START !!! *************************
- * dgemm_core_8x1 */
-
- move C0, C
- move A0, A
-
- #if defined(TRMMKERNEL) && defined(LEFT)
- move OFF, OFFSET
- #endif
-
- /* if (!(M >> 3)) goto L_N1_M8 */
- srai.d I, M, 3 /* I = bm >> 3 */
- beq ZERO, I, .L_N1_M8
-
- .L_N1_I1:
- #if defined(TRMMKERNEL)
- #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
- move B0, B
- #else
- slli.d T0, OFF, 0x06
- add.d A0, A0, T0
- slli.d T0, OFF, 0x03
- add.d B0, B, T0
- #endif
-
- #if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
- sub.d L, K, OFF
- #elif defined(LEFT)
- /* number of values in A */
- addi.d L, OFF, 8
- #else
- /* number of values in B */
- addi.d L, OFF, 1
- #endif
- #else // #if !defined(TRMMKERNEL)
- move B0, B
- move L, K /* L = bk */
- #endif
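- /* TRMM byte offsets for this M = 8, N = 1 block: the packed A panel holds
-  * 8 doubles (64 bytes) per k step, hence OFF << 6 for A0, and the packed B
-  * panel holds 1 double (8 bytes) per k step, hence OFF << 3 for B0.
-  */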
-
- /* Load 8 * 64 from A0
-  * U0 = {a1, a0}
-  * U1 = {a3, a2}
-  * U2 = {a5, a4}
-  * U3 = {a7, a6}
-  */
-
- vld U0, A0, 0x00
- vld U1, A0, 0x10
- vld U2, A0, 0x20
- vld U3, A0, 0x30
-
- vldrepl.d U4, B0, 0x00
- /* line 1 */
- vfmul.d D0, U0, U4
- vfmul.d D1, U1, U4
- vfmul.d D2, U2, U4
- vfmul.d D3, U3, U4
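- /* D0..D3 accumulate the eight partial sums of this column, two per vector
-  * register. A rough C sketch of the block (illustrative only):
-  *   for (k = 0; k < K; k++)
-  *       for (i = 0; i < 8; i++)
-  *           c[i] += a[8*k + i] * b[k];
-  */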
-
- /* Add stride for A0 and B0 */
- addi.d A0, A0, 0x40
- addi.d B0, B0, 0x08
- /* Reduce L */
- addi.d L, L, -1
- srai.d TL, L, 3 /* TL = (L-1) >> 3 */
- /* if (TL < 1) goto L_N1_L7 */
- beq ZERO,TL, .L_N1_L7
-
- vld U8, A0, 0x00
- vld U9, A0, 0x10
- vld U10, A0, 0x20
- vld U11, A0, 0x30
-
- addi.d TL, TL, -1
-
- vldrepl.d U12, B0, 0x00
- addi.d A0, A0, 0x40
- addi.d B0, B0, 0x08
-
- beq ZERO, TL, .L_N1_TL1_END
- .L_N1_TL1: /* TL-- */
- KERNEL8x8x1
-
- addi.d TL, TL, -1 /* TL-- */
- blt ZERO,TL, .L_N1_TL1
- .L_N1_TL1_END:
- KERNEL8x8x1_END
-
- .L_N1_L7:
- /* if (!(L & 7)) goto L_N1_L0 */
- andi TL, L, 7
- beq TL, ZERO,.L_N1_L0
-
- .L_N1_L71:
- /* Load 8 * 64 from A0 */
- vld U0, A0, 0x00
- vld U1, A0, 0x10
- vld U2, A0, 0x20
- vld U3, A0, 0x30
-
- vldrepl.d U4, B0, 0x00
- vfmadd.d D0, U0, U4, D0
- vfmadd.d D1, U1, U4, D1
- vfmadd.d D2, U2, U4, D2
- vfmadd.d D3, U3, U4, D3
-
- /* Add stride for A0, B0 */
- addi.d A0, A0, 0x40
- addi.d B0, B0, 0x08
-
- addi.d TL, TL, -1
- blt ZERO,TL, .L_N1_L71
-
- .L_N1_L0:
- #if defined(TRMMKERNEL)
- vfmul.d D0, D0, VALPHA
- vfmul.d D1, D1, VALPHA
- vfmul.d D2, D2, VALPHA
- vfmul.d D3, D3, VALPHA
- #else
- /* Load C0 */
- vld U0, C0, 0x00
- vld U1, C0, 0x10
- vld U2, C0, 0x20
- vld U3, C0, 0x30
- vfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */
- vfmadd.d D1, D1, VALPHA, U1
- vfmadd.d D2, D2, VALPHA, U2
- vfmadd.d D3, D3, VALPHA, U3
- #endif // #if defined(TRMMKERNEL)
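- /* Epilogue: the accumulators are scaled by alpha; in the plain GEMM path
-  * the vfmadd.d above folds in the old C values (C[i] += alpha * sum), while
-  * the TRMM path stores alpha * (A*B) directly.
-  */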
-
- /* Store C0 */
- vst D0, C0, 0x00
- vst D1, C0, 0x10
- vst D2, C0, 0x20
- vst D3, C0, 0x30
-
- /* Add stride for C */
- addi.d C0, C0, 0x40
-
- #if defined(TRMMKERNEL)
- #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
- sub.d L, K, OFF
- #ifdef LEFT
- addi.d L, L, -8
- #else
- addi.d L, L, -1
- #endif
- slli.d T0, L, 0x06
- add.d A0, A0, T0
- slli.d T0, L, 0x03
- add.d B0, B0, T0
- #endif
-
- #ifdef LEFT
- addi.d OFF, OFF, 0x8
- #endif
- #endif // #if defined(TRMMKERNEL)
-
- addi.d I, I, -1 /* I-- */
- blt ZERO,I, .L_N1_I1
-
- .L_N1_M8:
- /* The M >> 3 blocks are done; handle the remaining M & 7 tail (M = 4/2/1) */
- andi I, M, 7
- beq ZERO,I, .L_N1_M0
-
- andi I, M, 4
- beq ZERO,I, .L_N1_M2
-
- #if defined(TRMMKERNEL)
- #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
- move B0, B
- #else
- slli.d T0, OFF, 0x05
- add.d A0, A0, T0
- slli.d T0, OFF, 0x03
- add.d B0, B, T0
- #endif
-
- #if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
- sub.d L, K, OFF
- #elif defined(LEFT)
- /* number of values in A */
- addi.d L, OFF, 4
- #else
- /* number of values in B */
- addi.d L, OFF, 1
- #endif
- #else // #if !defined(TRMMKERNEL)
- move B0, B
- move L, K /* L = bk */
- #endif
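- /* Same TRMM pattern with a 4-wide A panel: 4 doubles (32 bytes) per k
-  * step, hence OFF << 5 for A0 and OFF << 3 for B0.
-  */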
-
- /* Load 4 * 64 from A0 */
- vld U0, A0, 0x00
- vld U1, A0, 0x10
-
- vldrepl.d U4, B0, 0x00
- /* line 1 */
- vfmul.d D0, U0, U4
- vfmul.d D1, U1, U4
-
- /* Add stride for A0 and B0 */
- addi.d A0, A0, 0x20
- addi.d B0, B0, 0x08
- /* Reduce L */
- addi.d L, L, -1
- srai.d TL, L, 3 /* TL = (L-1) >> 3 */
- /* if (TL < 1) goto L_N1_M4_L7 */
- beq ZERO,TL, .L_N1_M4_L7
-
- vld U8, A0, 0x00
- vld U9, A0, 0x10
-
- addi.d TL, TL, -1
-
- vldrepl.d U12, B0, 0x00
- addi.d A0, A0, 0x20
- addi.d B0, B0, 0x08
-
- beq ZERO, TL, .L_N1_M4_TL1_END
-
- .L_N1_M4_TL1: /* TL-- */
- KERNEL8x4x1
-
- addi.d TL, TL, -1 /* TL-- */
- blt ZERO,TL, .L_N1_M4_TL1
- .L_N1_M4_TL1_END:
- KERNEL8x4x1_END
-
- .L_N1_M4_L7:
- /* if (!(L & 7)) goto L_N1_M4_L0 */
- andi TL, L, 7
- beq TL, ZERO,.L_N1_M4_L0
-
- .L_N1_M4_L71:
- vld U0, A0, 0x00
- vld U1, A0, 0x10
-
- vldrepl.d U4, B0, 0x00
- vfmadd.d D0, U0, U4, D0
- vfmadd.d D1, U1, U4, D1
-
- /* Add stride for A0, B0 */
- addi.d A0, A0, 0x20
- addi.d B0, B0, 0x08
-
- addi.d TL, TL, -1
- blt ZERO,TL, .L_N1_M4_L71
-
- .L_N1_M4_L0:
- #if defined(TRMMKERNEL)
- vfmul.d D0, D0, VALPHA
- vfmul.d D1, D1, VALPHA
- #else
- /* Load C0 */
- vld U0, C0, 0x00
- vld U1, C0, 0x10
- vfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */
- vfmadd.d D1, D1, VALPHA, U1
- #endif // #if defined(TRMMKERNEL)
-
- /* Store C0 */
- vst D0, C0, 0x00
- vst D1, C0, 0x10
-
- /* Add stride for C */
- addi.d C0, C0, 0x20
-
- #if defined(TRMMKERNEL)
- #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
- sub.d L, K, OFF
- #ifdef LEFT
- addi.d L, L, -4
- #else
- addi.d L, L, -1
- #endif
- slli.d T0, L, 0x05
- add.d A0, A0, T0
- slli.d T0, L, 0x03
- add.d B0, B0, T0
- #endif
-
- #ifdef LEFT
- addi.d OFF, OFF, 0x04
- #endif
- #endif // #if defined(TRMMKERNEL)
-
- /********LOOP (if(N & 1) && (M & 4) ) End************/
-
- .L_N1_M2:
- andi I, M, 2
- beq ZERO,I, .L_N1_M1
-
- #if defined(TRMMKERNEL)
- #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
- move B0, B
- #else
- slli.d T0, OFF, 0x04
- add.d A0, A0, T0
- slli.d T0, OFF, 0x03
- add.d B0, B, T0
- #endif
-
- #if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
- sub.d L, K, OFF
- #elif defined(LEFT)
- /* number of values in A */
- addi.d L, OFF, 2
- #else
- /* number of values in B */
- addi.d L, OFF, 1
- #endif
- #else // #if !defined(TRMMKERNEL)
- move B0, B
- move L, K /* L = bk */
- #endif
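- /* TRMM path for M = 2, N = 1: the A panel holds 2 doubles (16 bytes) per k
-  * step (OFF << 4) and the B panel holds 1 double (8 bytes, OFF << 3).
-  */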
-
- /* Load 2 * 64 from A0 */
- vld U0, A0, 0x00
-
- vldrepl.d U4, B0, 0x00
- /* line 1 */
- vfmul.d D0, U0, U4
-
- /* Add stride for A0 and B0 */
- addi.d A0, A0, 0x10
- addi.d B0, B0, 0x08
- /* Reduce L */
- addi.d L, L, -1
- srai.d TL, L, 3 /* TL = (L-1) >> 3 */
- /* if (TL < 1) goto L_N1_M2_L7 */
- beq ZERO,TL, .L_N1_M2_L7
-
- vld U8, A0, 0x00
-
- addi.d TL, TL, -1
-
- vldrepl.d U12, B0, 0x00
- addi.d A0, A0, 0x10
- addi.d B0, B0, 0x08
-
- beq ZERO, TL, .L_N1_M2_TL1_END
-
- .L_N1_M2_TL1: /* TL-- */
- KERNEL8x2x1
-
- addi.d TL, TL, -1 /* TL-- */
- blt ZERO,TL, .L_N1_M2_TL1
- .L_N1_M2_TL1_END:
- KERNEL8x2x1_END
-
- .L_N1_M2_L7:
- /* if (!(L & 7)) goto L_N1_M2_L0 */
- andi TL, L, 7
- beq TL, ZERO,.L_N1_M2_L0
-
- .L_N1_M2_L71:
- vld U0, A0, 0x00
-
- vldrepl.d U4, B0, 0x00
- vfmadd.d D0, U0, U4, D0
-
- /* Add stride for A0, B0 */
- addi.d A0, A0, 0x10
- addi.d B0, B0, 0x08
-
- addi.d TL, TL, -1
- blt ZERO,TL, .L_N1_M2_L71
-
- .L_N1_M2_L0:
- #if defined(TRMMKERNEL)
- vfmul.d D0, D0, VALPHA
- #else
- /* Load C0 */
- vld U0, C0, 0x00
- vfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */
- #endif // #if defined(TRMMKERNEL)
-
- vstelm.d D0, C0, 0x00, 0x00
- vstelm.d D0, C0, 0x08, 0x01
-
- /* Add stride for C */
- addi.d C0, C0, 0x10
-
- #if defined(TRMMKERNEL)
- #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
- sub.d L, K, OFF
- #ifdef LEFT
- addi.d L, L, -2
- #else
- addi.d L, L, -1
- #endif
- slli.d T0, L, 0x04
- add.d A0, A0, T0
- slli.d T0, L, 0x03
- add.d B0, B0, T0
- #endif
-
- #ifdef LEFT
- addi.d OFF, OFF, 0x02
- #endif
- #endif // #if defined(TRMMKERNEL)
-
- /********LOOP (if(N & 1 ) && (M & 2) ) End************/
-
- .L_N1_M1:
- andi I, M, 1
- beq ZERO,I, .L_N1_M0
-
- #if defined(TRMMKERNEL)
- #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
- move B0, B
- #else
- slli.d T0, OFF, 0x03
- add.d A0, A0, T0
- add.d B0, B, T0
- #endif
-
- #if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
- sub.d L, K, OFF
- #elif defined(LEFT)
- /* number of values in A */
- addi.d L, OFF, 1
- #else
- /* number of values in B */
- addi.d L, OFF, 1
- #endif
- #else // #if !defined(TRMMKERNEL)
- move B0, B
- move L, K /* L = bk */
- #endif
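- /* In the TRMM path with M = 1 and N = 1 both packed panels advance one
-  * double (8 bytes) per k step, so the single shift OFF << 3 serves both
-  * A0 and B0.
-  */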
-
- /* Load 1 * 64 from A0 */
- vldrepl.d U0, A0, 0x00
-
- vldrepl.d U4, B0, 0x00
- /* line 1 */
- vfmul.d D0, U0, U4
-
- /* Add stride for A0 and B0 */
- addi.d A0, A0, 0x08
- addi.d B0, B0, 0x08
- /* Reduce L */
- addi.d L, L, -1
- srai.d TL, L, 3 /* TL = (L-1) >> 3 */
- /* if (TL < 1) goto L_N1_M1_L7 */
- beq ZERO,TL, .L_N1_M1_L7
-
- vldrepl.d U8, A0, 0x00
-
- addi.d TL, TL, -1
-
- vldrepl.d U12, B0, 0x00
- addi.d A0, A0, 0x08
- addi.d B0, B0, 0x08
-
- beq ZERO, TL, .L_N1_M1_TL1_END
-
- .L_N1_M1_TL1: /* TL-- */
- KERNEL8x1x1
-
- addi.d TL, TL, -1 /* TL-- */
- blt ZERO,TL, .L_N1_M1_TL1
- .L_N1_M1_TL1_END:
- KERNEL8x1x1_END
-
- .L_N1_M1_L7:
- /* if (!(L & 7)) goto L_N1_M1_L0 */
- andi TL, L, 7
- beq TL, ZERO,.L_N1_M1_L0
-
- .L_N1_M1_L71:
- vldrepl.d U0, A0, 0x00
-
- vldrepl.d U4, B0, 0x00
- vfmadd.d D0, U0, U4, D0
-
- /* Add stride for A0, B0 */
- addi.d A0, A0, 0x08
- addi.d B0, B0, 0x08
-
- addi.d TL, TL, -1
- blt ZERO,TL, .L_N1_M1_L71
-
- .L_N1_M1_L0:
- #if defined(TRMMKERNEL)
- vfmul.d D0, D0, VALPHA
- #else
- /* Load C0 */
- vldrepl.d U0, C0, 0x00
- vfmadd.d D0, D0, VALPHA, U0 /* D0 = U0 + (D0 * VALPHA) */
- #endif // #if defined(TRMMKERNEL)
-
- vstelm.d D0, C0, 0x00, 0x00
-
- /* Add stride for C */
- addi.d C0, C0, 0x08
-
- #if defined(TRMMKERNEL)
- #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
- sub.d L, K, OFF
- #ifdef LEFT
- addi.d L, L, -1
- #else
- addi.d L, L, -1
- #endif
- slli.d T0, L, 0x03
- add.d A0, A0, T0
- add.d B0, B0, T0
- #endif
-
- #ifdef LEFT
- addi.d OFF, OFF, 0x01
- #endif
- #endif // #if defined(TRMMKERNEL)
-
- /********LOOP (if(N & 1 ) && (M & 1) ) End************/
-
- .L_N1_M0:
-
- /************************* Condition 3 if((N & 1) && (M >> 3)) End !!! *************************
- * dgemm_core_8x1 */
-
- .L_N0:
- /* Restore regs */
- LDARG $r23, $sp, 0
- LDARG $r24, $sp, 8
- LDARG $r25, $sp, 16
- LDARG $r26, $sp, 24
- LDARG $r27, $sp, 32
- LD $f24, $sp, 40
- LD $f25, $sp, 48
- LD $f26, $sp, 56
- LD $f27, $sp, 64
- LD $f28, $sp, 72
- LD $f29, $sp, 80
- LD $f30, $sp, 88
- LD $f31, $sp, 96
- addi.d $sp, $sp, 112
-
- jirl $r0, $r1, 0x0
-
- EPILOGUE
|