- /*******************************************************************************
- Copyright (c) 2015, The OpenBLAS Project
- All rights reserved.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- 3. Neither the name of the OpenBLAS project nor the names of
- its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *******************************************************************************/
-
- #define ASSEMBLER
- #include "common.h"
-
- /*                  X0           X1          X2           d0           X3         X4        X5          X6   */
- /* int CNAME(BLASLONG bm, BLASLONG bn, BLASLONG bk, FLOAT alpha0, FLOAT *ba, FLOAT *bb, FLOAT *C, BLASLONG ldc) */
-
- #define origM x0
- #define origN x1
- #define origK x2
- #define origPA x3
- #define origPB x4
- #define pC x5
- #define LDC x6
- #define temp x7
- #define counterL x8
- #define counterI x9
- #define counterJ x10
- #define pB x11
- #define pCRow0 x12
- #define pCRow1 x13
- #define pCRow2 x14
- #define pA x15
-
- #define alpha0 d2
- #define alphaV0 v2.d[0]
- #define alpha1 d3
- #define alphaV1 v3.d[0]
- #define alpha2 d6
- #define alphaV2 v6.d[0]
- #define alpha3 d7
- #define alphaV3 v7.d[0]
-
- // 00 origM
- // 01 origN
- // 02 origK
- // 03 origPA
- // 04 origPB
- // 05 pC
- // 06 origLDC -> LDC
- // 07 temp
- // 08 counterL
- // 09 counterI
- // 10 counterJ
- // 11 pB
- // 12 pCRow0
- // 13 pCRow1
- // 14 pCRow2
- // 15 pA
- // 16
- // 17
- // 18 must save
- // 19 must save
- // 20 must save
- // 21 must save
- // 22 must save
- // 23 must save
- // 24 must save
- // 25 must save
- // 26 must save
- // 27 must save
- // 28 must save
- // 29 frame
- // 30 link
- // 31 sp
-
- //v00 ALPHA -> pA00, pA01
- //v01 pA02, pA03
- //v02 ALPHA0
- //v03 ALPHA1
- //v04 pA10, pA11
- //v05 pA12, pA13
- //v06 ALPHA2
- //v07 ALPHA3
- //v08 must save pB0_0, pB0_1
- //v09 must save pB0_2, pB0_3
- //v10 must save pB0_4, pB0_5
- //v11 must save pB0_6, pB0_7
- //v12 must save pB1_0, pB1_1
- //v13 must save pB1_2, pB1_3
- //v14 must save pB1_4, pB1_5
- //v15 must save pB1_6, pB1_7
- //v16 must save C00, C01
- //v17 must save C02, C03
- //v18 C04, C05
- //v19 C06, C07
- //v20 C10, C11
- //v21 C12, C13
- //v22 C14, C15
- //v23 C16, C17
- //v24 C20, C21
- //v25 C22, C23
- //v26 C24, C25
- //v27 C26, C27
- //v28 C30, C31
- //v29 C32, C33
- //v30 C34, C35
- //v31 C36, C37
-
- /*******************************************************************************
- * Macro definitions
- *******************************************************************************/
-
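- // INIT4x8: zero the sixteen accumulators (v16-v31) that hold a 4x8 tile of C.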
- .macro INIT4x8
- fmov d16, xzr
- fmov d17, xzr
- fmov d18, xzr
- fmov d19, d16
- fmov d20, xzr
- fmov d21, d16
- fmov d22, d17
- fmov d23, d18
- fmov d24, xzr
- fmov d25, d16
- fmov d26, d17
- fmov d27, d18
- fmov d28, xzr
- fmov d29, d16
- fmov d30, d17
- fmov d31, d18
- .endm
-
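- // The 4x8 kernel is software pipelined. KERNEL4x8_I starts the pipeline
- // (fmul into fresh accumulators, then preload the next A/B panels);
- // KERNEL4x8_M1 and KERNEL4x8_M2 alternate between the v0/v1 + v8-v11 and
- // v4/v5 + v12-v15 register sets, each preloading for the other;
- // KERNEL4x8_E drains the last preloaded set without issuing more loads.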
- .macro KERNEL4x8_I
- ld1 {v8.2d, v9.2d}, [pB]
- add pB, pB, #32
- ld1 {v0.2d, v1.2d}, [pA]
- add pA, pA, #32
- ld1 {v10.2d, v11.2d}, [pB]
- add pB, pB, #32
-
- fmul v16.2d, v0.2d, v8.d[0]
- fmul v17.2d, v1.2d, v8.d[0]
- fmul v18.2d, v0.2d, v8.d[1]
- fmul v19.2d, v1.2d, v8.d[1]
-
- fmul v20.2d, v0.2d, v9.d[0]
- fmul v21.2d, v1.2d, v9.d[0]
- fmul v22.2d, v0.2d, v9.d[1]
- fmul v23.2d, v1.2d, v9.d[1]
-
- fmul v24.2d, v0.2d, v10.d[0]
- fmul v25.2d, v1.2d, v10.d[0]
- fmul v26.2d, v0.2d, v10.d[1]
- fmul v27.2d, v1.2d, v10.d[1]
-
- fmul v28.2d, v0.2d, v11.d[0]
- fmul v29.2d, v1.2d, v11.d[0]
- fmul v30.2d, v0.2d, v11.d[1]
- fmul v31.2d, v1.2d, v11.d[1]
-
- ld1 {v12.2d, v13.2d}, [pB]
- add pB, pB, #32
- ld1 {v4.2d, v5.2d}, [pA]
- add pA, pA, #32
- ld1 {v14.2d, v15.2d}, [pB]
- add pB, pB, #32
- .endm
-
- .macro KERNEL4x8_M1
- fmla v16.2d, v0.2d, v8.d[0]
- fmla v17.2d, v1.2d, v8.d[0]
- fmla v18.2d, v0.2d, v8.d[1]
- fmla v19.2d, v1.2d, v8.d[1]
-
- fmla v20.2d, v0.2d, v9.d[0]
- fmla v21.2d, v1.2d, v9.d[0]
- fmla v22.2d, v0.2d, v9.d[1]
- fmla v23.2d, v1.2d, v9.d[1]
-
- fmla v24.2d, v0.2d, v10.d[0]
- fmla v25.2d, v1.2d, v10.d[0]
- fmla v26.2d, v0.2d, v10.d[1]
- fmla v27.2d, v1.2d, v10.d[1]
-
- fmla v28.2d, v0.2d, v11.d[0]
- fmla v29.2d, v1.2d, v11.d[0]
- fmla v30.2d, v0.2d, v11.d[1]
- fmla v31.2d, v1.2d, v11.d[1]
-
- ld1 {v12.2d, v13.2d}, [pB] // For next round
- add pB, pB, #32
- ld1 {v4.2d, v5.2d}, [pA] // For next round
- add pA, pA, #32
- ld1 {v14.2d, v15.2d}, [pB]
- add pB, pB, #32
-
- prfm PLDL1KEEP, [pA, #512]
- .endm
-
- .macro KERNEL4x8_M2
- fmla v16.2d, v4.2d, v12.d[0]
- fmla v17.2d, v5.2d, v12.d[0]
- fmla v18.2d, v4.2d, v12.d[1]
- fmla v19.2d, v5.2d, v12.d[1]
-
- fmla v20.2d, v4.2d, v13.d[0]
- fmla v21.2d, v5.2d, v13.d[0]
- fmla v22.2d, v4.2d, v13.d[1]
- fmla v23.2d, v5.2d, v13.d[1]
-
- fmla v24.2d, v4.2d, v14.d[0]
- fmla v25.2d, v5.2d, v14.d[0]
- fmla v26.2d, v4.2d, v14.d[1]
- fmla v27.2d, v5.2d, v14.d[1]
-
- fmla v28.2d, v4.2d, v15.d[0]
- fmla v29.2d, v5.2d, v15.d[0]
- fmla v30.2d, v4.2d, v15.d[1]
- fmla v31.2d, v5.2d, v15.d[1]
-
- ld1 {v8.2d, v9.2d}, [pB] // For next round
- add pB, pB, #32
- ld1 {v0.2d, v1.2d}, [pA] // For next round
- add pA, pA, #32
- ld1 {v10.2d, v11.2d}, [pB]
- add pB, pB, #32
-
- prfm PLDL1KEEP, [pB, #512]
- .endm
-
- .macro KERNEL4x8_E
- fmla v16.2d, v4.2d, v12.d[0]
- fmla v17.2d, v5.2d, v12.d[0]
- fmla v18.2d, v4.2d, v12.d[1]
- fmla v19.2d, v5.2d, v12.d[1]
-
- fmla v20.2d, v4.2d, v13.d[0]
- fmla v21.2d, v5.2d, v13.d[0]
- fmla v22.2d, v4.2d, v13.d[1]
- fmla v23.2d, v5.2d, v13.d[1]
-
- fmla v24.2d, v4.2d, v14.d[0]
- fmla v25.2d, v5.2d, v14.d[0]
- fmla v26.2d, v4.2d, v14.d[1]
- fmla v27.2d, v5.2d, v14.d[1]
-
- fmla v28.2d, v4.2d, v15.d[0]
- fmla v29.2d, v5.2d, v15.d[0]
- fmla v30.2d, v4.2d, v15.d[1]
- fmla v31.2d, v5.2d, v15.d[1]
- .endm
-
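- // KERNEL4x8_SUB: one standalone K step (load, then accumulate), used for
- // the odd K remainder.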
- .macro KERNEL4x8_SUB
- ld1 {v8.2d, v9.2d}, [pB]
- add pB, pB, #32
- ld1 {v0.2d, v1.2d}, [pA]
- add pA, pA, #32
- ld1 {v10.2d, v11.2d}, [pB]
- add pB, pB, #32
-
- fmla v16.2d, v0.2d, v8.d[0]
- fmla v17.2d, v1.2d, v8.d[0]
- fmla v18.2d, v0.2d, v8.d[1]
- fmla v19.2d, v1.2d, v8.d[1]
-
- fmla v20.2d, v0.2d, v9.d[0]
- fmla v21.2d, v1.2d, v9.d[0]
- fmla v22.2d, v0.2d, v9.d[1]
- fmla v23.2d, v1.2d, v9.d[1]
-
- fmla v24.2d, v0.2d, v10.d[0]
- fmla v25.2d, v1.2d, v10.d[0]
- fmla v26.2d, v0.2d, v10.d[1]
- fmla v27.2d, v1.2d, v10.d[1]
-
- fmla v28.2d, v0.2d, v11.d[0]
- fmla v29.2d, v1.2d, v11.d[0]
- fmla v30.2d, v0.2d, v11.d[1]
- fmla v31.2d, v1.2d, v11.d[1]
- .endm
-
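- // SAVE4x8: for each of the eight columns of the tile, load four elements
- // of C, fold in alpha * accumulator with fmla, and store back; pCRow1 and
- // pCRow2 leapfrog down the columns in steps of LDC.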
- .macro SAVE4x8
- add pCRow1, pCRow0, LDC
-
- ld1 {v8.2d, v9.2d}, [pCRow0]
- fmla v8.2d, v16.2d, alphaV0
- fmla v9.2d, v17.2d, alphaV1
- st1 {v8.2d, v9.2d}, [pCRow0]
-
- add pCRow2, pCRow1, LDC
-
- ld1 {v10.2d, v11.2d}, [pCRow1]
- fmla v10.2d, v18.2d, alphaV2
- fmla v11.2d, v19.2d, alphaV3
- st1 {v10.2d, v11.2d}, [pCRow1]
-
- add pCRow1, pCRow2, LDC
-
- ld1 {v12.2d, v13.2d}, [pCRow2]
- fmla v12.2d, v20.2d, alphaV0
- fmla v13.2d, v21.2d, alphaV1
- st1 {v12.2d, v13.2d}, [pCRow2]
-
- add pCRow2, pCRow1, LDC
-
- ld1 {v14.2d, v15.2d}, [pCRow1]
- fmla v14.2d, v22.2d, alphaV2
- fmla v15.2d, v23.2d, alphaV3
- st1 {v14.2d, v15.2d}, [pCRow1]
-
- add pCRow1, pCRow2, LDC
-
- ld1 {v8.2d, v9.2d}, [pCRow2]
- fmla v8.2d, v24.2d, alphaV0
- fmla v9.2d, v25.2d, alphaV1
- st1 {v8.2d, v9.2d}, [pCRow2]
-
- add pCRow2, pCRow1, LDC
-
- ld1 {v10.2d, v11.2d}, [pCRow1]
- fmla v10.2d, v26.2d, alphaV2
- fmla v11.2d, v27.2d, alphaV3
- st1 {v10.2d, v11.2d}, [pCRow1]
-
- add pCRow1, pCRow2, LDC
-
- ld1 {v12.2d, v13.2d}, [pCRow2]
- fmla v12.2d, v28.2d, alphaV0
- fmla v13.2d, v29.2d, alphaV1
- st1 {v12.2d, v13.2d}, [pCRow2]
-
- ld1 {v14.2d, v15.2d}, [pCRow1]
- fmla v14.2d, v30.2d, alphaV2
- fmla v15.2d, v31.2d, alphaV3
- st1 {v14.2d, v15.2d}, [pCRow1]
-
- add pCRow0, pCRow0, #32
- .endm
-
- /******************************************************************************/
-
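- // 2x8 tile (M remainder of 2): two rows of A per K step; only the
- // even-numbered accumulators v16, v18, ..., v30 are used.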
- .macro INIT2x8
- fmov d16, xzr
- fmov d18, xzr
- fmov d20, xzr
- fmov d22, d16
- fmov d24, xzr
- fmov d26, d16
- fmov d28, xzr
- fmov d30, d16
- .endm
-
- .macro KERNEL2x8_SUB
- ld1 {v8.2d, v9.2d}, [pB]
- add pB, pB, #32
- ld1 {v0.2d}, [pA]
- add pA, pA, #16
- ld1 {v10.2d, v11.2d}, [pB]
- add pB, pB, #32
-
- fmla v16.2d, v0.2d, v8.d[0]
- fmla v18.2d, v0.2d, v8.d[1]
-
- fmla v20.2d, v0.2d, v9.d[0]
- fmla v22.2d, v0.2d, v9.d[1]
-
- fmla v24.2d, v0.2d, v10.d[0]
- fmla v26.2d, v0.2d, v10.d[1]
-
- fmla v28.2d, v0.2d, v11.d[0]
- fmla v30.2d, v0.2d, v11.d[1]
- .endm
-
- .macro SAVE2x8
- add pCRow1, pCRow0, LDC
-
- ld1 {v8.2d}, [pCRow0]
- fmla v8.2d, v16.2d, alphaV0
- st1 {v8.2d}, [pCRow0]
-
- add pCRow2, pCRow1, LDC
-
- ld1 {v10.2d}, [pCRow1]
- fmla v10.2d, v18.2d, alphaV2
- st1 {v10.2d}, [pCRow1]
-
- add pCRow1, pCRow2, LDC
-
- ld1 {v12.2d}, [pCRow2]
- fmla v12.2d, v20.2d, alphaV0
- st1 {v12.2d}, [pCRow2]
-
- add pCRow2, pCRow1, LDC
-
- ld1 {v14.2d}, [pCRow1]
- fmla v14.2d, v22.2d, alphaV2
- st1 {v14.2d}, [pCRow1]
-
- add pCRow1, pCRow2, LDC
-
- ld1 {v8.2d}, [pCRow2]
- fmla v8.2d, v24.2d, alphaV0
- st1 {v8.2d}, [pCRow2]
-
- add pCRow2, pCRow1, LDC
-
- ld1 {v10.2d}, [pCRow1]
- fmla v10.2d, v26.2d, alphaV2
- st1 {v10.2d}, [pCRow1]
-
- add pCRow1, pCRow2, LDC
-
- ld1 {v12.2d}, [pCRow2]
- fmla v12.2d, v28.2d, alphaV0
- st1 {v12.2d}, [pCRow2]
-
- add pCRow2, pCRow1, LDC
-
- ld1 {v14.2d}, [pCRow1]
- fmla v14.2d, v30.2d, alphaV2
- st1 {v14.2d}, [pCRow1]
-
- add pCRow0, pCRow0, #16
- .endm
-
- /******************************************************************************/
-
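- // 1x8 tile (M remainder of 1): one scalar of A per K step; v16, v20, v24
- // and v28 each hold two of the eight columns, so SAVE1x8 scatters the two
- // lanes to two different column pointers.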
- .macro INIT1x8
- fmov d16, xzr
- fmov d20, xzr
- fmov d24, xzr
- fmov d28, xzr
- .endm
-
- .macro KERNEL1x8_SUB
- ld1 {v8.2d, v9.2d}, [pB]
- add pB, pB, #32
- ldr d0, [pA]
- add pA, pA, #8
- ld1 {v10.2d, v11.2d}, [pB]
- add pB, pB, #32
-
- fmla v16.2d, v8.2d, v0.d[0]
- fmla v20.2d, v9.2d, v0.d[0]
- fmla v24.2d, v10.2d, v0.d[0]
- fmla v28.2d, v11.2d, v0.d[0]
- .endm
-
- .macro SAVE1x8
- add pCRow1, pCRow0, LDC
-
- ld1 {v8.d}[0], [pCRow0]
- ld1 {v8.d}[1], [pCRow1]
- fmla v8.2d, v16.2d, alphaV0
- st1 {v8.d}[0], [pCRow0]
- st1 {v8.d}[1], [pCRow1]
-
- add pCRow2, pCRow1, LDC
- add pCRow1, pCRow2, LDC
-
- ld1 {v10.d}[0], [pCRow2]
- ld1 {v10.d}[1], [pCRow1]
- fmla v10.2d, v20.2d, alphaV1
- st1 {v10.d}[0], [pCRow2]
- st1 {v10.d}[1], [pCRow1]
-
- add pCRow2, pCRow1, LDC
- add pCRow1, pCRow2, LDC
-
- ld1 {v12.d}[0], [pCRow2]
- ld1 {v12.d}[1], [pCRow1]
- fmla v12.2d, v24.2d, alphaV2
- st1 {v12.d}[0], [pCRow2]
- st1 {v12.d}[1], [pCRow1]
-
- add pCRow2, pCRow1, LDC
- add pCRow1, pCRow2, LDC
-
- ld1 {v14.d}[0], [pCRow2]
- ld1 {v14.d}[1], [pCRow1]
- fmla v14.2d, v28.2d, alphaV3
- st1 {v14.d}[0], [pCRow2]
- st1 {v14.d}[1], [pCRow1]
-
- add pCRow0, pCRow0, #8
- .endm
-
- /******************************************************************************/
-
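- // 4x4 tile: same ping-pong pipeline as the 4x8 kernel above, with a
- // four-element B panel per K step.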
- .macro INIT4x4
- fmov d16, xzr
- fmov d17, d16
- fmov d20, d17
- fmov d21, d16
- fmov d24, d17
- fmov d25, d16
- fmov d28, d17
- fmov d29, d16
- .endm
-
- .macro KERNEL4x4_I
- ld1 {v8.2d, v9.2d}, [pB]
- add pB, pB, #32
- ld1 {v0.2d, v1.2d}, [pA]
- add pA, pA, #32
-
- fmul v16.2d, v0.2d, v8.d[0]
- fmul v29.2d, v1.2d, v9.d[1]
-
- fmul v20.2d, v0.2d, v8.d[1]
- fmul v25.2d, v1.2d, v9.d[0]
-
- fmul v24.2d, v0.2d, v9.d[0]
- fmul v21.2d, v1.2d, v8.d[1]
-
- fmul v28.2d, v0.2d, v9.d[1]
- fmul v17.2d, v1.2d, v8.d[0]
-
- ld1 {v12.2d, v13.2d}, [pB]
- add pB, pB, #32
- ld1 {v4.2d, v5.2d}, [pA]
- add pA, pA, #32
- .endm
-
- .macro KERNEL4x4_M1
- fmla v16.2d, v0.2d, v8.d[0]
- fmla v29.2d, v1.2d, v9.d[1]
-
- ld1 {v12.2d, v13.2d}, [pB] // For next round
- add pB, pB, #32
-
- fmla v20.2d, v0.2d, v8.d[1]
- fmla v25.2d, v1.2d, v9.d[0]
-
- ld1 {v4.2d, v5.2d}, [pA] // For next round
- add pA, pA, #32
-
- fmla v24.2d, v0.2d, v9.d[0]
- fmla v21.2d, v1.2d, v8.d[1]
-
- prfm PLDL1KEEP, [pA, #512]
-
- fmla v28.2d, v0.2d, v9.d[1]
- fmla v17.2d, v1.2d, v8.d[0]
- .endm
-
- .macro KERNEL4x4_M2
- fmla v16.2d, v4.2d, v12.d[0]
- fmla v29.2d, v5.2d, v13.d[1]
-
- ld1 {v8.2d, v9.2d}, [pB] // For next round
- add pB, pB, #32
-
- fmla v20.2d, v4.2d, v12.d[1]
- fmla v25.2d, v5.2d, v13.d[0]
-
- ld1 {v0.2d, v1.2d}, [pA] // For next round
- add pA, pA, #32
-
- fmla v24.2d, v4.2d, v13.d[0]
- fmla v21.2d, v5.2d, v12.d[1]
-
- prfm PLDL1KEEP, [pB, #512]
-
- fmla v28.2d, v4.2d, v13.d[1]
- fmla v17.2d, v5.2d, v12.d[0]
- .endm
-
- .macro KERNEL4x4_E
- fmla v16.2d, v4.2d, v12.d[0]
- fmla v29.2d, v5.2d, v13.d[1]
-
- fmla v20.2d, v4.2d, v12.d[1]
- fmla v25.2d, v5.2d, v13.d[0]
-
- fmla v24.2d, v4.2d, v13.d[0]
- fmla v21.2d, v5.2d, v12.d[1]
-
- fmla v28.2d, v4.2d, v13.d[1]
- fmla v17.2d, v5.2d, v12.d[0]
- .endm
-
- .macro KERNEL4x4_SUB
- ld1 {v8.2d, v9.2d}, [pB]
- add pB, pB, #32
- ld1 {v0.2d, v1.2d}, [pA]
- add pA, pA, #32
-
- fmla v16.2d, v0.2d, v8.d[0]
- fmla v29.2d, v1.2d, v9.d[1]
-
- fmla v20.2d, v0.2d, v8.d[1]
- fmla v25.2d, v1.2d, v9.d[0]
-
- fmla v24.2d, v0.2d, v9.d[0]
- fmla v21.2d, v1.2d, v8.d[1]
-
- fmla v28.2d, v0.2d, v9.d[1]
- fmla v17.2d, v1.2d, v8.d[0]
- .endm
-
- .macro SAVE4x4
- ld1 {v8.2d, v9.2d}, [pCRow0]
- fmla v8.2d, v16.2d, alphaV0
- fmla v9.2d, v17.2d, alphaV1
- st1 {v8.2d, v9.2d}, [pCRow0]
-
- add pCRow1, pCRow0, LDC
-
- ld1 {v12.2d, v13.2d}, [pCRow1]
- fmla v12.2d, v20.2d, alphaV2
- fmla v13.2d, v21.2d, alphaV3
- st1 {v12.2d, v13.2d}, [pCRow1]
-
- add pCRow2, pCRow1, LDC
-
- ld1 {v8.2d, v9.2d}, [pCRow2]
- fmla v8.2d, v24.2d, alphaV0
- fmla v9.2d, v25.2d, alphaV1
- st1 {v8.2d, v9.2d}, [pCRow2]
-
- add pCRow1, pCRow2, LDC
-
- ld1 {v12.2d, v13.2d}, [pCRow1]
- fmla v12.2d, v28.2d, alphaV2
- fmla v13.2d, v29.2d, alphaV3
- st1 {v12.2d, v13.2d}, [pCRow1]
-
- add pCRow0, pCRow0, #32
- .endm
-
- /******************************************************************************/
-
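- // The remaining tile sizes (2x4 down to 1x1) are plain accumulate kernels
- // without pipelining; each pairs matching INIT/KERNEL/SAVE macros.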
- .macro INIT2x4
- fmov d16, xzr
- fmov d20, d16
- fmov d24, d20
- fmov d28, d16
- .endm
-
- .macro KERNEL2x4_SUB
- ld1 {v8.2d, v9.2d}, [pB]
- add pB, pB, #32
- ld1 {v0.2d}, [pA]
- add pA, pA, #16
-
- fmla v16.2d, v0.2d, v8.d[0]
- fmla v20.2d, v0.2d, v8.d[1]
- fmla v24.2d, v0.2d, v9.d[0]
- fmla v28.2d, v0.2d, v9.d[1]
- .endm
-
- .macro SAVE2x4
- ld1 {v8.2d}, [pCRow0]
- fmla v8.2d, v16.2d, alphaV0
- st1 {v8.2d}, [pCRow0]
-
- add pCRow1, pCRow0, LDC
-
- ld1 {v12.2d}, [pCRow1]
- fmla v12.2d, v20.2d, alphaV1
- st1 {v12.2d}, [pCRow1]
-
- add pCRow2, pCRow1, LDC
-
- ld1 {v8.2d}, [pCRow2]
- fmla v8.2d, v24.2d, alphaV2
- st1 {v8.2d}, [pCRow2]
-
- add pCRow1, pCRow2, LDC
-
- ld1 {v12.2d}, [pCRow1]
- fmla v12.2d, v28.2d, alphaV3
- st1 {v12.2d}, [pCRow1]
-
- add pCRow0, pCRow0, #16
- .endm
-
- /******************************************************************************/
-
- .macro INIT1x4
- fmov d16, xzr
- fmov d20, d16
- .endm
-
- .macro KERNEL1x4_SUB
- ldr d0, [pA]
- add pA, pA, #8
-
- ld1 {v8.2d, v9.2d}, [pB]
- add pB, pB, #32
-
- fmla v16.2d, v8.2d, v0.d[0]
- fmla v20.2d, v9.2d, v0.d[0]
- .endm
-
- .macro SAVE1x4
- add pCRow1, pCRow0, LDC
-
- ld1 {v8.d}[0], [pCRow0]
- ld1 {v8.d}[1], [pCRow1]
- fmla v8.2d, v16.2d, alphaV0
- st1 {v8.d}[0], [pCRow0]
- st1 {v8.d}[1], [pCRow1]
-
- add pCRow2, pCRow1, LDC
- add pCRow1, pCRow2, LDC
-
- ld1 {v12.d}[0], [pCRow2]
- ld1 {v12.d}[1], [pCRow1]
- fmla v12.2d, v20.2d, alphaV1
- st1 {v12.d}[0], [pCRow2]
- st1 {v12.d}[1], [pCRow1]
-
- add pCRow0, pCRow0, #8
- .endm
-
- /******************************************************************************/
-
- .macro INIT4x2
- fmov d16, xzr
- fmov d17, d16
- fmov d20, d17
- fmov d21, d16
- .endm
-
- .macro KERNEL4x2_SUB
- ld1 {v8.2d}, [pB]
- add pB, pB, #16
- ld1 {v0.2d, v1.2d}, [pA]
- add pA, pA, #32
-
- fmla v16.2d, v0.2d, v8.d[0]
- fmla v17.2d, v1.2d, v8.d[0]
- fmla v20.2d, v0.2d, v8.d[1]
- fmla v21.2d, v1.2d, v8.d[1]
- .endm
-
- .macro SAVE4x2
- ld1 {v8.2d, v9.2d}, [pCRow0]
- fmla v8.2d, v16.2d, alphaV0
- fmla v9.2d, v17.2d, alphaV1
- st1 {v8.2d, v9.2d}, [pCRow0]
-
- add pCRow1, pCRow0, LDC
-
- ld1 {v12.2d, v13.2d}, [pCRow1]
- fmla v12.2d, v20.2d, alphaV2
- fmla v13.2d, v21.2d, alphaV3
- st1 {v12.2d, v13.2d}, [pCRow1]
-
- add pCRow0, pCRow0, #32
- .endm
-
- /******************************************************************************/
-
- .macro INIT2x2
- fmov d16, xzr
- fmov d20, d16
- .endm
-
- .macro KERNEL2x2_SUB
- ld1 {v8.2d}, [pB]
- add pB, pB, #16
-
- ld1 {v0.2d}, [pA]
- add pA, pA, #16
-
- fmla v16.2d, v0.2d, v8.d[0]
- fmla v20.2d, v0.2d, v8.d[1]
- .endm
-
- .macro SAVE2x2
- ld1 {v8.2d}, [pCRow0]
- fmla v8.2d, v16.2d, alphaV0
- st1 {v8.2d}, [pCRow0]
-
- add pCRow1 , pCRow0, LDC
-
- ld1 {v12.2d}, [pCRow1]
- fmla v12.2d, v20.2d, alphaV1
- st1 {v12.2d}, [pCRow1]
-
- add pCRow0, pCRow0, #16
- .endm
-
- /******************************************************************************/
-
- .macro INIT1x2
- fmov d16, xzr
- .endm
-
- .macro KERNEL1x2_SUB
- ld1 {v8.2d} , [pB]
- add pB , pB, #16
-
- ldr d0 , [pA]
- add pA, pA, #8
-
- fmla v16.2d, v8.2d, v0.d[0]
- .endm
-
- .macro SAVE1x2
- add pCRow1 , pCRow0, LDC
-
- ld1 {v8.d}[0], [pCRow0]
- ld1 {v8.d}[1], [pCRow1]
- fmla v8.2d, v16.2d, alphaV0
- st1 {v8.d}[0], [pCRow0]
- st1 {v8.d}[1], [pCRow1]
-
- add pCRow0, pCRow0, #8
- .endm
-
- /******************************************************************************/
-
- .macro INIT4x1
- fmov d16, xzr
- fmov d17, d16
- .endm
-
- .macro KERNEL4x1_SUB
- ldr d8, [pB]
- add pB , pB, #8
-
- ld1 {v0.2d, v1.2d}, [pA]
- add pA , pA, #32
-
- fmla v16.2d, v0.2d, v8.d[0]
- fmla v17.2d, v1.2d, v8.d[0]
- .endm
-
- .macro SAVE4x1
- ld1 {v8.2d, v9.2d}, [pCRow0]
- fmla v8.2d, v16.2d, alphaV0
- fmla v9.2d, v17.2d, alphaV1
- st1 {v8.2d, v9.2d}, [pCRow0]
-
- add pCRow0, pCRow0, #32
- .endm
-
- /******************************************************************************/
-
- .macro INIT2x1
- fmov d16, xzr
- .endm
-
- .macro KERNEL2x1_SUB
- ldr d8, [pB]
- add pB , pB, #8
-
- ld1 {v0.2d}, [pA]
- add pA , pA, #16
-
- fmla v16.2d, v0.2d, v8.d[0]
- .endm
-
- .macro SAVE2x1
- ld1 {v8.2d}, [pCRow0]
- fmla v8.2d, v16.2d, alphaV0
- st1 {v8.2d}, [pCRow0]
-
- add pCRow0, pCRow0, #16
- .endm
-
- /******************************************************************************/
-
- .macro INIT1x1
- fmov d16, xzr
- .endm
-
- .macro KERNEL1x1_SUB
- ldr d8, [pB]
- add pB , pB, #8
-
- ldr d0, [pA]
- add pA , pA, #8
-
- fmadd d16, d0, d8, d16
- .endm
-
- .macro SAVE1x1
- ldr d8, [pCRow0]
- fmadd d8, d16, alpha0, d8
- str d8, [pCRow0]
-
- add pCRow0, pCRow0, #8
- .endm
-
- /*******************************************************************************
- * End of macro definitions
- *******************************************************************************/
-
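- // Body: counterJ walks N in blocks of 8, then 4, 2, 1; counterI walks M
- // in blocks of 4, then 2, 1; counterL walks K, unrolled by 2 in the
- // pipelined 4x8/4x4 paths and by 8 elsewhere, with scalar remainders.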
- PROLOGUE
-
- .align 5
- add sp, sp, #-(11 * 16)
- stp d8, d9, [sp, #(0 * 16)]
- stp d10, d11, [sp, #(1 * 16)]
- stp d12, d13, [sp, #(2 * 16)]
- stp d14, d15, [sp, #(3 * 16)]
- stp d16, d17, [sp, #(4 * 16)]
- stp x18, x19, [sp, #(5 * 16)]
- stp x20, x21, [sp, #(6 * 16)]
- stp x22, x23, [sp, #(7 * 16)]
- stp x24, x25, [sp, #(8 * 16)]
- stp x26, x27, [sp, #(9 * 16)]
- str x28, [sp, #(10 * 16)]
-
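- // alpha arrives in d0 (AAPCS64); spread it across four registers so the
- // SAVE macros can reference distinct scaling sources.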
- fmov alpha0, d0
- fmov alpha1, d0
- fmov alpha2, d0
- fmov alpha3, d0
-
- lsl LDC, LDC, #3 // ldc = ldc * 8
-
- mov pB, origPB
-
- mov counterJ, origN
- asr counterJ, counterJ, #3 // J = J / 8
- cmp counterJ, #0
- ble dgemm_kernel_L4_BEGIN
-
- /******************************************************************************/
-
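- // J loop: each pass consumes an 8-column panel of B and C.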
- dgemm_kernel_L8_BEGIN:
-
- mov pCRow0, pC // pCRow0 = C
- add pC, pC, LDC, lsl #3 // pC += 8 * LDC (next 8-column panel)
-
- mov pA, origPA // pA = start of A array
-
- dgemm_kernel_L8_M4_BEGIN:
-
- mov counterI, origM
- asr counterI, counterI, #2 // counterI = counterI / 4
- cmp counterI, #0
- ble dgemm_kernel_L8_M2_BEGIN
-
- dgemm_kernel_L8_M4_20:
-
- mov pB, origPB
-
- asr counterL , origK, #1 // L = K / 2
- cmp counterL , #2 // are there at least 4 K steps to do?
- blt dgemm_kernel_L8_M4_32
-
- KERNEL4x8_I // first K step: start the pipeline
- KERNEL4x8_M2 // second K step
-
- subs counterL, counterL, #2
- ble dgemm_kernel_L8_M4_22a
- .align 5
-
- dgemm_kernel_L8_M4_22:
-
- KERNEL4x8_M1
- KERNEL4x8_M2
-
- subs counterL, counterL, #1
- bgt dgemm_kernel_L8_M4_22
-
-
- dgemm_kernel_L8_M4_22a:
-
- KERNEL4x8_M1
- KERNEL4x8_E
-
- b dgemm_kernel_L8_M4_44
-
- dgemm_kernel_L8_M4_32:
-
- tst counterL, #1
- ble dgemm_kernel_L8_M4_40
-
- KERNEL4x8_I
-
- KERNEL4x8_E
-
- b dgemm_kernel_L8_M4_44
-
-
- dgemm_kernel_L8_M4_40:
-
- INIT4x8
-
- dgemm_kernel_L8_M4_44:
-
- ands counterL , origK, #1
- ble dgemm_kernel_L8_M4_100
-
- dgemm_kernel_L8_M4_46:
-
- KERNEL4x8_SUB
-
- dgemm_kernel_L8_M4_100:
-
- SAVE4x8
-
- dgemm_kernel_L8_M4_END:
- subs counterI, counterI, #1
- bne dgemm_kernel_L8_M4_20
-
- dgemm_kernel_L8_M2_BEGIN:
-
- mov counterI, origM
- tst counterI , #3
- ble dgemm_kernel_L8_END
-
- tst counterI, #2 // is there a 2-row remainder in M?
- ble dgemm_kernel_L8_M1_BEGIN
-
- dgemm_kernel_L8_M2_20:
-
- INIT2x8
-
- mov pB, origPB
-
- asr counterL , origK, #3 // counterL = K / 8
- cmp counterL , #0
- ble dgemm_kernel_L8_M2_40
-
- dgemm_kernel_L8_M2_22:
-
- KERNEL2x8_SUB
- KERNEL2x8_SUB
- KERNEL2x8_SUB
- KERNEL2x8_SUB
-
- KERNEL2x8_SUB
- KERNEL2x8_SUB
- KERNEL2x8_SUB
- KERNEL2x8_SUB
-
- subs counterL, counterL, #1
- bgt dgemm_kernel_L8_M2_22
-
-
- dgemm_kernel_L8_M2_40:
-
- ands counterL , origK, #7 // counterL = K % 8
- ble dgemm_kernel_L8_M2_100
-
- dgemm_kernel_L8_M2_42:
-
- KERNEL2x8_SUB
-
- subs counterL, counterL, #1
- bgt dgemm_kernel_L8_M2_42
-
- dgemm_kernel_L8_M2_100:
-
- SAVE2x8
-
- dgemm_kernel_L8_M2_END:
-
-
- dgemm_kernel_L8_M1_BEGIN:
-
- tst counterI, #1 // is there a single row left in M?
- ble dgemm_kernel_L8_END
-
- dgemm_kernel_L8_M1_20:
-
- INIT1x8
-
- mov pB, origPB
-
- asr counterL , origK, #3 // counterL = K / 8
- cmp counterL , #0
- ble dgemm_kernel_L8_M1_40
-
- dgemm_kernel_L8_M1_22:
- KERNEL1x8_SUB
- KERNEL1x8_SUB
- KERNEL1x8_SUB
- KERNEL1x8_SUB
-
- KERNEL1x8_SUB
- KERNEL1x8_SUB
- KERNEL1x8_SUB
- KERNEL1x8_SUB
-
- subs counterL, counterL, #1
- bgt dgemm_kernel_L8_M1_22
-
-
- dgemm_kernel_L8_M1_40:
-
- ands counterL , origK, #7 // counterL = K % 8
- ble dgemm_kernel_L8_M1_100
-
- dgemm_kernel_L8_M1_42:
-
- KERNEL1x8_SUB
-
- subs counterL, counterL, #1
- bgt dgemm_kernel_L8_M1_42
-
- dgemm_kernel_L8_M1_100:
-
- SAVE1x8
-
- dgemm_kernel_L8_END:
-
- lsl temp, origK, #6
- add origPB, origPB, temp // B = B + K * 8 * 8
-
- subs counterJ, counterJ , #1 // j--
- bgt dgemm_kernel_L8_BEGIN
-
-
- /******************************************************************************/
-
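- // N remainder: handle a 4-column panel if bit 2 of N is set.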
- dgemm_kernel_L4_BEGIN:
-
- mov counterJ , origN
- tst counterJ , #7
- ble dgemm_kernel_L999
-
- tst counterJ , #4
- ble dgemm_kernel_L2_BEGIN
-
- mov pCRow0, pC // pCRow0 = C
- add pC, pC, LDC, lsl #2 // pC += 4 * LDC (next 4-column panel)
-
- mov pA, origPA // pA = start of A array
-
- dgemm_kernel_L4_M4_BEGIN:
-
- mov counterI, origM
- asr counterI, counterI, #2 // counterI = counterI / 4
- cmp counterI, #0
- ble dgemm_kernel_L4_M2_BEGIN
-
- dgemm_kernel_L4_M4_20:
-
- mov pB, origPB
-
- asr counterL , origK, #1 // L = K / 2
- cmp counterL , #2 // are there at least 4 K steps to do?
- blt dgemm_kernel_L4_M4_32
-
- KERNEL4x4_I // first K step: start the pipeline
- KERNEL4x4_M2 // second K step
-
- subs counterL, counterL, #2
- ble dgemm_kernel_L4_M4_22a
- .align 5
-
- dgemm_kernel_L4_M4_22:
-
- KERNEL4x4_M1
- KERNEL4x4_M2
-
- subs counterL, counterL, #1
- bgt dgemm_kernel_L4_M4_22
-
-
- dgemm_kernel_L4_M4_22a:
-
- KERNEL4x4_M1
- KERNEL4x4_E
-
- b dgemm_kernel_L4_M4_44
-
- dgemm_kernel_L4_M4_32:
-
- tst counterL, #1
- ble dgemm_kernel_L4_M4_40
-
- KERNEL4x4_I
-
- KERNEL4x4_E
-
- b dgemm_kernel_L4_M4_44
-
-
- dgemm_kernel_L4_M4_40:
-
- INIT4x4
-
- dgemm_kernel_L4_M4_44:
-
- ands counterL , origK, #1
- ble dgemm_kernel_L4_M4_100
-
- dgemm_kernel_L4_M4_46:
-
- KERNEL4x4_SUB
-
- dgemm_kernel_L4_M4_100:
-
- SAVE4x4
-
- dgemm_kernel_L4_M4_END:
- subs counterI, counterI, #1
- bne dgemm_kernel_L4_M4_20
-
- dgemm_kernel_L4_M2_BEGIN:
-
- mov counterI, origM
- tst counterI , #3
- ble dgemm_kernel_L4_END
-
- tst counterI, #2 // is there a 2-row remainder in M?
- ble dgemm_kernel_L4_M1_BEGIN
-
- dgemm_kernel_L4_M2_20:
-
- INIT2x4
-
- mov pB, origPB
-
- asr counterL , origK, #3 // counterL = K / 8
- cmp counterL , #0
- ble dgemm_kernel_L4_M2_40
-
- dgemm_kernel_L4_M2_22:
-
- KERNEL2x4_SUB
- KERNEL2x4_SUB
- KERNEL2x4_SUB
- KERNEL2x4_SUB
-
- KERNEL2x4_SUB
- KERNEL2x4_SUB
- KERNEL2x4_SUB
- KERNEL2x4_SUB
-
- subs counterL, counterL, #1
- bgt dgemm_kernel_L4_M2_22
-
-
- dgemm_kernel_L4_M2_40:
-
- ands counterL , origK, #7 // counterL = K % 8
- ble dgemm_kernel_L4_M2_100
-
- dgemm_kernel_L4_M2_42:
-
- KERNEL2x4_SUB
-
- subs counterL, counterL, #1
- bgt dgemm_kernel_L4_M2_42
-
- dgemm_kernel_L4_M2_100:
-
- SAVE2x4
-
- dgemm_kernel_L4_M2_END:
-
-
- dgemm_kernel_L4_M1_BEGIN:
-
- tst counterI, #1 // is there a single row left in M?
- ble dgemm_kernel_L4_END
-
- dgemm_kernel_L4_M1_20:
-
- INIT1x4
-
- mov pB, origPB
-
- asr counterL , origK, #3 // counterL = K / 8
- cmp counterL , #0
- ble dgemm_kernel_L4_M1_40
-
- dgemm_kernel_L4_M1_22:
- KERNEL1x4_SUB
- KERNEL1x4_SUB
- KERNEL1x4_SUB
- KERNEL1x4_SUB
-
- KERNEL1x4_SUB
- KERNEL1x4_SUB
- KERNEL1x4_SUB
- KERNEL1x4_SUB
-
- subs counterL, counterL, #1
- bgt dgemm_kernel_L4_M1_22
-
-
- dgemm_kernel_L4_M1_40:
-
- ands counterL , origK, #7 // counterL = K % 8
- ble dgemm_kernel_L4_M1_100
-
- dgemm_kernel_L4_M1_42:
-
- KERNEL1x4_SUB
-
- subs counterL, counterL, #1
- bgt dgemm_kernel_L4_M1_42
-
- dgemm_kernel_L4_M1_100:
-
- SAVE1x4
-
- dgemm_kernel_L4_END:
-
- lsl temp, origK, #5
- add origPB, origPB, temp // B = B + K * 4 * 8
-
- /******************************************************************************/
-
- dgemm_kernel_L2_BEGIN: // fewer than 4 columns left in N
-
- mov counterJ , origN
- tst counterJ , #3
- ble dgemm_kernel_L999 // nothing left in N (N mod 4 == 0)
-
- tst counterJ , #2
- ble dgemm_kernel_L1_BEGIN
-
- mov pCRow0, pC // pCRow0 = pC
-
- add pC, pC, LDC, lsl #1 // pC += 2 * LDC (next 2-column panel)
-
- mov pA, origPA // pA = A
-
-
- dgemm_kernel_L2_M4_BEGIN:
-
- mov counterI, origM
- asr counterI, counterI, #2 // counterI = counterI / 4
- cmp counterI,#0
- ble dgemm_kernel_L2_M2_BEGIN
-
- dgemm_kernel_L2_M4_20:
-
- INIT4x2
-
- mov pB, origPB
-
- asr counterL , origK, #3 // counterL = K / 8
- cmp counterL,#0
- ble dgemm_kernel_L2_M4_40
- .align 5
-
- dgemm_kernel_L2_M4_22:
- KERNEL4x2_SUB
- KERNEL4x2_SUB
- KERNEL4x2_SUB
- KERNEL4x2_SUB
-
- KERNEL4x2_SUB
- KERNEL4x2_SUB
- KERNEL4x2_SUB
- KERNEL4x2_SUB
-
- subs counterL, counterL, #1
- bgt dgemm_kernel_L2_M4_22
-
-
- dgemm_kernel_L2_M4_40:
-
- ands counterL , origK, #7 // counterL = K % 8
- ble dgemm_kernel_L2_M4_100
-
- dgemm_kernel_L2_M4_42:
-
- KERNEL4x2_SUB
-
- subs counterL, counterL, #1
- bgt dgemm_kernel_L2_M4_42
-
- dgemm_kernel_L2_M4_100:
-
- SAVE4x2
-
- dgemm_kernel_L2_M4_END:
-
- subs counterI, counterI, #1
- bgt dgemm_kernel_L2_M4_20
-
-
- dgemm_kernel_L2_M2_BEGIN:
-
- mov counterI, origM
- tst counterI , #3
- ble dgemm_kernel_L2_END
-
- tst counterI, #2 // is there a 2-row remainder in M?
- ble dgemm_kernel_L2_M1_BEGIN
-
- dgemm_kernel_L2_M2_20:
-
- INIT2x2
-
- mov pB, origPB
-
- asr counterL , origK, #3 // counterL = K / 8
- cmp counterL,#0
- ble dgemm_kernel_L2_M2_40
-
- dgemm_kernel_L2_M2_22:
-
- KERNEL2x2_SUB
- KERNEL2x2_SUB
- KERNEL2x2_SUB
- KERNEL2x2_SUB
-
- KERNEL2x2_SUB
- KERNEL2x2_SUB
- KERNEL2x2_SUB
- KERNEL2x2_SUB
-
- subs counterL, counterL, #1
- bgt dgemm_kernel_L2_M2_22
-
-
- dgemm_kernel_L2_M2_40:
-
- ands counterL , origK, #7 // counterL = K % 8
- ble dgemm_kernel_L2_M2_100
-
- dgemm_kernel_L2_M2_42:
-
- KERNEL2x2_SUB
-
- subs counterL, counterL, #1
- bgt dgemm_kernel_L2_M2_42
-
- dgemm_kernel_L2_M2_100:
-
- SAVE2x2
-
- dgemm_kernel_L2_M2_END:
-
-
- dgemm_kernel_L2_M1_BEGIN:
-
- tst counterI, #1 // is there a single row left in M?
- ble dgemm_kernel_L2_END
-
- dgemm_kernel_L2_M1_20:
-
- INIT1x2
-
- mov pB, origPB
-
- asr counterL , origK, #3 // counterL = K / 8
- cmp counterL, #0
- ble dgemm_kernel_L2_M1_40
-
- dgemm_kernel_L2_M1_22:
- KERNEL1x2_SUB
- KERNEL1x2_SUB
- KERNEL1x2_SUB
- KERNEL1x2_SUB
-
- KERNEL1x2_SUB
- KERNEL1x2_SUB
- KERNEL1x2_SUB
- KERNEL1x2_SUB
-
- subs counterL, counterL, #1
- bgt dgemm_kernel_L2_M1_22
-
-
- dgemm_kernel_L2_M1_40:
-
- ands counterL , origK, #7 // counterL = K % 8
- ble dgemm_kernel_L2_M1_100
-
- dgemm_kernel_L2_M1_42:
-
- KERNEL1x2_SUB
-
- subs counterL, counterL, #1
- bgt dgemm_kernel_L2_M1_42
-
- dgemm_kernel_L2_M1_100:
-
- SAVE1x2
-
- dgemm_kernel_L2_END:
- add origPB, origPB, origK, lsl #4 // B = B + K * 2 * 8
-
- /******************************************************************************/
-
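- // N remainder: handle the final single column if N is odd.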
- dgemm_kernel_L1_BEGIN:
-
- mov counterJ , origN
- tst counterJ , #1
- ble dgemm_kernel_L999 // done
-
-
- mov pCRow0, pC // pCRow0 = C
- add pC , pC , LDC // pC += LDC (the final column)
-
- mov pA, origPA // pA = A
-
- dgemm_kernel_L1_M4_BEGIN:
-
- mov counterI, origM
- asr counterI, counterI, #2 // counterI = counterI / 4
- cmp counterI, #0
- ble dgemm_kernel_L1_M2_BEGIN
-
- dgemm_kernel_L1_M4_20:
-
- INIT4x1
-
- mov pB, origPB
- asr counterL , origK, #3 // counterL = K / 8
- cmp counterL , #0
- ble dgemm_kernel_L1_M4_40
- .align 5
-
- dgemm_kernel_L1_M4_22:
- KERNEL4x1_SUB
- KERNEL4x1_SUB
- KERNEL4x1_SUB
- KERNEL4x1_SUB
-
- KERNEL4x1_SUB
- KERNEL4x1_SUB
- KERNEL4x1_SUB
- KERNEL4x1_SUB
-
- subs counterL, counterL, #1
- bgt dgemm_kernel_L1_M4_22
-
-
- dgemm_kernel_L1_M4_40:
-
- ands counterL , origK, #7 // counterL = K % 8
- ble dgemm_kernel_L1_M4_100
-
- dgemm_kernel_L1_M4_42:
-
- KERNEL4x1_SUB
-
- subs counterL, counterL, #1
- bgt dgemm_kernel_L1_M4_42
-
- dgemm_kernel_L1_M4_100:
-
- SAVE4x1
-
- dgemm_kernel_L1_M4_END:
-
- subs counterI, counterI, #1
- bgt dgemm_kernel_L1_M4_20
-
-
- dgemm_kernel_L1_M2_BEGIN:
-
- mov counterI, origM
- tst counterI , #3
- ble dgemm_kernel_L1_END
-
- tst counterI, #2 // is there a 2-row remainder in M?
- ble dgemm_kernel_L1_M1_BEGIN
-
- dgemm_kernel_L1_M2_20:
-
- INIT2x1
-
- mov pB, origPB
-
- asr counterL , origK, #3 // counterL = K / 8
- cmp counterL , #0
- ble dgemm_kernel_L1_M2_40
-
- dgemm_kernel_L1_M2_22:
-
- KERNEL2x1_SUB
- KERNEL2x1_SUB
- KERNEL2x1_SUB
- KERNEL2x1_SUB
-
- KERNEL2x1_SUB
- KERNEL2x1_SUB
- KERNEL2x1_SUB
- KERNEL2x1_SUB
-
- subs counterL, counterL, #1
- bgt dgemm_kernel_L1_M2_22
-
-
- dgemm_kernel_L1_M2_40:
-
- ands counterL , origK, #7 // counterL = K % 8
- ble dgemm_kernel_L1_M2_100
-
- dgemm_kernel_L1_M2_42:
-
- KERNEL2x1_SUB
-
- subs counterL, counterL, #1
- bgt dgemm_kernel_L1_M2_42
-
- dgemm_kernel_L1_M2_100:
-
- SAVE2x1
-
- dgemm_kernel_L1_M2_END:
-
-
- dgemm_kernel_L1_M1_BEGIN:
-
- tst counterI, #1 // is there a single row left in M?
- ble dgemm_kernel_L1_END
-
- dgemm_kernel_L1_M1_20:
-
- INIT1x1
-
- mov pB, origPB
-
- asr counterL , origK, #3 // counterL = K / 8
- cmp counterL , #0
- ble dgemm_kernel_L1_M1_40
-
- dgemm_kernel_L1_M1_22:
- KERNEL1x1_SUB
- KERNEL1x1_SUB
- KERNEL1x1_SUB
- KERNEL1x1_SUB
-
- KERNEL1x1_SUB
- KERNEL1x1_SUB
- KERNEL1x1_SUB
- KERNEL1x1_SUB
-
- subs counterL, counterL, #1
- bgt dgemm_kernel_L1_M1_22
-
-
- dgemm_kernel_L1_M1_40:
-
- ands counterL , origK, #7 // counterL = K % 8
- ble dgemm_kernel_L1_M1_100
-
- dgemm_kernel_L1_M1_42:
-
- KERNEL1x1_SUB
-
- subs counterL, counterL, #1
- bgt dgemm_kernel_L1_M1_42
-
- dgemm_kernel_L1_M1_100:
-
- SAVE1x1
-
-
- dgemm_kernel_L1_END:
-
-
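- // Restore callee-saved registers and return 0.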
- dgemm_kernel_L999:
- mov x0, #0 // set return value
- ldp d8, d9, [sp, #(0 * 16)]
- ldp d10, d11, [sp, #(1 * 16)]
- ldp d12, d13, [sp, #(2 * 16)]
- ldp d14, d15, [sp, #(3 * 16)]
- ldp d16, d17, [sp, #(4 * 16)]
- ldp x18, x19, [sp, #(5 * 16)]
- ldp x20, x21, [sp, #(6 * 16)]
- ldp x22, x23, [sp, #(7 * 16)]
- ldp x24, x25, [sp, #(8 * 16)]
- ldp x26, x27, [sp, #(9 * 16)]
- ldr x28, [sp, #(10 * 16)]
- add sp, sp, #(11*16)
- ret
-
- EPILOGUE
-