
optimized zgemv_n kernel for sandybridge

tags/v0.2.11^2
wernsaar committed 11 years ago
commit c1a6374c6f
3 changed files with 14 additions and 5 deletions:
  1. kernel/x86_64/KERNEL.SANDYBRIDGE  (+3, -0)
  2. kernel/x86_64/zgemv_n.c  (+3, -0)
  3. kernel/x86_64/zgemv_n_microk_sandy-2.c  (+8, -5)
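
For orientation: ZGEMV with TRANS = 'N' computes y := alpha*A*x + beta*y for a column-major double-complex matrix, and the files below wire up and tune the non-transposed kernel for Sandy Bridge. A minimal scalar sketch of that operation (plain C, unit strides assumed, for illustration only; this is not the OpenBLAS code path):

    #include <complex.h>
    #include <stddef.h>

    /* Reference semantics of the operation the optimized kernel implements:
     * y := alpha*A*x + beta*y, A is m x n, column-major, leading dimension lda. */
    static void zgemv_n_ref(int m, int n, double complex alpha,
                            const double complex *A, int lda,
                            const double complex *x,
                            double complex beta, double complex *y)
    {
        for (int i = 0; i < m; i++)
            y[i] *= beta;                            /* scale y once */
        for (int j = 0; j < n; j++) {
            double complex t = alpha * x[j];         /* per-column complex scalar */
            for (int i = 0; i < m; i++)
                y[i] += t * A[i + (size_t)j * lda];  /* walk column j contiguously */
        }
    }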

kernel/x86_64/KERNEL.SANDYBRIDGE  (+3, -0)

@@ -1,6 +1,9 @@
 SGEMVNKERNEL = sgemv_n.c
 SGEMVTKERNEL = sgemv_t.c
 
+ZGEMVNKERNEL = zgemv_n.c
+
+
 SGEMMKERNEL = sgemm_kernel_16x4_sandy.S
 SGEMMINCOPY = ../generic/gemm_ncopy_16.c
 SGEMMITCOPY = ../generic/gemm_tcopy_16.c


kernel/x86_64/zgemv_n.c  (+3, -0)

@@ -31,9 +31,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #if defined(HASWELL)
 #include "zgemv_n_microk_haswell-2.c"
+#elif defined(SANDYBRIDGE)
+#include "zgemv_n_microk_sandy-2.c"
 #endif
+
 
 
 #define NBMAX 1024
 
 #ifndef HAVE_KERNEL_16x4
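
The #ifndef HAVE_KERNEL_16x4 guard at the end of this hunk is the dispatch point: the CPU-specific include above it is expected to define HAVE_KERNEL_16x4 and supply zgemv_kernel_16x4(), and only otherwise does zgemv_n.c fall back to plain C. A hypothetical sketch of such a fallback, reusing the signature shown in the microkernel hunk below (non-conjugated case only; the BLASLONG/FLOAT typedefs are added here so the sketch stands alone, in OpenBLAS they come from common.h):

    typedef long BLASLONG;   /* assumption: 64-bit build */
    typedef double FLOAT;    /* z kernels use double precision */

    #ifndef HAVE_KERNEL_16x4
    /* n complex values; ap[0..3] point at four matrix columns; x holds four
     * complex scalars (re/im interleaved); y accumulates the result. */
    static void zgemv_kernel_16x4(BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
    {
        for (BLASLONG i = 0; i < 2 * n; i += 2) {      /* 2 FLOATs per complex value */
            FLOAT re = 0.0, im = 0.0;
            for (int k = 0; k < 4; k++) {              /* four columns */
                re += ap[k][i] * x[2*k]   - ap[k][i+1] * x[2*k+1];
                im += ap[k][i] * x[2*k+1] + ap[k][i+1] * x[2*k];
            }
            y[i]   += re;
            y[i+1] += im;
        }
    }
    #endif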


kernel/x86_64/zgemv_n_microk_sandy-2.c  (+8, -5)

@@ -50,39 +50,42 @@ static void zgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
 ".align 16 \n\t"
 ".L01LOOP%=: \n\t"
 
+"prefetcht0 256(%4,%0,8) \n\t"
 "vmovups (%4,%0,8), %%ymm8 \n\t" // 2 complex values form a0
 "vmovups 32(%4,%0,8), %%ymm9 \n\t" // 2 complex values form a0
 
 "vmulpd %%ymm8 , %%ymm0 , %%ymm12 \n\t"
 "vmulpd %%ymm8 , %%ymm1 , %%ymm13 \n\t"
+"prefetcht0 256(%5,%0,8) \n\t"
 "vmulpd %%ymm9 , %%ymm0 , %%ymm14 \n\t"
-"vmulpd %%ymm9 , %%ymm1 , %%ymm15 \n\t"
-
 "vmovups (%5,%0,8), %%ymm8 \n\t" // 2 complex values form a0
+"vmulpd %%ymm9 , %%ymm1 , %%ymm15 \n\t"
 "vmovups 32(%5,%0,8), %%ymm9 \n\t" // 2 complex values form a0
 
 "vmulpd %%ymm8 , %%ymm2 , %%ymm10 \n\t"
 "vaddpd %%ymm12, %%ymm10, %%ymm12 \n\t"
 "vmulpd %%ymm8 , %%ymm3 , %%ymm11 \n\t"
 "vaddpd %%ymm13, %%ymm11, %%ymm13 \n\t"
+"prefetcht0 256(%6,%0,8) \n\t"
 "vmulpd %%ymm9 , %%ymm2 , %%ymm10 \n\t"
 "vaddpd %%ymm14, %%ymm10, %%ymm14 \n\t"
+"vmovups (%6,%0,8), %%ymm8 \n\t" // 2 complex values form a0
 "vmulpd %%ymm9 , %%ymm3 , %%ymm11 \n\t"
 "vaddpd %%ymm15, %%ymm11, %%ymm15 \n\t"
 
-"vmovups (%6,%0,8), %%ymm8 \n\t" // 2 complex values form a0
 "vmovups 32(%6,%0,8), %%ymm9 \n\t" // 2 complex values form a0
 
 "vmulpd %%ymm8 , %%ymm4 , %%ymm10 \n\t"
 "vaddpd %%ymm12, %%ymm10, %%ymm12 \n\t"
 "vmulpd %%ymm8 , %%ymm5 , %%ymm11 \n\t"
 "vaddpd %%ymm13, %%ymm11, %%ymm13 \n\t"
+"prefetcht0 256(%7,%0,8) \n\t"
 "vmulpd %%ymm9 , %%ymm4 , %%ymm10 \n\t"
 "vaddpd %%ymm14, %%ymm10, %%ymm14 \n\t"
+"vmovups (%7,%0,8), %%ymm8 \n\t" // 2 complex values form a0
 "vmulpd %%ymm9 , %%ymm5 , %%ymm11 \n\t"
 "vaddpd %%ymm15, %%ymm11, %%ymm15 \n\t"
 
-"vmovups (%7,%0,8), %%ymm8 \n\t" // 2 complex values form a0
 "vmovups 32(%7,%0,8), %%ymm9 \n\t" // 2 complex values form a0
 
 "vmulpd %%ymm8 , %%ymm6 , %%ymm10 \n\t"
@@ -94,7 +97,7 @@ static void zgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
"vmulpd %%ymm9 , %%ymm7 , %%ymm11 \n\t"
"vaddpd %%ymm15, %%ymm11, %%ymm15 \n\t"

"prefetcht0 192(%3,%0,8) \n\t"
"prefetcht0 256(%3,%0,8) \n\t"
"vmovups (%3,%0,8), %%ymm10 \n\t"
"vmovups 32(%3,%0,8), %%ymm11 \n\t"


