Use .p2align instead of .align for portability on Haswell and Sandybridge
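The two directives are not equivalent everywhere: on x86 ELF targets GNU as treats the argument of ".align" as a byte count, but other assemblers and targets read it as a power-of-two exponent, while ".p2align n" always requests 2^n-byte alignment. Hence ".align 16" becomes ".p2align 4", ".align 8" becomes ".p2align 3", and ".align 2" becomes ".p2align 1" throughout. A minimal sketch follows (not part of this change; sum_kernel and its operands are invented for illustration) showing a loop head aligned the way these kernels do it:

#include <stdio.h>

/* Hypothetical example, not from the patch: a trivial sum loop whose head is
   aligned with ".p2align 4" (2^4 = 16 bytes), the portable spelling of the
   old ".align 16" used by these kernels. */
static long sum_kernel(long n, const long *x)
{
	long i = 0, s = 0;

	__asm__ __volatile__
	(
		".p2align 4                \n\t"   // align the loop head to 2^4 = 16 bytes
		"1:                        \n\t"
		"addq    (%2,%1,8), %0     \n\t"   // s += x[i]
		"addq    $1, %1            \n\t"   // i++
		"cmpq    %3, %1            \n\t"
		"jl      1b                \n\t"   // loop while i < n
		: "+r" (s), "+r" (i)
		: "r" (x), "r" (n)
		: "cc", "memory"
	);

	return s;
}

int main(void)
{
	long x[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	printf("%ld\n", sum_kernel(8, x));     /* prints 36 */
	return 0;
}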
@@ -50,11 +50,11 @@ static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
 "vmulps (%5), %%ymm0 , %%ymm0 \n\t"
 #endif
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmovups (%2,%0,4), %%ymm5 \n\t" // 4 complex values from x
-".align 2 \n\t"
+".p2align 1 \n\t"
 "vmovups 32(%2,%0,4), %%ymm7 \n\t" // 4 complex values from x
 "vmovups 64(%2,%0,4), %%ymm9 \n\t" // 4 complex values from x
 "vmovups 96(%2,%0,4), %%ymm11 \n\t" // 4 complex values from x
@@ -70,7 +70,7 @@ static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
 "vpermilps $0xb1 , %%ymm11, %%ymm10 \n\t" // exchange real and imag part
 "vfmadd213ps (%3,%0,4), %%ymm0 , %%ymm5 \n\t"
-".align 2 \n\t"
+".p2align 1 \n\t"
 "vfmadd213ps 32(%3,%0,4), %%ymm0 , %%ymm7 \n\t"
 "vfmadd213ps 64(%3,%0,4), %%ymm0 , %%ymm9 \n\t"
 "vfmadd213ps 96(%3,%0,4), %%ymm0 , %%ymm11 \n\t"
@@ -96,7 +96,7 @@ static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
 "vfmadd231ps %%ymm1 , %%ymm10, %%ymm15 \n\t"
 "vmovups %%ymm5 , (%3,%0,4) \n\t"
-".align 2 \n\t"
+".p2align 1 \n\t"
 "vmovups %%ymm7 , 32(%3,%0,4) \n\t"
 "vmovups %%ymm9 , 64(%3,%0,4) \n\t"
 "vmovups %%ymm11, 96(%3,%0,4) \n\t"
@@ -50,11 +50,11 @@ static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
 "vmulps (%5), %%ymm0 , %%ymm0 \n\t"
 #endif
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmovups (%2,%0,4), %%ymm5 \n\t" // 4 complex values from x
-".align 2 \n\t"
+".p2align 1 \n\t"
 "vmovups 32(%2,%0,4), %%ymm7 \n\t" // 4 complex values from x
 "vmovups 64(%2,%0,4), %%ymm9 \n\t" // 4 complex values from x
 "vmovups 96(%2,%0,4), %%ymm11 \n\t" // 4 complex values from x
@@ -85,7 +85,7 @@ static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
 "vaddps %%ymm10, %%ymm11, %%ymm11 \n\t"
 "vmovups %%ymm5 , (%3,%0,4) \n\t"
-".align 2 \n\t"
+".p2align 1 \n\t"
 "vmovups %%ymm7 , 32(%3,%0,4) \n\t"
 "vmovups %%ymm9 , 64(%3,%0,4) \n\t"
 "vmovups %%ymm11, 96(%3,%0,4) \n\t"
@@ -46,7 +46,7 @@ static void cdot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot)
 "vxorps %%ymm6, %%ymm6, %%ymm6 \n\t"
 "vxorps %%ymm7, %%ymm7, %%ymm7 \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmovups (%2,%0,4), %%ymm8 \n\t" // 2 * x
 "vmovups 32(%2,%0,4), %%ymm9 \n\t" // 2 * x
@@ -46,7 +46,7 @@ static void cdot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot)
 "vxorps %%ymm6, %%ymm6, %%ymm6 \n\t"
 "vxorps %%ymm7, %%ymm7, %%ymm7 \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmovups (%2,%0,4), %%ymm8 \n\t" // 2 * x
 "vmovups 32(%2,%0,4), %%ymm9 \n\t" // 2 * x
@@ -54,7 +54,7 @@ static void cscal_kernel_16( BLASLONG n, FLOAT *alpha, FLOAT *x)
 "subq $16, %0 \n\t"
 "jz 2f \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 //"prefetcht0 128(%1) \n\t"
@@ -156,7 +156,7 @@ static void cscal_kernel_16_zero_r( BLASLONG n, FLOAT *alpha, FLOAT *x)
 "subq $16, %0 \n\t"
 "jz 2f \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 //"prefetcht0 128(%1) \n\t"
@@ -245,7 +245,7 @@ static void cscal_kernel_16_zero_i( BLASLONG n, FLOAT *alpha, FLOAT *x)
 "subq $16, %0 \n\t"
 "jz 2f \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 //"prefetcht0 128(%1) \n\t"
@@ -312,7 +312,7 @@ static void cscal_kernel_16_zero( BLASLONG n, FLOAT *alpha, FLOAT *x)
 "addq $128, %1 \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 //"prefetcht0 128(%1) \n\t"
@@ -38,7 +38,7 @@ static void daxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
 (
 "vbroadcastsd (%4), %%ymm0 \n\t" // alpha
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmovups (%3,%0,8), %%ymm12 \n\t" // 4 * y
@@ -50,7 +50,7 @@ static void daxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
 "subq $16, %1 \n\t"
 "jz 2f \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmulpd %%ymm4, %%ymm0, %%ymm4 \n\t"
@@ -41,7 +41,7 @@ static void ddot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot)
 "vxorpd %%ymm6, %%ymm6, %%ymm6 \n\t"
 "vxorpd %%ymm7, %%ymm7, %%ymm7 \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmovups (%2,%0,8), %%ymm12 \n\t" // 2 * x
 "vmovups 32(%2,%0,8), %%ymm13 \n\t" // 2 * x
@@ -41,7 +41,7 @@ static void ddot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot)
 "vxorpd %%ymm6, %%ymm6, %%ymm6 \n\t"
 "vxorpd %%ymm7, %%ymm7, %%ymm7 \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmovups (%2,%0,8), %%ymm12 \n\t" // 2 * x
 "vmovups 32(%2,%0,8), %%ymm13 \n\t" // 2 * x
@@ -53,7 +53,7 @@ static void dger_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
 "subq $8, %1 \n\t"
 "jz 2f \n\t"
-".align 8 \n\t"
+".p2align 3 \n\t"
 "1: \n\t"
 "vmulpd %%xmm4, %%xmm0, %%xmm4 \n\t"
@@ -58,7 +58,7 @@ static void dscal_kernel_8( BLASLONG n, FLOAT *alpha, FLOAT *x)
 "subq $1 , %0 \n\t"
 "jz 2f \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 // "prefetcht0 640(%1) \n\t"
@@ -156,7 +156,7 @@ static void dscal_kernel_8_zero( BLASLONG n, FLOAT *alpha, FLOAT *x)
 "cmpq $0, %0 \n\t"
 "je 2f \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmovups %%xmm0 ,-128(%1) \n\t"
@@ -58,7 +58,7 @@ static void dscal_kernel_8( BLASLONG n, FLOAT *alpha, FLOAT *x)
 "subq $1 , %0 \n\t"
 "jz 2f \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "prefetcht0 640(%1) \n\t"
@@ -156,7 +156,7 @@ static void dscal_kernel_8_zero( BLASLONG n, FLOAT *alpha, FLOAT *x)
 "cmpq $0, %0 \n\t"
 "je 2f \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmovups %%xmm0 ,-128(%1) \n\t"
@@ -44,7 +44,7 @@ static void dsymv_kernel_4x4(BLASLONG from, BLASLONG to, FLOAT **a, FLOAT *x, FL
 "vbroadcastsd 16(%8), %%ymm6 \n\t" // temp1[1]
 "vbroadcastsd 24(%8), %%ymm7 \n\t" // temp1[1]
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmovups (%3,%0,8), %%ymm9 \n\t" // 2 * y
@@ -44,7 +44,7 @@ static void dsymv_kernel_4x4(BLASLONG from, BLASLONG to, FLOAT **a, FLOAT *x, FL
 "vbroadcastsd 16(%8), %%ymm6 \n\t" // temp1[1]
 "vbroadcastsd 24(%8), %%ymm7 \n\t" // temp1[1]
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmovups (%3,%0,8), %%ymm9 \n\t" // 2 * y
@@ -46,7 +46,7 @@ static void dsymv_kernel_4x4(BLASLONG n, FLOAT *a0, FLOAT *a1, FLOAT *a2, FLOAT
 "vbroadcastsd 24(%8), %%ymm7 \n\t" // temp1[1]
 "xorq %0,%0 \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmovups (%3,%0,8), %%ymm9 \n\t" // 2 * y
@@ -46,7 +46,7 @@ static void dsymv_kernel_4x4(BLASLONG n, FLOAT *a0, FLOAT *a1, FLOAT *a2, FLOAT
 "vbroadcastsd 24(%8), %%ymm7 \n\t" // temp1[1]
 "xorq %0,%0 \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmovups (%3,%0,8), %%ymm9 \n\t" // 2 * y
@@ -24,7 +24,7 @@ static void dtrmm_kernel_4x8( BLASLONG n, FLOAT *alpha ,FLOAT *a, FLOAT *b, FLOA
 " cmp $0, %1 \n\t"
 " jz 2f \n\t"
-" .align 16 \n\t"
+" .p2align 4 \n\t"
 "1: \n\t"
 " vmovups (%2,%0,4) , %%ymm0 \n\t"
 " vmovups (%3,%0,8) , %%ymm1 \n\t"
@@ -128,7 +128,7 @@ static void dtrsm_RN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLON
 " cmpq %1, %0 \n\t"
 " je 21f \n\t"
-" .align 16 \n\t"
+" .p2align 4 \n\t"
 "1: \n\t"
 " vmovups (%2,%1,4), %%ymm4 \n\t" // read a
@@ -38,7 +38,7 @@ static void saxpy_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
 (
 "vbroadcastss (%4), %%ymm0 \n\t" // alpha
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmovups (%3,%0,4), %%ymm12 \n\t" // 8 * y
@@ -50,7 +50,7 @@ static void saxpy_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
 "subq $32, %1 \n\t"
 "jz 2f \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmulps %%ymm4, %%ymm0, %%ymm4 \n\t"
@@ -41,7 +41,7 @@ static void sdot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot)
 "vxorps %%ymm6, %%ymm6, %%ymm6 \n\t"
 "vxorps %%ymm7, %%ymm7, %%ymm7 \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmovups (%2,%0,4), %%ymm12 \n\t" // 2 * x
 "vmovups 32(%2,%0,4), %%ymm13 \n\t" // 2 * x
@@ -41,7 +41,7 @@ static void sdot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot)
 "vxorps %%ymm6, %%ymm6, %%ymm6 \n\t"
 "vxorps %%ymm7, %%ymm7, %%ymm7 \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmovups (%2,%0,4), %%ymm12 \n\t" // 2 * x
 "vmovups 32(%2,%0,4), %%ymm13 \n\t" // 2 * x
@@ -129,7 +129,7 @@ static void sgemv_kernel_4x8( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, BLASLO
 "je 4f \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vxorps %%ymm4 , %%ymm4 , %%ymm4 \n\t"
 "vxorps %%ymm5 , %%ymm5 , %%ymm5 \n\t"
@@ -299,7 +299,7 @@ static void sgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT
 "je 4f \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vxorps %%ymm4 , %%ymm4 , %%ymm4 \n\t"
 "vxorps %%ymm5 , %%ymm5 , %%ymm5 \n\t"
@@ -85,7 +85,7 @@ static void sgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
 "je 4f \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "prefetcht0 384(%2,%0,4) \n\t"
 "vmovups (%2,%0,4), %%ymm12 \n\t" // 8 * x
@@ -53,7 +53,7 @@ static void sger_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
 "subq $16, %1 \n\t"
 "jz 2f \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmulps %%xmm4, %%xmm0, %%xmm4 \n\t"
@@ -43,7 +43,7 @@ static void ssymv_kernel_4x4(BLASLONG from, BLASLONG to, FLOAT **a, FLOAT *x, FL
 "vbroadcastss 8(%8), %%xmm6 \n\t" // temp1[1]
 "vbroadcastss 12(%8), %%xmm7 \n\t" // temp1[1]
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmovups (%3,%0,4), %%xmm9 \n\t" // 2 * y
@@ -45,7 +45,7 @@ static void ssymv_kernel_4x4(BLASLONG from, BLASLONG to, FLOAT **a, FLOAT *x, FL
 "vbroadcastss 8(%8), %%xmm6 \n\t" // temp1[1]
 "vbroadcastss 12(%8), %%xmm7 \n\t" // temp1[1]
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmovups (%3,%0,4), %%xmm9 \n\t" // 2 * y
@@ -143,7 +143,7 @@ static void ssymv_kernel_4x4(BLASLONG from, BLASLONG to, FLOAT **a, FLOAT *x, FL
 "vbroadcastss 8(%8), %%ymm6 \n\t" // temp1[1]
 "vbroadcastss 12(%8), %%ymm7 \n\t" // temp1[1]
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmovups (%3,%0,4), %%ymm9 \n\t" // 2 * y
@@ -46,7 +46,7 @@ static void ssymv_kernel_4x4(BLASLONG n, FLOAT *a0, FLOAT *a1, FLOAT *a2, FLOAT
 "vbroadcastss 12(%8), %%ymm7 \n\t" // temp1[1]
 "xorq %0,%0 \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmovups (%3,%0,4), %%ymm9 \n\t" // 2 * y
@@ -46,7 +46,7 @@ static void ssymv_kernel_4x4(BLASLONG n, FLOAT *a0, FLOAT *a1, FLOAT *a2, FLOAT
 "vbroadcastss 12(%8), %%ymm7 \n\t" // temp1[1]
 "xorq %0,%0 \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmovups (%3,%0,4), %%ymm9 \n\t" // 2 * y
@@ -50,11 +50,11 @@ static void zaxpy_kernel_4( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
 "vmulpd (%5), %%ymm0 , %%ymm0 \n\t"
 #endif
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmovups (%2,%0,8), %%ymm5 \n\t" // 2 complex values from x
-".align 2 \n\t"
+".p2align 1 \n\t"
 "vmovups 32(%2,%0,8), %%ymm7 \n\t" // 2 complex values from x
 "vmovups 64(%2,%0,8), %%ymm9 \n\t" // 2 complex values from x
 "vmovups 96(%2,%0,8), %%ymm11 \n\t" // 2 complex values from x
@@ -70,7 +70,7 @@ static void zaxpy_kernel_4( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
 "vpermilpd $0x5 , %%ymm11, %%ymm10 \n\t" // exchange real and imag part
 "vfmadd213pd (%3,%0,8), %%ymm0 , %%ymm5 \n\t"
-".align 2 \n\t"
+".p2align 1 \n\t"
 "vfmadd213pd 32(%3,%0,8), %%ymm0 , %%ymm7 \n\t"
 "vfmadd213pd 64(%3,%0,8), %%ymm0 , %%ymm9 \n\t"
 "vfmadd213pd 96(%3,%0,8), %%ymm0 , %%ymm11 \n\t"
@@ -96,7 +96,7 @@ static void zaxpy_kernel_4( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
 "vfmadd231pd %%ymm1 , %%ymm10, %%ymm15 \n\t"
 "vmovups %%ymm5 , (%3,%0,8) \n\t"
-".align 2 \n\t"
+".p2align 1 \n\t"
 "vmovups %%ymm7 , 32(%3,%0,8) \n\t"
 "vmovups %%ymm9 , 64(%3,%0,8) \n\t"
 "vmovups %%ymm11, 96(%3,%0,8) \n\t"
@@ -54,11 +54,11 @@ static void zaxpy_kernel_4( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
 "vmulpd (%5), %%ymm0 , %%ymm0 \n\t"
 #endif
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmovups (%2,%0,8), %%ymm5 \n\t" // 4 complex values from x
-".align 2 \n\t"
+".p2align 1 \n\t"
 "vmovups 32(%2,%0,8), %%ymm7 \n\t" // 4 complex values from x
 "vmovups 64(%2,%0,8), %%ymm9 \n\t" // 4 complex values from x
 "vmovups 96(%2,%0,8), %%ymm11 \n\t" // 4 complex values from x
@@ -89,7 +89,7 @@ static void zaxpy_kernel_4( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
 "vaddpd %%ymm10, %%ymm11, %%ymm11 \n\t"
 "vmovups %%ymm5 , (%3,%0,8) \n\t"
-".align 2 \n\t"
+".p2align 1 \n\t"
 "vmovups %%ymm7 , 32(%3,%0,8) \n\t"
 "vmovups %%ymm9 , 64(%3,%0,8) \n\t"
 "vmovups %%ymm11, 96(%3,%0,8) \n\t"
@@ -127,13 +127,13 @@ static void zaxpy_kernel_4( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
 "vmulpd (%5), %%ymm0 , %%ymm0 \n\t"
 #endif
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "prefetcht0 512(%2,%0,8) \n\t"
 "prefetcht0 576(%2,%0,8) \n\t"
 "vmovups (%2,%0,8), %%ymm5 \n\t" // 4 complex values from x
-".align 2 \n\t"
+".p2align 1 \n\t"
 "vmovups 32(%2,%0,8), %%ymm7 \n\t" // 4 complex values from x
 "vmovups 64(%2,%0,8), %%ymm9 \n\t" // 4 complex values from x
 "vmovups 96(%2,%0,8), %%ymm11 \n\t" // 4 complex values from x
@@ -166,7 +166,7 @@ static void zaxpy_kernel_4( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
 "vaddpd %%ymm10, %%ymm11, %%ymm11 \n\t"
 "vmovups %%ymm5 , (%3,%0,8) \n\t"
-".align 2 \n\t"
+".p2align 1 \n\t"
 "vmovups %%ymm7 , 32(%3,%0,8) \n\t"
 "vmovups %%ymm9 , 64(%3,%0,8) \n\t"
 "vmovups %%ymm11, 96(%3,%0,8) \n\t"
@@ -50,7 +50,7 @@ static void zdot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot)
 "vxorpd %%ymm6, %%ymm6, %%ymm6 \n\t"
 "vxorpd %%ymm7, %%ymm7, %%ymm7 \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmovups (%2,%0,8), %%ymm8 \n\t" // 2 * x
 "vmovups 32(%2,%0,8), %%ymm9 \n\t" // 2 * x
@@ -131,7 +131,7 @@ static void zdot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot)
 "vxorpd %%ymm6, %%ymm6, %%ymm6 \n\t"
 "vxorpd %%ymm7, %%ymm7, %%ymm7 \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "prefetcht0 512(%2,%0,8) \n\t"
 "vmovups (%2,%0,8), %%ymm8 \n\t" // 2 * x
@@ -49,7 +49,7 @@ if ( n < 1280 )
 "vxorpd %%ymm6, %%ymm6, %%ymm6 \n\t"
 "vxorpd %%ymm7, %%ymm7, %%ymm7 \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "vmovups (%2,%0,8), %%ymm8 \n\t" // 2 * x
 "vmovups 32(%2,%0,8), %%ymm9 \n\t" // 2 * x
@@ -137,7 +137,7 @@ if ( n < 1280 )
 "vxorpd %%ymm6, %%ymm6, %%ymm6 \n\t"
 "vxorpd %%ymm7, %%ymm7, %%ymm7 \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 "prefetcht0 512(%2,%0,8) \n\t"
 "vmovups (%2,%0,8), %%ymm8 \n\t" // 2 * x
@@ -47,7 +47,7 @@ static void zgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
 "vbroadcastsd 56(%2), %%ymm7 \n\t" // imag part x3
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 //"prefetcht0 256(%4,%0,8) \n\t"
@@ -164,7 +164,7 @@ static void zgemv_kernel_4x2( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
 "vbroadcastsd 16(%2), %%ymm2 \n\t" // real part x1
 "vbroadcastsd 24(%2), %%ymm3 \n\t" // imag part x1
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 // "prefetcht0 256(%4,%0,8) \n\t"
@@ -253,7 +253,7 @@ static void zgemv_kernel_4x1( BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y)
 "vbroadcastsd (%2), %%ymm0 \n\t" // real part x0
 "vbroadcastsd 8(%2), %%ymm1 \n\t" // imag part x0
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 // "prefetcht0 256(%4,%0,8) \n\t"
@@ -355,7 +355,7 @@ static void add_y(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_dest,FLOAT a
 "vbroadcastsd (%4), %%ymm0 \n\t" // alpha_r
 "vbroadcastsd (%5), %%ymm1 \n\t" // alpha_i
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 // "prefetcht0 192(%2,%0,8) \n\t"
 "vmovups (%2,%0,8), %%ymm8 \n\t" // 2 complex values from src
@@ -54,7 +54,7 @@ static void zscal_kernel_8( BLASLONG n, FLOAT *alpha, FLOAT *x)
 "subq $8 , %0 \n\t"
 "jz 2f \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 //"prefetcht0 128(%1) \n\t"
@@ -156,7 +156,7 @@ static void zscal_kernel_8_zero_r( BLASLONG n, FLOAT *alpha, FLOAT *x)
 "subq $8 , %0 \n\t"
 "jz 2f \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 //"prefetcht0 128(%1) \n\t"
@@ -245,7 +245,7 @@ static void zscal_kernel_8_zero_i( BLASLONG n, FLOAT *alpha, FLOAT *x)
 "subq $8 , %0 \n\t"
 "jz 2f \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 //"prefetcht0 128(%1) \n\t"
@@ -312,7 +312,7 @@ static void zscal_kernel_8_zero( BLASLONG n, FLOAT *alpha, FLOAT *x)
 "addq $128, %1 \n\t"
-".align 16 \n\t"
+".p2align 4 \n\t"
 "1: \n\t"
 //"prefetcht0 128(%1) \n\t"