Refs JuliaLang/julia#5728. Fix gemv performance bug on Haswell Mac OS X.
On Mac OS X the assembler interprets the `.align` argument as a power of two, so `.align 4` there is equivalent to `.align 16` on Linux. Since I measured no performance benefit from the `.align` directive, I removed it (commented it out) instead of translating it.
This commit is contained in:
parent
a04d0555ba
commit
6e7be06e07
|
@ -51,7 +51,7 @@ static void cgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
|
||||||
"cmpq $0 , %1 \n\t"
|
"cmpq $0 , %1 \n\t"
|
||||||
"je 2f \n\t"
|
"je 2f \n\t"
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
"prefetcht0 320(%4,%0,4) \n\t"
|
"prefetcht0 320(%4,%0,4) \n\t"
|
||||||
"vmovups (%4,%0,4), %%ymm8 \n\t" // 4 complex values form a0
|
"vmovups (%4,%0,4), %%ymm8 \n\t" // 4 complex values form a0
|
||||||
|
@ -202,7 +202,7 @@ static void cgemv_kernel_4x2( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
|
||||||
"cmpq $0 , %1 \n\t"
|
"cmpq $0 , %1 \n\t"
|
||||||
"je 2f \n\t"
|
"je 2f \n\t"
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
"prefetcht0 320(%4,%0,4) \n\t"
|
"prefetcht0 320(%4,%0,4) \n\t"
|
||||||
"vmovups (%4,%0,4), %%ymm8 \n\t" // 4 complex values form a0
|
"vmovups (%4,%0,4), %%ymm8 \n\t" // 4 complex values form a0
|
||||||
|
@ -322,7 +322,7 @@ static void cgemv_kernel_4x1( BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y)
|
||||||
"cmpq $0 , %1 \n\t"
|
"cmpq $0 , %1 \n\t"
|
||||||
"je 2f \n\t"
|
"je 2f \n\t"
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
"prefetcht0 320(%4,%0,4) \n\t"
|
"prefetcht0 320(%4,%0,4) \n\t"
|
||||||
"vmovups (%4,%0,4), %%ymm8 \n\t" // 4 complex values form a0
|
"vmovups (%4,%0,4), %%ymm8 \n\t" // 4 complex values form a0
|
||||||
|
@ -454,7 +454,7 @@ static void add_y(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_dest,FLOAT a
|
||||||
"cmpq $0 , %1 \n\t"
|
"cmpq $0 , %1 \n\t"
|
||||||
"je 2f \n\t"
|
"je 2f \n\t"
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
"vmovups (%2,%0,4), %%ymm8 \n\t" // 4 complex values from src
|
"vmovups (%2,%0,4), %%ymm8 \n\t" // 4 complex values from src
|
||||||
"vmovups 32(%2,%0,4), %%ymm9 \n\t"
|
"vmovups 32(%2,%0,4), %%ymm9 \n\t"
|
||||||
|
|
|
@ -76,7 +76,7 @@ static void cgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT
|
||||||
"cmpq $0, %1 \n\t"
|
"cmpq $0, %1 \n\t"
|
||||||
"je 3f \n\t"
|
"je 3f \n\t"
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
"prefetcht0 192(%4,%0,4) \n\t"
|
"prefetcht0 192(%4,%0,4) \n\t"
|
||||||
"vmovups (%4,%0,4), %%ymm4 \n\t" // 4 complex values from a0
|
"vmovups (%4,%0,4), %%ymm4 \n\t" // 4 complex values from a0
|
||||||
|
@ -292,7 +292,7 @@ static void cgemv_kernel_4x2( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT
|
||||||
"cmpq $0, %1 \n\t"
|
"cmpq $0, %1 \n\t"
|
||||||
"je 3f \n\t"
|
"je 3f \n\t"
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
"prefetcht0 192(%4,%0,4) \n\t"
|
"prefetcht0 192(%4,%0,4) \n\t"
|
||||||
"vmovups (%4,%0,4), %%ymm4 \n\t" // 4 complex values from a0
|
"vmovups (%4,%0,4), %%ymm4 \n\t" // 4 complex values from a0
|
||||||
|
@ -446,7 +446,7 @@ static void cgemv_kernel_4x1( BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y, FLOAT *
|
||||||
"cmpq $0, %1 \n\t"
|
"cmpq $0, %1 \n\t"
|
||||||
"je 3f \n\t"
|
"je 3f \n\t"
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
"prefetcht0 192(%4,%0,4) \n\t"
|
"prefetcht0 192(%4,%0,4) \n\t"
|
||||||
"vmovups (%4,%0,4), %%ymm4 \n\t" // 4 complex values from a0
|
"vmovups (%4,%0,4), %%ymm4 \n\t" // 4 complex values from a0
|
||||||
|
|
|
@ -82,7 +82,7 @@ static void dgemv_kernel_4x2( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT
|
||||||
"shufpd $0, %%xmm12, %%xmm12 \n\t"
|
"shufpd $0, %%xmm12, %%xmm12 \n\t"
|
||||||
"shufpd $0, %%xmm13, %%xmm13 \n\t"
|
"shufpd $0, %%xmm13, %%xmm13 \n\t"
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
"movups (%3,%0,8), %%xmm4 \n\t" // 2 * y
|
"movups (%3,%0,8), %%xmm4 \n\t" // 2 * y
|
||||||
"movups 16(%3,%0,8), %%xmm5 \n\t" // 2 * y
|
"movups 16(%3,%0,8), %%xmm5 \n\t" // 2 * y
|
||||||
|
@ -129,7 +129,7 @@ static void dgemv_kernel_4x2( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#ifndef HAVE_KERNEL_4x2
|
#ifndef HAVE_KERNEL_4x1
|
||||||
|
|
||||||
static void dgemv_kernel_4x1(BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y, FLOAT *alpha) __attribute__ ((noinline));
|
static void dgemv_kernel_4x1(BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y, FLOAT *alpha) __attribute__ ((noinline));
|
||||||
|
|
||||||
|
@ -144,7 +144,7 @@ static void dgemv_kernel_4x1(BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y, FLOAT *a
|
||||||
"mulsd (%5), %%xmm12 \n\t" // alpha
|
"mulsd (%5), %%xmm12 \n\t" // alpha
|
||||||
"shufpd $0, %%xmm12, %%xmm12 \n\t"
|
"shufpd $0, %%xmm12, %%xmm12 \n\t"
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
"movups (%4,%0,8), %%xmm8 \n\t" // 2 * a
|
"movups (%4,%0,8), %%xmm8 \n\t" // 2 * a
|
||||||
"movups 16(%4,%0,8), %%xmm9 \n\t" // 2 * a
|
"movups 16(%4,%0,8), %%xmm9 \n\t" // 2 * a
|
||||||
|
|
|
@ -52,7 +52,7 @@ static void dgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT
|
||||||
"subq $4 , %1 \n\t"
|
"subq $4 , %1 \n\t"
|
||||||
"jz 2f \n\t"
|
"jz 2f \n\t"
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
|
|
||||||
"vmulpd %%ymm0 , %%ymm12, %%ymm4 \n\t"
|
"vmulpd %%ymm0 , %%ymm12, %%ymm4 \n\t"
|
||||||
|
@ -114,3 +114,78 @@ static void dgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
#define HAVE_KERNEL_4x2
|
||||||
|
|
||||||
|
static void dgemv_kernel_4x2( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT *alpha) __attribute__ ((noinline));
|
||||||
|
|
||||||
|
static void dgemv_kernel_4x2( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT *alpha)
|
||||||
|
{
|
||||||
|
|
||||||
|
BLASLONG register i = 0;
|
||||||
|
|
||||||
|
__asm__ __volatile__
|
||||||
|
(
|
||||||
|
"vbroadcastsd (%2), %%ymm12 \n\t" // x0
|
||||||
|
"vbroadcastsd 8(%2), %%ymm13 \n\t" // x1
|
||||||
|
|
||||||
|
"vmovups (%4,%0,8), %%ymm0 \n\t"
|
||||||
|
"vmovups (%5,%0,8), %%ymm1 \n\t"
|
||||||
|
|
||||||
|
"vbroadcastsd (%6), %%ymm6 \n\t" // alpha
|
||||||
|
|
||||||
|
"addq $4 , %0 \n\t"
|
||||||
|
"subq $4 , %1 \n\t"
|
||||||
|
"jz 2f \n\t"
|
||||||
|
|
||||||
|
"1: \n\t"
|
||||||
|
|
||||||
|
"vmulpd %%ymm0 , %%ymm12, %%ymm4 \n\t"
|
||||||
|
"vmulpd %%ymm1 , %%ymm13, %%ymm5 \n\t"
|
||||||
|
"vmovups (%4,%0,8), %%ymm0 \n\t"
|
||||||
|
"vmovups (%5,%0,8), %%ymm1 \n\t"
|
||||||
|
|
||||||
|
"vmovups -32(%3,%0,8), %%ymm8 \n\t" // 4 * y
|
||||||
|
"vaddpd %%ymm4 , %%ymm5 , %%ymm4 \n\t"
|
||||||
|
"vfmadd231pd %%ymm6 , %%ymm4 , %%ymm8 \n\t"
|
||||||
|
|
||||||
|
"vmovups %%ymm8, -32(%3,%0,8) \n\t" // 4 * y
|
||||||
|
|
||||||
|
"addq $4 , %0 \n\t"
|
||||||
|
"subq $4 , %1 \n\t"
|
||||||
|
"jnz 1b \n\t"
|
||||||
|
|
||||||
|
|
||||||
|
"2: \n\t"
|
||||||
|
|
||||||
|
"vmulpd %%ymm0 , %%ymm12, %%ymm4 \n\t"
|
||||||
|
"vmulpd %%ymm1 , %%ymm13, %%ymm5 \n\t"
|
||||||
|
|
||||||
|
|
||||||
|
"vmovups -32(%3,%0,8), %%ymm8 \n\t" // 4 * y
|
||||||
|
"vaddpd %%ymm4 , %%ymm5 , %%ymm4 \n\t"
|
||||||
|
"vfmadd231pd %%ymm6 , %%ymm4 , %%ymm8 \n\t"
|
||||||
|
|
||||||
|
"vmovups %%ymm8, -32(%3,%0,8) \n\t" // 4 * y
|
||||||
|
|
||||||
|
|
||||||
|
"vzeroupper \n\t"
|
||||||
|
|
||||||
|
|
||||||
|
:
|
||||||
|
:
|
||||||
|
"r" (i), // 0
|
||||||
|
"r" (n), // 1
|
||||||
|
"r" (x), // 2
|
||||||
|
"r" (y), // 3
|
||||||
|
"r" (ap[0]), // 4
|
||||||
|
"r" (ap[1]), // 5
|
||||||
|
"r" (alpha) // 6
|
||||||
|
: "cc",
|
||||||
|
"%xmm0", "%xmm1",
|
||||||
|
"%xmm4", "%xmm5",
|
||||||
|
"%xmm6",
|
||||||
|
"%xmm8",
|
||||||
|
"%xmm12", "%xmm13",
|
||||||
|
"memory"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
|
@ -95,7 +95,7 @@ static void dgemv_kernel_4x2(BLASLONG n, FLOAT *ap0, FLOAT *ap1, FLOAT *x, FLOAT
|
||||||
"cmpq $0, %1 \n\t"
|
"cmpq $0, %1 \n\t"
|
||||||
"je 3f \n\t"
|
"je 3f \n\t"
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
|
|
||||||
"movups (%5,%0,8) , %%xmm14 \n\t" // x
|
"movups (%5,%0,8) , %%xmm14 \n\t" // x
|
||||||
|
@ -171,7 +171,7 @@ static void dgemv_kernel_4x1(BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y)
|
||||||
"cmpq $0, %1 \n\t"
|
"cmpq $0, %1 \n\t"
|
||||||
"je 3f \n\t"
|
"je 3f \n\t"
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
|
|
||||||
"movups (%3,%0,8) , %%xmm12 \n\t"
|
"movups (%3,%0,8) , %%xmm12 \n\t"
|
||||||
|
@ -245,7 +245,7 @@ static void add_y(BLASLONG n, FLOAT da , FLOAT *src, FLOAT *dest, BLASLONG inc_d
|
||||||
"movsd (%2) , %%xmm10 \n\t"
|
"movsd (%2) , %%xmm10 \n\t"
|
||||||
"shufpd $0 , %%xmm10 , %%xmm10 \n\t"
|
"shufpd $0 , %%xmm10 , %%xmm10 \n\t"
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
|
|
||||||
"movups (%3,%0,8) , %%xmm12 \n\t"
|
"movups (%3,%0,8) , %%xmm12 \n\t"
|
||||||
|
|
|
@ -59,7 +59,7 @@ static void dgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
|
||||||
"cmpq $0, %1 \n\t"
|
"cmpq $0, %1 \n\t"
|
||||||
"je 3f \n\t"
|
"je 3f \n\t"
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
// "prefetcht0 384(%2,%0,8) \n\t"
|
// "prefetcht0 384(%2,%0,8) \n\t"
|
||||||
"vmovups (%2,%0,8), %%ymm12 \n\t" // 4 * x
|
"vmovups (%2,%0,8), %%ymm12 \n\t" // 4 * x
|
||||||
|
|
|
@ -131,7 +131,7 @@ static void sgemv_kernel_4x2( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT
|
||||||
"shufps $0, %%xmm12, %%xmm12 \n\t"
|
"shufps $0, %%xmm12, %%xmm12 \n\t"
|
||||||
"shufps $0, %%xmm13, %%xmm13 \n\t"
|
"shufps $0, %%xmm13, %%xmm13 \n\t"
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
"movups (%3,%0,4), %%xmm4 \n\t" // 4 * y
|
"movups (%3,%0,4), %%xmm4 \n\t" // 4 * y
|
||||||
|
|
||||||
|
@ -189,7 +189,7 @@ static void sgemv_kernel_4x1(BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y, FLOAT *a
|
||||||
"cmpq $0, %1 \n\t"
|
"cmpq $0, %1 \n\t"
|
||||||
"je 2f \n\t"
|
"je 2f \n\t"
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
"movups (%3,%0,4), %%xmm4 \n\t" // 4 * y
|
"movups (%3,%0,4), %%xmm4 \n\t" // 4 * y
|
||||||
"movups 16(%3,%0,4), %%xmm5 \n\t" // 4 * y
|
"movups 16(%3,%0,4), %%xmm5 \n\t" // 4 * y
|
||||||
|
@ -264,7 +264,7 @@ static void add_y(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_dest)
|
||||||
__asm__ __volatile__
|
__asm__ __volatile__
|
||||||
(
|
(
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
|
|
||||||
"movups (%2,%0,4) , %%xmm12 \n\t"
|
"movups (%2,%0,4) , %%xmm12 \n\t"
|
||||||
|
|
|
@ -112,7 +112,7 @@ static void sgemv_kernel_4x8( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, BLASLO
|
||||||
"je 4f \n\t"
|
"je 4f \n\t"
|
||||||
|
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
|
|
||||||
"vxorps %%ymm4 , %%ymm4, %%ymm4 \n\t"
|
"vxorps %%ymm4 , %%ymm4, %%ymm4 \n\t"
|
||||||
|
@ -246,7 +246,7 @@ static void sgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT
|
||||||
"je 4f \n\t"
|
"je 4f \n\t"
|
||||||
|
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
"vxorps %%ymm4 , %%ymm4, %%ymm4 \n\t"
|
"vxorps %%ymm4 , %%ymm4, %%ymm4 \n\t"
|
||||||
"vxorps %%ymm5 , %%ymm5, %%ymm5 \n\t"
|
"vxorps %%ymm5 , %%ymm5, %%ymm5 \n\t"
|
||||||
|
|
|
@ -105,7 +105,7 @@ static void sgemv_kernel_4x2(BLASLONG n, FLOAT *ap0, FLOAT *ap1, FLOAT *x, FLOAT
|
||||||
"cmpq $0, %1 \n\t"
|
"cmpq $0, %1 \n\t"
|
||||||
"je 3f \n\t"
|
"je 3f \n\t"
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
|
|
||||||
"movups (%5,%0,4) , %%xmm14 \n\t" // x
|
"movups (%5,%0,4) , %%xmm14 \n\t" // x
|
||||||
|
@ -183,7 +183,7 @@ static void sgemv_kernel_4x1(BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y)
|
||||||
"cmpq $0, %1 \n\t"
|
"cmpq $0, %1 \n\t"
|
||||||
"je 3f \n\t"
|
"je 3f \n\t"
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
|
|
||||||
"movups (%3,%0,4) , %%xmm12 \n\t"
|
"movups (%3,%0,4) , %%xmm12 \n\t"
|
||||||
|
@ -258,7 +258,7 @@ static void add_y(BLASLONG n, FLOAT da , FLOAT *src, FLOAT *dest, BLASLONG inc_d
|
||||||
"movss (%2) , %%xmm10 \n\t"
|
"movss (%2) , %%xmm10 \n\t"
|
||||||
"shufps $0 , %%xmm10 , %%xmm10 \n\t"
|
"shufps $0 , %%xmm10 , %%xmm10 \n\t"
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
|
|
||||||
"movups (%3,%0,4) , %%xmm12 \n\t"
|
"movups (%3,%0,4) , %%xmm12 \n\t"
|
||||||
|
|
|
@ -75,7 +75,7 @@ static void sgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
|
||||||
"je 4f \n\t"
|
"je 4f \n\t"
|
||||||
|
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
"prefetcht0 384(%2,%0,4) \n\t"
|
"prefetcht0 384(%2,%0,4) \n\t"
|
||||||
"vmovups (%2,%0,4), %%ymm12 \n\t" // 8 * x
|
"vmovups (%2,%0,4), %%ymm12 \n\t" // 8 * x
|
||||||
|
|
|
@ -47,7 +47,7 @@ static void zgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
|
||||||
"vbroadcastsd 56(%2), %%ymm7 \n\t" // imag part x3
|
"vbroadcastsd 56(%2), %%ymm7 \n\t" // imag part x3
|
||||||
|
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
"prefetcht0 192(%4,%0,8) \n\t"
|
"prefetcht0 192(%4,%0,8) \n\t"
|
||||||
"vmovups (%4,%0,8), %%ymm8 \n\t" // 2 complex values form a0
|
"vmovups (%4,%0,8), %%ymm8 \n\t" // 2 complex values form a0
|
||||||
|
@ -152,7 +152,7 @@ static void zgemv_kernel_4x2( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
|
||||||
"vbroadcastsd 24(%2), %%ymm3 \n\t" // imag part x1
|
"vbroadcastsd 24(%2), %%ymm3 \n\t" // imag part x1
|
||||||
|
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
"prefetcht0 192(%4,%0,8) \n\t"
|
"prefetcht0 192(%4,%0,8) \n\t"
|
||||||
"vmovups (%4,%0,8), %%ymm8 \n\t" // 2 complex values form a0
|
"vmovups (%4,%0,8), %%ymm8 \n\t" // 2 complex values form a0
|
||||||
|
@ -236,7 +236,7 @@ static void zgemv_kernel_4x1( BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y)
|
||||||
"vbroadcastsd (%2), %%ymm0 \n\t" // real part x0
|
"vbroadcastsd (%2), %%ymm0 \n\t" // real part x0
|
||||||
"vbroadcastsd 8(%2), %%ymm1 \n\t" // imag part x0
|
"vbroadcastsd 8(%2), %%ymm1 \n\t" // imag part x0
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
"prefetcht0 192(%4,%0,8) \n\t"
|
"prefetcht0 192(%4,%0,8) \n\t"
|
||||||
"vmovups (%4,%0,8), %%ymm8 \n\t" // 2 complex values form a0
|
"vmovups (%4,%0,8), %%ymm8 \n\t" // 2 complex values form a0
|
||||||
|
@ -338,7 +338,7 @@ static void add_y(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_dest,FLOAT a
|
||||||
"vbroadcastsd (%4), %%ymm0 \n\t" // alpha_r
|
"vbroadcastsd (%4), %%ymm0 \n\t" // alpha_r
|
||||||
"vbroadcastsd (%5), %%ymm1 \n\t" // alpha_i
|
"vbroadcastsd (%5), %%ymm1 \n\t" // alpha_i
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
"prefetcht0 192(%2,%0,8) \n\t"
|
"prefetcht0 192(%2,%0,8) \n\t"
|
||||||
"vmovups (%2,%0,8), %%ymm8 \n\t" // 2 complex values from src
|
"vmovups (%2,%0,8), %%ymm8 \n\t" // 2 complex values from src
|
||||||
|
|
|
@ -46,7 +46,7 @@ static void zgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT
|
||||||
"vxorpd %%ymm14, %%ymm14, %%ymm14 \n\t"
|
"vxorpd %%ymm14, %%ymm14, %%ymm14 \n\t"
|
||||||
"vxorpd %%ymm15, %%ymm15, %%ymm15 \n\t"
|
"vxorpd %%ymm15, %%ymm15, %%ymm15 \n\t"
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
|
|
||||||
"prefetcht0 192(%2,%0,8) \n\t"
|
"prefetcht0 192(%2,%0,8) \n\t"
|
||||||
|
@ -219,7 +219,7 @@ static void zgemv_kernel_4x2( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT
|
||||||
"vxorpd %%ymm10, %%ymm10, %%ymm10 \n\t" // temp
|
"vxorpd %%ymm10, %%ymm10, %%ymm10 \n\t" // temp
|
||||||
"vxorpd %%ymm11, %%ymm11, %%ymm11 \n\t" // temp
|
"vxorpd %%ymm11, %%ymm11, %%ymm11 \n\t" // temp
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
|
|
||||||
"prefetcht0 192(%2,%0,8) \n\t"
|
"prefetcht0 192(%2,%0,8) \n\t"
|
||||||
|
@ -341,7 +341,7 @@ static void zgemv_kernel_4x1( BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y, FLOAT *
|
||||||
"vxorpd %%ymm8 , %%ymm8 , %%ymm8 \n\t" // temp
|
"vxorpd %%ymm8 , %%ymm8 , %%ymm8 \n\t" // temp
|
||||||
"vxorpd %%ymm9 , %%ymm9 , %%ymm9 \n\t" // temp
|
"vxorpd %%ymm9 , %%ymm9 , %%ymm9 \n\t" // temp
|
||||||
|
|
||||||
".align 16 \n\t"
|
// ".align 16 \n\t"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
|
|
||||||
"prefetcht0 192(%2,%0,8) \n\t"
|
"prefetcht0 192(%2,%0,8) \n\t"
|
||||||
|
|
Loading…
Reference in New Issue