Merge pull request #561 from wernsaar/develop

updated dgemv_n sgemv_n kernels
wernsaar 2015-05-04 11:11:13 +02:00
commit 2b83a69650
4 changed files with 126 additions and 411 deletions


@@ -37,48 +37,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define NBMAX 2048
#ifndef HAVE_KERNEL_4x8
static void dgemv_kernel_4x8(BLASLONG n, FLOAT **ap, FLOAT *xo, FLOAT *y, BLASLONG lda4, FLOAT *alpha)
{
	BLASLONG i;
	FLOAT *a0,*a1,*a2,*a3;
	FLOAT *b0,*b1,*b2,*b3;
	FLOAT *x4;
	FLOAT x[8];

	a0 = ap[0];
	a1 = ap[1];
	a2 = ap[2];
	a3 = ap[3];
	b0 = a0 + lda4 ;
	b1 = a1 + lda4 ;
	b2 = a2 + lda4 ;
	b3 = a3 + lda4 ;
	x4 = x + 4;

	for ( i=0; i<8; i++)
		x[i] = xo[i] * *alpha;

	for ( i=0; i< n; i+=4 )
	{
		y[i]   += a0[i]*x[0]   + a1[i]*x[1]   + a2[i]*x[2]   + a3[i]*x[3];
		y[i+1] += a0[i+1]*x[0] + a1[i+1]*x[1] + a2[i+1]*x[2] + a3[i+1]*x[3];
		y[i+2] += a0[i+2]*x[0] + a1[i+2]*x[1] + a2[i+2]*x[2] + a3[i+2]*x[3];
		y[i+3] += a0[i+3]*x[0] + a1[i+3]*x[1] + a2[i+3]*x[2] + a3[i+3]*x[3];

		y[i]   += b0[i]*x4[0]   + b1[i]*x4[1]   + b2[i]*x4[2]   + b3[i]*x4[3];
		y[i+1] += b0[i+1]*x4[0] + b1[i+1]*x4[1] + b2[i+1]*x4[2] + b3[i+1]*x4[3];
		y[i+2] += b0[i+2]*x4[0] + b1[i+2]*x4[1] + b2[i+2]*x4[2] + b3[i+2]*x4[3];
		y[i+3] += b0[i+3]*x4[0] + b1[i+3]*x4[1] + b2[i+3]*x4[2] + b3[i+3]*x4[3];
	}
}
#endif
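
For readers new to these kernels: dgemv_kernel_4x8 accumulates an eight-column slice of y += alpha*A*x, with ap[0..3] pointing at columns j..j+3 of the column-major matrix and lda4 giving the element offset from a column to the one four columns over. A minimal harness (hypothetical, not part of the patch; assumes it is appended to this file so the static kernel is visible, and that the row count is a multiple of 4) that checks the kernel against a naive loop:

#include <stdio.h>

static void test_dgemv_kernel_4x8(void)
{
	enum { N = 8 };                          /* rows, multiple of 4 */
	FLOAT a[8 * N], x[8], y[N], yref[N], alpha = 0.5;
	FLOAT *ap[4] = { a, a + N, a + 2 * N, a + 3 * N };
	BLASLONG i, j;

	for (j = 0; j < 8; j++) {
		x[j] = (FLOAT)(j + 1);
		for (i = 0; i < N; i++)
			a[j * N + i] = (FLOAT)(i - j); /* column-major fill */
	}
	for (i = 0; i < N; i++) y[i] = yref[i] = 1.0;

	dgemv_kernel_4x8(N, ap, x, y, 4 * N, &alpha);

	for (j = 0; j < 8; j++)                  /* naive reference */
		for (i = 0; i < N; i++)
			yref[i] += alpha * x[j] * a[j * N + i];
	for (i = 0; i < N; i++)
		printf("%g %g\n", (double)y[i], (double)yref[i]); /* columns should match */
}
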
#ifndef HAVE_KERNEL_4x4
static void dgemv_kernel_4x4(BLASLONG n, FLOAT **ap, FLOAT *xo, FLOAT *y, FLOAT *alpha)
@@ -257,7 +215,6 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO
	BLASLONG m3;
	BLASLONG n2;
	BLASLONG lda4 = lda << 2;
	BLASLONG lda8 = lda << 3;
	FLOAT xbuffer[8],*ybuffer;

	if ( m < 1 ) return(0);
@@ -265,23 +222,13 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO
	ybuffer = buffer;

	if ( inc_x == 1 )
	{
		n1 = n >> 3 ;
		n2 = n & 7  ;
	}
	else
	{
		n1 = n >> 2 ;
		n2 = n & 3  ;
	}

	m3 = m & 3  ;
	m1 = m & -4 ;
	m2 = (m & (NBMAX-1)) - m3 ;

	y_ptr = y;

	BLASLONG NB = NBMAX;
@@ -314,22 +261,12 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO
		for( i = 0; i < n1 ; i++)
		{
			dgemv_kernel_4x8(NB,ap,x_ptr,ybuffer,lda4,&alpha);
			ap[0] += lda8;
			ap[1] += lda8;
			ap[2] += lda8;
			ap[3] += lda8;
			a_ptr += lda8;
			x_ptr += 8;
		}

		if ( n2 & 4 )
		{
			dgemv_kernel_4x4(NB,ap,x_ptr,ybuffer,&alpha);
			ap[0] += lda4;
			ap[1] += lda4;
			ap[2] += lda4;
			ap[3] += lda4;
			a_ptr += lda4;
			x_ptr += 4;
		}
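
This driver change is the core of the commit: with inc_x == 1 the loop now consumes eight columns per kernel call, advancing all four column pointers by lda8 instead of lda4, and the n & 7 remainder falls through to the 4x4 and smaller paths. An illustrative sketch of the resulting decomposition (hypothetical helper name, not part of the patch):

#include <stdio.h>

static void show_column_blocking(long n)
{
	long n1 = n >> 3;                  /* full 8-column blocks    */
	long n2 = n & 7;                   /* leftover columns        */
	printf("n=%ld: %ld x 8-column blocks", n, n1);
	if (n2 & 4) printf(" + one 4-column block");
	printf(" + %ld single column(s)\n", n2 & 3);
}
/* show_column_blocking(37) ->
 * "n=37: 4 x 8-column blocks + one 4-column block + 1 single column(s)" */
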


@@ -27,128 +27,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define HAVE_KERNEL_4x8 1
static void dgemv_kernel_4x8( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, BLASLONG lda4, FLOAT *alpha) __attribute__ ((noinline));
static void dgemv_kernel_4x8( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, BLASLONG lda4, FLOAT *alpha)
{
BLASLONG register i = 0;
__asm__ __volatile__
(
"vzeroupper \n\t"
"vbroadcastsd (%2), %%ymm12 \n\t" // x0
"vbroadcastsd 8(%2), %%ymm13 \n\t" // x1
"vbroadcastsd 16(%2), %%ymm14 \n\t" // x2
"vbroadcastsd 24(%2), %%ymm15 \n\t" // x3
"vbroadcastsd 32(%2), %%ymm0 \n\t" // x4
"vbroadcastsd 40(%2), %%ymm1 \n\t" // x5
"vbroadcastsd 48(%2), %%ymm2 \n\t" // x6
"vbroadcastsd 56(%2), %%ymm3 \n\t" // x7
"vbroadcastsd (%9), %%ymm6 \n\t" // alpha
"testq $0x04, %1 \n\t"
"jz 2f \n\t"
"vmovupd (%3,%0,8), %%ymm7 \n\t" // 4 * y
"vxorpd %%ymm4 , %%ymm4, %%ymm4 \n\t"
"vxorpd %%ymm5 , %%ymm5, %%ymm5 \n\t"
"vfmadd231pd (%4,%0,8), %%ymm12, %%ymm4 \n\t"
"vfmadd231pd (%5,%0,8), %%ymm13, %%ymm5 \n\t"
"vfmadd231pd (%6,%0,8), %%ymm14, %%ymm4 \n\t"
"vfmadd231pd (%7,%0,8), %%ymm15, %%ymm5 \n\t"
"vfmadd231pd (%4,%8,8), %%ymm0 , %%ymm4 \n\t"
"vfmadd231pd (%5,%8,8), %%ymm1 , %%ymm5 \n\t"
"vfmadd231pd (%6,%8,8), %%ymm2 , %%ymm4 \n\t"
"vfmadd231pd (%7,%8,8), %%ymm3 , %%ymm5 \n\t"
"vaddpd %%ymm4 , %%ymm5 , %%ymm5 \n\t"
"vmulpd %%ymm6 , %%ymm5 , %%ymm5 \n\t"
"vaddpd %%ymm7 , %%ymm5 , %%ymm5 \n\t"
"vmovupd %%ymm5, (%3,%0,8) \n\t" // 4 * y
"addq $4 , %8 \n\t"
"addq $4 , %0 \n\t"
"subq $4 , %1 \n\t"
"2: \n\t"
"cmpq $0, %1 \n\t"
"je 3f \n\t"
".align 16 \n\t"
"1: \n\t"
"vxorpd %%ymm4 , %%ymm4, %%ymm4 \n\t"
"vxorpd %%ymm5 , %%ymm5, %%ymm5 \n\t"
"vmovupd (%3,%0,8), %%ymm8 \n\t" // 4 * y
"vmovupd 32(%3,%0,8), %%ymm9 \n\t" // 4 * y
"vfmadd231pd (%4,%0,8), %%ymm12, %%ymm4 \n\t"
"vfmadd231pd 32(%4,%0,8), %%ymm12, %%ymm5 \n\t"
"vfmadd231pd (%5,%0,8), %%ymm13, %%ymm4 \n\t"
"vfmadd231pd 32(%5,%0,8), %%ymm13, %%ymm5 \n\t"
"vfmadd231pd (%6,%0,8), %%ymm14, %%ymm4 \n\t"
"vfmadd231pd 32(%6,%0,8), %%ymm14, %%ymm5 \n\t"
"vfmadd231pd (%7,%0,8), %%ymm15, %%ymm4 \n\t"
"vfmadd231pd 32(%7,%0,8), %%ymm15, %%ymm5 \n\t"
"vfmadd231pd (%4,%8,8), %%ymm0 , %%ymm4 \n\t"
"addq $8 , %0 \n\t"
"vfmadd231pd 32(%4,%8,8), %%ymm0 , %%ymm5 \n\t"
"vfmadd231pd (%5,%8,8), %%ymm1 , %%ymm4 \n\t"
"vfmadd231pd 32(%5,%8,8), %%ymm1 , %%ymm5 \n\t"
"vfmadd231pd (%6,%8,8), %%ymm2 , %%ymm4 \n\t"
"vfmadd231pd 32(%6,%8,8), %%ymm2 , %%ymm5 \n\t"
"vfmadd231pd (%7,%8,8), %%ymm3 , %%ymm4 \n\t"
"vfmadd231pd 32(%7,%8,8), %%ymm3 , %%ymm5 \n\t"
"vfmadd231pd %%ymm6 , %%ymm4 , %%ymm8 \n\t"
"vfmadd231pd %%ymm6 , %%ymm5 , %%ymm9 \n\t"
"addq $8 , %8 \n\t"
"vmovupd %%ymm8,-64(%3,%0,8) \n\t" // 4 * y
"subq $8 , %1 \n\t"
"vmovupd %%ymm9,-32(%3,%0,8) \n\t" // 4 * y
"jnz 1b \n\t"
"3: \n\t"
"vzeroupper \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
"r" (x), // 2
"r" (y), // 3
"r" (ap[0]), // 4
"r" (ap[1]), // 5
"r" (ap[2]), // 6
"r" (ap[3]), // 7
"r" (lda4), // 8
"r" (alpha) // 9
: "cc",
"%xmm0", "%xmm1",
"%xmm2", "%xmm3",
"%xmm4", "%xmm5",
"%xmm6", "%xmm7",
"%xmm8", "%xmm9",
"%xmm12", "%xmm13", "%xmm14", "%xmm15",
"memory"
);
}
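
For reference, an intrinsics rendition of what the AVX2/FMA3 block above computes (a sketch under assumptions: FMA3 available, n a positive multiple of 4, and one accumulator where the asm splits work across ymm4/ymm5 and uses an 8-wide body with a 4-wide peel, so last-bit rounding may differ; dgemv_kernel_4x8_c is an illustrative name, not part of the patch):

#include <immintrin.h>

static void dgemv_kernel_4x8_c(long n, double **ap, double *x,
                               double *y, long lda4, double *alpha)
{
	__m256d x0 = _mm256_broadcast_sd(&x[0]);
	__m256d x1 = _mm256_broadcast_sd(&x[1]);
	__m256d x2 = _mm256_broadcast_sd(&x[2]);
	__m256d x3 = _mm256_broadcast_sd(&x[3]);
	__m256d x4 = _mm256_broadcast_sd(&x[4]);
	__m256d x5 = _mm256_broadcast_sd(&x[5]);
	__m256d x6 = _mm256_broadcast_sd(&x[6]);
	__m256d x7 = _mm256_broadcast_sd(&x[7]);
	__m256d va = _mm256_broadcast_sd(alpha);

	for (long i = 0; i < n; i += 4) {
		__m256d acc = _mm256_setzero_pd();
		/* columns 0..3 */
		acc = _mm256_fmadd_pd(_mm256_loadu_pd(ap[0] + i), x0, acc);
		acc = _mm256_fmadd_pd(_mm256_loadu_pd(ap[1] + i), x1, acc);
		acc = _mm256_fmadd_pd(_mm256_loadu_pd(ap[2] + i), x2, acc);
		acc = _mm256_fmadd_pd(_mm256_loadu_pd(ap[3] + i), x3, acc);
		/* columns 4..7, lda4 elements further on */
		acc = _mm256_fmadd_pd(_mm256_loadu_pd(ap[0] + lda4 + i), x4, acc);
		acc = _mm256_fmadd_pd(_mm256_loadu_pd(ap[1] + lda4 + i), x5, acc);
		acc = _mm256_fmadd_pd(_mm256_loadu_pd(ap[2] + lda4 + i), x6, acc);
		acc = _mm256_fmadd_pd(_mm256_loadu_pd(ap[3] + lda4 + i), x7, acc);
		/* y[i..i+3] += alpha * acc */
		__m256d vy = _mm256_loadu_pd(y + i);
		_mm256_storeu_pd(y + i, _mm256_fmadd_pd(va, acc, vy));
	}
}
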
#define HAVE_KERNEL_4x4 1
static void dgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT *alpha) __attribute__ ((noinline));
@@ -159,68 +37,59 @@ static void dgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT
__asm__ __volatile__
(
"vzeroupper \n\t"
"vbroadcastsd (%2), %%ymm12 \n\t" // x0
"vbroadcastsd 8(%2), %%ymm13 \n\t" // x1
"vbroadcastsd 16(%2), %%ymm14 \n\t" // x2
"vbroadcastsd 24(%2), %%ymm15 \n\t" // x3
"vmovups (%4,%0,8), %%ymm0 \n\t"
"vmovups (%5,%0,8), %%ymm1 \n\t"
"vmovups (%6,%0,8), %%ymm2 \n\t"
"vmovups (%7,%0,8), %%ymm3 \n\t"
"vbroadcastsd (%8), %%ymm6 \n\t" // alpha
"testq $0x04, %1 \n\t"
"jz 2f \n\t"
"vxorpd %%ymm4 , %%ymm4, %%ymm4 \n\t"
"vxorpd %%ymm5 , %%ymm5, %%ymm5 \n\t"
"vmovupd (%3,%0,8), %%ymm7 \n\t" // 4 * y
"vfmadd231pd (%4,%0,8), %%ymm12, %%ymm4 \n\t"
"vfmadd231pd (%5,%0,8), %%ymm13, %%ymm5 \n\t"
"vfmadd231pd (%6,%0,8), %%ymm14, %%ymm4 \n\t"
"vfmadd231pd (%7,%0,8), %%ymm15, %%ymm5 \n\t"
"vaddpd %%ymm4 , %%ymm5 , %%ymm5 \n\t"
"vmulpd %%ymm6 , %%ymm5 , %%ymm5 \n\t"
"vaddpd %%ymm7 , %%ymm5 , %%ymm5 \n\t"
"vmovupd %%ymm5, (%3,%0,8) \n\t" // 4 * y
"addq $4 , %0 \n\t"
"subq $4 , %1 \n\t"
"2: \n\t"
"cmpq $0, %1 \n\t"
"je 3f \n\t"
"addq $4 , %0 \n\t"
"subq $4 , %1 \n\t"
"jz 2f \n\t"
".align 16 \n\t"
"1: \n\t"
"vxorpd %%ymm4 , %%ymm4, %%ymm4 \n\t"
"vxorpd %%ymm5 , %%ymm5, %%ymm5 \n\t"
"vmovupd (%3,%0,8), %%ymm8 \n\t" // 4 * y
"vmovupd 32(%3,%0,8), %%ymm9 \n\t" // 4 * y
"vfmadd231pd (%4,%0,8), %%ymm12, %%ymm4 \n\t"
"vfmadd231pd 32(%4,%0,8), %%ymm12, %%ymm5 \n\t"
"vfmadd231pd (%5,%0,8), %%ymm13, %%ymm4 \n\t"
"vfmadd231pd 32(%5,%0,8), %%ymm13, %%ymm5 \n\t"
"vfmadd231pd (%6,%0,8), %%ymm14, %%ymm4 \n\t"
"vfmadd231pd 32(%6,%0,8), %%ymm14, %%ymm5 \n\t"
"vfmadd231pd (%7,%0,8), %%ymm15, %%ymm4 \n\t"
"vfmadd231pd 32(%7,%0,8), %%ymm15, %%ymm5 \n\t"
"vmulpd %%ymm0 , %%ymm12, %%ymm4 \n\t"
"vmulpd %%ymm1 , %%ymm13, %%ymm5 \n\t"
"vmovups (%4,%0,8), %%ymm0 \n\t"
"vmovups (%5,%0,8), %%ymm1 \n\t"
"vfmadd231pd %%ymm2 , %%ymm14, %%ymm4 \n\t"
"vfmadd231pd %%ymm3 , %%ymm15, %%ymm5 \n\t"
"vmovups (%6,%0,8), %%ymm2 \n\t"
"vmovups (%7,%0,8), %%ymm3 \n\t"
"vmovups -32(%3,%0,8), %%ymm8 \n\t" // 4 * y
"vaddpd %%ymm4 , %%ymm5 , %%ymm4 \n\t"
"vfmadd231pd %%ymm6 , %%ymm4 , %%ymm8 \n\t"
"vfmadd231pd %%ymm6 , %%ymm5 , %%ymm9 \n\t"
"vmovupd %%ymm8, (%3,%0,8) \n\t" // 4 * y
"vmovupd %%ymm9, 32(%3,%0,8) \n\t" // 4 * y
"vmovups %%ymm8, -32(%3,%0,8) \n\t" // 4 * y
"addq $8 , %0 \n\t"
"subq $8 , %1 \n\t"
"addq $4 , %0 \n\t"
"subq $4 , %1 \n\t"
"jnz 1b \n\t"
"2: \n\t"
"vmulpd %%ymm0 , %%ymm12, %%ymm4 \n\t"
"vmulpd %%ymm1 , %%ymm13, %%ymm5 \n\t"
"vfmadd231pd %%ymm2 , %%ymm14, %%ymm4 \n\t"
"vfmadd231pd %%ymm3 , %%ymm15, %%ymm5 \n\t"
"vmovups -32(%3,%0,8), %%ymm8 \n\t" // 4 * y
"vaddpd %%ymm4 , %%ymm5 , %%ymm4 \n\t"
"vfmadd231pd %%ymm6 , %%ymm4 , %%ymm8 \n\t"
"vmovups %%ymm8, -32(%3,%0,8) \n\t" // 4 * y
"3: \n\t"
"vzeroupper \n\t"
:
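
The restructured 4x4 kernel above is software-pipelined: the vmovups loads at the top prime registers for the first iteration, the loop body multiplies the previously loaded data while fetching the next block (which is why the y store uses a -32 displacement after %0 has already advanced), and the "2:" block drains the final preloaded vectors. A C intrinsics sketch of that rotation (illustrative name, not part of the patch; assumes n is a positive multiple of 4):

#include <immintrin.h>

static void dgemv_kernel_4x4_pipelined(long n, double **ap, double *x,
                                       double *y, double *alpha)
{
	__m256d x0 = _mm256_broadcast_sd(&x[0]);
	__m256d x1 = _mm256_broadcast_sd(&x[1]);
	__m256d x2 = _mm256_broadcast_sd(&x[2]);
	__m256d x3 = _mm256_broadcast_sd(&x[3]);
	__m256d va = _mm256_broadcast_sd(alpha);

	/* prologue: preload the first vector of each column */
	__m256d a0 = _mm256_loadu_pd(ap[0]);
	__m256d a1 = _mm256_loadu_pd(ap[1]);
	__m256d a2 = _mm256_loadu_pd(ap[2]);
	__m256d a3 = _mm256_loadu_pd(ap[3]);

	long i = 4;
	for (; i < n; i += 4) {
		/* consume the vectors loaded one iteration ago ... */
		__m256d t = _mm256_mul_pd(a0, x0);
		t = _mm256_fmadd_pd(a1, x1, t);
		/* ... while loading the next iteration's data early */
		a0 = _mm256_loadu_pd(ap[0] + i);
		a1 = _mm256_loadu_pd(ap[1] + i);
		t = _mm256_fmadd_pd(a2, x2, t);
		t = _mm256_fmadd_pd(a3, x3, t);
		a2 = _mm256_loadu_pd(ap[2] + i);
		a3 = _mm256_loadu_pd(ap[3] + i);
		__m256d vy = _mm256_loadu_pd(y + i - 4);
		_mm256_storeu_pd(y + i - 4, _mm256_fmadd_pd(va, t, vy));
	}
	/* epilogue: drain the last preloaded vectors */
	__m256d t = _mm256_mul_pd(a0, x0);
	t = _mm256_fmadd_pd(a1, x1, t);
	t = _mm256_fmadd_pd(a2, x2, t);
	t = _mm256_fmadd_pd(a3, x3, t);
	__m256d vy = _mm256_loadu_pd(y + i - 4);
	_mm256_storeu_pd(y + i - 4, _mm256_fmadd_pd(va, t, vy));
}
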


@@ -27,150 +27,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define HAVE_KERNEL_4x8 1
static void dgemv_kernel_4x8( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, BLASLONG lda4, FLOAT *alpha) __attribute__ ((noinline));
static void dgemv_kernel_4x8( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, BLASLONG lda4, FLOAT *alpha)
{
BLASLONG register i = 0;
__asm__ __volatile__
(
"movsd (%2), %%xmm12 \n\t" // x0
"movsd 8(%2), %%xmm13 \n\t" // x1
"movsd 16(%2), %%xmm14 \n\t" // x2
"movsd 24(%2), %%xmm15 \n\t" // x3
"shufpd $0, %%xmm12, %%xmm12\n\t"
"shufpd $0, %%xmm13, %%xmm13\n\t"
"shufpd $0, %%xmm14, %%xmm14\n\t"
"shufpd $0, %%xmm15, %%xmm15\n\t"
"movsd 32(%2), %%xmm0 \n\t" // x4
"movsd 40(%2), %%xmm1 \n\t" // x5
"movsd 48(%2), %%xmm2 \n\t" // x6
"movsd 56(%2), %%xmm3 \n\t" // x7
"shufpd $0, %%xmm0 , %%xmm0 \n\t"
"shufpd $0, %%xmm1 , %%xmm1 \n\t"
"shufpd $0, %%xmm2 , %%xmm2 \n\t"
"shufpd $0, %%xmm3 , %%xmm3 \n\t"
"movsd (%9), %%xmm6 \n\t" // alpha
"shufpd $0, %%xmm6 , %%xmm6 \n\t"
".align 16 \n\t"
"1: \n\t"
"xorpd %%xmm4 , %%xmm4 \n\t"
"xorpd %%xmm5 , %%xmm5 \n\t"
"movups (%3,%0,8), %%xmm7 \n\t" // 2 * y
".align 2 \n\t"
"movups (%4,%0,8), %%xmm8 \n\t"
"movups (%5,%0,8), %%xmm9 \n\t"
"movups (%6,%0,8), %%xmm10 \n\t"
"movups (%7,%0,8), %%xmm11 \n\t"
".align 2 \n\t"
"mulpd %%xmm12, %%xmm8 \n\t"
"mulpd %%xmm13, %%xmm9 \n\t"
"mulpd %%xmm14, %%xmm10 \n\t"
"mulpd %%xmm15, %%xmm11 \n\t"
"addpd %%xmm8 , %%xmm4 \n\t"
"addpd %%xmm9 , %%xmm5 \n\t"
"addpd %%xmm10, %%xmm4 \n\t"
"addpd %%xmm11, %%xmm5 \n\t"
"movups (%4,%8,8), %%xmm8 \n\t"
"movups (%5,%8,8), %%xmm9 \n\t"
"movups (%6,%8,8), %%xmm10 \n\t"
"movups (%7,%8,8), %%xmm11 \n\t"
".align 2 \n\t"
"mulpd %%xmm0 , %%xmm8 \n\t"
"mulpd %%xmm1 , %%xmm9 \n\t"
"mulpd %%xmm2 , %%xmm10 \n\t"
"mulpd %%xmm3 , %%xmm11 \n\t"
"addpd %%xmm8 , %%xmm4 \n\t"
"addpd %%xmm9 , %%xmm5 \n\t"
"addpd %%xmm10, %%xmm4 \n\t"
"addpd %%xmm11, %%xmm5 \n\t"
"addpd %%xmm5 , %%xmm4 \n\t"
"mulpd %%xmm6 , %%xmm4 \n\t"
"addpd %%xmm4 , %%xmm7 \n\t"
"movups %%xmm7 , (%3,%0,8) \n\t" // 2 * y
"xorpd %%xmm4 , %%xmm4 \n\t"
"xorpd %%xmm5 , %%xmm5 \n\t"
"movups 16(%3,%0,8), %%xmm7 \n\t" // 2 * y
".align 2 \n\t"
"movups 16(%4,%0,8), %%xmm8 \n\t"
"movups 16(%5,%0,8), %%xmm9 \n\t"
"movups 16(%6,%0,8), %%xmm10 \n\t"
"movups 16(%7,%0,8), %%xmm11 \n\t"
".align 2 \n\t"
"mulpd %%xmm12, %%xmm8 \n\t"
"mulpd %%xmm13, %%xmm9 \n\t"
"mulpd %%xmm14, %%xmm10 \n\t"
"mulpd %%xmm15, %%xmm11 \n\t"
"addpd %%xmm8 , %%xmm4 \n\t"
"addpd %%xmm9 , %%xmm5 \n\t"
"addpd %%xmm10, %%xmm4 \n\t"
"addpd %%xmm11, %%xmm5 \n\t"
"movups 16(%4,%8,8), %%xmm8 \n\t"
"movups 16(%5,%8,8), %%xmm9 \n\t"
"movups 16(%6,%8,8), %%xmm10 \n\t"
"movups 16(%7,%8,8), %%xmm11 \n\t"
".align 2 \n\t"
"mulpd %%xmm0 , %%xmm8 \n\t"
"mulpd %%xmm1 , %%xmm9 \n\t"
"mulpd %%xmm2 , %%xmm10 \n\t"
"mulpd %%xmm3 , %%xmm11 \n\t"
"addpd %%xmm8 , %%xmm4 \n\t"
"addpd %%xmm9 , %%xmm5 \n\t"
"addpd %%xmm10, %%xmm4 \n\t"
"addpd %%xmm11, %%xmm5 \n\t"
"addq $4 , %8 \n\t"
"addpd %%xmm5 , %%xmm4 \n\t"
"mulpd %%xmm6 , %%xmm4 \n\t"
"addpd %%xmm4 , %%xmm7 \n\t"
"movups %%xmm7 , 16(%3,%0,8) \n\t" // 2 * y
"addq $4 , %0 \n\t"
"subq $4 , %1 \n\t"
"jnz 1b \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
"r" (x), // 2
"r" (y), // 3
"r" (ap[0]), // 4
"r" (ap[1]), // 5
"r" (ap[2]), // 6
"r" (ap[3]), // 7
"r" (lda4), // 8
"r" (alpha) // 9
: "cc",
"%xmm0", "%xmm1",
"%xmm2", "%xmm3",
"%xmm4", "%xmm5",
"%xmm6", "%xmm7",
"%xmm8", "%xmm9", "%xmm10", "%xmm11",
"%xmm12", "%xmm13", "%xmm14", "%xmm15",
"memory"
);
}
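
A side note on the idiom used throughout this SSE2 file: movsd followed by "shufpd $0" duplicates one double into both lanes of an xmm register, since SSE2 has no broadcast instruction. In intrinsics form (illustrative helper, not part of the patch):

#include <emmintrin.h>

/* Broadcast one double to both lanes of an __m128d using SSE2 only. */
static inline __m128d broadcast_sd_sse2(const double *p)
{
	__m128d v = _mm_load_sd(p);      /* movsd: low lane = *p, high lane = 0 */
	return _mm_shuffle_pd(v, v, 0);  /* shufpd $0: copy low lane into both  */
}
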
#define HAVE_KERNEL_4x4 1
static void dgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT *alpha) __attribute__ ((noinline));
@@ -193,54 +49,105 @@ static void dgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT
"movsd (%8), %%xmm6 \n\t" // alpha
"shufpd $0, %%xmm6 , %%xmm6 \n\t"
"movups (%4,%0,8), %%xmm8 \n\t"
"movups 16(%4,%0,8), %%xmm0 \n\t"
"movups (%5,%0,8), %%xmm9 \n\t"
"movups 16(%5,%0,8), %%xmm1 \n\t"
"movups (%6,%0,8), %%xmm10 \n\t"
"movups 16(%6,%0,8), %%xmm2 \n\t"
"movups (%7,%0,8), %%xmm11 \n\t"
"movups 16(%7,%0,8), %%xmm3 \n\t"
"addq $4 , %0 \n\t"
"subq $4 , %1 \n\t"
"jz 2f \n\t"
".align 16 \n\t"
"1: \n\t"
"xorpd %%xmm4 , %%xmm4 \n\t"
"xorpd %%xmm5 , %%xmm5 \n\t"
"movups (%3,%0,8), %%xmm7 \n\t" // 2 * y
"movups -32(%3,%0,8), %%xmm7 \n\t" // 2 * y
"mulpd %%xmm12, %%xmm8 \n\t"
"mulpd %%xmm12, %%xmm0 \n\t"
"addpd %%xmm8 , %%xmm4 \n\t"
"addpd %%xmm0 , %%xmm5 \n\t"
"movups (%4,%0,8), %%xmm8 \n\t"
"movups 16(%4,%0,8), %%xmm0 \n\t"
"mulpd %%xmm13, %%xmm9 \n\t"
"mulpd %%xmm13, %%xmm1 \n\t"
"addpd %%xmm9 , %%xmm4 \n\t"
"addpd %%xmm1 , %%xmm5 \n\t"
"movups (%5,%0,8), %%xmm9 \n\t"
"movups 16(%5,%0,8), %%xmm1 \n\t"
"mulpd %%xmm14, %%xmm10 \n\t"
"mulpd %%xmm14, %%xmm2 \n\t"
"addpd %%xmm10 , %%xmm4 \n\t"
"addpd %%xmm2 , %%xmm5 \n\t"
"movups (%6,%0,8), %%xmm10 \n\t"
"movups 16(%6,%0,8), %%xmm2 \n\t"
"mulpd %%xmm15, %%xmm11 \n\t"
"mulpd %%xmm15, %%xmm3 \n\t"
"addpd %%xmm11 , %%xmm4 \n\t"
"addpd %%xmm3 , %%xmm5 \n\t"
"movups (%7,%0,8), %%xmm11 \n\t"
"mulpd %%xmm12, %%xmm8 \n\t"
"mulpd %%xmm13, %%xmm9 \n\t"
"mulpd %%xmm14, %%xmm10 \n\t"
"mulpd %%xmm15, %%xmm11 \n\t"
"addpd %%xmm8 , %%xmm4 \n\t"
"addpd %%xmm9 , %%xmm4 \n\t"
"addpd %%xmm10 , %%xmm4 \n\t"
"addpd %%xmm4 , %%xmm11 \n\t"
"movups 16(%7,%0,8), %%xmm3 \n\t"
"mulpd %%xmm6 , %%xmm11 \n\t"
"addpd %%xmm7 , %%xmm11 \n\t"
"movups %%xmm11, (%3,%0,8) \n\t" // 2 * y
"xorpd %%xmm4 , %%xmm4 \n\t"
"xorpd %%xmm5 , %%xmm5 \n\t"
"movups 16(%3,%0,8), %%xmm7 \n\t" // 2 * y
"mulpd %%xmm6 , %%xmm4 \n\t"
"addpd %%xmm7 , %%xmm4 \n\t"
"movups -16(%3,%0,8), %%xmm7 \n\t" // 2 * y
"movups %%xmm4 , -32(%3,%0,8) \n\t" // 2 * y
"movups 16(%4,%0,8), %%xmm8 \n\t"
"movups 16(%5,%0,8), %%xmm9 \n\t"
"movups 16(%6,%0,8), %%xmm10 \n\t"
"movups 16(%7,%0,8), %%xmm11 \n\t"
"mulpd %%xmm12, %%xmm8 \n\t"
"mulpd %%xmm13, %%xmm9 \n\t"
"mulpd %%xmm14, %%xmm10 \n\t"
"mulpd %%xmm15, %%xmm11 \n\t"
"addpd %%xmm8 , %%xmm4 \n\t"
"addpd %%xmm9 , %%xmm4 \n\t"
"addpd %%xmm10 , %%xmm4 \n\t"
"addpd %%xmm4 , %%xmm11 \n\t"
"mulpd %%xmm6 , %%xmm11 \n\t"
"addpd %%xmm7 , %%xmm11 \n\t"
"movups %%xmm11, 16(%3,%0,8) \n\t" // 2 * y
"mulpd %%xmm6 , %%xmm5 \n\t"
"addpd %%xmm7 , %%xmm5 \n\t"
"movups %%xmm5 , -16(%3,%0,8) \n\t" // 2 * y
"addq $4 , %0 \n\t"
"subq $4 , %1 \n\t"
"jnz 1b \n\t"
"2: \n\t"
"xorpd %%xmm4 , %%xmm4 \n\t"
"xorpd %%xmm5 , %%xmm5 \n\t"
"mulpd %%xmm12, %%xmm8 \n\t"
"addpd %%xmm8 , %%xmm4 \n\t"
"mulpd %%xmm13, %%xmm9 \n\t"
"addpd %%xmm9 , %%xmm4 \n\t"
"mulpd %%xmm14, %%xmm10 \n\t"
"addpd %%xmm10 , %%xmm4 \n\t"
"mulpd %%xmm15, %%xmm11 \n\t"
"addpd %%xmm11 , %%xmm4 \n\t"
"mulpd %%xmm12, %%xmm0 \n\t"
"addpd %%xmm0 , %%xmm5 \n\t"
"mulpd %%xmm13, %%xmm1 \n\t"
"addpd %%xmm1 , %%xmm5 \n\t"
"mulpd %%xmm14, %%xmm2 \n\t"
"addpd %%xmm2 , %%xmm5 \n\t"
"mulpd %%xmm15, %%xmm3 \n\t"
"addpd %%xmm3 , %%xmm5 \n\t"
"movups -32(%3,%0,8), %%xmm7 \n\t" // 2 * y
"mulpd %%xmm6 , %%xmm4 \n\t"
"addpd %%xmm7 , %%xmm4 \n\t"
"movups %%xmm4 , -32(%3,%0,8) \n\t" // 2 * y
"movups -16(%3,%0,8), %%xmm7 \n\t" // 2 * y
"mulpd %%xmm6 , %%xmm5 \n\t"
"addpd %%xmm7 , %%xmm5 \n\t"
"movups %%xmm5 , -16(%3,%0,8) \n\t" // 2 * y
:
:
"r" (i), // 0
@@ -253,8 +160,8 @@ static void dgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT
"r" (ap[3]), // 7
"r" (alpha) // 8
: "cc",
"%xmm4", "%xmm5",
"%xmm6", "%xmm7",
"%xmm0", "%xmm1", "%xmm2", "%xmm3",
"%xmm4", "%xmm5", "%xmm6", "%xmm7",
"%xmm8", "%xmm9", "%xmm10", "%xmm11",
"%xmm12", "%xmm13", "%xmm14", "%xmm15",
"memory"


@@ -376,6 +376,8 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO
			sgemv_kernel_4x4(NB,ap,x_ptr,ybuffer,&alpha);
			ap[0] += lda4;
			ap[1] += lda4;
			ap[2] += lda4;
			ap[3] += lda4;
			a_ptr += lda4;
			x_ptr += 4;
		}
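
The sgemv_n driver gets the matching update: all four column pointers advance together with a_ptr after each 4x4 call. Assuming the usual setup ap[k] = a_ptr + k*lda used elsewhere in these drivers (not shown in this hunk), the invariant these updates maintain can be spelled out as a hypothetical debug helper:

#include <assert.h>

/* Hypothetical check (not in the patch): every kernel call expects the
 * four column pointers to stay in lockstep with the base pointer. */
static void check_column_pointers(FLOAT *a_ptr, FLOAT **ap, BLASLONG lda)
{
	for (int k = 0; k < 4; k++)
		assert(ap[k] == a_ptr + k * lda);
}
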