From 44f2bf9bae7b25356fc0179d6b935de4edadc637 Mon Sep 17 00:00:00 2001
From: wernsaar
Date: Tue, 9 Sep 2014 13:34:22 +0200
Subject: [PATCH] added optimized dgemv_t kernel for haswell

---
 kernel/x86_64/dgemv_t_4.c                | 623 +++++++++++++++++++++++
 kernel/x86_64/dgemv_t_microk_haswell-4.c | 127 +++++
 2 files changed, 750 insertions(+)
 create mode 100644 kernel/x86_64/dgemv_t_4.c
 create mode 100644 kernel/x86_64/dgemv_t_microk_haswell-4.c

diff --git a/kernel/x86_64/dgemv_t_4.c b/kernel/x86_64/dgemv_t_4.c
new file mode 100644
index 000000000..0d0409bec
--- /dev/null
+++ b/kernel/x86_64/dgemv_t_4.c
@@ -0,0 +1,623 @@
+/***************************************************************************
+Copyright (c) 2014, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/ + + +#include "common.h" + +/* +#if defined(NEHALEM) +#include "dgemv_t_microk_nehalem-4.c" +#elif defined(BULLDOZER) || defined(PILEDRIVER) +#include "dgemv_t_microk_bulldozer-4.c" +#elif defined(SANDYBRIDGE) +#include "dgemv_t_microk_sandy-4.c" +#elif defined(HASWELL) +#include "dgemv_t_microk_haswell-4.c" +#endif +*/ + +#define NBMAX 2048 + +#ifndef HAVE_KERNEL_4x4 + +static void dgemv_kernel_4x4(BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) +{ + BLASLONG i; + FLOAT *a0,*a1,*a2,*a3; + a0 = ap[0]; + a1 = ap[1]; + a2 = ap[2]; + a3 = ap[3]; + FLOAT temp0 = 0.0; + FLOAT temp1 = 0.0; + FLOAT temp2 = 0.0; + FLOAT temp3 = 0.0; + + for ( i=0; i< n; i+=4 ) + { + temp0 += a0[i]*x[i] + a0[i+1]*x[i+1] + a0[i+2]*x[i+2] + a0[i+3]*x[i+3]; + temp1 += a1[i]*x[i] + a1[i+1]*x[i+1] + a1[i+2]*x[i+2] + a1[i+3]*x[i+3]; + temp2 += a2[i]*x[i] + a2[i+1]*x[i+1] + a2[i+2]*x[i+2] + a2[i+3]*x[i+3]; + temp3 += a3[i]*x[i] + a3[i+1]*x[i+1] + a3[i+2]*x[i+2] + a3[i+3]*x[i+3]; + } + y[0] = temp0; + y[1] = temp1; + y[2] = temp2; + y[3] = temp3; +} + +#endif + +static void dgemv_kernel_4x2(BLASLONG n, FLOAT *ap0, FLOAT *ap1, FLOAT *x, FLOAT *y) __attribute__ ((noinline)); + +static void dgemv_kernel_4x2(BLASLONG n, FLOAT *ap0, FLOAT *ap1, FLOAT *x, FLOAT *y) +{ + BLASLONG i; + + i=0; + + __asm__ __volatile__ + ( + "xorpd %%xmm10 , %%xmm10 \n\t" + "xorpd %%xmm11 , %%xmm11 \n\t" + + "testq $2 , %1 \n\t" + "jz .L01LABEL%= \n\t" + + "movups (%5,%0,8) , %%xmm14 \n\t" // x + "movups (%3,%0,8) , %%xmm12 \n\t" // ap0 + "movups (%4,%0,8) , %%xmm13 \n\t" // ap1 + "mulpd %%xmm14 , %%xmm12 \n\t" + "mulpd %%xmm14 , %%xmm13 \n\t" + "addq $2 , %0 \n\t" + "addpd %%xmm12 , %%xmm10 \n\t" + "subq $2 , %1 \n\t" + "addpd %%xmm13 , %%xmm11 \n\t" + + ".L01LABEL%=: \n\t" + + "cmpq $0, %1 \n\t" + "je .L01END%= \n\t" + + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + + "movups (%5,%0,8) , %%xmm14 \n\t" // x + "movups (%3,%0,8) , %%xmm12 \n\t" // ap0 + "movups (%4,%0,8) , %%xmm13 \n\t" // ap1 + "mulpd %%xmm14 , %%xmm12 \n\t" + "mulpd %%xmm14 , %%xmm13 \n\t" + "addpd %%xmm12 , %%xmm10 \n\t" + "addpd %%xmm13 , %%xmm11 \n\t" + + "movups 16(%5,%0,8) , %%xmm14 \n\t" // x + "movups 16(%3,%0,8) , %%xmm12 \n\t" // ap0 + "movups 16(%4,%0,8) , %%xmm13 \n\t" // ap1 + "mulpd %%xmm14 , %%xmm12 \n\t" + "mulpd %%xmm14 , %%xmm13 \n\t" + "addpd %%xmm12 , %%xmm10 \n\t" + "addpd %%xmm13 , %%xmm11 \n\t" + + "addq $4 , %0 \n\t" + "subq $4 , %1 \n\t" + "jnz .L01LOOP%= \n\t" + + ".L01END%=: \n\t" + + "haddpd %%xmm10, %%xmm10 \n\t" + "haddpd %%xmm11, %%xmm11 \n\t" + + "movsd %%xmm10, (%2) \n\t" + "movsd %%xmm11,8(%2) \n\t" + + : + : + "r" (i), // 0 + "r" (n), // 1 + "r" (y), // 2 + "r" (ap0), // 3 + "r" (ap1), // 4 + "r" (x) // 5 + : "cc", + "%xmm4", "%xmm5", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + + +} + +static void dgemv_kernel_4x1(BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y) __attribute__ ((noinline)); + +static void dgemv_kernel_4x1(BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y) +{ + BLASLONG i; + + i=0; + + __asm__ __volatile__ + ( + "xorpd %%xmm9 , %%xmm9 \n\t" + "xorpd %%xmm10 , %%xmm10 \n\t" + + "testq $2 , %1 \n\t" + "jz .L01LABEL%= \n\t" + + "movups (%3,%0,8) , %%xmm12 \n\t" + "movups (%4,%0,8) , %%xmm11 \n\t" + "mulpd %%xmm11 , %%xmm12 \n\t" + "addq $2 , %0 \n\t" + "addpd %%xmm12 , %%xmm10 \n\t" + "subq $2 , %1 \n\t" + + ".L01LABEL%=: \n\t" + + "cmpq $0, %1 \n\t" + "je .L01END%= \n\t" + + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + + "movups (%3,%0,8) , %%xmm12 
\n\t"
+		"movups	16(%3,%0,8) , %%xmm14	\n\t"
+		"movups	  (%4,%0,8) , %%xmm11	\n\t"
+		"movups	16(%4,%0,8) , %%xmm13	\n\t"
+		"mulpd	%%xmm11 , %%xmm12	\n\t"
+		"mulpd	%%xmm13 , %%xmm14	\n\t"
+		"addq	$4 , %0			\n\t"
+		"addpd	%%xmm12 , %%xmm10	\n\t"
+		"subq	$4 , %1			\n\t"
+		"addpd	%%xmm14 , %%xmm9	\n\t"
+
+		"jnz	.L01LOOP%=		\n\t"
+
+	".L01END%=:				\n\t"
+
+		"addpd	%%xmm9 , %%xmm10	\n\t"
+		"haddpd	%%xmm10, %%xmm10	\n\t"
+
+		"movsd	%%xmm10, (%2)		\n\t"
+
+	:
+	:
+	  "r" (i),	// 0
+	  "r" (n),	// 1
+	  "r" (y),	// 2
+	  "r" (ap),	// 3
+	  "r" (x)	// 4
+	: "cc",
+	  "%xmm9", "%xmm10",
+	  "%xmm11", "%xmm12", "%xmm13", "%xmm14",
+	  "memory"
+	);
+
+
+}
+
+static void copy_x(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_src)
+{
+	BLASLONG i;
+	for ( i=0; i<n; i++ )
+	{
+		*dest = *src;
+		dest++;
+		src += inc_src;
+	}
+}
+
+static void add_y(BLASLONG n, FLOAT da, FLOAT *src, FLOAT *dest, BLASLONG inc_dest)
+{
+	BLASLONG i;
+
+	if ( inc_dest != 1 )
+	{
+		for ( i=0; i<n; i++ )
+		{
+			*dest += src[i] * da;
+			dest  += inc_dest;
+		}
+		return;
+	}
+
+	for ( i=0; i<n; i++ )
+	{
+		dest[i] += src[i] * da;
+	}
+}
+
+int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer)
+{
+	BLASLONG i;
+	BLASLONG j;
+	FLOAT *a_ptr;
+	FLOAT *x_ptr;
+	FLOAT *y_ptr;
+
+	BLASLONG n0;
+	BLASLONG n1;
+	BLASLONG m1;
+	BLASLONG m2;
+	BLASLONG m3;
+	BLASLONG n2;
+
+	FLOAT ybuffer[4], *xbuffer;
+	FLOAT *ytemp;
+
+	if ( m < 1 ) return(0);
+	if ( n < 1 ) return(0);
+
+	xbuffer = buffer;
+	ytemp   = buffer + NBMAX;
+
+	n0 = n / NBMAX;
+	n1 = (n % NBMAX) >> 2 ;
+	n2 = n  & 3  ;
+
+	m3 = m & 3  ;
+	m1 = m & -4 ;
+	m2 = (m & (NBMAX-1)) - m3 ;
+
+
+	BLASLONG NB = NBMAX;
+
+	while ( NB == NBMAX )
+	{
+
+		m1 -= NB;
+		if ( m1 < 0)
+		{
+			if ( m2 == 0 ) break;
+			NB = m2;
+		}
+
+		y_ptr = y;
+		a_ptr = a;
+		x_ptr = x;
+
+		if ( inc_x == 1 )
+			xbuffer = x_ptr;
+		else
+			copy_x(NB,x_ptr,xbuffer,inc_x);
+
+
+		FLOAT *ap[4];
+		FLOAT *yp;
+		BLASLONG register lda4 = 4 * lda;
+		ap[0] = a_ptr;
+		ap[1] = a_ptr + lda;
+		ap[2] = ap[1] + lda;
+		ap[3] = ap[2] + lda;
+
+		if ( n0 > 0 )
+		{
+			BLASLONG nb1 = NBMAX / 4;
+			for( j=0; j<n0; j++ )
+			{
+
+				yp = ytemp;
+				for( i = 0; i < nb1 ; i++ )
+				{
+					dgemv_kernel_4x4(NB,ap,xbuffer,yp);
+					ap[0] += lda4 ;
+					ap[1] += lda4 ;
+					ap[2] += lda4 ;
+					ap[3] += lda4 ;
+					yp += 4;
+				}
+				add_y(nb1*4, alpha, ytemp, y_ptr, inc_y );
+				y_ptr += nb1 * inc_y * 4;
+				a_ptr += nb1 * lda4 ;
+
+			}
+
+		}
+
+
+		yp = ytemp;
+
+		for( i = 0; i < n1 ; i++ )
+		{
+			dgemv_kernel_4x4(NB,ap,xbuffer,yp);
+			ap[0] += lda4 ;
+			ap[1] += lda4 ;
+			ap[2] += lda4 ;
+			ap[3] += lda4 ;
+			yp += 4;
+		}
+
+		if ( n1 > 0 )
+		{
+			add_y(n1*4, alpha, ytemp, y_ptr, inc_y );
+			y_ptr += n1 * inc_y * 4;
+			a_ptr += n1 * lda4 ;
+		}
+
+		if ( n2 & 2 )
+		{
+
+			dgemv_kernel_4x2(NB,ap[0],ap[1],xbuffer,ybuffer);
+			a_ptr  += lda * 2;
+			*y_ptr += ybuffer[0] * alpha;
+			y_ptr  += inc_y;
+			*y_ptr += ybuffer[1] * alpha;
+			y_ptr  += inc_y;
+
+		}
+
+		if ( n2 & 1 )
+		{
+
+			dgemv_kernel_4x1(NB,a_ptr,xbuffer,ybuffer);
+			a_ptr  += lda;
+			*y_ptr += ybuffer[0] * alpha;
+			y_ptr  += inc_y;
+
+		}
+		a += NB;
+		x += NB * inc_x;
+	}
+
+	if ( m3 == 0 ) return(0);
+
+	x_ptr = x;
+	a_ptr = a;
+	if ( m3 == 3 )
+	{
+		FLOAT xtemp0 = *x_ptr * alpha;
+		x_ptr += inc_x;
+		FLOAT xtemp1 = *x_ptr * alpha;
+		x_ptr += inc_x;
+		FLOAT xtemp2 = *x_ptr * alpha;
+
+		FLOAT *aj = a_ptr;
+		y_ptr = y;
+
+		if ( lda == 3 && inc_y == 1 )
+		{
+
+			for ( j=0; j< ( n & -4) ; j+=4 )
+			{
+
+				y_ptr[j]   += aj[0] * xtemp0 + aj[1]  * xtemp1 + aj[2]  * xtemp2;
+				y_ptr[j+1] += aj[3] * xtemp0 + aj[4]  * xtemp1 + aj[5]  * xtemp2;
+				y_ptr[j+2] += aj[6] * xtemp0 + aj[7]  * xtemp1 + aj[8]  * xtemp2;
+				y_ptr[j+3] += aj[9] * xtemp0 + aj[10] * xtemp1 + aj[11] * xtemp2;
+				aj += 12;
+			}
+
+			for ( ; j<n; j++ )
+			{
+				y_ptr[j] += aj[0] * xtemp0 + aj[1] * xtemp1 + aj[2] * xtemp2;
+				aj += 3;
+			}
+
+		}
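
For context: the kernels in this patch implement the transposed case of DGEMV, y := alpha*A**T*x + y, on a column-major A. Each element of y accumulates the dot product of one column of A with x, and dgemv_kernel_4x4 produces four such dot products per call. A minimal scalar sketch of the same contract follows; it is an illustration only, and ref_dgemv_t with plain long/double types is not part of the patch:

#include <stdio.h>

/* y += alpha * A**T * x for an m x n column-major A with leading
 * dimension lda; y[j] accumulates the dot product of column j with x,
 * the quantity the blocked kernels above compute four at a time. */
static void ref_dgemv_t(long m, long n, double alpha,
                        const double *a, long lda,
                        const double *x, double *y)
{
        for (long j = 0; j < n; j++) {
                double temp = 0.0;
                for (long i = 0; i < m; i++)
                        temp += a[j * lda + i] * x[i];
                y[j] += alpha * temp;
        }
}

int main(void)
{
        double a[16] = { 2,0,0,0,  0,2,0,0,  0,0,2,0,  0,0,0,2 }; /* A = 2*I */
        double x[4]  = { 1, 2, 3, 4 };
        double y[4]  = { 0, 0, 0, 0 };

        ref_dgemv_t(4, 4, 1.0, a, 4, x, y);
        printf("%g %g %g %g\n", y[0], y[1], y[2], y[3]); /* prints 2 4 6 8 */
        return 0;
}

Note that the assembly kernels return unscaled dot products; the driver applies alpha afterwards, either in add_y or at the *y_ptr += ybuffer[i] * alpha sites, which is why dgemv_kernel_4x4 itself takes no alpha argument.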
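
The second file in the diffstat, kernel/x86_64/dgemv_t_microk_haswell-4.c, is not shown above. By analogy with the bulldozer and sandy microkernels named in the commented include block, it presumably defines HAVE_KERNEL_4x4 and replaces the generic dgemv_kernel_4x4 with FMA inline assembly. Below is a rough AVX2/FMA intrinsics sketch of that 4x4 dot-product scheme, offered only as an assumption about the technique, not as the patch's actual assembly (compile with -mavx2 -mfma; the real kernel is hand-written and more deeply unrolled):

#include <immintrin.h>

/* Sketch: four column dot products with fused multiply-add.  The
 * caller guarantees n is a multiple of 4 (NB is always a multiple
 * of 4 in the driver above). */
static void dgemv_kernel_4x4_fma(long n, double **ap, double *x, double *y)
{
        __m256d s0 = _mm256_setzero_pd();
        __m256d s1 = _mm256_setzero_pd();
        __m256d s2 = _mm256_setzero_pd();
        __m256d s3 = _mm256_setzero_pd();

        for (long i = 0; i < n; i += 4) {
                __m256d xv = _mm256_loadu_pd(&x[i]);
                /* s += column_chunk * x_chunk, one accumulator per column */
                s0 = _mm256_fmadd_pd(_mm256_loadu_pd(&ap[0][i]), xv, s0);
                s1 = _mm256_fmadd_pd(_mm256_loadu_pd(&ap[1][i]), xv, s1);
                s2 = _mm256_fmadd_pd(_mm256_loadu_pd(&ap[2][i]), xv, s2);
                s3 = _mm256_fmadd_pd(_mm256_loadu_pd(&ap[3][i]), xv, s3);
        }

        /* horizontal reduction: collapse each 4-wide accumulator */
        double t[4];
        _mm256_storeu_pd(t, s0); y[0] = t[0] + t[1] + t[2] + t[3];
        _mm256_storeu_pd(t, s1); y[1] = t[0] + t[1] + t[2] + t[3];
        _mm256_storeu_pd(t, s2); y[2] = t[0] + t[1] + t[2] + t[3];
        _mm256_storeu_pd(t, s3); y[3] = t[0] + t[1] + t[2] + t[3];
}

The four independent accumulators are what keep Haswell's two FMA ports busy, since each FMA chain has a multi-cycle latency; the horizontal reduction happens only once per group of four columns, off the critical path.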