From 3119def9a7cdcbd1b030cd70054dc68a65ead41a Mon Sep 17 00:00:00 2001
From: Werner Saar
Date: Fri, 10 Apr 2015 11:10:31 +0200
Subject: [PATCH] updated cdot and zdot

---
 kernel/x86_64/cdot.c                    |  2 -
 kernel/x86_64/zdot.c                    |  2 -
 kernel/x86_64/zdot_microk_bulldozer-2.c | 85 ++++++++++++++++++++++-
 kernel/x86_64/zdot_microk_haswell-2.c   | 91 +++++++++++++++++++++++++
 4 files changed, 174 insertions(+), 6 deletions(-)

diff --git a/kernel/x86_64/cdot.c b/kernel/x86_64/cdot.c
index e0ba31ae7..266ab4fb9 100644
--- a/kernel/x86_64/cdot.c
+++ b/kernel/x86_64/cdot.c
@@ -34,8 +34,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include "cdot_microk_bulldozer-2.c"
 #elif defined(STEAMROLLER) || defined(PILEDRIVER)
 #include "cdot_microk_steamroller-2.c"
-#elif defined(NEHALEM)
-#include "cdot_microk_nehalem-2.c"
 #elif defined(HASWELL)
 #include "cdot_microk_haswell-2.c"
 #elif defined(SANDYBRIDGE)
diff --git a/kernel/x86_64/zdot.c b/kernel/x86_64/zdot.c
index ee220c70e..c0cca521b 100644
--- a/kernel/x86_64/zdot.c
+++ b/kernel/x86_64/zdot.c
@@ -34,8 +34,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include "zdot_microk_bulldozer-2.c"
 #elif defined(STEAMROLLER) || defined(PILEDRIVER)
 #include "zdot_microk_steamroller-2.c"
-#elif defined(NEHALEM)
-#include "zdot_microk_nehalem-2.c"
 #elif defined(HASWELL)
 #include "zdot_microk_haswell-2.c"
 #elif defined(SANDYBRIDGE)
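With the NEHALEM branches removed above, Nehalem targets no longer include an
optimized microkernel (and so no longer define HAVE_KERNEL_8); they fall back
to the generic C loop in cdot.c/zdot.c. As a reference for what every
zdot_kernel_8 variant in this patch must compute, here is a minimal sketch of
that fallback, assuming the layout of the generic OpenBLAS zdot kernel (the
four partial sums are combined by the caller according to CONJ):

    /* Sketch of the generic zdot_kernel_8 fallback (not part of this patch).
     * x and y hold n interleaved (real, imag) double pairs. */
    static void zdot_kernel_8(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot)
    {
        BLASLONG i;
        FLOAT dot0 = 0.0, dot1 = 0.0, dot2 = 0.0, dot3 = 0.0;

        for (i = 0; i < 2 * n; i += 2) {
            dot0 += x[i]     * y[i];       /* x_r * y_r */
            dot1 += x[i + 1] * y[i + 1];   /* x_i * y_i */
            dot2 += x[i]     * y[i + 1];   /* x_r * y_i */
            dot3 += x[i + 1] * y[i];       /* x_i * y_r */
        }
        dot[0] = dot0; dot[1] = dot1;
        dot[2] = dot2; dot[3] = dot3;
    }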
diff --git a/kernel/x86_64/zdot_microk_bulldozer-2.c b/kernel/x86_64/zdot_microk_bulldozer-2.c
index d45c4ad38..30a9552d6 100644
--- a/kernel/x86_64/zdot_microk_bulldozer-2.c
+++ b/kernel/x86_64/zdot_microk_bulldozer-2.c
@@ -34,6 +34,9 @@ static void zdot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot)
 
 	BLASLONG register i = 0;
 
+	if ( n < 768 )
+	{
+
 	__asm__  __volatile__
 	(
 	"vzeroupper                                      \n\t"
@@ -48,11 +51,88 @@ static void zdot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot)
 	".align 16                                       \n\t"
 	"1:                                              \n\t"
-	"prefetcht0 512(%2,%0,8)                         \n\t"
 	"vmovups        (%2,%0,8), %%xmm8                \n\t"	// 1 * x
 	"vmovups      16(%2,%0,8), %%xmm9                \n\t"	// 1 * x
-	"prefetcht0 512(%3,%0,8)                         \n\t"
+	"vmovups        (%3,%0,8), %%xmm12               \n\t"	// 1 * y
+	"vmovups      16(%3,%0,8), %%xmm13               \n\t"	// 1 * y
+
+	"vmovups      32(%2,%0,8), %%xmm10               \n\t"	// 1 * x
+	"vmovups      48(%2,%0,8), %%xmm11               \n\t"	// 1 * x
+
+	"vmovups      32(%3,%0,8), %%xmm14               \n\t"	// 1 * y
+	"vmovups      48(%3,%0,8), %%xmm15               \n\t"	// 1 * y
+
+	"vfmaddpd  %%xmm0, %%xmm8 , %%xmm12, %%xmm0      \n\t"	// x_r * y_r, x_i * y_i
+	"vfmaddpd  %%xmm1, %%xmm9 , %%xmm13, %%xmm1      \n\t"	// x_r * y_r, x_i * y_i
+
+	"vpermilpd $0x1 , %%xmm12, %%xmm12               \n\t"
+	"vpermilpd $0x1 , %%xmm13, %%xmm13               \n\t"
+
+	"vfmaddpd  %%xmm2, %%xmm10, %%xmm14, %%xmm2      \n\t"	// x_r * y_r, x_i * y_i
+	"vfmaddpd  %%xmm3, %%xmm11, %%xmm15, %%xmm3      \n\t"	// x_r * y_r, x_i * y_i
+
+	"vpermilpd $0x1 , %%xmm14, %%xmm14               \n\t"
+	"vpermilpd $0x1 , %%xmm15, %%xmm15               \n\t"
+
+	"vfmaddpd  %%xmm4, %%xmm8 , %%xmm12, %%xmm4      \n\t"	// x_r * y_i, x_i * y_r
+	"addq		$8 , %0                          \n\t"
+	"vfmaddpd  %%xmm5, %%xmm9 , %%xmm13, %%xmm5      \n\t"	// x_r * y_i, x_i * y_r
+	"vfmaddpd  %%xmm6, %%xmm10, %%xmm14, %%xmm6      \n\t"	// x_r * y_i, x_i * y_r
+	"subq		$4 , %1                          \n\t"
+	"vfmaddpd  %%xmm7, %%xmm11, %%xmm15, %%xmm7      \n\t"	// x_r * y_i, x_i * y_r
+
+	"jnz		1b                               \n\t"
+
+	"vaddpd        %%xmm0, %%xmm1, %%xmm0            \n\t"
+	"vaddpd        %%xmm2, %%xmm3, %%xmm2            \n\t"
+	"vaddpd        %%xmm0, %%xmm2, %%xmm0            \n\t"
+
+	"vaddpd        %%xmm4, %%xmm5, %%xmm4            \n\t"
+	"vaddpd        %%xmm6, %%xmm7, %%xmm6            \n\t"
+	"vaddpd        %%xmm4, %%xmm6, %%xmm4            \n\t"
+
+	"vmovups       %%xmm0,    (%4)                   \n\t"
+	"vmovups       %%xmm4,  16(%4)                   \n\t"
+	"vzeroupper                                      \n\t"
+
+	:
+	:
+	  "r" (i),      // 0
+	  "r" (n),      // 1
+	  "r" (x),      // 2
+	  "r" (y),      // 3
+	  "r" (dot)     // 4
+	: "cc",
+	  "%xmm0", "%xmm1", "%xmm2", "%xmm3",
+	  "%xmm4", "%xmm5", "%xmm6", "%xmm7",
+	  "%xmm8", "%xmm9", "%xmm10", "%xmm11",
+	  "%xmm12", "%xmm13", "%xmm14", "%xmm15",
+	  "memory"
+	);
+	return;
+
+	}
+
+	__asm__  __volatile__
+	(
+	"vzeroupper                                      \n\t"
+	"vxorpd	%%xmm0, %%xmm0, %%xmm0                   \n\t"
+	"vxorpd	%%xmm1, %%xmm1, %%xmm1                   \n\t"
+	"vxorpd	%%xmm2, %%xmm2, %%xmm2                   \n\t"
+	"vxorpd	%%xmm3, %%xmm3, %%xmm3                   \n\t"
+	"vxorpd	%%xmm4, %%xmm4, %%xmm4                   \n\t"
+	"vxorpd	%%xmm5, %%xmm5, %%xmm5                   \n\t"
+	"vxorpd	%%xmm6, %%xmm6, %%xmm6                   \n\t"
+	"vxorpd	%%xmm7, %%xmm7, %%xmm7                   \n\t"
+
+	".align 16                                       \n\t"
+	"1:                                              \n\t"
+	"prefetcht0 384(%2,%0,8)                         \n\t"
+	"vmovups        (%2,%0,8), %%xmm8                \n\t"	// 1 * x
+	"vmovups      16(%2,%0,8), %%xmm9                \n\t"	// 1 * x
+
+	"prefetcht0 384(%3,%0,8)                         \n\t"
 	"vmovups        (%3,%0,8), %%xmm12               \n\t"	// 1 * y
 	"vmovups      16(%3,%0,8), %%xmm13               \n\t"	// 1 * y
@@ -110,6 +190,7 @@ static void zdot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot)
 	  "memory"
 	);
 
+
 }
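Both paths keep four separate accumulator groups instead of forming the
complex product inside the loop: xmm0-xmm3 collect (x_r*y_r, x_i*y_i) pairs,
and after vpermilpd swaps the real/imaginary halves of y, xmm4-xmm7 collect
(x_r*y_i, x_i*y_r). Leaving the sign choice to the driver lets one kernel
serve both the plain and the conjugated dot product. A sketch of the
combination step in zdot.c, assuming the usual OpenBLAS CONJ convention (not
part of this patch):

    #if !defined(CONJ)
        CREAL(result) = dot[0] - dot[1];   /* sum x_r*y_r - sum x_i*y_i */
        CIMAG(result) = dot[2] + dot[3];   /* sum x_r*y_i + sum x_i*y_r */
    #else
        CREAL(result) = dot[0] + dot[1];   /* conj(x).y: sum x_r*y_r + sum x_i*y_i */
        CIMAG(result) = dot[2] - dot[3];   /* sum x_r*y_i - sum x_i*y_r */
    #endif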
diff --git a/kernel/x86_64/zdot_microk_haswell-2.c b/kernel/x86_64/zdot_microk_haswell-2.c
index 04a6b971f..810cb4439 100644
--- a/kernel/x86_64/zdot_microk_haswell-2.c
+++ b/kernel/x86_64/zdot_microk_haswell-2.c
@@ -34,6 +34,10 @@ static void zdot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot)
 
 	BLASLONG register i = 0;
 
+	if ( n <= 1280 )
+	{
+
+
 	__asm__  __volatile__
 	(
 	"vzeroupper                                      \n\t"
@@ -111,6 +115,93 @@ static void zdot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot)
 	  "%xmm12", "%xmm13", "%xmm14", "%xmm15",
 	  "memory"
 	);
+	return;
+	}
+
+
+	__asm__  __volatile__
+	(
+	"vzeroupper                                      \n\t"
+	"vxorpd	%%ymm0, %%ymm0, %%ymm0                   \n\t"
+	"vxorpd	%%ymm1, %%ymm1, %%ymm1                   \n\t"
+	"vxorpd	%%ymm2, %%ymm2, %%ymm2                   \n\t"
+	"vxorpd	%%ymm3, %%ymm3, %%ymm3                   \n\t"
+	"vxorpd	%%ymm4, %%ymm4, %%ymm4                   \n\t"
+	"vxorpd	%%ymm5, %%ymm5, %%ymm5                   \n\t"
+	"vxorpd	%%ymm6, %%ymm6, %%ymm6                   \n\t"
+	"vxorpd	%%ymm7, %%ymm7, %%ymm7                   \n\t"
+
+	".align 16                                       \n\t"
+	"1:                                              \n\t"
+	"prefetcht0 512(%2,%0,8)                         \n\t"
+	"vmovups        (%2,%0,8), %%ymm8                \n\t"	// 2 * x
+	"vmovups      32(%2,%0,8), %%ymm9                \n\t"	// 2 * x
+
+	"prefetcht0 512(%3,%0,8)                         \n\t"
+	"vmovups        (%3,%0,8), %%ymm12               \n\t"	// 2 * y
+	"vmovups      32(%3,%0,8), %%ymm13               \n\t"	// 2 * y
+
+	"prefetcht0 576(%2,%0,8)                         \n\t"
+	"vmovups      64(%2,%0,8), %%ymm10               \n\t"	// 2 * x
+	"vmovups      96(%2,%0,8), %%ymm11               \n\t"	// 2 * x
+
+	"prefetcht0 576(%3,%0,8)                         \n\t"
+	"vmovups      64(%3,%0,8), %%ymm14               \n\t"	// 2 * y
+	"vmovups      96(%3,%0,8), %%ymm15               \n\t"	// 2 * y
+
+	"vfmadd231pd  %%ymm8 , %%ymm12, %%ymm0           \n\t"	// x_r * y_r, x_i * y_i
+	"vfmadd231pd  %%ymm9 , %%ymm13, %%ymm1           \n\t"	// x_r * y_r, x_i * y_i
+	"vpermpd   $0xb1 , %%ymm12, %%ymm12              \n\t"
+	"vpermpd   $0xb1 , %%ymm13, %%ymm13              \n\t"
+
+	"vfmadd231pd  %%ymm10, %%ymm14, %%ymm2           \n\t"	// x_r * y_r, x_i * y_i
+	"vfmadd231pd  %%ymm11, %%ymm15, %%ymm3           \n\t"	// x_r * y_r, x_i * y_i
+	"vpermpd   $0xb1 , %%ymm14, %%ymm14              \n\t"
+	"vpermpd   $0xb1 , %%ymm15, %%ymm15              \n\t"
+
+	"vfmadd231pd  %%ymm8 , %%ymm12, %%ymm4           \n\t"	// x_r * y_i, x_i * y_r
+	"addq		$16, %0                          \n\t"
+	"vfmadd231pd  %%ymm9 , %%ymm13, %%ymm5           \n\t"	// x_r * y_i, x_i * y_r
+	"vfmadd231pd  %%ymm10, %%ymm14, %%ymm6           \n\t"	// x_r * y_i, x_i * y_r
+	"subq		$8 , %1                          \n\t"
+	"vfmadd231pd  %%ymm11, %%ymm15, %%ymm7           \n\t"	// x_r * y_i, x_i * y_r
+
+	"jnz		1b                               \n\t"
+
+	"vaddpd        %%ymm0, %%ymm1, %%ymm0            \n\t"
+	"vaddpd        %%ymm2, %%ymm3, %%ymm2            \n\t"
+	"vaddpd        %%ymm0, %%ymm2, %%ymm0            \n\t"
+
+	"vaddpd        %%ymm4, %%ymm5, %%ymm4            \n\t"
+	"vaddpd        %%ymm6, %%ymm7, %%ymm6            \n\t"
+	"vaddpd        %%ymm4, %%ymm6, %%ymm4            \n\t"
+
+	"vextractf128 $1 , %%ymm0 , %%xmm1               \n\t"
+	"vextractf128 $1 , %%ymm4 , %%xmm5               \n\t"
+
+	"vaddpd        %%xmm0, %%xmm1, %%xmm0            \n\t"
+	"vaddpd        %%xmm4, %%xmm5, %%xmm4            \n\t"
+
+	"vmovups       %%xmm0,    (%4)                   \n\t"
+	"vmovups       %%xmm4,  16(%4)                   \n\t"
+	"vzeroupper                                      \n\t"
+
+	:
+	:
+	  "r" (i),      // 0
+	  "r" (n),      // 1
+	  "r" (x),      // 2
+	  "r" (y),      // 3
+	  "r" (dot)     // 4
+	: "cc",
+	  "%xmm0", "%xmm1", "%xmm2", "%xmm3",
+	  "%xmm4", "%xmm5", "%xmm6", "%xmm7",
+	  "%xmm8", "%xmm9", "%xmm10", "%xmm11",
+	  "%xmm12", "%xmm13", "%xmm14", "%xmm15",
+	  "memory"
+	);
+
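In the Haswell large-n path each iteration handles eight complex doubles in
four 256-bit ymm registers; vpermpd $0xb1 (element order 1,0,3,2) swaps the
(y_r, y_i) pairs inside each 128-bit lane, and the tail folds the upper lane
onto the lower with vextractf128 before the two 16-byte stores. A rough
intrinsics rendering of that final reduction (hypothetical helper, for
illustration only; the patch does this in assembly):

    #include <immintrin.h>

    /* Fold the ymm accumulators into dot[0..3], mirroring the
     * vextractf128/vaddpd tail of the Haswell kernel. */
    static inline void zdot_reduce_ymm(__m256d sum_rr_ii, __m256d sum_ri_ir,
                                       double *dot)
    {
        __m128d lo, hi;

        lo = _mm256_castpd256_pd128(sum_rr_ii);      /* lower 128-bit lane */
        hi = _mm256_extractf128_pd(sum_rr_ii, 1);    /* vextractf128 $1    */
        _mm_storeu_pd(&dot[0], _mm_add_pd(lo, hi));  /* sum x_r*y_r, sum x_i*y_i */

        lo = _mm256_castpd256_pd128(sum_ri_ir);
        hi = _mm256_extractf128_pd(sum_ri_ir, 1);
        _mm_storeu_pd(&dot[2], _mm_add_pd(lo, hi));  /* sum x_r*y_i, sum x_i*y_r */
    }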