From f5d847122a72631cd622bcaefd1c1e85dcc3d0d6 Mon Sep 17 00:00:00 2001 From: Werner Saar Date: Wed, 15 Apr 2015 11:59:38 +0200 Subject: [PATCH 1/4] updated caxpy_microk_bulldozer-2.c and caxpy.c --- kernel/x86_64/caxpy.c | 4 +- kernel/x86_64/caxpy_microk_bulldozer-2.c | 179 +++++++++++++++-------- 2 files changed, 124 insertions(+), 59 deletions(-) diff --git a/kernel/x86_64/caxpy.c b/kernel/x86_64/caxpy.c index be945a441..29342f46f 100644 --- a/kernel/x86_64/caxpy.c +++ b/kernel/x86_64/caxpy.c @@ -78,13 +78,13 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r, FLOAT da_i, if ( (inc_x == 1) && (inc_y == 1) ) { - int n1 = n & -8; + int n1 = n & -16; if ( n1 ) { da[0] = da_r; da[1] = da_i; - caxpy_kernel_8(n1, x, y , &da ); + caxpy_kernel_8(n1, x, y , da ); ix = 2 * n1; } i = n1; diff --git a/kernel/x86_64/caxpy_microk_bulldozer-2.c b/kernel/x86_64/caxpy_microk_bulldozer-2.c index 63575c374..33bda0943 100644 --- a/kernel/x86_64/caxpy_microk_bulldozer-2.c +++ b/kernel/x86_64/caxpy_microk_bulldozer-2.c @@ -31,89 +31,87 @@ static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y , FLOAT *alpha) __att static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) { +#if !defined(CONJ) + FLOAT mvec[4] = { -1.0, 1.0, -1.0, 1.0 }; +#else + FLOAT mvec[4] = { 1.0, -1.0, 1.0, -1.0 }; +#endif BLASLONG register i = 0; + if ( n < 640 ) + { + __asm__ __volatile__ ( + "vzeroupper \n\t" "vbroadcastss (%4), %%xmm0 \n\t" // real part of alpha "vbroadcastss 4(%4), %%xmm1 \n\t" // imag part of alpha +#if !defined(CONJ) + "vmulps (%5), %%xmm1 , %%xmm1 \n\t" +#else + "vmulps (%5), %%xmm0 , %%xmm0 \n\t" +#endif ".align 16 \n\t" "1: \n\t" - "prefetcht0 768(%2,%0,4) \n\t" "vmovups (%2,%0,4), %%xmm5 \n\t" // 2 complex values from x + ".align 2 \n\t" "vmovups 16(%2,%0,4), %%xmm7 \n\t" // 2 complex values from x "vmovups 32(%2,%0,4), %%xmm9 \n\t" // 2 complex values from x "vmovups 48(%2,%0,4), %%xmm11 \n\t" // 2 complex values from x - "prefetcht0 768(%3,%0,4) \n\t" -#if !defined(CONJ) - "vfmaddps (%3,%0,4), %%xmm0 , %%xmm5, %%xmm12 \n\t" + "vmovups 64(%2,%0,4), %%xmm12 \n\t" // 2 complex values from x + "vmovups 80(%2,%0,4), %%xmm13 \n\t" // 2 complex values from x + "vmovups 96(%2,%0,4), %%xmm14 \n\t" // 2 complex values from x + "vmovups 112(%2,%0,4), %%xmm15 \n\t" // 2 complex values from x + "vpermilps $0xb1 , %%xmm5 , %%xmm4 \n\t" // exchange real and imag part - "vmulps %%xmm1, %%xmm4 , %%xmm4 \n\t" - - "vfmaddps 16(%3,%0,4), %%xmm0 , %%xmm7, %%xmm13 \n\t" "vpermilps $0xb1 , %%xmm7 , %%xmm6 \n\t" // exchange real and imag part - "vmulps %%xmm1, %%xmm6 , %%xmm6 \n\t" - - "vfmaddps 32(%3,%0,4), %%xmm0 , %%xmm9, %%xmm14 \n\t" "vpermilps $0xb1 , %%xmm9 , %%xmm8 \n\t" // exchange real and imag part - "vmulps %%xmm1, %%xmm8 , %%xmm8 \n\t" - - "vfmaddps 48(%3,%0,4), %%xmm0 , %%xmm11,%%xmm15 \n\t" "vpermilps $0xb1 , %%xmm11, %%xmm10 \n\t" // exchange real and imag part - "vmulps %%xmm1, %%xmm10, %%xmm10 \n\t" - "vaddsubps %%xmm4, %%xmm12, %%xmm12 \n\t" - "vaddsubps %%xmm6, %%xmm13, %%xmm13 \n\t" - "vaddsubps %%xmm8, %%xmm14, %%xmm14 \n\t" - "vaddsubps %%xmm10,%%xmm15, %%xmm15 \n\t" + "vfmaddps (%3,%0,4), %%xmm0 , %%xmm5, %%xmm5 \n\t" + ".align 2 \n\t" + "vfmaddps 16(%3,%0,4), %%xmm0 , %%xmm7, %%xmm7 \n\t" + "vfmaddps 32(%3,%0,4), %%xmm0 , %%xmm9, %%xmm9 \n\t" + "vfmaddps 48(%3,%0,4), %%xmm0 , %%xmm11,%%xmm11 \n\t" -#else + "vfmaddps %%xmm5 , %%xmm1 , %%xmm4 , %%xmm5 \n\t" + "vfmaddps %%xmm7 , %%xmm1 , %%xmm6 , %%xmm7 \n\t" + "vfmaddps %%xmm9 , %%xmm1 , %%xmm8 , %%xmm9 \n\t" + "vfmaddps 
%%xmm11, %%xmm1 , %%xmm10, %%xmm11 \n\t" - "vmulps %%xmm0, %%xmm5, %%xmm4 \n\t" // a_r*x_r, a_r*x_i - "vmulps %%xmm1, %%xmm5, %%xmm5 \n\t" // a_i*x_r, a_i*x_i - "vmulps %%xmm0, %%xmm7, %%xmm6 \n\t" // a_r*x_r, a_r*x_i - "vmulps %%xmm1, %%xmm7, %%xmm7 \n\t" // a_i*x_r, a_i*x_i - "vmulps %%xmm0, %%xmm9, %%xmm8 \n\t" // a_r*x_r, a_r*x_i - "vmulps %%xmm1, %%xmm9, %%xmm9 \n\t" // a_i*x_r, a_i*x_i - "vmulps %%xmm0, %%xmm11, %%xmm10 \n\t" // a_r*x_r, a_r*x_i - "vmulps %%xmm1, %%xmm11, %%xmm11 \n\t" // a_i*x_r, a_i*x_i + "vpermilps $0xb1 , %%xmm12, %%xmm4 \n\t" // exchange real and imag part + "vpermilps $0xb1 , %%xmm13, %%xmm6 \n\t" // exchange real and imag part + "vpermilps $0xb1 , %%xmm14, %%xmm8 \n\t" // exchange real and imag part + "vpermilps $0xb1 , %%xmm15, %%xmm10 \n\t" // exchange real and imag part - "vpermilps $0xb1 , %%xmm4 , %%xmm4 \n\t" // exchange real and imag part - "vaddsubps %%xmm4 ,%%xmm5 , %%xmm4 \n\t" - "vpermilps $0xb1 , %%xmm4 , %%xmm4 \n\t" // exchange real and imag part + "vfmaddps 64(%3,%0,4), %%xmm0 , %%xmm12, %%xmm12 \n\t" + "vfmaddps 80(%3,%0,4), %%xmm0 , %%xmm13, %%xmm13 \n\t" + "vfmaddps 96(%3,%0,4), %%xmm0 , %%xmm14, %%xmm14 \n\t" + "vfmaddps 112(%3,%0,4), %%xmm0 , %%xmm15, %%xmm15 \n\t" - "vpermilps $0xb1 , %%xmm6 , %%xmm6 \n\t" // exchange real and imag part - "vaddsubps %%xmm6 ,%%xmm7 , %%xmm6 \n\t" - "vpermilps $0xb1 , %%xmm6 , %%xmm6 \n\t" // exchange real and imag part + "vfmaddps %%xmm12, %%xmm1 , %%xmm4 , %%xmm12 \n\t" + "vfmaddps %%xmm13, %%xmm1 , %%xmm6 , %%xmm13 \n\t" + "vfmaddps %%xmm14, %%xmm1 , %%xmm8 , %%xmm14 \n\t" + "vfmaddps %%xmm15, %%xmm1 , %%xmm10, %%xmm15 \n\t" - "vpermilps $0xb1 , %%xmm8 , %%xmm8 \n\t" // exchange real and imag part - "vaddsubps %%xmm8 ,%%xmm9 , %%xmm8 \n\t" - "vpermilps $0xb1 , %%xmm8 , %%xmm8 \n\t" // exchange real and imag part + "vmovups %%xmm5 , (%3,%0,4) \n\t" + ".align 2 \n\t" + "vmovups %%xmm7 , 16(%3,%0,4) \n\t" + "vmovups %%xmm9 , 32(%3,%0,4) \n\t" + "vmovups %%xmm11, 48(%3,%0,4) \n\t" + "vmovups %%xmm12, 64(%3,%0,4) \n\t" + "vmovups %%xmm13, 80(%3,%0,4) \n\t" + "vmovups %%xmm14, 96(%3,%0,4) \n\t" + "vmovups %%xmm15,112(%3,%0,4) \n\t" - "vpermilps $0xb1 , %%xmm10, %%xmm10 \n\t" // exchange real and imag part - "vaddsubps %%xmm10,%%xmm11, %%xmm10 \n\t" - "vpermilps $0xb1 , %%xmm10, %%xmm10 \n\t" // exchange real and imag part - - "vaddps (%3,%0,4) ,%%xmm4 , %%xmm12 \n\t" - "vaddps 16(%3,%0,4) ,%%xmm6 , %%xmm13 \n\t" - "vaddps 32(%3,%0,4) ,%%xmm8 , %%xmm14 \n\t" - "vaddps 48(%3,%0,4) ,%%xmm10, %%xmm15 \n\t" - - -#endif - - "vmovups %%xmm12, (%3,%0,4) \n\t" - "vmovups %%xmm13, 16(%3,%0,4) \n\t" - "vmovups %%xmm14, 32(%3,%0,4) \n\t" - "vmovups %%xmm15, 48(%3,%0,4) \n\t" - - "addq $16, %0 \n\t" - "subq $8 , %1 \n\t" + "addq $32, %0 \n\t" + "subq $16, %1 \n\t" "jnz 1b \n\t" + "vzeroupper \n\t" : : @@ -121,7 +119,8 @@ static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "r" (n), // 1 "r" (x), // 2 "r" (y), // 3 - "r" (alpha) // 4 + "r" (alpha), // 4 + "r" (mvec) // 5 : "cc", "%xmm0", "%xmm1", "%xmm4", "%xmm5", "%xmm6", "%xmm7", @@ -129,7 +128,73 @@ static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); + return; + } + + __asm__ __volatile__ + ( + "vzeroupper \n\t" + "vbroadcastss (%4), %%xmm0 \n\t" // real part of alpha + "vbroadcastss 4(%4), %%xmm1 \n\t" // imag part of alpha +#if !defined(CONJ) + "vmulps (%5), %%xmm1 , %%xmm1 \n\t" +#else + "vmulps (%5), %%xmm0 , %%xmm0 \n\t" +#endif + + ".align 16 \n\t" + "1: \n\t" + + 
"prefetcht0 512(%2,%0,4) \n\t" + "vmovups (%2,%0,4), %%xmm5 \n\t" // 2 complex values from x + ".align 2 \n\t" + "vmovups 16(%2,%0,4), %%xmm7 \n\t" // 2 complex values from x + "vmovups 32(%2,%0,4), %%xmm9 \n\t" // 2 complex values from x + "vmovups 48(%2,%0,4), %%xmm11 \n\t" // 2 complex values from x + + "vpermilps $0xb1 , %%xmm5 , %%xmm4 \n\t" // exchange real and imag part + "vpermilps $0xb1 , %%xmm7 , %%xmm6 \n\t" // exchange real and imag part + "vpermilps $0xb1 , %%xmm9 , %%xmm8 \n\t" // exchange real and imag part + "vpermilps $0xb1 , %%xmm11, %%xmm10 \n\t" // exchange real and imag part + + "prefetcht0 512(%3,%0,4) \n\t" + "vfmaddps (%3,%0,4), %%xmm0 , %%xmm5, %%xmm5 \n\t" + ".align 2 \n\t" + "vfmaddps 16(%3,%0,4), %%xmm0 , %%xmm7, %%xmm7 \n\t" + "vfmaddps 32(%3,%0,4), %%xmm0 , %%xmm9, %%xmm9 \n\t" + "vfmaddps 48(%3,%0,4), %%xmm0 , %%xmm11,%%xmm11 \n\t" + + "vfmaddps %%xmm5 , %%xmm1 , %%xmm4 , %%xmm5 \n\t" + "vfmaddps %%xmm7 , %%xmm1 , %%xmm6 , %%xmm7 \n\t" + "vfmaddps %%xmm9 , %%xmm1 , %%xmm8 , %%xmm9 \n\t" + "vfmaddps %%xmm11, %%xmm1 , %%xmm10, %%xmm11 \n\t" + + "vmovups %%xmm5 , (%3,%0,4) \n\t" + ".align 2 \n\t" + "vmovups %%xmm7 , 16(%3,%0,4) \n\t" + "vmovups %%xmm9 , 32(%3,%0,4) \n\t" + "vmovups %%xmm11, 48(%3,%0,4) \n\t" + + "addq $16, %0 \n\t" + "subq $8, %1 \n\t" + "jnz 1b \n\t" + "vzeroupper \n\t" + + : + : + "r" (i), // 0 + "r" (n), // 1 + "r" (x), // 2 + "r" (y), // 3 + "r" (alpha), // 4 + "r" (mvec) // 5 + : "cc", + "%xmm0", "%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "memory" + ); + } - From e9f33b4ca73ca58cbc21fdfc690bd7bc6a0993e9 Mon Sep 17 00:00:00 2001 From: Werner Saar Date: Wed, 15 Apr 2015 13:49:23 +0200 Subject: [PATCH 2/4] added optimized caxpy-kernel for steamroller --- kernel/x86_64/caxpy.c | 4 +- kernel/x86_64/caxpy_microk_steamroller-2.c | 200 +++++++++++++++++++++ 2 files changed, 203 insertions(+), 1 deletion(-) create mode 100644 kernel/x86_64/caxpy_microk_steamroller-2.c diff --git a/kernel/x86_64/caxpy.c b/kernel/x86_64/caxpy.c index 29342f46f..80d3a763c 100644 --- a/kernel/x86_64/caxpy.c +++ b/kernel/x86_64/caxpy.c @@ -29,7 +29,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" -#if defined(BULLDOZER) || defined(PILEDRIVER) || defined(STEAMROLLER) +#if defined(PILEDRIVER) || defined(STEAMROLLER) +#include "caxpy_microk_steamroller-2.c" +#elif defined(BULLDOZER) #include "caxpy_microk_bulldozer-2.c" #endif diff --git a/kernel/x86_64/caxpy_microk_steamroller-2.c b/kernel/x86_64/caxpy_microk_steamroller-2.c new file mode 100644 index 000000000..87370b032 --- /dev/null +++ b/kernel/x86_64/caxpy_microk_steamroller-2.c @@ -0,0 +1,200 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. 
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#define HAVE_KERNEL_8 1 +static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y , FLOAT *alpha) __attribute__ ((noinline)); + +static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) +{ + +#if !defined(CONJ) + FLOAT mvec[4] = { -1.0, 1.0, -1.0, 1.0 }; +#else + FLOAT mvec[4] = { 1.0, -1.0, 1.0, -1.0 }; +#endif + + BLASLONG register i = 0; + + if ( n <= 2048 ) + { + + __asm__ __volatile__ + ( + "vzeroupper \n\t" + "vbroadcastss (%4), %%xmm0 \n\t" // real part of alpha + "vbroadcastss 4(%4), %%xmm1 \n\t" // imag part of alpha +#if !defined(CONJ) + "vmulps (%5), %%xmm1 , %%xmm1 \n\t" +#else + "vmulps (%5), %%xmm0 , %%xmm0 \n\t" +#endif + + ".align 16 \n\t" + "1: \n\t" + + "vmovups (%2,%0,4), %%xmm5 \n\t" // 2 complex values from x + ".align 2 \n\t" + "vmovups 16(%2,%0,4), %%xmm7 \n\t" // 2 complex values from x + "vmovups 32(%2,%0,4), %%xmm9 \n\t" // 2 complex values from x + "vmovups 48(%2,%0,4), %%xmm11 \n\t" // 2 complex values from x + + "vmovups 64(%2,%0,4), %%xmm12 \n\t" // 2 complex values from x + "vmovups 80(%2,%0,4), %%xmm13 \n\t" // 2 complex values from x + "vmovups 96(%2,%0,4), %%xmm14 \n\t" // 2 complex values from x + "vmovups 112(%2,%0,4), %%xmm15 \n\t" // 2 complex values from x + + "vpermilps $0xb1 , %%xmm5 , %%xmm4 \n\t" // exchange real and imag part + "vpermilps $0xb1 , %%xmm7 , %%xmm6 \n\t" // exchange real and imag part + "vpermilps $0xb1 , %%xmm9 , %%xmm8 \n\t" // exchange real and imag part + "vpermilps $0xb1 , %%xmm11, %%xmm10 \n\t" // exchange real and imag part + + "vfmadd213ps (%3,%0,4), %%xmm0 , %%xmm5 \n\t" + ".align 2 \n\t" + "vfmadd213ps 16(%3,%0,4), %%xmm0 , %%xmm7 \n\t" + "vfmadd213ps 32(%3,%0,4), %%xmm0 , %%xmm9 \n\t" + "vfmadd213ps 48(%3,%0,4), %%xmm0 , %%xmm11 \n\t" + + "vfmadd231ps %%xmm1 , %%xmm4 , %%xmm5 \n\t" + "vfmadd231ps %%xmm1 , %%xmm6 , %%xmm7 \n\t" + "vfmadd231ps %%xmm1 , %%xmm8 , %%xmm9 \n\t" + "vfmadd231ps %%xmm1 , %%xmm10, %%xmm11 \n\t" + + "vpermilps $0xb1 , %%xmm12, %%xmm4 \n\t" // exchange real and imag part + "vpermilps $0xb1 , %%xmm13, %%xmm6 \n\t" // exchange real and imag part + "vpermilps $0xb1 , %%xmm14, %%xmm8 \n\t" // exchange real and imag part + "vpermilps $0xb1 , %%xmm15, %%xmm10 \n\t" // exchange real and imag part + + "vfmadd213ps 64(%3,%0,4), %%xmm0 , %%xmm12 \n\t" + "vfmadd213ps 80(%3,%0,4), %%xmm0 , %%xmm13 \n\t" + "vfmadd213ps 96(%3,%0,4), %%xmm0 , %%xmm14 \n\t" + "vfmadd213ps 112(%3,%0,4), %%xmm0 , %%xmm15 \n\t" + + "vfmadd231ps %%xmm1 , %%xmm4 , %%xmm12 \n\t" + "vfmadd231ps %%xmm1 , %%xmm6 , %%xmm13 \n\t" + "vfmadd231ps %%xmm1 , %%xmm8 , %%xmm14 \n\t" + "vfmadd231ps %%xmm1 , %%xmm10, %%xmm15 \n\t" + + "vmovups %%xmm5 , (%3,%0,4) \n\t" + ".align 2 \n\t" + "vmovups 
%%xmm7 , 16(%3,%0,4) \n\t" + "vmovups %%xmm9 , 32(%3,%0,4) \n\t" + "vmovups %%xmm11, 48(%3,%0,4) \n\t" + "vmovups %%xmm12, 64(%3,%0,4) \n\t" + "vmovups %%xmm13, 80(%3,%0,4) \n\t" + "vmovups %%xmm14, 96(%3,%0,4) \n\t" + "vmovups %%xmm15,112(%3,%0,4) \n\t" + + "addq $32, %0 \n\t" + "subq $16, %1 \n\t" + "jnz 1b \n\t" + "vzeroupper \n\t" + + : + : + "r" (i), // 0 + "r" (n), // 1 + "r" (x), // 2 + "r" (y), // 3 + "r" (alpha), // 4 + "r" (mvec) // 5 + : "cc", + "%xmm0", "%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + return; + } + + __asm__ __volatile__ + ( + "vzeroupper \n\t" + "vbroadcastss (%4), %%xmm0 \n\t" // real part of alpha + "vbroadcastss 4(%4), %%xmm1 \n\t" // imag part of alpha +#if !defined(CONJ) + "vmulps (%5), %%xmm1 , %%xmm1 \n\t" +#else + "vmulps (%5), %%xmm0 , %%xmm0 \n\t" +#endif + + ".align 16 \n\t" + "1: \n\t" + + "prefetcht0 512(%2,%0,4) \n\t" + "vmovups (%2,%0,4), %%xmm5 \n\t" // 2 complex values from x + ".align 2 \n\t" + "vmovups 16(%2,%0,4), %%xmm7 \n\t" // 2 complex values from x + "vmovups 32(%2,%0,4), %%xmm9 \n\t" // 2 complex values from x + "vmovups 48(%2,%0,4), %%xmm11 \n\t" // 2 complex values from x + + "vpermilps $0xb1 , %%xmm5 , %%xmm4 \n\t" // exchange real and imag part + "vpermilps $0xb1 , %%xmm7 , %%xmm6 \n\t" // exchange real and imag part + "vpermilps $0xb1 , %%xmm9 , %%xmm8 \n\t" // exchange real and imag part + "vpermilps $0xb1 , %%xmm11, %%xmm10 \n\t" // exchange real and imag part + + "prefetcht0 512(%3,%0,4) \n\t" + "vfmadd213ps (%3,%0,4), %%xmm0 , %%xmm5 \n\t" + ".align 2 \n\t" + "vfmadd213ps 16(%3,%0,4), %%xmm0 , %%xmm7 \n\t" + "vfmadd213ps 32(%3,%0,4), %%xmm0 , %%xmm9 \n\t" + "vfmadd213ps 48(%3,%0,4), %%xmm0 , %%xmm11 \n\t" + + "vfmadd231ps %%xmm1 , %%xmm4 , %%xmm5 \n\t" + "vfmadd231ps %%xmm1 , %%xmm6 , %%xmm7 \n\t" + "vfmadd231ps %%xmm1 , %%xmm8 , %%xmm9 \n\t" + "vfmadd231ps %%xmm1 , %%xmm10, %%xmm11 \n\t" + + "vmovups %%xmm5 , (%3,%0,4) \n\t" + ".align 2 \n\t" + "vmovups %%xmm7 , 16(%3,%0,4) \n\t" + "vmovups %%xmm9 , 32(%3,%0,4) \n\t" + "vmovups %%xmm11, 48(%3,%0,4) \n\t" + + "addq $16, %0 \n\t" + "subq $8, %1 \n\t" + "jnz 1b \n\t" + "vzeroupper \n\t" + + : + : + "r" (i), // 0 + "r" (n), // 1 + "r" (x), // 2 + "r" (y), // 3 + "r" (alpha), // 4 + "r" (mvec) // 5 + : "cc", + "%xmm0", "%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "memory" + ); + + +} + From 248c9340c3e7c3627e0813f02df9d9149a71f812 Mon Sep 17 00:00:00 2001 From: Werner Saar Date: Wed, 15 Apr 2015 15:16:31 +0200 Subject: [PATCH 3/4] added optimized caxpy-kernel for haswell --- kernel/x86_64/KERNEL.HASWELL | 4 +- kernel/x86_64/caxpy.c | 5 +- kernel/x86_64/caxpy_microk_haswell-2.c | 132 +++++++++++++++++++++++++ 3 files changed, 138 insertions(+), 3 deletions(-) create mode 100644 kernel/x86_64/caxpy_microk_haswell-2.c diff --git a/kernel/x86_64/KERNEL.HASWELL b/kernel/x86_64/KERNEL.HASWELL index a6e085d18..9cce7772f 100644 --- a/kernel/x86_64/KERNEL.HASWELL +++ b/kernel/x86_64/KERNEL.HASWELL @@ -15,9 +15,9 @@ DDOTKERNEL = ddot.c CDOTKERNEL = cdot.c ZDOTKERNEL = zdot.c - -DAXPYKERNEL = daxpy.c SAXPYKERNEL = saxpy.c +DAXPYKERNEL = daxpy.c +CAXPYKERNEL = caxpy.c SGEMMKERNEL = sgemm_kernel_16x4_haswell.S SGEMMINCOPY = ../generic/gemm_ncopy_16.c diff --git a/kernel/x86_64/caxpy.c b/kernel/x86_64/caxpy.c index 80d3a763c..ce174c59d 100644 --- a/kernel/x86_64/caxpy.c +++ b/kernel/x86_64/caxpy.c @@ -33,6 +33,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED 
OF THE POSSIBILITY OF SUCH DAMAGE. #include "caxpy_microk_steamroller-2.c" #elif defined(BULLDOZER) #include "caxpy_microk_bulldozer-2.c" +#elif defined(HASWELL) +#include "caxpy_microk_haswell-2.c" #endif @@ -80,7 +82,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r, FLOAT da_i, if ( (inc_x == 1) && (inc_y == 1) ) { - int n1 = n & -16; + int n1 = n & -32; if ( n1 ) { @@ -89,6 +91,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r, FLOAT da_i, caxpy_kernel_8(n1, x, y , da ); ix = 2 * n1; } + i = n1; while(i < n) { diff --git a/kernel/x86_64/caxpy_microk_haswell-2.c b/kernel/x86_64/caxpy_microk_haswell-2.c new file mode 100644 index 000000000..7a9fc1b95 --- /dev/null +++ b/kernel/x86_64/caxpy_microk_haswell-2.c @@ -0,0 +1,132 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +#define HAVE_KERNEL_8 1 +static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y , FLOAT *alpha) __attribute__ ((noinline)); + +static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) +{ + +#if !defined(CONJ) + FLOAT mvec[8] = { -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0 }; +#else + FLOAT mvec[8] = { 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0 }; +#endif + + BLASLONG register i = 0; + + __asm__ __volatile__ + ( + "vzeroupper \n\t" + "vbroadcastss (%4), %%ymm0 \n\t" // real part of alpha + "vbroadcastss 4(%4), %%ymm1 \n\t" // imag part of alpha +#if !defined(CONJ) + "vmulps (%5), %%ymm1 , %%ymm1 \n\t" +#else + "vmulps (%5), %%ymm0 , %%ymm0 \n\t" +#endif + + ".align 16 \n\t" + "1: \n\t" + + "vmovups (%2,%0,4), %%ymm5 \n\t" // 4 complex values from x + ".align 2 \n\t" + "vmovups 32(%2,%0,4), %%ymm7 \n\t" // 4 complex values from x + "vmovups 64(%2,%0,4), %%ymm9 \n\t" // 4 complex values from x + "vmovups 96(%2,%0,4), %%ymm11 \n\t" // 4 complex values from x + + "vmovups 128(%2,%0,4), %%ymm12 \n\t" // 4 complex values from x + "vmovups 160(%2,%0,4), %%ymm13 \n\t" // 4 complex values from x + "vmovups 192(%2,%0,4), %%ymm14 \n\t" // 4 complex values from x + "vmovups 224(%2,%0,4), %%ymm15 \n\t" // 4 complex values from x + + "vpermilps $0xb1 , %%ymm5 , %%ymm4 \n\t" // exchange real and imag part + "vpermilps $0xb1 , %%ymm7 , %%ymm6 \n\t" // exchange real and imag part + "vpermilps $0xb1 , %%ymm9 , %%ymm8 \n\t" // exchange real and imag part + "vpermilps $0xb1 , %%ymm11, %%ymm10 \n\t" // exchange real and imag part + + "vfmadd213ps (%3,%0,4), %%ymm0 , %%ymm5 \n\t" + ".align 2 \n\t" + "vfmadd213ps 32(%3,%0,4), %%ymm0 , %%ymm7 \n\t" + "vfmadd213ps 64(%3,%0,4), %%ymm0 , %%ymm9 \n\t" + "vfmadd213ps 96(%3,%0,4), %%ymm0 , %%ymm11 \n\t" + + "vfmadd231ps %%ymm1 , %%ymm4 , %%ymm5 \n\t" + "vfmadd231ps %%ymm1 , %%ymm6 , %%ymm7 \n\t" + "vfmadd231ps %%ymm1 , %%ymm8 , %%ymm9 \n\t" + "vfmadd231ps %%ymm1 , %%ymm10, %%ymm11 \n\t" + + "vpermilps $0xb1 , %%ymm12, %%ymm4 \n\t" // exchange real and imag part + "vpermilps $0xb1 , %%ymm13, %%ymm6 \n\t" // exchange real and imag part + "vpermilps $0xb1 , %%ymm14, %%ymm8 \n\t" // exchange real and imag part + "vpermilps $0xb1 , %%ymm15, %%ymm10 \n\t" // exchange real and imag part + + "vfmadd213ps 128(%3,%0,4), %%ymm0 , %%ymm12 \n\t" + "vfmadd213ps 160(%3,%0,4), %%ymm0 , %%ymm13 \n\t" + "vfmadd213ps 192(%3,%0,4), %%ymm0 , %%ymm14 \n\t" + "vfmadd213ps 224(%3,%0,4), %%ymm0 , %%ymm15 \n\t" + + "vfmadd231ps %%ymm1 , %%ymm4 , %%ymm12 \n\t" + "vfmadd231ps %%ymm1 , %%ymm6 , %%ymm13 \n\t" + "vfmadd231ps %%ymm1 , %%ymm8 , %%ymm14 \n\t" + "vfmadd231ps %%ymm1 , %%ymm10, %%ymm15 \n\t" + + "vmovups %%ymm5 , (%3,%0,4) \n\t" + ".align 2 \n\t" + "vmovups %%ymm7 , 32(%3,%0,4) \n\t" + "vmovups %%ymm9 , 64(%3,%0,4) \n\t" + "vmovups %%ymm11, 96(%3,%0,4) \n\t" + + "vmovups %%ymm12,128(%3,%0,4) \n\t" + "vmovups %%ymm13,160(%3,%0,4) \n\t" + "vmovups %%ymm14,192(%3,%0,4) \n\t" + "vmovups %%ymm15,224(%3,%0,4) \n\t" + + "addq $64, %0 \n\t" + "subq $32, %1 \n\t" + "jnz 1b \n\t" + "vzeroupper \n\t" + + : + : + "r" (i), // 0 + "r" (n), // 1 + "r" (x), // 2 + "r" (y), // 3 + "r" (alpha), // 4 + "r" (mvec) // 5 + : "cc", + "%xmm0", "%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + + +} + From 13889515b3531a3b453cad8917305a84468eb7f9 Mon Sep 17 00:00:00 2001 From: Werner Saar Date: Wed, 15 Apr 2015 
16:29:25 +0200
Subject: [PATCH 4/4] added optimized caxpy-kernel for sandybridge

---
 kernel/x86_64/KERNEL.SANDYBRIDGE     |   1 +
 kernel/x86_64/caxpy.c                |   2 +
 kernel/x86_64/caxpy_microk_sandy-2.c | 116 +++++++++++++++++++++++++++
 3 files changed, 119 insertions(+)
 create mode 100644 kernel/x86_64/caxpy_microk_sandy-2.c

diff --git a/kernel/x86_64/KERNEL.SANDYBRIDGE b/kernel/x86_64/KERNEL.SANDYBRIDGE
index a60f4a17a..b783c9a90 100644
--- a/kernel/x86_64/KERNEL.SANDYBRIDGE
+++ b/kernel/x86_64/KERNEL.SANDYBRIDGE
@@ -11,6 +11,7 @@ ZDOTKERNEL = zdot.c
 
 SAXPYKERNEL = saxpy.c
 DAXPYKERNEL = daxpy.c
+CAXPYKERNEL = caxpy.c
 
 SGEMMKERNEL = sgemm_kernel_16x4_sandy.S
 SGEMMINCOPY = ../generic/gemm_ncopy_16.c
diff --git a/kernel/x86_64/caxpy.c b/kernel/x86_64/caxpy.c
index ce174c59d..455d9d2ce 100644
--- a/kernel/x86_64/caxpy.c
+++ b/kernel/x86_64/caxpy.c
@@ -35,6 +35,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include "caxpy_microk_bulldozer-2.c"
 #elif defined(HASWELL)
 #include "caxpy_microk_haswell-2.c"
+#elif defined(SANDYBRIDGE)
+#include "caxpy_microk_sandy-2.c"
 #endif
 
 
diff --git a/kernel/x86_64/caxpy_microk_sandy-2.c b/kernel/x86_64/caxpy_microk_sandy-2.c
new file mode 100644
index 000000000..dbfce208f
--- /dev/null
+++ b/kernel/x86_64/caxpy_microk_sandy-2.c
@@ -0,0 +1,116 @@
+/***************************************************************************
+Copyright (c) 2014, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/ + +#define HAVE_KERNEL_8 1 +static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y , FLOAT *alpha) __attribute__ ((noinline)); + +static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) +{ + +#if !defined(CONJ) + FLOAT mvec[8] = { -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0 }; +#else + FLOAT mvec[8] = { 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0 }; +#endif + + BLASLONG register i = 0; + + __asm__ __volatile__ + ( + "vzeroupper \n\t" + "vbroadcastss (%4), %%ymm0 \n\t" // real part of alpha + "vbroadcastss 4(%4), %%ymm1 \n\t" // imag part of alpha +#if !defined(CONJ) + "vmulps (%5), %%ymm1 , %%ymm1 \n\t" +#else + "vmulps (%5), %%ymm0 , %%ymm0 \n\t" +#endif + + ".align 16 \n\t" + "1: \n\t" + + "vmovups (%2,%0,4), %%ymm5 \n\t" // 4 complex values from x + ".align 2 \n\t" + "vmovups 32(%2,%0,4), %%ymm7 \n\t" // 4 complex values from x + "vmovups 64(%2,%0,4), %%ymm9 \n\t" // 4 complex values from x + "vmovups 96(%2,%0,4), %%ymm11 \n\t" // 4 complex values from x + + "vpermilps $0xb1 , %%ymm5 , %%ymm4 \n\t" // exchange real and imag part + "vpermilps $0xb1 , %%ymm7 , %%ymm6 \n\t" // exchange real and imag part + "vpermilps $0xb1 , %%ymm9 , %%ymm8 \n\t" // exchange real and imag part + "vpermilps $0xb1 , %%ymm11, %%ymm10 \n\t" // exchange real and imag part + + "vmulps %%ymm5 , %%ymm0 , %%ymm5 \n\t" + "vmulps %%ymm7 , %%ymm0 , %%ymm7 \n\t" + "vmulps %%ymm9 , %%ymm0 , %%ymm9 \n\t" + "vmulps %%ymm11, %%ymm0 , %%ymm11 \n\t" + + "vaddps (%3,%0,4), %%ymm5 , %%ymm5 \n\t" + "vaddps 32(%3,%0,4), %%ymm7 , %%ymm7 \n\t" + "vaddps 64(%3,%0,4), %%ymm9 , %%ymm9 \n\t" + "vaddps 96(%3,%0,4), %%ymm11, %%ymm11 \n\t" + + "vmulps %%ymm4 , %%ymm1 , %%ymm4 \n\t" + "vmulps %%ymm6 , %%ymm1 , %%ymm6 \n\t" + "vmulps %%ymm8 , %%ymm1 , %%ymm8 \n\t" + "vmulps %%ymm10, %%ymm1 , %%ymm10 \n\t" + + "vaddps %%ymm4 , %%ymm5 , %%ymm5 \n\t" + "vaddps %%ymm6 , %%ymm7 , %%ymm7 \n\t" + "vaddps %%ymm8 , %%ymm9 , %%ymm9 \n\t" + "vaddps %%ymm10, %%ymm11, %%ymm11 \n\t" + + "vmovups %%ymm5 , (%3,%0,4) \n\t" + ".align 2 \n\t" + "vmovups %%ymm7 , 32(%3,%0,4) \n\t" + "vmovups %%ymm9 , 64(%3,%0,4) \n\t" + "vmovups %%ymm11, 96(%3,%0,4) \n\t" + + "addq $32, %0 \n\t" + "subq $16, %1 \n\t" + "jnz 1b \n\t" + "vzeroupper \n\t" + + : + : + "r" (i), // 0 + "r" (n), // 1 + "r" (x), // 2 + "r" (y), // 3 + "r" (alpha), // 4 + "r" (mvec) // 5 + : "cc", + "%xmm0", "%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + + +} +
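
Editor's note (not part of the patch series): all four microkernels above implement the same complex AXPY update and differ only in instruction mix (FMA4 on Bulldozer/Steamroller, FMA3 on Haswell, plain mul/add on Sandybridge) and unroll depth. For reference, here is a minimal scalar sketch of the semantics the kernels are expected to match; the helper name caxpy_ref is hypothetical and introduced here only for illustration:

/* Illustrative reference only, not OpenBLAS code.
 * x and y hold n single-precision complex values as interleaved
 * (real, imag) pairs; alpha points to { da_r, da_i }.
 * Default:            y[i] += alpha * x[i]
 * With CONJ defined:  y[i] += alpha * conj(x[i])
 */
static void caxpy_ref(long n, const float *x, float *y, const float *alpha)
{
    float da_r = alpha[0];
    float da_i = alpha[1];

    for (long i = 0; i < 2 * n; i += 2) {
#if !defined(CONJ)
        y[i]     += da_r * x[i]     - da_i * x[i + 1]; /* real part */
        y[i + 1] += da_r * x[i + 1] + da_i * x[i];     /* imag part */
#else
        y[i]     += da_r * x[i]     + da_i * x[i + 1]; /* real part */
        y[i + 1] += da_i * x[i]     - da_r * x[i + 1]; /* imag part */
#endif
    }
}

This also shows why the kernels premultiply one broadcast of alpha by the mvec sign vector before the loop: with xmm1 = { -da_i, da_i, ... } (or, under CONJ, xmm0 = { da_r, -da_r, ... }) and a vpermilps $0xb1 swap of each (real, imag) pair, both components of the update reduce to two FMAs per vector, replacing the vmulps/vpermilps/vaddsubps chain that patch 1 removes from the Bulldozer kernel.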