From e216f686cb9c5fa3a6160af753cfa46e71ea5085 Mon Sep 17 00:00:00 2001
From: Werner Saar
Date: Tue, 28 Apr 2015 10:18:32 +0200
Subject: [PATCH 1/3] optimized saxpy and daxpy for sandybridge

---
 kernel/x86_64/daxpy.c                |  4 --
 kernel/x86_64/daxpy_microk_sandy-2.c | 87 ++++++++++++++++-----------
 kernel/x86_64/saxpy.c                |  4 --
 kernel/x86_64/saxpy_microk_sandy-2.c | 89 +++++++++++++++++-----------
 4 files changed, 107 insertions(+), 77 deletions(-)

diff --git a/kernel/x86_64/daxpy.c b/kernel/x86_64/daxpy.c
index 9207e209f..56d323cbe 100644
--- a/kernel/x86_64/daxpy.c
+++ b/kernel/x86_64/daxpy.c
@@ -79,11 +79,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLAS
 
 	if ( (inc_x == 1) && (inc_y == 1) )
 	{
-#if defined(SANDYBRIDGE)
-		BLASLONG n1 = n & -32;
-#else
 		BLASLONG n1 = n & -16;
-#endif
 
 		if ( n1 )
 			daxpy_kernel_8(n1, x, y , &da );
diff --git a/kernel/x86_64/daxpy_microk_sandy-2.c b/kernel/x86_64/daxpy_microk_sandy-2.c
index 963ad322d..522e084dc 100644
--- a/kernel/x86_64/daxpy_microk_sandy-2.c
+++ b/kernel/x86_64/daxpy_microk_sandy-2.c
@@ -37,48 +37,67 @@ static void daxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
 	__asm__ __volatile__
 	(
 	"vbroadcastsd (%4), %%ymm0 \n\t" // alpha
-
-	".align 16 \n\t"
-	"1: \n\t"
 
 	"vmovups (%3,%0,8), %%ymm8 \n\t"
 	"vmovups 32(%3,%0,8), %%ymm9 \n\t"
 	"vmovups 64(%3,%0,8), %%ymm10 \n\t"
 	"vmovups 96(%3,%0,8), %%ymm11 \n\t"
-	"vmovups 128(%3,%0,8), %%ymm12 \n\t"
-	"vmovups 160(%3,%0,8), %%ymm13 \n\t"
-	"vmovups 192(%3,%0,8), %%ymm14 \n\t"
-	"vmovups 224(%3,%0,8), %%ymm15 \n\t"
+	"vmovups (%2,%0,8), %%ymm4 \n\t"
+	"vmovups 32(%2,%0,8), %%ymm5 \n\t"
+	"vmovups 64(%2,%0,8), %%ymm6 \n\t"
+	"vmovups 96(%2,%0,8), %%ymm7 \n\t"
 
-	"vmulpd (%2,%0,8), %%ymm0, %%ymm1 \n\t"
-	"vmulpd 32(%2,%0,8), %%ymm0, %%ymm2 \n\t"
-	"vaddpd %%ymm8 , %%ymm1, %%ymm8 \n\t"
-	"vmulpd 64(%2,%0,8), %%ymm0, %%ymm3 \n\t"
-	"vaddpd %%ymm9 , %%ymm2, %%ymm9 \n\t"
-	"vmulpd 96(%2,%0,8), %%ymm0, %%ymm4 \n\t"
-	"vaddpd %%ymm10, %%ymm3, %%ymm10 \n\t"
-	"vmulpd 128(%2,%0,8), %%ymm0, %%ymm5 \n\t"
-	"vaddpd %%ymm11, %%ymm4, %%ymm11 \n\t"
-	"vmulpd 160(%2,%0,8), %%ymm0, %%ymm6 \n\t"
-	"vaddpd %%ymm12, %%ymm5, %%ymm12 \n\t"
-	"vmulpd 192(%2,%0,8), %%ymm0, %%ymm7 \n\t"
-	"vmulpd 224(%2,%0,8), %%ymm0, %%ymm1 \n\t"
+	"addq $16, %0 \n\t"
+	"subq $16, %1 \n\t"
+	"jz 2f \n\t"
 
-	"vaddpd %%ymm13, %%ymm6, %%ymm13 \n\t"
-	"vmovups %%ymm8 , (%3,%0,8) \n\t"
-	"vaddpd %%ymm14, %%ymm7, %%ymm14 \n\t"
-	"vmovups %%ymm9 , 32(%3,%0,8) \n\t"
-	"vaddpd %%ymm15, %%ymm1, %%ymm15 \n\t"
-	"vmovups %%ymm10, 64(%3,%0,8) \n\t"
-	"vmovups %%ymm11, 96(%3,%0,8) \n\t"
-	"vmovups %%ymm12,128(%3,%0,8) \n\t"
-	"vmovups %%ymm13,160(%3,%0,8) \n\t"
-	"vmovups %%ymm14,192(%3,%0,8) \n\t"
-	"vmovups %%ymm15,224(%3,%0,8) \n\t"
+	".align 16 \n\t"
+	"1: \n\t"
 
-	"addq $32, %0 \n\t"
-	"subq $32, %1 \n\t"
+	"vmulpd %%ymm4, %%ymm0, %%ymm4 \n\t"
+	"vaddpd %%ymm8 , %%ymm4, %%ymm12 \n\t"
+	"vmulpd %%ymm5, %%ymm0, %%ymm5 \n\t"
+	"vaddpd %%ymm9 , %%ymm5, %%ymm13 \n\t"
+	"vmulpd %%ymm6, %%ymm0, %%ymm6 \n\t"
+	"vaddpd %%ymm10, %%ymm6, %%ymm14 \n\t"
+	"vmulpd %%ymm7, %%ymm0, %%ymm7 \n\t"
+	"vaddpd %%ymm11, %%ymm7, %%ymm15 \n\t"
+
+	"vmovups (%3,%0,8), %%ymm8 \n\t"
+	"vmovups 32(%3,%0,8), %%ymm9 \n\t"
+	"vmovups 64(%3,%0,8), %%ymm10 \n\t"
+	"vmovups 96(%3,%0,8), %%ymm11 \n\t"
+
+	"vmovups (%2,%0,8), %%ymm4 \n\t"
+	"vmovups 32(%2,%0,8), %%ymm5 \n\t"
+	"vmovups 64(%2,%0,8), %%ymm6 \n\t"
+	"vmovups 96(%2,%0,8), %%ymm7 \n\t"
+
+	"vmovups %%ymm12, -128(%3,%0,8) \n\t"
+	"vmovups %%ymm13, -96(%3,%0,8) \n\t"
+	"vmovups %%ymm14, -64(%3,%0,8) \n\t"
+	"vmovups %%ymm15, -32(%3,%0,8) \n\t"
+
+	"addq $16, %0 \n\t"
+	"subq $16, %1 \n\t"
 	"jnz 1b \n\t"
-	"vzeroupper \n\t"
+
+	"2: \n\t"
+	"vmulpd %%ymm4, %%ymm0, %%ymm4 \n\t"
+	"vmulpd %%ymm5, %%ymm0, %%ymm5 \n\t"
+	"vmulpd %%ymm6, %%ymm0, %%ymm6 \n\t"
+	"vmulpd %%ymm7, %%ymm0, %%ymm7 \n\t"
+
+	"vaddpd %%ymm8 , %%ymm4, %%ymm12 \n\t"
+	"vaddpd %%ymm9 , %%ymm5, %%ymm13 \n\t"
+	"vaddpd %%ymm10, %%ymm6, %%ymm14 \n\t"
+	"vaddpd %%ymm11, %%ymm7, %%ymm15 \n\t"
+
+	"vmovups %%ymm12, -128(%3,%0,8) \n\t"
+	"vmovups %%ymm13, -96(%3,%0,8) \n\t"
+	"vmovups %%ymm14, -64(%3,%0,8) \n\t"
+	"vmovups %%ymm15, -32(%3,%0,8) \n\t"
+
+	"vzeroupper \n\t"
 
 	:
 	:
diff --git a/kernel/x86_64/saxpy.c b/kernel/x86_64/saxpy.c
index b37e24d9b..0b76c42f7 100644
--- a/kernel/x86_64/saxpy.c
+++ b/kernel/x86_64/saxpy.c
@@ -75,11 +75,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLAS
 
 	if ( (inc_x == 1) && (inc_y == 1) )
 	{
-#if defined(SANDYBRIDGE)
-		BLASLONG n1 = n & -64;
-#else
 		BLASLONG n1 = n & -32;
-#endif
 
 		if ( n1 )
 			saxpy_kernel_16(n1, x, y , &da );
diff --git a/kernel/x86_64/saxpy_microk_sandy-2.c b/kernel/x86_64/saxpy_microk_sandy-2.c
index 8a4392d37..159a23175 100644
--- a/kernel/x86_64/saxpy_microk_sandy-2.c
+++ b/kernel/x86_64/saxpy_microk_sandy-2.c
@@ -37,48 +37,67 @@ static void saxpy_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
 	__asm__ __volatile__
 	(
 	"vbroadcastss (%4), %%ymm0 \n\t" // alpha
-
-	".align 16 \n\t"
-	"1: \n\t"
 
 	"vmovups (%3,%0,4), %%ymm8 \n\t"
 	"vmovups 32(%3,%0,4), %%ymm9 \n\t"
 	"vmovups 64(%3,%0,4), %%ymm10 \n\t"
 	"vmovups 96(%3,%0,4), %%ymm11 \n\t"
-	"vmovups 128(%3,%0,4), %%ymm12 \n\t"
-	"vmovups 160(%3,%0,4), %%ymm13 \n\t"
-	"vmovups 192(%3,%0,4), %%ymm14 \n\t"
-	"vmovups 224(%3,%0,4), %%ymm15 \n\t"
+	"vmovups (%2,%0,4), %%ymm4 \n\t"
+	"vmovups 32(%2,%0,4), %%ymm5 \n\t"
+	"vmovups 64(%2,%0,4), %%ymm6 \n\t"
+	"vmovups 96(%2,%0,4), %%ymm7 \n\t"
 
-	"vmulps (%2,%0,4), %%ymm0, %%ymm1 \n\t"
-	"vmulps 32(%2,%0,4), %%ymm0, %%ymm2 \n\t"
-	"vaddps %%ymm8 , %%ymm1, %%ymm8 \n\t"
-	"vmulps 64(%2,%0,4), %%ymm0, %%ymm3 \n\t"
-	"vaddps %%ymm9 , %%ymm2, %%ymm9 \n\t"
-	"vmulps 96(%2,%0,4), %%ymm0, %%ymm4 \n\t"
-	"vaddps %%ymm10, %%ymm3, %%ymm10 \n\t"
-	"vmulps 128(%2,%0,4), %%ymm0, %%ymm5 \n\t"
-	"vaddps %%ymm11, %%ymm4, %%ymm11 \n\t"
-	"vmulps 160(%2,%0,4), %%ymm0, %%ymm6 \n\t"
-	"vaddps %%ymm12, %%ymm5, %%ymm12 \n\t"
-	"vmulps 192(%2,%0,4), %%ymm0, %%ymm7 \n\t"
-	"vmulps 224(%2,%0,4), %%ymm0, %%ymm1 \n\t"
+	"addq $32, %0 \n\t"
+	"subq $32, %1 \n\t"
+	"jz 2f \n\t"
 
-	"vaddps %%ymm13, %%ymm6, %%ymm13 \n\t"
-	"vmovups %%ymm8 , (%3,%0,4) \n\t"
-	"vaddps %%ymm14, %%ymm7, %%ymm14 \n\t"
-	"vmovups %%ymm9 , 32(%3,%0,4) \n\t"
-	"vaddps %%ymm15, %%ymm1, %%ymm15 \n\t"
-	"vmovups %%ymm10, 64(%3,%0,4) \n\t"
-	"vmovups %%ymm11, 96(%3,%0,4) \n\t"
-	"vmovups %%ymm12,128(%3,%0,4) \n\t"
-	"vmovups %%ymm13,160(%3,%0,4) \n\t"
-	"vmovups %%ymm14,192(%3,%0,4) \n\t"
-	"vmovups %%ymm15,224(%3,%0,4) \n\t"
+	".align 16 \n\t"
+	"1: \n\t"
 
-	"addq $64, %0 \n\t"
-	"subq $64, %1 \n\t"
+	"vmulps %%ymm4, %%ymm0, %%ymm4 \n\t"
+	"vaddps %%ymm8 , %%ymm4, %%ymm12 \n\t"
+	"vmulps %%ymm5, %%ymm0, %%ymm5 \n\t"
+	"vaddps %%ymm9 , %%ymm5, %%ymm13 \n\t"
+	"vmulps %%ymm6, %%ymm0, %%ymm6 \n\t"
+	"vaddps %%ymm10, %%ymm6, %%ymm14 \n\t"
+	"vmulps %%ymm7, %%ymm0, %%ymm7 \n\t"
+	"vaddps %%ymm11, %%ymm7, %%ymm15 \n\t"
+
+	"vmovups (%3,%0,4), %%ymm8 \n\t"
+	"vmovups 32(%3,%0,4), %%ymm9 \n\t"
+	"vmovups 64(%3,%0,4), %%ymm10 \n\t"
+	"vmovups 96(%3,%0,4), %%ymm11 \n\t"
+
+	"vmovups (%2,%0,4), %%ymm4 \n\t"
+	"vmovups 32(%2,%0,4), %%ymm5 \n\t"
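For reference: the restructured kernels still compute a plain AXPY (y := alpha*x + y); the patch only changes the instruction schedule. The unroll drops from 32 to 16 doubles (64 to 32 floats) per pass, the loads for the next block are issued before the current block's results are stored (the index register is advanced first, which is why the stores use negative offsets), and the final block is peeled out to label "2:" so the loop never reads past the end of x or y. The trailing vzeroupper avoids the AVX-to-SSE transition penalty once control returns to non-VEX code. A minimal C sketch of the computation, with daxpy_ref as an illustrative name (not part of the patch):

    /* Plain-C equivalent of what daxpy_kernel_8 computes (sketch only).
     * n must be a multiple of 16, matching the "n & -16" masking in
     * daxpy.c; the caller handles the remaining elements with scalar
     * code. */
    static void daxpy_ref(long n, const double *x, double *y, double alpha)
    {
        long i, j;
        for (i = 0; i < n; i += 16)    /* one asm iteration: 4 ymm registers x 4 doubles */
            for (j = 0; j < 16; j++)
                y[i + j] += alpha * x[i + j];
    }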
+	"vmovups 64(%2,%0,4), %%ymm6 \n\t"
+	"vmovups 96(%2,%0,4), %%ymm7 \n\t"
+
+	"vmovups %%ymm12, -128(%3,%0,4) \n\t"
+	"vmovups %%ymm13, -96(%3,%0,4) \n\t"
+	"vmovups %%ymm14, -64(%3,%0,4) \n\t"
+	"vmovups %%ymm15, -32(%3,%0,4) \n\t"
+
+	"addq $32, %0 \n\t"
+	"subq $32, %1 \n\t"
 	"jnz 1b \n\t"
-	"vzeroupper \n\t"
+
+	"2: \n\t"
+	"vmulps %%ymm4, %%ymm0, %%ymm4 \n\t"
+	"vmulps %%ymm5, %%ymm0, %%ymm5 \n\t"
+	"vmulps %%ymm6, %%ymm0, %%ymm6 \n\t"
+	"vmulps %%ymm7, %%ymm0, %%ymm7 \n\t"
+
+	"vaddps %%ymm8 , %%ymm4, %%ymm12 \n\t"
+	"vaddps %%ymm9 , %%ymm5, %%ymm13 \n\t"
+	"vaddps %%ymm10, %%ymm6, %%ymm14 \n\t"
+	"vaddps %%ymm11, %%ymm7, %%ymm15 \n\t"
+
+	"vmovups %%ymm12, -128(%3,%0,4) \n\t"
+	"vmovups %%ymm13, -96(%3,%0,4) \n\t"
+	"vmovups %%ymm14, -64(%3,%0,4) \n\t"
+	"vmovups %%ymm15, -32(%3,%0,4) \n\t"
+
+	"vzeroupper \n\t"
 
 	:
 	:
@@ -90,7 +109,7 @@ static void saxpy_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
 	: "cc",
 	  "%xmm0", "%xmm1", "%xmm2", "%xmm3",
 	  "%xmm4", "%xmm5", "%xmm6", "%xmm7",
-	  "%xmm8", "%xmm9", "%xmm10", "%xmm11",
+	  "%xmm8", "%xmm9", "%xmm10", "%xmm11",
 	  "%xmm12", "%xmm13", "%xmm14", "%xmm15",
 	  "memory"
 	);

From b2e1797dc6397f4096856f1691b769815d60a907 Mon Sep 17 00:00:00 2001
From: Werner Saar
Date: Tue, 28 Apr 2015 15:33:38 +0200
Subject: [PATCH 2/3] added optimized sger kernel for sandybridge

---
 kernel/x86_64/KERNEL.SANDYBRIDGE    |   2 +
 kernel/x86_64/sger.c                |  84 +++++++++++++++++++
 kernel/x86_64/sger_microk_sandy-2.c | 124 ++++++++++++++++++++++++++++
 3 files changed, 210 insertions(+)
 create mode 100644 kernel/x86_64/sger.c
 create mode 100644 kernel/x86_64/sger_microk_sandy-2.c

diff --git a/kernel/x86_64/KERNEL.SANDYBRIDGE b/kernel/x86_64/KERNEL.SANDYBRIDGE
index 055072cfd..06bb7f317 100644
--- a/kernel/x86_64/KERNEL.SANDYBRIDGE
+++ b/kernel/x86_64/KERNEL.SANDYBRIDGE
@@ -1,3 +1,5 @@
+SGERKERNEL = sger.c
+
 SGEMVNKERNEL = sgemv_n_4.c
 SGEMVTKERNEL = sgemv_t_4.c
 
diff --git a/kernel/x86_64/sger.c b/kernel/x86_64/sger.c
new file mode 100644
index 000000000..84c056c0d
--- /dev/null
+++ b/kernel/x86_64/sger.c
@@ -0,0 +1,84 @@
+/*********************************************************************/
+/* Copyright 2009, 2010 The University of Texas at Austin.           */
+/* All rights reserved.                                              */
+/*                                                                   */
+/* Redistribution and use in source and binary forms, with or        */
+/* without modification, are permitted provided that the following   */
+/* conditions are met:                                               */
+/*                                                                   */
+/*   1. Redistributions of source code must retain the above         */
+/*      copyright notice, this list of conditions and the following  */
+/*      disclaimer.                                                  */
+/*                                                                   */
+/*   2. Redistributions in binary form must reproduce the above      */
+/*      copyright notice, this list of conditions and the following  */
+/*      disclaimer in the documentation and/or other materials       */
+/*      provided with the distribution.                              */
+/*                                                                   */
+/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT           */
+/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,           */
+/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF          */
+/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE          */
+/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT          */
+/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,        */
+/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES          */
+/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE         */
+/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR              */
+/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF        */
+/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT         */
+/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT        */
+/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE               */
+/* POSSIBILITY OF SUCH DAMAGE.                                       */
+/*                                                                   */
+/* The views and conclusions contained in the software and           */
+/* documentation are those of the authors and should not be          */
+/* interpreted as representing official policies, either expressed   */
+/* or implied, of The University of Texas at Austin.                 */
+/*********************************************************************/
+
+#include <stdio.h>
+#include "common.h"
+
+#if defined(SANDYBRIDGE)
+#include "sger_microk_sandy-2.c"
+#endif
+
+int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha,
+	FLOAT *x, BLASLONG incx,
+	FLOAT *y, BLASLONG incy,
+	FLOAT *a, BLASLONG lda, FLOAT *buffer){
+
+	FLOAT *X = x;
+
+	if (incx != 1) {
+		X = buffer;
+		COPY_K(m, x, incx, X, 1);
+	}
+
+	BLASLONG m1 = m & -16;
+
+	while (n > 0)
+	{
+		FLOAT y0 = alpha * *y;
+		if ( m1 > 0 )
+		{
+			#ifdef HAVE_KERNEL_16
+			sger_kernel_16(m1, X, a, &y0);
+			#else
+			AXPYU_K(m1, 0, 0, y0, X, 1, a, 1, NULL, 0);
+			#endif
+		}
+
+		if ( m > m1 )
+		{
+			AXPYU_K(m-m1, 0, 0, y0, X+m1 , 1, a+m1, 1, NULL, 0);
+		}
+
+		a += lda;
+		y += incy;
+		n --;
+	}
+
+	return 0;
+}
+
diff --git a/kernel/x86_64/sger_microk_sandy-2.c b/kernel/x86_64/sger_microk_sandy-2.c
new file mode 100644
index 000000000..51c3bef3e
--- /dev/null
+++ b/kernel/x86_64/sger_microk_sandy-2.c
@@ -0,0 +1,124 @@
+/***************************************************************************
+Copyright (c) 2014, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#define HAVE_KERNEL_16 1
+static void sger_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y , FLOAT *alpha) __attribute__ ((noinline));
+
+static void sger_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
+{
+
+	BLASLONG register i = 0;
+
+	__asm__ __volatile__
+	(
+	"vbroadcastss (%4), %%xmm0 \n\t" // alpha
+
+	"prefetcht0 256(%3,%0,4) \n\t"
+	"vmovups (%3,%0,4), %%xmm8 \n\t"
+	"vmovups 16(%3,%0,4), %%xmm9 \n\t"
+	"vmovups 32(%3,%0,4), %%xmm10 \n\t"
+	"vmovups 48(%3,%0,4), %%xmm11 \n\t"
+
+	"prefetcht0 256(%2,%0,4) \n\t"
+	"vmovups (%2,%0,4), %%xmm4 \n\t"
+	"vmovups 16(%2,%0,4), %%xmm5 \n\t"
+	"vmovups 32(%2,%0,4), %%xmm6 \n\t"
+	"vmovups 48(%2,%0,4), %%xmm7 \n\t"
+
+	"addq $16, %0 \n\t"
+	"subq $16, %1 \n\t"
+	"jz 2f \n\t"
+
+	".align 16 \n\t"
+	"1: \n\t"
+
+	"vmulps %%xmm4, %%xmm0, %%xmm4 \n\t"
+	"vaddps %%xmm8 , %%xmm4, %%xmm12 \n\t"
+	"vmulps %%xmm5, %%xmm0, %%xmm5 \n\t"
+	"vaddps %%xmm9 , %%xmm5, %%xmm13 \n\t"
+	"vmulps %%xmm6, %%xmm0, %%xmm6 \n\t"
+	"vaddps %%xmm10, %%xmm6, %%xmm14 \n\t"
+	"vmulps %%xmm7, %%xmm0, %%xmm7 \n\t"
+	"vaddps %%xmm11, %%xmm7, %%xmm15 \n\t"
+
+	"prefetcht0 256(%3,%0,4) \n\t"
+	"vmovups (%3,%0,4), %%xmm8 \n\t"
+	"vmovups 16(%3,%0,4), %%xmm9 \n\t"
+	"vmovups 32(%3,%0,4), %%xmm10 \n\t"
+	"vmovups 48(%3,%0,4), %%xmm11 \n\t"
+
+	"prefetcht0 256(%2,%0,4) \n\t"
+	"vmovups (%2,%0,4), %%xmm4 \n\t"
+	"vmovups 16(%2,%0,4), %%xmm5 \n\t"
+	"vmovups 32(%2,%0,4), %%xmm6 \n\t"
+	"vmovups 48(%2,%0,4), %%xmm7 \n\t"
+
+	"vmovups %%xmm12, -64(%3,%0,4) \n\t"
+	"vmovups %%xmm13, -48(%3,%0,4) \n\t"
+	"vmovups %%xmm14, -32(%3,%0,4) \n\t"
+	"vmovups %%xmm15, -16(%3,%0,4) \n\t"
+
+	"addq $16, %0 \n\t"
+	"subq $16, %1 \n\t"
+	"jnz 1b \n\t"
+
+	"2: \n\t"
+	"vmulps %%xmm4, %%xmm0, %%xmm4 \n\t"
+	"vmulps %%xmm5, %%xmm0, %%xmm5 \n\t"
+	"vmulps %%xmm6, %%xmm0, %%xmm6 \n\t"
+	"vmulps %%xmm7, %%xmm0, %%xmm7 \n\t"
+
+	"vaddps %%xmm8 , %%xmm4, %%xmm12 \n\t"
+	"vaddps %%xmm9 , %%xmm5, %%xmm13 \n\t"
+	"vaddps %%xmm10, %%xmm6, %%xmm14 \n\t"
+	"vaddps %%xmm11, %%xmm7, %%xmm15 \n\t"
+
+	"vmovups %%xmm12, -64(%3,%0,4) \n\t"
+	"vmovups %%xmm13, -48(%3,%0,4) \n\t"
+	"vmovups %%xmm14, -32(%3,%0,4) \n\t"
+	"vmovups %%xmm15, -16(%3,%0,4) \n\t"
+
+	"vzeroupper \n\t"
+
+	:
+	:
+	  "r" (i),	// 0
+	  "r" (n),	// 1
+	  "r" (x),	// 2
+	  "r" (y),	// 3
+	  "r" (alpha)	// 4
+	: "cc",
+	  "%xmm0", "%xmm1", "%xmm2", "%xmm3",
+	  "%xmm4", "%xmm5", "%xmm6", "%xmm7",
+	  "%xmm8", "%xmm9", "%xmm10", "%xmm11",
+	  "%xmm12", "%xmm13", "%xmm14", "%xmm15",
+	  "memory"
+	);
+
+}
+
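For reference: sger.c reduces the rank-1 update A := alpha*x*y**T + A to one AXPY per column of the column-major matrix A. For column j it folds the scalar alpha*y[j] once, hands the first m & -16 elements to sger_kernel_16, and lets AXPYU_K finish the tail. A plain-C sketch of that decomposition, with sger_ref as an illustrative name (not part of the patch):

    /* Column-wise GER: a is column-major with leading dimension lda >= m. */
    static void sger_ref(long m, long n, float alpha,
                         const float *x, const float *y, float *a, long lda)
    {
        long i, j;
        for (j = 0; j < n; j++) {
            float y0 = alpha * y[j];     /* folded once per column, as in sger.c */
            for (i = 0; i < m; i++)      /* this inner loop is the per-column AXPY */
                a[i + j * lda] += y0 * x[i];
        }
    }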
From 5e83d8072565975105e0546f57cb20d81db0aa5a Mon Sep 17 00:00:00 2001
From: Werner Saar
Date: Tue, 28 Apr 2015 16:58:11 +0200
Subject: [PATCH 3/3] optimized dger kernel for sandybridge

---
 kernel/x86_64/KERNEL.SANDYBRIDGE    |   1 +
 kernel/x86_64/dger.c                |  84 +++++++++++++++++++
 kernel/x86_64/dger_microk_sandy-2.c | 124 ++++++++++++++++++++++++++++
 3 files changed, 209 insertions(+)
 create mode 100644 kernel/x86_64/dger.c
 create mode 100644 kernel/x86_64/dger_microk_sandy-2.c

diff --git a/kernel/x86_64/KERNEL.SANDYBRIDGE b/kernel/x86_64/KERNEL.SANDYBRIDGE
index 06bb7f317..129d7e5c4 100644
--- a/kernel/x86_64/KERNEL.SANDYBRIDGE
+++ b/kernel/x86_64/KERNEL.SANDYBRIDGE
@@ -1,4 +1,5 @@
 SGERKERNEL = sger.c
+DGERKERNEL = dger.c
 
 SGEMVNKERNEL = sgemv_n_4.c
 SGEMVTKERNEL = sgemv_t_4.c
diff --git a/kernel/x86_64/dger.c b/kernel/x86_64/dger.c
new file mode 100644
index 000000000..157a8ea7f
--- /dev/null
+++ b/kernel/x86_64/dger.c
@@ -0,0 +1,84 @@
+/*********************************************************************/
+/* Copyright 2009, 2010 The University of Texas at Austin.           */
+/* All rights reserved.                                              */
+/*                                                                   */
+/* Redistribution and use in source and binary forms, with or        */
+/* without modification, are permitted provided that the following   */
+/* conditions are met:                                               */
+/*                                                                   */
+/*   1. Redistributions of source code must retain the above         */
+/*      copyright notice, this list of conditions and the following  */
+/*      disclaimer.                                                  */
+/*                                                                   */
+/*   2. Redistributions in binary form must reproduce the above      */
+/*      copyright notice, this list of conditions and the following  */
+/*      disclaimer in the documentation and/or other materials       */
+/*      provided with the distribution.                              */
+/*                                                                   */
+/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT           */
+/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,           */
+/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF          */
+/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE          */
+/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT          */
+/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,        */
+/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES          */
+/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE         */
+/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR              */
+/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF        */
+/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT         */
+/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT        */
+/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE               */
+/* POSSIBILITY OF SUCH DAMAGE.                                       */
+/*                                                                   */
+/* The views and conclusions contained in the software and           */
+/* documentation are those of the authors and should not be          */
+/* interpreted as representing official policies, either expressed   */
+/* or implied, of The University of Texas at Austin.                 */
+/*********************************************************************/
+
+#include <stdio.h>
+#include "common.h"
+
+#if defined(SANDYBRIDGE)
+#include "dger_microk_sandy-2.c"
+#endif
+
+int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha,
+	FLOAT *x, BLASLONG incx,
+	FLOAT *y, BLASLONG incy,
+	FLOAT *a, BLASLONG lda, FLOAT *buffer){
+
+	FLOAT *X = x;
+
+	if (incx != 1) {
+		X = buffer;
+		COPY_K(m, x, incx, X, 1);
+	}
+
+	BLASLONG m1 = m & -16;
+
+	while (n > 0)
+	{
+		FLOAT y0 = alpha * *y;
+		if ( m1 > 0 )
+		{
+			#ifdef HAVE_KERNEL_16
+			dger_kernel_16(m1, X, a, &y0);
+			#else
+			AXPYU_K(m1, 0, 0, y0, X, 1, a, 1, NULL, 0);
+			#endif
+		}
+
+		if ( m > m1 )
+		{
+			AXPYU_K(m-m1, 0, 0, y0, X+m1 , 1, a+m1, 1, NULL, 0);
+		}
+
+		a += lda;
+		y += incy;
+		n --;
+	}
+
+	return 0;
+}
+
diff --git a/kernel/x86_64/dger_microk_sandy-2.c b/kernel/x86_64/dger_microk_sandy-2.c
new file mode 100644
index 000000000..564f1356d
--- /dev/null
+++ b/kernel/x86_64/dger_microk_sandy-2.c
@@ -0,0 +1,124 @@
+/***************************************************************************
+Copyright (c) 2014, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#define HAVE_KERNEL_16 1
+static void dger_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y , FLOAT *alpha) __attribute__ ((noinline));
+
+static void dger_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
+{
+
+	BLASLONG register i = 0;
+
+	__asm__ __volatile__
+	(
+	"vmovddup (%4), %%xmm0 \n\t" // alpha
+
+	"prefetcht0 256(%3,%0,8) \n\t"
+	"vmovups (%3,%0,8), %%xmm8 \n\t"
+	"vmovups 16(%3,%0,8), %%xmm9 \n\t"
+	"vmovups 32(%3,%0,8), %%xmm10 \n\t"
+	"vmovups 48(%3,%0,8), %%xmm11 \n\t"
+
+	"prefetcht0 256(%2,%0,8) \n\t"
+	"vmovups (%2,%0,8), %%xmm4 \n\t"
+	"vmovups 16(%2,%0,8), %%xmm5 \n\t"
+	"vmovups 32(%2,%0,8), %%xmm6 \n\t"
+	"vmovups 48(%2,%0,8), %%xmm7 \n\t"
+
+	"addq $8, %0 \n\t"
+	"subq $8, %1 \n\t"
+	"jz 2f \n\t"
+
+	".align 8 \n\t"
+	"1: \n\t"
+
+	"vmulpd %%xmm4, %%xmm0, %%xmm4 \n\t"
+	"vaddpd %%xmm8 , %%xmm4, %%xmm12 \n\t"
+	"vmulpd %%xmm5, %%xmm0, %%xmm5 \n\t"
+	"vaddpd %%xmm9 , %%xmm5, %%xmm13 \n\t"
+	"vmulpd %%xmm6, %%xmm0, %%xmm6 \n\t"
+	"vaddpd %%xmm10, %%xmm6, %%xmm14 \n\t"
+	"vmulpd %%xmm7, %%xmm0, %%xmm7 \n\t"
+	"vaddpd %%xmm11, %%xmm7, %%xmm15 \n\t"
+
+	"prefetcht0 256(%3,%0,8) \n\t"
+	"vmovups (%3,%0,8), %%xmm8 \n\t"
+	"vmovups 16(%3,%0,8), %%xmm9 \n\t"
+	"vmovups 32(%3,%0,8), %%xmm10 \n\t"
+	"vmovups 48(%3,%0,8), %%xmm11 \n\t"
+
+	"prefetcht0 256(%2,%0,8) \n\t"
+	"vmovups (%2,%0,8), %%xmm4 \n\t"
+	"vmovups 16(%2,%0,8), %%xmm5 \n\t"
+	"vmovups 32(%2,%0,8), %%xmm6 \n\t"
+	"vmovups 48(%2,%0,8), %%xmm7 \n\t"
+
+	"vmovups %%xmm12, -64(%3,%0,8) \n\t"
+	"vmovups %%xmm13, -48(%3,%0,8) \n\t"
+	"vmovups %%xmm14, -32(%3,%0,8) \n\t"
+	"vmovups %%xmm15, -16(%3,%0,8) \n\t"
+
+	"addq $8, %0 \n\t"
+	"subq $8, %1 \n\t"
+	"jnz 1b \n\t"
+
+	"2: \n\t"
+	"vmulpd %%xmm4, %%xmm0, %%xmm4 \n\t"
+	"vmulpd %%xmm5, %%xmm0, %%xmm5 \n\t"
+	"vmulpd %%xmm6, %%xmm0, %%xmm6 \n\t"
+	"vmulpd %%xmm7, %%xmm0, %%xmm7 \n\t"
+
+	"vaddpd %%xmm8 , %%xmm4, %%xmm12 \n\t"
+	"vaddpd %%xmm9 , %%xmm5, %%xmm13 \n\t"
+	"vaddpd %%xmm10, %%xmm6, %%xmm14 \n\t"
+	"vaddpd %%xmm11, %%xmm7, %%xmm15 \n\t"
+
+	"vmovups %%xmm12, -64(%3,%0,8) \n\t"
+	"vmovups %%xmm13, -48(%3,%0,8) \n\t"
+	"vmovups %%xmm14, -32(%3,%0,8) \n\t"
+	"vmovups %%xmm15, -16(%3,%0,8) \n\t"
+
+	"vzeroupper \n\t"
+
+	:
+	:
+	  "r" (i),	// 0
+	  "r" (n),	// 1
+	  "r" (x),	// 2
+	  "r" (y),	// 3
+	  "r" (alpha)	// 4
+	: "cc",
+	  "%xmm0", "%xmm1", "%xmm2", "%xmm3",
+	  "%xmm4", "%xmm5", "%xmm6", "%xmm7",
+	  "%xmm8", "%xmm9", "%xmm10", "%xmm11",
+	  "%xmm12", "%xmm13", "%xmm14", "%xmm15",
+	  "memory"
+	);
+
+}
+
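All three patches use the same vector/tail split: AND-ing the length with a negative power of two rounds it down to a multiple of the unroll width, and the remainder goes through the generic AXPYU_K (or scalar) path. A small worked example of the masking arithmetic (illustration only, not code from the patches):

    /* -16 is ...11110000 in two's complement, so the AND clears the low
     * four bits and rounds m down to a multiple of 16. */
    long m  = 37;
    long m1 = m & -16;    /* 32 -> handled by the 16-wide asm kernel */
    long mt = m - m1;     /*  5 -> handled by the AXPYU_K tail path  */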