Merge pull request #540 from wernsaar/develop

Optimized dot- and axpy-kernels
wernsaar 2015-04-14 15:53:09 +02:00
commit fb02cb0a41
12 changed files with 922 additions and 4 deletions
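For reference, these kernels implement the BLAS level-1 operations axpy (y := alpha*x + y) and dot (dot := sum over i of x[i]*y[i]). A minimal scalar sketch of those semantics, for orientation only and not part of this commit:

/* Reference semantics only (editorial sketch, not shipped OpenBLAS code).
 * The assembly micro-kernels below compute exactly this, but 16 doubles
 * (or 32 floats) per loop iteration using FMA instructions. */
static void daxpy_ref(long n, double alpha, const double *x, double *y)
{
    for (long i = 0; i < n; i++)
        y[i] += alpha * x[i];              /* y += alpha * x */
}

static double ddot_ref(long n, const double *x, const double *y)
{
    double dot = 0.0;
    for (long i = 0; i < n; i++)
        dot += x[i] * y[i];                /* dot = sum x[i] * y[i] */
    return dot;
}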

View File

@@ -1,3 +1,4 @@
SAXPYKERNEL = saxpy.c
DAXPYKERNEL = daxpy.c
CAXPYKERNEL = caxpy.c
ZAXPYKERNEL = zaxpy.c
@@ -12,10 +13,10 @@ DGEMVNKERNEL = dgemv_n_bulldozer.S
DGEMVTKERNEL = dgemv_t_bulldozer.S
SDOTKERNEL = sdot.c
DDOTKERNEL = ddot.c
CDOTKERNEL = cdot.c
ZDOTKERNEL = zdot.c
DDOTKERNEL = ddot_bulldozer.S
DCOPYKERNEL = dcopy_bulldozer.S
SGEMMKERNEL = sgemm_kernel_16x2_piledriver.S

View File

@@ -1,3 +1,4 @@
SAXPYKERNEL = saxpy.c
DAXPYKERNEL = daxpy.c
CAXPYKERNEL = caxpy.c
ZAXPYKERNEL = zaxpy.c

View File

@@ -31,8 +31,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#if defined(NEHALEM)
#include "daxpy_microk_nehalem-2.c"
#elif defined(BULLDOZER) || defined(PILEDRIVER) || defined(STEAMROLLER)
#elif defined(BULLDOZER)
#include "daxpy_microk_bulldozer-2.c"
#elif defined(STEAMROLLER)
#include "daxpy_microk_steamroller-2.c"
#elif defined(PILEDRIVER)
#include "daxpy_microk_piledriver-2.c"
#elif defined(HASWELL)
#include "daxpy_microk_haswell-2.c"
#elif defined(SANDYBRIDGE)
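The #if chain above pairs with a HAVE_KERNEL_8 guard: each included micro-kernel header defines HAVE_KERNEL_8 (see the new files below), and when no CPU-specific header matches, the driver compiles a plain C kernel with the same signature instead. A hedged sketch of that fallback, assuming the usual OpenBLAS driver layout, not text from this diff:

/* Hedged sketch of the generic fallback in the driver (BLASLONG and FLOAT
 * come from common.h). Only built when no micro-kernel header above was
 * included, i.e. when HAVE_KERNEL_8 is still undefined. */
#ifndef HAVE_KERNEL_8
static void daxpy_kernel_8(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
{
    BLASLONG i;
    FLOAT a = *alpha;
    for (i = 0; i < n; i++)
        y[i] += a * x[i];   /* same y += alpha * x as the assembly kernels */
}
#endif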

View File

@@ -0,0 +1,160 @@
/***************************************************************************
Copyright (c) 2014, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#define HAVE_KERNEL_8 1
static void daxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y , FLOAT *alpha) __attribute__ ((noinline));
static void daxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
{
BLASLONG register i = 0;
if ( n < 640 )
{
__asm__ __volatile__
(
"vmovddup (%4), %%xmm0 \n\t" // alpha
".align 16 \n\t"
"1: \n\t"
"vmovups (%3,%0,8), %%xmm8 \n\t" // 2 y
"vmovups 16(%3,%0,8), %%xmm9 \n\t" // 2 y
"vmovups 32(%3,%0,8), %%xmm10 \n\t" // 2 y
"vmovups 48(%3,%0,8), %%xmm11 \n\t" // 2 y
"vmovups 64(%3,%0,8), %%xmm12 \n\t" // 2 y
"vmovups 80(%3,%0,8), %%xmm13 \n\t" // 2 y
"vmovups 96(%3,%0,8), %%xmm14 \n\t" // 2 y
"vmovups 112(%3,%0,8), %%xmm15 \n\t" // 2 y
"vfmadd231pd (%2,%0,8), %%xmm0 , %%xmm8 \n\t" // y += alpha * x
"vfmadd231pd 16(%2,%0,8), %%xmm0 , %%xmm9 \n\t" // y += alpha * x
"vfmadd231pd 32(%2,%0,8), %%xmm0 , %%xmm10 \n\t" // y += alpha * x
"vfmadd231pd 48(%2,%0,8), %%xmm0 , %%xmm11 \n\t" // y += alpha * x
"vfmadd231pd 64(%2,%0,8), %%xmm0 , %%xmm12 \n\t" // y += alpha * x
"vfmadd231pd 80(%2,%0,8), %%xmm0 , %%xmm13 \n\t" // y += alpha * x
"vfmadd231pd 96(%2,%0,8), %%xmm0 , %%xmm14 \n\t" // y += alpha * x
"vfmadd231pd 112(%2,%0,8), %%xmm0 , %%xmm15 \n\t" // y += alpha * x
"vmovups %%xmm8 , (%3,%0,8) \n\t"
"vmovups %%xmm9 , 16(%3,%0,8) \n\t"
"vmovups %%xmm10, 32(%3,%0,8) \n\t"
"vmovups %%xmm11, 48(%3,%0,8) \n\t"
"vmovups %%xmm12, 64(%3,%0,8) \n\t"
"vmovups %%xmm13, 80(%3,%0,8) \n\t"
"vmovups %%xmm14, 96(%3,%0,8) \n\t"
"vmovups %%xmm15,112(%3,%0,8) \n\t"
"addq $16, %0 \n\t"
"subq $16, %1 \n\t"
"jnz 1b \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
"r" (x), // 2
"r" (y), // 3
"r" (alpha) // 4
: "cc",
"%xmm0",
"%xmm8", "%xmm9", "%xmm10", "%xmm11",
"%xmm12", "%xmm13", "%xmm14", "%xmm15",
"memory"
);
return;
}
__asm__ __volatile__
(
"vmovddup (%4), %%xmm0 \n\t" // alpha
".align 16 \n\t"
"1: \n\t"
"prefetcht0 512(%3,%0,8) \n\t"
"vmovups (%3,%0,8), %%xmm8 \n\t" // 2 y
"vmovups 16(%3,%0,8), %%xmm9 \n\t" // 2 y
"vmovups 32(%3,%0,8), %%xmm10 \n\t" // 2 y
"vmovups 48(%3,%0,8), %%xmm11 \n\t" // 2 y
"prefetcht0 576(%3,%0,8) \n\t"
"vmovups 64(%3,%0,8), %%xmm12 \n\t" // 2 y
"vmovups 80(%3,%0,8), %%xmm13 \n\t" // 2 y
"vmovups 96(%3,%0,8), %%xmm14 \n\t" // 2 y
"vmovups 112(%3,%0,8), %%xmm15 \n\t" // 2 y
"prefetcht0 512(%2,%0,8) \n\t"
"vfmadd231pd (%2,%0,8), %%xmm0 , %%xmm8 \n\t" // y += alpha * x
"vfmadd231pd 16(%2,%0,8), %%xmm0 , %%xmm9 \n\t" // y += alpha * x
"vfmadd231pd 32(%2,%0,8), %%xmm0 , %%xmm10 \n\t" // y += alpha * x
"vfmadd231pd 48(%2,%0,8), %%xmm0 , %%xmm11 \n\t" // y += alpha * x
"prefetcht0 576(%2,%0,8) \n\t"
"vfmadd231pd 64(%2,%0,8), %%xmm0 , %%xmm12 \n\t" // y += alpha * x
"vfmadd231pd 80(%2,%0,8), %%xmm0 , %%xmm13 \n\t" // y += alpha * x
"vfmadd231pd 96(%2,%0,8), %%xmm0 , %%xmm14 \n\t" // y += alpha * x
"vfmadd231pd 112(%2,%0,8), %%xmm0 , %%xmm15 \n\t" // y += alpha * x
"vmovups %%xmm8 , (%3,%0,8) \n\t"
"vmovups %%xmm9 , 16(%3,%0,8) \n\t"
"vmovups %%xmm10, 32(%3,%0,8) \n\t"
"vmovups %%xmm11, 48(%3,%0,8) \n\t"
"vmovups %%xmm12, 64(%3,%0,8) \n\t"
"vmovups %%xmm13, 80(%3,%0,8) \n\t"
"vmovups %%xmm14, 96(%3,%0,8) \n\t"
"vmovups %%xmm15,112(%3,%0,8) \n\t"
"addq $16, %0 \n\t"
"subq $16, %1 \n\t"
"jnz 1b \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
"r" (x), // 2
"r" (y), // 3
"r" (alpha) // 4
: "cc",
"%xmm0",
"%xmm8", "%xmm9", "%xmm10", "%xmm11",
"%xmm12", "%xmm13", "%xmm14", "%xmm15",
"memory"
);
}
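Each loop iteration above consumes 16 doubles (the addq/subq by 16), so a caller must pass a non-zero n that is a multiple of 16 and handle any remainder itself. An illustrative caller, a hedged sketch only and not the shipped daxpy.c driver:

/* Hypothetical helper (not part of this commit): strip-mine n for
 * daxpy_kernel_8 and finish the tail with a scalar loop. BLASLONG and
 * FLOAT come from common.h (FLOAT is double here). */
static void daxpy_call_example(BLASLONG n, FLOAT alpha, FLOAT *x, FLOAT *y)
{
    BLASLONG n1 = n & ~(BLASLONG)15;       /* largest multiple of 16 <= n */
    BLASLONG i;
    if (n1 > 0)
        daxpy_kernel_8(n1, x, y, &alpha);  /* vectorized bulk */
    for (i = n1; i < n; i++)
        y[i] += alpha * x[i];              /* scalar tail */
}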

View File

@@ -0,0 +1,160 @@
/***************************************************************************
Copyright (c) 2014, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#define HAVE_KERNEL_8 1
static void daxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y , FLOAT *alpha) __attribute__ ((noinline));
static void daxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
{
BLASLONG register i = 0;
if ( n < 2048 )
{
__asm__ __volatile__
(
"vmovddup (%4), %%xmm0 \n\t" // alpha
".align 16 \n\t"
"1: \n\t"
"vmovups (%3,%0,8), %%xmm8 \n\t" // 2 y
"vmovups 16(%3,%0,8), %%xmm9 \n\t" // 2 y
"vmovups 32(%3,%0,8), %%xmm10 \n\t" // 2 y
"vmovups 48(%3,%0,8), %%xmm11 \n\t" // 2 y
"vmovups 64(%3,%0,8), %%xmm12 \n\t" // 2 y
"vmovups 80(%3,%0,8), %%xmm13 \n\t" // 2 y
"vmovups 96(%3,%0,8), %%xmm14 \n\t" // 2 y
"vmovups 112(%3,%0,8), %%xmm15 \n\t" // 2 y
"vfmadd231pd (%2,%0,8), %%xmm0 , %%xmm8 \n\t" // y += alpha * x
"vfmadd231pd 16(%2,%0,8), %%xmm0 , %%xmm9 \n\t" // y += alpha * x
"vfmadd231pd 32(%2,%0,8), %%xmm0 , %%xmm10 \n\t" // y += alpha * x
"vfmadd231pd 48(%2,%0,8), %%xmm0 , %%xmm11 \n\t" // y += alpha * x
"vfmadd231pd 64(%2,%0,8), %%xmm0 , %%xmm12 \n\t" // y += alpha * x
"vfmadd231pd 80(%2,%0,8), %%xmm0 , %%xmm13 \n\t" // y += alpha * x
"vfmadd231pd 96(%2,%0,8), %%xmm0 , %%xmm14 \n\t" // y += alpha * x
"vfmadd231pd 112(%2,%0,8), %%xmm0 , %%xmm15 \n\t" // y += alpha * x
"vmovups %%xmm8 , (%3,%0,8) \n\t"
"vmovups %%xmm9 , 16(%3,%0,8) \n\t"
"vmovups %%xmm10, 32(%3,%0,8) \n\t"
"vmovups %%xmm11, 48(%3,%0,8) \n\t"
"vmovups %%xmm12, 64(%3,%0,8) \n\t"
"vmovups %%xmm13, 80(%3,%0,8) \n\t"
"vmovups %%xmm14, 96(%3,%0,8) \n\t"
"vmovups %%xmm15,112(%3,%0,8) \n\t"
"addq $16, %0 \n\t"
"subq $16, %1 \n\t"
"jnz 1b \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
"r" (x), // 2
"r" (y), // 3
"r" (alpha) // 4
: "cc",
"%xmm0",
"%xmm8", "%xmm9", "%xmm10", "%xmm11",
"%xmm12", "%xmm13", "%xmm14", "%xmm15",
"memory"
);
return;
}
__asm__ __volatile__
(
"vmovddup (%4), %%xmm0 \n\t" // alpha
".align 16 \n\t"
"1: \n\t"
"prefetcht0 512(%3,%0,8) \n\t"
"vmovups (%3,%0,8), %%xmm8 \n\t" // 2 y
"vmovups 16(%3,%0,8), %%xmm9 \n\t" // 2 y
"vmovups 32(%3,%0,8), %%xmm10 \n\t" // 2 y
"vmovups 48(%3,%0,8), %%xmm11 \n\t" // 2 y
"prefetcht0 576(%3,%0,8) \n\t"
"vmovups 64(%3,%0,8), %%xmm12 \n\t" // 2 y
"vmovups 80(%3,%0,8), %%xmm13 \n\t" // 2 y
"vmovups 96(%3,%0,8), %%xmm14 \n\t" // 2 y
"vmovups 112(%3,%0,8), %%xmm15 \n\t" // 2 y
"prefetcht0 512(%2,%0,8) \n\t"
"vfmadd231pd (%2,%0,8), %%xmm0 , %%xmm8 \n\t" // y += alpha * x
"vfmadd231pd 16(%2,%0,8), %%xmm0 , %%xmm9 \n\t" // y += alpha * x
"vfmadd231pd 32(%2,%0,8), %%xmm0 , %%xmm10 \n\t" // y += alpha * x
"vfmadd231pd 48(%2,%0,8), %%xmm0 , %%xmm11 \n\t" // y += alpha * x
"prefetcht0 576(%2,%0,8) \n\t"
"vfmadd231pd 64(%2,%0,8), %%xmm0 , %%xmm12 \n\t" // y += alpha * x
"vfmadd231pd 80(%2,%0,8), %%xmm0 , %%xmm13 \n\t" // y += alpha * x
"vfmadd231pd 96(%2,%0,8), %%xmm0 , %%xmm14 \n\t" // y += alpha * x
"vfmadd231pd 112(%2,%0,8), %%xmm0 , %%xmm15 \n\t" // y += alpha * x
"vmovups %%xmm8 , (%3,%0,8) \n\t"
"vmovups %%xmm9 , 16(%3,%0,8) \n\t"
"vmovups %%xmm10, 32(%3,%0,8) \n\t"
"vmovups %%xmm11, 48(%3,%0,8) \n\t"
"vmovups %%xmm12, 64(%3,%0,8) \n\t"
"vmovups %%xmm13, 80(%3,%0,8) \n\t"
"vmovups %%xmm14, 96(%3,%0,8) \n\t"
"vmovups %%xmm15,112(%3,%0,8) \n\t"
"addq $16, %0 \n\t"
"subq $16, %1 \n\t"
"jnz 1b \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
"r" (x), // 2
"r" (y), // 3
"r" (alpha) // 4
: "cc",
"%xmm0",
"%xmm8", "%xmm9", "%xmm10", "%xmm11",
"%xmm12", "%xmm13", "%xmm14", "%xmm15",
"memory"
);
}

View File

@@ -29,8 +29,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"
#if defined(BULLDOZER) || defined(PILEDRIVER) || defined(STEAMROLLER)
#if defined(BULLDOZER)
#include "ddot_microk_bulldozer-2.c"
#elif defined(STEAMROLLER)
#include "ddot_microk_steamroller-2.c"
#elif defined(PILEDRIVER)
#include "ddot_microk_piledriver-2.c"
#elif defined(NEHALEM)
#include "ddot_microk_nehalem-2.c"
#elif defined(HASWELL)

View File

@@ -0,0 +1,165 @@
/***************************************************************************
Copyright (c) 2014, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#define HAVE_KERNEL_8 1
static void ddot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y , FLOAT *dot) __attribute__ ((noinline));
static void ddot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot)
{
BLASLONG register i = 0;
if ( n < 1408 )
{
__asm__ __volatile__
(
"vzeroupper \n\t"
"vxorpd %%xmm4, %%xmm4, %%xmm4 \n\t"
"vxorpd %%xmm5, %%xmm5, %%xmm5 \n\t"
"vxorpd %%xmm6, %%xmm6, %%xmm6 \n\t"
"vxorpd %%xmm7, %%xmm7, %%xmm7 \n\t"
".align 16 \n\t"
"1: \n\t"
"vmovups (%2,%0,8), %%xmm12 \n\t" // 2 * x
"vmovups 16(%2,%0,8), %%xmm13 \n\t" // 2 * x
"vmovups 32(%2,%0,8), %%xmm14 \n\t" // 2 * x
"vmovups 48(%2,%0,8), %%xmm15 \n\t" // 2 * x
"vfmadd231pd (%3,%0,8), %%xmm12, %%xmm4 \n\t" // 2 * y
"vmovups 64(%2,%0,8), %%xmm0 \n\t" // 2 * x
"vmovups 80(%2,%0,8), %%xmm1 \n\t" // 2 * x
"vfmadd231pd 16(%3,%0,8), %%xmm13, %%xmm5 \n\t" // 2 * y
"vmovups 96(%2,%0,8), %%xmm2 \n\t" // 2 * x
"vmovups 112(%2,%0,8), %%xmm3 \n\t" // 2 * x
"vfmadd231pd 32(%3,%0,8), %%xmm14, %%xmm6 \n\t" // 2 * y
"vfmadd231pd 48(%3,%0,8), %%xmm15, %%xmm7 \n\t" // 2 * y
"vfmadd231pd 64(%3,%0,8), %%xmm0 , %%xmm4 \n\t" // 2 * y
"vfmadd231pd 80(%3,%0,8), %%xmm1 , %%xmm5 \n\t" // 2 * y
"vfmadd231pd 96(%3,%0,8), %%xmm2 , %%xmm6 \n\t" // 2 * y
"vfmadd231pd 112(%3,%0,8), %%xmm3 , %%xmm7 \n\t" // 2 * y
"addq $16 , %0 \n\t"
"subq $16 , %1 \n\t"
"jnz 1b \n\t"
"vaddpd %%xmm4, %%xmm5, %%xmm4 \n\t"
"vaddpd %%xmm6, %%xmm7, %%xmm6 \n\t"
"vaddpd %%xmm4, %%xmm6, %%xmm4 \n\t"
"vhaddpd %%xmm4, %%xmm4, %%xmm4 \n\t"
"vmovsd %%xmm4, (%4) \n\t"
"vzeroupper \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
"r" (x), // 2
"r" (y), // 3
"r" (dot) // 4
: "cc",
"%xmm0", "%xmm1", "%xmm2", "%xmm3",
"%xmm4", "%xmm5", "%xmm6", "%xmm7",
"%xmm12", "%xmm13", "%xmm14", "%xmm15",
"memory"
);
return;
}
__asm__ __volatile__
(
"vzeroupper \n\t"
"vxorpd %%xmm4, %%xmm4, %%xmm4 \n\t"
"vxorpd %%xmm5, %%xmm5, %%xmm5 \n\t"
"vxorpd %%xmm6, %%xmm6, %%xmm6 \n\t"
"vxorpd %%xmm7, %%xmm7, %%xmm7 \n\t"
".align 16 \n\t"
"1: \n\t"
"prefetcht0 768(%2,%0,8) \n\t"
"prefetcht0 832(%2,%0,8) \n\t"
"vmovups (%2,%0,8), %%xmm12 \n\t" // 2 * x
"vmovups 16(%2,%0,8), %%xmm13 \n\t" // 2 * x
"vmovups 32(%2,%0,8), %%xmm14 \n\t" // 2 * x
"vmovups 48(%2,%0,8), %%xmm15 \n\t" // 2 * x
"prefetcht0 768(%3,%0,8) \n\t"
"prefetcht0 832(%3,%0,8) \n\t"
"vfmadd231pd (%3,%0,8), %%xmm12, %%xmm4 \n\t" // 2 * y
"vmovups 64(%2,%0,8), %%xmm0 \n\t" // 2 * x
"vmovups 80(%2,%0,8), %%xmm1 \n\t" // 2 * x
"vfmadd231pd 16(%3,%0,8), %%xmm13, %%xmm5 \n\t" // 2 * y
"vmovups 96(%2,%0,8), %%xmm2 \n\t" // 2 * x
"vmovups 112(%2,%0,8), %%xmm3 \n\t" // 2 * x
"vfmadd231pd 32(%3,%0,8), %%xmm14, %%xmm6 \n\t" // 2 * y
"vfmadd231pd 48(%3,%0,8), %%xmm15, %%xmm7 \n\t" // 2 * y
"vfmadd231pd 64(%3,%0,8), %%xmm0 , %%xmm4 \n\t" // 2 * y
"vfmadd231pd 80(%3,%0,8), %%xmm1 , %%xmm5 \n\t" // 2 * y
"vfmadd231pd 96(%3,%0,8), %%xmm2 , %%xmm6 \n\t" // 2 * y
"vfmadd231pd 112(%3,%0,8), %%xmm3 , %%xmm7 \n\t" // 2 * y
"addq $16 , %0 \n\t"
"subq $16 , %1 \n\t"
"jnz 1b \n\t"
"vaddpd %%xmm4, %%xmm5, %%xmm4 \n\t"
"vaddpd %%xmm6, %%xmm7, %%xmm6 \n\t"
"vaddpd %%xmm4, %%xmm6, %%xmm4 \n\t"
"vhaddpd %%xmm4, %%xmm4, %%xmm4 \n\t"
"vmovsd %%xmm4, (%4) \n\t"
"vzeroupper \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
"r" (x), // 2
"r" (y), // 3
"r" (dot) // 4
: "cc",
"%xmm0", "%xmm1", "%xmm2", "%xmm3",
"%xmm4", "%xmm5", "%xmm6", "%xmm7",
"%xmm12", "%xmm13", "%xmm14", "%xmm15",
"memory"
);
}
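The loop above keeps eight partial products in flight, accumulated into four registers (xmm4..xmm7, two doubles each), and only combines them after the loop with vaddpd plus one horizontal vhaddpd. A scalar illustration of that accumulation scheme, hedged and for exposition only:

/* Editorial illustration (not shipped code) of the multi-accumulator idea
 * behind ddot_kernel_8: independent partial sums avoid one long serial
 * FMA dependency chain; a final reduction yields the dot product. */
static double ddot_partial_sums_ref(long n, const double *x, const double *y)
{
    double s0 = 0.0, s1 = 0.0, s2 = 0.0, s3 = 0.0;
    for (long i = 0; i < n; i += 4) {      /* assumes n % 4 == 0 */
        s0 += x[i + 0] * y[i + 0];
        s1 += x[i + 1] * y[i + 1];
        s2 += x[i + 2] * y[i + 2];
        s3 += x[i + 3] * y[i + 3];
    }
    return (s0 + s1) + (s2 + s3);          /* mirrors the vaddpd/vhaddpd step */
}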

View File

@@ -0,0 +1,97 @@
/***************************************************************************
Copyright (c) 2014, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#define HAVE_KERNEL_8 1
static void ddot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y , FLOAT *dot) __attribute__ ((noinline));
static void ddot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot)
{
BLASLONG register i = 0;
__asm__ __volatile__
(
"vzeroupper \n\t"
"vxorpd %%xmm4, %%xmm4, %%xmm4 \n\t"
"vxorpd %%xmm5, %%xmm5, %%xmm5 \n\t"
"vxorpd %%xmm6, %%xmm6, %%xmm6 \n\t"
"vxorpd %%xmm7, %%xmm7, %%xmm7 \n\t"
".align 16 \n\t"
"1: \n\t"
"vmovups (%2,%0,8), %%xmm12 \n\t" // 2 * x
"vmovups 16(%2,%0,8), %%xmm13 \n\t" // 2 * x
"vmovups 32(%2,%0,8), %%xmm14 \n\t" // 2 * x
"vmovups 48(%2,%0,8), %%xmm15 \n\t" // 2 * x
"vfmadd231pd (%3,%0,8), %%xmm12, %%xmm4 \n\t" // 2 * y
"vmovups 64(%2,%0,8), %%xmm0 \n\t" // 2 * x
"vmovups 80(%2,%0,8), %%xmm1 \n\t" // 2 * x
"vfmadd231pd 16(%3,%0,8), %%xmm13, %%xmm5 \n\t" // 2 * y
"vmovups 96(%2,%0,8), %%xmm2 \n\t" // 2 * x
"vmovups 112(%2,%0,8), %%xmm3 \n\t" // 2 * x
"vfmadd231pd 32(%3,%0,8), %%xmm14, %%xmm6 \n\t" // 2 * y
"vfmadd231pd 48(%3,%0,8), %%xmm15, %%xmm7 \n\t" // 2 * y
"vfmadd231pd 64(%3,%0,8), %%xmm0 , %%xmm4 \n\t" // 2 * y
"vfmadd231pd 80(%3,%0,8), %%xmm1 , %%xmm5 \n\t" // 2 * y
"vfmadd231pd 96(%3,%0,8), %%xmm2 , %%xmm6 \n\t" // 2 * y
"vfmadd231pd 112(%3,%0,8), %%xmm3 , %%xmm7 \n\t" // 2 * y
"addq $16 , %0 \n\t"
"subq $16 , %1 \n\t"
"jnz 1b \n\t"
"vaddpd %%xmm4, %%xmm5, %%xmm4 \n\t"
"vaddpd %%xmm6, %%xmm7, %%xmm6 \n\t"
"vaddpd %%xmm4, %%xmm6, %%xmm4 \n\t"
"vhaddpd %%xmm4, %%xmm4, %%xmm4 \n\t"
"vmovsd %%xmm4, (%4) \n\t"
"vzeroupper \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
"r" (x), // 2
"r" (y), // 3
"r" (dot) // 4
: "cc",
"%xmm0", "%xmm1", "%xmm2", "%xmm3",
"%xmm4", "%xmm5", "%xmm6", "%xmm7",
"%xmm12", "%xmm13", "%xmm14", "%xmm15",
"memory"
);
}

View File

@@ -35,6 +35,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "saxpy_microk_haswell-2.c"
#elif defined(SANDYBRIDGE)
#include "saxpy_microk_sandy-2.c"
#elif defined(PILEDRIVER) || defined(STEAMROLLER)
#include "saxpy_microk_piledriver-2.c"
#endif

View File

@@ -0,0 +1,159 @@
/***************************************************************************
Copyright (c) 2014, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#define HAVE_KERNEL_16 1
static void saxpy_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y , FLOAT *alpha) __attribute__ ((noinline));
static void saxpy_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
{
BLASLONG register i = 0;
if ( n < 1024 )
{
__asm__ __volatile__
(
"vzeroupper \n\t"
"vbroadcastss (%4), %%xmm0 \n\t" // alpha
".align 16 \n\t"
"1: \n\t"
"vmovups (%3,%0,4), %%xmm8 \n\t" // 4 * y
"vmovups 16(%3,%0,4), %%xmm9 \n\t" // 4 * y
"vmovups 32(%3,%0,4), %%xmm10 \n\t" // 4 * y
"vmovups 48(%3,%0,4), %%xmm11 \n\t" // 4 * y
"vmovups 64(%3,%0,4), %%xmm12 \n\t" // 4 * y
"vmovups 80(%3,%0,4), %%xmm13 \n\t" // 4 * y
"vmovups 96(%3,%0,4), %%xmm14 \n\t" // 4 * y
"vmovups 112(%3,%0,4), %%xmm15 \n\t" // 4 * y
"vfmadd231ps (%2,%0,4), %%xmm0 , %%xmm8 \n\t" // y += alpha * x
"vfmadd231ps 16(%2,%0,4), %%xmm0 , %%xmm9 \n\t" // y += alpha * x
"vfmadd231ps 32(%2,%0,4), %%xmm0 , %%xmm10 \n\t" // y += alpha * x
"vfmadd231ps 48(%2,%0,4), %%xmm0 , %%xmm11 \n\t" // y += alpha * x
"vfmadd231ps 64(%2,%0,4), %%xmm0 , %%xmm12 \n\t" // y += alpha * x
"vfmadd231ps 80(%2,%0,4), %%xmm0 , %%xmm13 \n\t" // y += alpha * x
"vfmadd231ps 96(%2,%0,4), %%xmm0 , %%xmm14 \n\t" // y += alpha * x
"vfmadd231ps 112(%2,%0,4), %%xmm0 , %%xmm15 \n\t" // y += alpha * x
"vmovups %%xmm8 , (%3,%0,4) \n\t"
"vmovups %%xmm9 , 16(%3,%0,4) \n\t"
"vmovups %%xmm10, 32(%3,%0,4) \n\t"
"vmovups %%xmm11, 48(%3,%0,4) \n\t"
"vmovups %%xmm12, 64(%3,%0,4) \n\t"
"vmovups %%xmm13, 80(%3,%0,4) \n\t"
"vmovups %%xmm14, 96(%3,%0,4) \n\t"
"vmovups %%xmm15,112(%3,%0,4) \n\t"
"addq $32, %0 \n\t"
"subq $32, %1 \n\t"
"jnz 1b \n\t"
"vzeroupper \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
"r" (x), // 2
"r" (y), // 3
"r" (alpha) // 4
: "cc",
"%xmm0",
"%xmm8", "%xmm9", "%xmm10", "%xmm11",
"%xmm12", "%xmm13", "%xmm14", "%xmm15",
"memory"
);
return;
}
__asm__ __volatile__
(
"vzeroupper \n\t"
"vbroadcastss (%4), %%xmm0 \n\t" // alpha
".align 16 \n\t"
"1: \n\t"
"prefetcht0 512(%3,%0,4) \n\t"
"prefetcht0 576(%3,%0,4) \n\t"
"vmovups (%3,%0,4), %%xmm8 \n\t" // 4 * y
"vmovups 16(%3,%0,4), %%xmm9 \n\t" // 4 * y
"vmovups 32(%3,%0,4), %%xmm10 \n\t" // 4 * y
"vmovups 48(%3,%0,4), %%xmm11 \n\t" // 4 * y
"vmovups 64(%3,%0,4), %%xmm12 \n\t" // 4 * y
"vmovups 80(%3,%0,4), %%xmm13 \n\t" // 4 * y
"vmovups 96(%3,%0,4), %%xmm14 \n\t" // 4 * y
"vmovups 112(%3,%0,4), %%xmm15 \n\t" // 4 * y
"prefetcht0 512(%2,%0,4) \n\t"
"prefetcht0 576(%2,%0,4) \n\t"
"vfmadd231ps (%2,%0,4), %%xmm0 , %%xmm8 \n\t" // y += alpha * x
"vfmadd231ps 16(%2,%0,4), %%xmm0 , %%xmm9 \n\t" // y += alpha * x
"vfmadd231ps 32(%2,%0,4), %%xmm0 , %%xmm10 \n\t" // y += alpha * x
"vfmadd231ps 48(%2,%0,4), %%xmm0 , %%xmm11 \n\t" // y += alpha * x
"vfmadd231ps 64(%2,%0,4), %%xmm0 , %%xmm12 \n\t" // y += alpha * x
"vfmadd231ps 80(%2,%0,4), %%xmm0 , %%xmm13 \n\t" // y += alpha * x
"vfmadd231ps 96(%2,%0,4), %%xmm0 , %%xmm14 \n\t" // y += alpha * x
"vfmadd231ps 112(%2,%0,4), %%xmm0 , %%xmm15 \n\t" // y += alpha * x
"vmovups %%xmm8 , (%3,%0,4) \n\t"
"vmovups %%xmm9 , 16(%3,%0,4) \n\t"
"vmovups %%xmm10, 32(%3,%0,4) \n\t"
"vmovups %%xmm11, 48(%3,%0,4) \n\t"
"vmovups %%xmm12, 64(%3,%0,4) \n\t"
"vmovups %%xmm13, 80(%3,%0,4) \n\t"
"vmovups %%xmm14, 96(%3,%0,4) \n\t"
"vmovups %%xmm15,112(%3,%0,4) \n\t"
"addq $32, %0 \n\t"
"subq $32, %1 \n\t"
"jnz 1b \n\t"
"vzeroupper \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
"r" (x), // 2
"r" (y), // 3
"r" (alpha) // 4
: "cc",
"%xmm0",
"%xmm8", "%xmm9", "%xmm10", "%xmm11",
"%xmm12", "%xmm13", "%xmm14", "%xmm15",
"memory"
);
}

View File

@@ -28,8 +28,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"
#if defined(BULLDOZER) || defined(PILEDRIVER) || defined(STEAMROLLER)
#if defined(BULLDOZER)
#include "sdot_microk_bulldozer-2.c"
#elif defined(STEAMROLLER) || defined(PILEDRIVER)
#include "sdot_microk_steamroller-2.c"
#elif defined(NEHALEM)
#include "sdot_microk_nehalem-2.c"
#elif defined(HASWELL)

View File

@@ -0,0 +1,163 @@
/***************************************************************************
Copyright (c) 2014, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#define HAVE_KERNEL_16 1
static void sdot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y , FLOAT *dot) __attribute__ ((noinline));
static void sdot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot)
{
BLASLONG register i = 0;
if ( n < 4096 )
{
__asm__ __volatile__
(
"vxorps %%xmm4, %%xmm4, %%xmm4 \n\t"
"vxorps %%xmm5, %%xmm5, %%xmm5 \n\t"
"vxorps %%xmm6, %%xmm6, %%xmm6 \n\t"
"vxorps %%xmm7, %%xmm7, %%xmm7 \n\t"
".align 16 \n\t"
"1: \n\t"
"vmovups (%2,%0,4), %%xmm12 \n\t" // 4 * x
"vmovups 16(%2,%0,4), %%xmm13 \n\t" // 4 * x
"vmovups 32(%2,%0,4), %%xmm14 \n\t" // 4 * x
"vmovups 48(%2,%0,4), %%xmm15 \n\t" // 4 * x
"vfmadd231ps (%3,%0,4), %%xmm12, %%xmm4 \n\t" // 4 * y
"vfmadd231ps 16(%3,%0,4), %%xmm13, %%xmm5 \n\t" // 4 * y
"vmovups 64(%2,%0,4), %%xmm0 \n\t" // 4 * x
"vmovups 80(%2,%0,4), %%xmm1 \n\t" // 4 * x
"vfmadd231ps 32(%3,%0,4), %%xmm14, %%xmm6 \n\t" // 4 * y
"vfmadd231ps 48(%3,%0,4), %%xmm15, %%xmm7 \n\t" // 4 * y
"vmovups 96(%2,%0,4), %%xmm2 \n\t" // 4 * x
"vmovups 112(%2,%0,4), %%xmm3 \n\t" // 4 * x
"vfmadd231ps 64(%3,%0,4), %%xmm0 , %%xmm4 \n\t" // 4 * y
"vfmadd231ps 80(%3,%0,4), %%xmm1 , %%xmm5 \n\t" // 4 * y
"vfmadd231ps 96(%3,%0,4), %%xmm2 , %%xmm6 \n\t" // 4 * y
"vfmadd231ps 112(%3,%0,4), %%xmm3 , %%xmm7 \n\t" // 4 * y
"addq $32, %0 \n\t"
"subq $32, %1 \n\t"
"jnz 1b \n\t"
"vaddps %%xmm4, %%xmm5, %%xmm4 \n\t"
"vaddps %%xmm6, %%xmm7, %%xmm6 \n\t"
"vaddps %%xmm4, %%xmm6, %%xmm4 \n\t"
"vhaddps %%xmm4, %%xmm4, %%xmm4 \n\t"
"vhaddps %%xmm4, %%xmm4, %%xmm4 \n\t"
"vmovss %%xmm4, (%4) \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
"r" (x), // 2
"r" (y), // 3
"r" (dot) // 4
: "cc",
"%xmm0", "%xmm1", "%xmm2", "%xmm3",
"%xmm4", "%xmm5", "%xmm6", "%xmm7",
"%xmm12", "%xmm13", "%xmm14", "%xmm15",
"memory"
);
return;
}
__asm__ __volatile__
(
"vxorps %%xmm4, %%xmm4, %%xmm4 \n\t"
"vxorps %%xmm5, %%xmm5, %%xmm5 \n\t"
"vxorps %%xmm6, %%xmm6, %%xmm6 \n\t"
"vxorps %%xmm7, %%xmm7, %%xmm7 \n\t"
".align 16 \n\t"
"1: \n\t"
"prefetcht0 512(%2,%0,4) \n\t"
"vmovups (%2,%0,4), %%xmm12 \n\t" // 4 * x
"vmovups 16(%2,%0,4), %%xmm13 \n\t" // 4 * x
"vmovups 32(%2,%0,4), %%xmm14 \n\t" // 4 * x
"vmovups 48(%2,%0,4), %%xmm15 \n\t" // 4 * x
"prefetcht0 512(%3,%0,4) \n\t"
"vfmadd231ps (%3,%0,4), %%xmm12, %%xmm4 \n\t" // 4 * y
"vfmadd231ps 16(%3,%0,4), %%xmm13, %%xmm5 \n\t" // 4 * y
"prefetcht0 576(%2,%0,4) \n\t"
"vmovups 64(%2,%0,4), %%xmm0 \n\t" // 4 * x
"vmovups 80(%2,%0,4), %%xmm1 \n\t" // 4 * x
"prefetcht0 576(%3,%0,4) \n\t"
"vfmadd231ps 32(%3,%0,4), %%xmm14, %%xmm6 \n\t" // 4 * y
"vfmadd231ps 48(%3,%0,4), %%xmm15, %%xmm7 \n\t" // 4 * y
"vmovups 96(%2,%0,4), %%xmm2 \n\t" // 4 * x
"vmovups 112(%2,%0,4), %%xmm3 \n\t" // 4 * x
"vfmadd231ps 64(%3,%0,4), %%xmm0 , %%xmm4 \n\t" // 4 * y
"vfmadd231ps 80(%3,%0,4), %%xmm1 , %%xmm5 \n\t" // 4 * y
"vfmadd231ps 96(%3,%0,4), %%xmm2 , %%xmm6 \n\t" // 4 * y
"vfmadd231ps 112(%3,%0,4), %%xmm3 , %%xmm7 \n\t" // 4 * y
"addq $32, %0 \n\t"
"subq $32, %1 \n\t"
"jnz 1b \n\t"
"vaddps %%xmm4, %%xmm5, %%xmm4 \n\t"
"vaddps %%xmm6, %%xmm7, %%xmm6 \n\t"
"vaddps %%xmm4, %%xmm6, %%xmm4 \n\t"
"vhaddps %%xmm4, %%xmm4, %%xmm4 \n\t"
"vhaddps %%xmm4, %%xmm4, %%xmm4 \n\t"
"vmovss %%xmm4, (%4) \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
"r" (x), // 2
"r" (y), // 3
"r" (dot) // 4
: "cc",
"%xmm0", "%xmm1", "%xmm2", "%xmm3",
"%xmm4", "%xmm5", "%xmm6", "%xmm7",
"%xmm12", "%xmm13", "%xmm14", "%xmm15",
"memory"
);
}
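Since each xmm register holds four floats, the single-precision reduction above needs two vhaddps steps (4 lanes -> 2 -> 1), whereas the double-precision kernels get by with a single vhaddpd. A scalar illustration of that two-step horizontal add, hedged and for exposition only:

/* Editorial illustration of the final reduction in sdot_kernel_16: after
 * the loop, the four partial-sum lanes of one register are collapsed the
 * same way the two vhaddps instructions do. */
static float hadd4_ref(const float lane[4])
{
    float a = lane[0] + lane[1];   /* first vhaddps: 4 lanes -> 2 sums */
    float b = lane[2] + lane[3];
    return a + b;                  /* second vhaddps: 2 sums -> 1 result */
}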