From 726ad085cb00af5c13a7742a4b73df598953bf80 Mon Sep 17 00:00:00 2001
From: wernsaar
Date: Mon, 11 Aug 2014 13:10:12 +0200
Subject: [PATCH] added optimized zgemv_t for haswell

---
 kernel/x86_64/dgemv_n_microk_haswell-2.c   |   4 +-
 kernel/x86_64/dgemv_t_microk_haswell-2.c   |   4 +-
 kernel/x86_64/sgemv_n_microk_bulldozer-2.c |   4 +-
 kernel/x86_64/sgemv_n_microk_haswell-2.c   |   4 +-
 kernel/x86_64/sgemv_n_microk_nehalem-2.c   |   4 +-
 kernel/x86_64/sgemv_n_microk_sandy-2.c     |   4 +-
 kernel/x86_64/sgemv_t_microk_bulldozer-2.c |   4 +-
 kernel/x86_64/sgemv_t_microk_haswell-2.c   |   4 +-
 kernel/x86_64/sgemv_t_microk_nehalem-2.c   |   4 +-
 kernel/x86_64/sgemv_t_microk_sandy-2.c     |   4 +-
 kernel/x86_64/zgemv_n_microk_haswell-2.c   |   4 +-
 kernel/x86_64/zgemv_t_microk_bulldozer-2.c | 139 +++++++++++++++++++++
 kernel/x86_64/zgemv_t_microk_haswell-2.c   | 139 +++++++++++++++++++++
 13 files changed, 300 insertions(+), 22 deletions(-)
 create mode 100644 kernel/x86_64/zgemv_t_microk_bulldozer-2.c
 create mode 100644 kernel/x86_64/zgemv_t_microk_haswell-2.c

diff --git a/kernel/x86_64/dgemv_n_microk_haswell-2.c b/kernel/x86_64/dgemv_n_microk_haswell-2.c
index b9f462cb2..28e2fe4f6 100644
--- a/kernel/x86_64/dgemv_n_microk_haswell-2.c
+++ b/kernel/x86_64/dgemv_n_microk_haswell-2.c
@@ -26,9 +26,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
 
 #define HAVE_KERNEL_16x4 1
-static void dgemv_kernel_16x4( BLASLONG n, float **ap, float *x, float *y) __attribute__ ((noinline));
+static void dgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) __attribute__ ((noinline));
 
-static void dgemv_kernel_16x4( BLASLONG n, float **ap, float *x, float *y)
+static void dgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
 {
 	BLASLONG register i = 0;
 
diff --git a/kernel/x86_64/dgemv_t_microk_haswell-2.c b/kernel/x86_64/dgemv_t_microk_haswell-2.c
index 94d4c319e..1a4ba37d7 100644
--- a/kernel/x86_64/dgemv_t_microk_haswell-2.c
+++ b/kernel/x86_64/dgemv_t_microk_haswell-2.c
@@ -26,9 +26,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
 
 #define HAVE_KERNEL_16x4 1
-static void dgemv_kernel_16x4( BLASLONG n, float **ap, float *x, float *y) __attribute__ ((noinline));
+static void dgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) __attribute__ ((noinline));
 
-static void dgemv_kernel_16x4( BLASLONG n, float **ap, float *x, float *y)
+static void dgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
 {
 	BLASLONG register i = 0;
 
diff --git a/kernel/x86_64/sgemv_n_microk_bulldozer-2.c b/kernel/x86_64/sgemv_n_microk_bulldozer-2.c
index d50fa4268..c4a490587 100644
--- a/kernel/x86_64/sgemv_n_microk_bulldozer-2.c
+++ b/kernel/x86_64/sgemv_n_microk_bulldozer-2.c
@@ -26,9 +26,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
 
 #define HAVE_KERNEL_16x4 1
-static void sgemv_kernel_16x4( BLASLONG n, float **ap, float *x, float *y) __attribute__ ((noinline));
+static void sgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) __attribute__ ((noinline));
 
-static void sgemv_kernel_16x4( BLASLONG n, float **ap, float *x, float *y)
+static void sgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
 {
 	BLASLONG register i = 0;
 
diff --git a/kernel/x86_64/sgemv_n_microk_haswell-2.c b/kernel/x86_64/sgemv_n_microk_haswell-2.c
index d3fee67c3..19888d150 100644
--- a/kernel/x86_64/sgemv_n_microk_haswell-2.c
+++ b/kernel/x86_64/sgemv_n_microk_haswell-2.c
@@ -26,9 +26,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
 
 #define HAVE_KERNEL_16x4 1
-static void sgemv_kernel_16x4( BLASLONG n, float **ap, float *x, float *y) __attribute__ ((noinline));
+static void sgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) __attribute__ ((noinline));
 
-static void sgemv_kernel_16x4( BLASLONG n, float **ap, float *x, float *y)
+static void sgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
 {
 	BLASLONG register i = 0;
 
diff --git a/kernel/x86_64/sgemv_n_microk_nehalem-2.c b/kernel/x86_64/sgemv_n_microk_nehalem-2.c
index 3cfb82a45..40ccbb78f 100644
--- a/kernel/x86_64/sgemv_n_microk_nehalem-2.c
+++ b/kernel/x86_64/sgemv_n_microk_nehalem-2.c
@@ -26,9 +26,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
 
 #define HAVE_KERNEL_16x4 1
-static void sgemv_kernel_16x4( BLASLONG n, float **ap, float *x, float *y) __attribute__ ((noinline));
+static void sgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) __attribute__ ((noinline));
 
-static void sgemv_kernel_16x4( BLASLONG n, float **ap, float *x, float *y)
+static void sgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
 {
 	BLASLONG register i = 0;
 
diff --git a/kernel/x86_64/sgemv_n_microk_sandy-2.c b/kernel/x86_64/sgemv_n_microk_sandy-2.c
index 21eff1c5e..b255ddbcb 100644
--- a/kernel/x86_64/sgemv_n_microk_sandy-2.c
+++ b/kernel/x86_64/sgemv_n_microk_sandy-2.c
@@ -26,9 +26,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
 
 #define HAVE_KERNEL_16x4 1
-static void sgemv_kernel_16x4( BLASLONG n, float **ap, float *x, float *y) __attribute__ ((noinline));
+static void sgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) __attribute__ ((noinline));
 
-static void sgemv_kernel_16x4( BLASLONG n, float **ap, float *x, float *y)
+static void sgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
 {
 	BLASLONG register i = 0;
 
diff --git a/kernel/x86_64/sgemv_t_microk_bulldozer-2.c b/kernel/x86_64/sgemv_t_microk_bulldozer-2.c
index 54bdca63a..e4498afa3 100644
--- a/kernel/x86_64/sgemv_t_microk_bulldozer-2.c
+++ b/kernel/x86_64/sgemv_t_microk_bulldozer-2.c
@@ -26,9 +26,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
 
 #define HAVE_KERNEL_16x4 1
-static void sgemv_kernel_16x4( BLASLONG n, float **ap, float *x, float *y) __attribute__ ((noinline));
+static void sgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) __attribute__ ((noinline));
 
-static void sgemv_kernel_16x4( BLASLONG n, float **ap, float *x, float *y)
+static void sgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
 {
 	BLASLONG register i = 0;
 
diff --git a/kernel/x86_64/sgemv_t_microk_haswell-2.c b/kernel/x86_64/sgemv_t_microk_haswell-2.c
index cef703483..e6d47270d 100644
--- a/kernel/x86_64/sgemv_t_microk_haswell-2.c
+++ b/kernel/x86_64/sgemv_t_microk_haswell-2.c
@@ -26,9 +26,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
 
 #define HAVE_KERNEL_16x4 1
-static void sgemv_kernel_16x4( BLASLONG n, float **ap, float *x, float *y) __attribute__ ((noinline));
+static void sgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) __attribute__ ((noinline));
 
-static void sgemv_kernel_16x4( BLASLONG n, float **ap, float *x, float *y)
+static void sgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
 {
 	BLASLONG register i = 0;
 
diff --git a/kernel/x86_64/sgemv_t_microk_nehalem-2.c b/kernel/x86_64/sgemv_t_microk_nehalem-2.c
index e1f2b81bd..db5a1448b 100644
--- a/kernel/x86_64/sgemv_t_microk_nehalem-2.c
+++ b/kernel/x86_64/sgemv_t_microk_nehalem-2.c
@@ -26,9 +26,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
 
 #define HAVE_KERNEL_16x4 1
-static void sgemv_kernel_16x4( BLASLONG n, float **ap, float *x, float *y) __attribute__ ((noinline));
+static void sgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) __attribute__ ((noinline));
 
-static void sgemv_kernel_16x4( BLASLONG n, float **ap, float *x, float *y)
+static void sgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
 {
 	BLASLONG register i = 0;
 
diff --git a/kernel/x86_64/sgemv_t_microk_sandy-2.c b/kernel/x86_64/sgemv_t_microk_sandy-2.c
index 6a3748238..841522302 100644
--- a/kernel/x86_64/sgemv_t_microk_sandy-2.c
+++ b/kernel/x86_64/sgemv_t_microk_sandy-2.c
@@ -26,9 +26,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
 
 #define HAVE_KERNEL_16x4 1
-static void sgemv_kernel_16x4( BLASLONG n, float **ap, float *x, float *y) __attribute__ ((noinline));
+static void sgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) __attribute__ ((noinline));
 
-static void sgemv_kernel_16x4( BLASLONG n, float **ap, float *x, float *y)
+static void sgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
 {
 	BLASLONG register i = 0;
 
diff --git a/kernel/x86_64/zgemv_n_microk_haswell-2.c b/kernel/x86_64/zgemv_n_microk_haswell-2.c
index 8583f96b3..833983fe0 100644
--- a/kernel/x86_64/zgemv_n_microk_haswell-2.c
+++ b/kernel/x86_64/zgemv_n_microk_haswell-2.c
@@ -26,9 +26,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
 
 #define HAVE_KERNEL_16x4 1
-static void zgemv_kernel_16x4( BLASLONG n, float **ap, float *x, float *y) __attribute__ ((noinline));
+static void zgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) __attribute__ ((noinline));
 
-static void zgemv_kernel_16x4( BLASLONG n, float **ap, float *x, float *y)
+static void zgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
 {
 	BLASLONG register i = 0;
 
diff --git a/kernel/x86_64/zgemv_t_microk_bulldozer-2.c b/kernel/x86_64/zgemv_t_microk_bulldozer-2.c
new file mode 100644
index 000000000..efb6d784e
--- /dev/null
+++ b/kernel/x86_64/zgemv_t_microk_bulldozer-2.c
@@ -0,0 +1,139 @@
+/***************************************************************************
+Copyright (c) 2014, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#define HAVE_KERNEL_16x4 1
+static void zgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) __attribute__ ((noinline));
+
+static void zgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
+{
+
+	BLASLONG register i = 0;
+
+	__asm__  __volatile__
+	(
+	"vzeroupper                                      \n\t"
+
+	"vxorpd         %%ymm8 , %%ymm8 , %%ymm8         \n\t" // temp
+	"vxorpd         %%ymm9 , %%ymm9 , %%ymm9         \n\t" // temp
+	"vxorpd         %%ymm10, %%ymm10, %%ymm10        \n\t" // temp
+	"vxorpd         %%ymm11, %%ymm11, %%ymm11        \n\t" // temp
+	"vxorpd         %%ymm12, %%ymm12, %%ymm12        \n\t" // temp
+	"vxorpd         %%ymm13, %%ymm13, %%ymm13        \n\t"
+	"vxorpd         %%ymm14, %%ymm14, %%ymm14        \n\t"
+	"vxorpd         %%ymm15, %%ymm15, %%ymm15        \n\t"
+
+	".align 16                                       \n\t"
+	".L01LOOP%=:                                     \n\t"
+
+	"vmovddup         (%2,%0,8), %%xmm0              \n\t" // real value from x0
+	"vmovddup        8(%2,%0,8), %%xmm1              \n\t" // imag value from x0
+	"vmovddup       16(%2,%0,8), %%xmm2              \n\t" // real value from x1
+	"vmovddup       24(%2,%0,8), %%xmm3              \n\t" // imag value from x1
+	"vinsertf128    $1, %%xmm2, %%ymm0 , %%ymm0      \n\t" // real values from x0 and x1
+	"vinsertf128    $1, %%xmm3, %%ymm1 , %%ymm1      \n\t" // imag values from x0 and x1
+
+	"vmovups        (%4,%0,8), %%ymm4                \n\t" // 2 complex values from a0
+	"vmovups        (%5,%0,8), %%ymm5                \n\t" // 2 complex values from a1
+	"vmovups        (%6,%0,8), %%ymm6                \n\t" // 2 complex values from a2
+	"vmovups        (%7,%0,8), %%ymm7                \n\t" // 2 complex values from a3
+
+	"vfmaddpd       %%ymm8 , %%ymm4 , %%ymm0, %%ymm8  \n\t" // ar0*xr0,al0*xr0,ar1*xr1,al1*xr1
+	"vfmaddpd       %%ymm9 , %%ymm4 , %%ymm1, %%ymm9  \n\t" // ar0*xl0,al0*xl0,ar1*xl1,al1*xl1
+	"vfmaddpd       %%ymm10, %%ymm5 , %%ymm0, %%ymm10 \n\t" // ar0*xr0,al0*xr0,ar1*xr1,al1*xr1
+	"vfmaddpd       %%ymm11, %%ymm5 , %%ymm1, %%ymm11 \n\t" // ar0*xl0,al0*xl0,ar1*xl1,al1*xl1
+	"vfmaddpd       %%ymm12, %%ymm6 , %%ymm0, %%ymm12 \n\t" // ar0*xr0,al0*xr0,ar1*xr1,al1*xr1
+	"vfmaddpd       %%ymm13, %%ymm6 , %%ymm1, %%ymm13 \n\t" // ar0*xl0,al0*xl0,ar1*xl1,al1*xl1
+	"vfmaddpd       %%ymm14, %%ymm7 , %%ymm0, %%ymm14 \n\t" // ar0*xr0,al0*xr0,ar1*xr1,al1*xr1
+	"vfmaddpd       %%ymm15, %%ymm7 , %%ymm1, %%ymm15 \n\t" // ar0*xl0,al0*xl0,ar1*xl1,al1*xl1
+
+
+	"addq           $4 , %0                          \n\t"
+	"subq           $2 , %1                          \n\t"
+	"jnz            .L01LOOP%=                       \n\t"
+
+#if ( !defined(CONJ) && !defined(XCONJ) ) || ( defined(CONJ) && defined(XCONJ) )
+	"vpermilpd      $0x5 , %%ymm9 , %%ymm9           \n\t"
+	"vpermilpd      $0x5 , %%ymm11, %%ymm11          \n\t"
+	"vpermilpd      $0x5 , %%ymm13, %%ymm13          \n\t"
+	"vpermilpd      $0x5 , %%ymm15, %%ymm15          \n\t"
+	"vaddsubpd      %%ymm9 , %%ymm8, %%ymm8          \n\t"
+	"vaddsubpd      %%ymm11, %%ymm10, %%ymm10        \n\t"
+	"vaddsubpd      %%ymm13, %%ymm12, %%ymm12        \n\t"
+	"vaddsubpd      %%ymm15, %%ymm14, %%ymm14        \n\t"
+#else
+	"vpermilpd      $0x5 , %%ymm8 , %%ymm8           \n\t"
+	"vpermilpd      $0x5 , %%ymm10, %%ymm10          \n\t"
+	"vpermilpd      $0x5 , %%ymm12, %%ymm12          \n\t"
+	"vpermilpd      $0x5 , %%ymm14, %%ymm14          \n\t"
+	"vaddsubpd      %%ymm8 , %%ymm9 , %%ymm8         \n\t"
+	"vaddsubpd      %%ymm10, %%ymm11, %%ymm10        \n\t"
+	"vaddsubpd      %%ymm12, %%ymm13, %%ymm12        \n\t"
+	"vaddsubpd      %%ymm14, %%ymm15, %%ymm14        \n\t"
+	"vpermilpd      $0x5 , %%ymm8 , %%ymm8           \n\t"
+	"vpermilpd      $0x5 , %%ymm10, %%ymm10          \n\t"
+	"vpermilpd      $0x5 , %%ymm12, %%ymm12          \n\t"
+	"vpermilpd      $0x5 , %%ymm14, %%ymm14          \n\t"
+#endif
+
+	"vextractf128   $1, %%ymm8 , %%xmm9              \n\t"
+	"vextractf128   $1, %%ymm10, %%xmm11             \n\t"
+	"vextractf128   $1, %%ymm12, %%xmm13             \n\t"
+	"vextractf128   $1, %%ymm14, %%xmm15             \n\t"
+
+	"vaddpd         %%xmm8 , %%xmm9 , %%xmm8         \n\t"
+	"vaddpd         %%xmm10, %%xmm11, %%xmm10        \n\t"
+	"vaddpd         %%xmm12, %%xmm13, %%xmm12        \n\t"
+	"vaddpd         %%xmm14, %%xmm15, %%xmm14        \n\t"
+
+	"vmovups        %%xmm8 ,   (%3)                  \n\t"
+	"vmovups        %%xmm10, 16(%3)                  \n\t"
+	"vmovups        %%xmm12, 32(%3)                  \n\t"
+	"vmovups        %%xmm14, 48(%3)                  \n\t"
+
+	"vzeroupper                                      \n\t"
+
+	:
+	  "+r" (i),     // 0
+	  "+r" (n)      // 1
+	:
+	  "r" (x),      // 2
+	  "r" (y),      // 3
+	  "r" (ap[0]),  // 4
+	  "r" (ap[1]),  // 5
+	  "r" (ap[2]),  // 6
+	  "r" (ap[3])   // 7
+	: "cc",
+	  "%xmm0", "%xmm1", "%xmm2", "%xmm3",
+	  "%xmm4", "%xmm5", "%xmm6", "%xmm7",
+	  "%xmm8", "%xmm9", "%xmm10", "%xmm11",
+	  "%xmm12", "%xmm13", "%xmm14", "%xmm15",
+	  "memory"
+	);
+
+}
+
+
diff --git a/kernel/x86_64/zgemv_t_microk_haswell-2.c b/kernel/x86_64/zgemv_t_microk_haswell-2.c
new file mode 100644
index 000000000..2dddef27d
--- /dev/null
+++ b/kernel/x86_64/zgemv_t_microk_haswell-2.c
@@ -0,0 +1,139 @@
+/***************************************************************************
+Copyright (c) 2014, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#define HAVE_KERNEL_16x4 1
+static void zgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) __attribute__ ((noinline));
+
+static void zgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
+{
+
+	BLASLONG register i = 0;
+
+	__asm__  __volatile__
+	(
+	"vzeroupper                                      \n\t"
+
+	"vxorpd         %%ymm8 , %%ymm8 , %%ymm8         \n\t" // temp
+	"vxorpd         %%ymm9 , %%ymm9 , %%ymm9         \n\t" // temp
+	"vxorpd         %%ymm10, %%ymm10, %%ymm10        \n\t" // temp
+	"vxorpd         %%ymm11, %%ymm11, %%ymm11        \n\t" // temp
+	"vxorpd         %%ymm12, %%ymm12, %%ymm12        \n\t" // temp
+	"vxorpd         %%ymm13, %%ymm13, %%ymm13        \n\t"
+	"vxorpd         %%ymm14, %%ymm14, %%ymm14        \n\t"
+	"vxorpd         %%ymm15, %%ymm15, %%ymm15        \n\t"
+
+	".align 16                                       \n\t"
+	".L01LOOP%=:                                     \n\t"
+
+	"vmovddup         (%2,%0,8), %%xmm0              \n\t" // real value from x0
+	"vmovddup        8(%2,%0,8), %%xmm1              \n\t" // imag value from x0
+	"vmovddup       16(%2,%0,8), %%xmm2              \n\t" // real value from x1
+	"vmovddup       24(%2,%0,8), %%xmm3              \n\t" // imag value from x1
+	"vinsertf128    $1, %%xmm2, %%ymm0 , %%ymm0      \n\t" // real values from x0 and x1
+	"vinsertf128    $1, %%xmm3, %%ymm1 , %%ymm1      \n\t" // imag values from x0 and x1
+
+	"vmovups        (%4,%0,8), %%ymm4                \n\t" // 2 complex values from a0
+	"vmovups        (%5,%0,8), %%ymm5                \n\t" // 2 complex values from a1
+	"vmovups        (%6,%0,8), %%ymm6                \n\t" // 2 complex values from a2
+	"vmovups        (%7,%0,8), %%ymm7                \n\t" // 2 complex values from a3
+
+	"vfmadd231pd    %%ymm4 , %%ymm0, %%ymm8          \n\t" // ar0*xr0,al0*xr0,ar1*xr1,al1*xr1
+	"vfmadd231pd    %%ymm4 , %%ymm1, %%ymm9          \n\t" // ar0*xl0,al0*xl0,ar1*xl1,al1*xl1
+	"vfmadd231pd    %%ymm5 , %%ymm0, %%ymm10         \n\t" // ar0*xr0,al0*xr0,ar1*xr1,al1*xr1
+	"vfmadd231pd    %%ymm5 , %%ymm1, %%ymm11         \n\t" // ar0*xl0,al0*xl0,ar1*xl1,al1*xl1
+	"vfmadd231pd    %%ymm6 , %%ymm0, %%ymm12         \n\t" // ar0*xr0,al0*xr0,ar1*xr1,al1*xr1
+	"vfmadd231pd    %%ymm6 , %%ymm1, %%ymm13         \n\t" // ar0*xl0,al0*xl0,ar1*xl1,al1*xl1
+	"vfmadd231pd    %%ymm7 , %%ymm0, %%ymm14         \n\t" // ar0*xr0,al0*xr0,ar1*xr1,al1*xr1
+	"vfmadd231pd    %%ymm7 , %%ymm1, %%ymm15         \n\t" // ar0*xl0,al0*xl0,ar1*xl1,al1*xl1
+
+
+	"addq           $4 , %0                          \n\t"
+	"subq           $2 , %1                          \n\t"
+	"jnz            .L01LOOP%=                       \n\t"
+
+#if ( !defined(CONJ) && !defined(XCONJ) ) || ( defined(CONJ) && defined(XCONJ) )
+	"vpermilpd      $0x5 , %%ymm9 , %%ymm9           \n\t"
+	"vpermilpd      $0x5 , %%ymm11, %%ymm11          \n\t"
+	"vpermilpd      $0x5 , %%ymm13, %%ymm13          \n\t"
+	"vpermilpd      $0x5 , %%ymm15, %%ymm15          \n\t"
+	"vaddsubpd      %%ymm9 , %%ymm8, %%ymm8          \n\t"
+	"vaddsubpd      %%ymm11, %%ymm10, %%ymm10        \n\t"
+	"vaddsubpd      %%ymm13, %%ymm12, %%ymm12        \n\t"
+	"vaddsubpd      %%ymm15, %%ymm14, %%ymm14        \n\t"
+#else
+	"vpermilpd      $0x5 , %%ymm8 , %%ymm8           \n\t"
+	"vpermilpd      $0x5 , %%ymm10, %%ymm10          \n\t"
+	"vpermilpd      $0x5 , %%ymm12, %%ymm12          \n\t"
+	"vpermilpd      $0x5 , %%ymm14, %%ymm14          \n\t"
+	"vaddsubpd      %%ymm8 , %%ymm9 , %%ymm8         \n\t"
+	"vaddsubpd      %%ymm10, %%ymm11, %%ymm10        \n\t"
+	"vaddsubpd      %%ymm12, %%ymm13, %%ymm12        \n\t"
+	"vaddsubpd      %%ymm14, %%ymm15, %%ymm14        \n\t"
+	"vpermilpd      $0x5 , %%ymm8 , %%ymm8           \n\t"
+	"vpermilpd      $0x5 , %%ymm10, %%ymm10          \n\t"
+	"vpermilpd      $0x5 , %%ymm12, %%ymm12          \n\t"
+	"vpermilpd      $0x5 , %%ymm14, %%ymm14          \n\t"
+#endif
+
+	"vextractf128   $1, %%ymm8 , %%xmm9              \n\t"
+	"vextractf128   $1, %%ymm10, %%xmm11             \n\t"
+	"vextractf128   $1, %%ymm12, %%xmm13             \n\t"
+	"vextractf128   $1, %%ymm14, %%xmm15             \n\t"
+
+	"vaddpd         %%xmm8 , %%xmm9 , %%xmm8         \n\t"
+	"vaddpd         %%xmm10, %%xmm11, %%xmm10        \n\t"
+	"vaddpd         %%xmm12, %%xmm13, %%xmm12        \n\t"
+	"vaddpd         %%xmm14, %%xmm15, %%xmm14        \n\t"
+
+	"vmovups        %%xmm8 ,   (%3)                  \n\t"
+	"vmovups        %%xmm10, 16(%3)                  \n\t"
+	"vmovups        %%xmm12, 32(%3)                  \n\t"
+	"vmovups        %%xmm14, 48(%3)                  \n\t"
+
+	"vzeroupper                                      \n\t"
+
+	:
+	  "+r" (i),     // 0
+	  "+r" (n)      // 1
+	:
+	  "r" (x),      // 2
+	  "r" (y),      // 3
+	  "r" (ap[0]),  // 4
+	  "r" (ap[1]),  // 5
+	  "r" (ap[2]),  // 6
+	  "r" (ap[3])   // 7
+	: "cc",
+	  "%xmm0", "%xmm1", "%xmm2", "%xmm3",
+	  "%xmm4", "%xmm5", "%xmm6", "%xmm7",
+	  "%xmm8", "%xmm9", "%xmm10", "%xmm11",
+	  "%xmm12", "%xmm13", "%xmm14", "%xmm15",
+	  "memory"
+	);
+
+}
+
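
Note on the new zgemv_t micro-kernels: the complex arithmetic that the
vfmaddpd/vfmadd231pd, vpermilpd and vaddsubpd sequence implements can be
written as plain scalar C. The sketch below is an illustrative reference
only, not part of the patch (the _ref name is ours); it assumes the default
case with neither CONJ nor XCONJ defined. n counts complex elements, the
columns ap[0..3] and x hold interleaved (real, imag) doubles, and y receives
the four raw column dot products, which the calling gemv_t driver is
presumably expected to scale by alpha afterwards. Unlike the asm loop, which
handles two complex elements per iteration and therefore relies on n being a
positive multiple of two, the scalar version accepts any n.

static void zgemv_kernel_16x4_ref( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
{
	BLASLONG i, j;

	for ( j = 0; j < 4; j++ )
	{
		FLOAT re = 0.0, im = 0.0;
		FLOAT *a = ap[j];

		// i steps in doubles, two per complex element, like %0 in the asm loop
		for ( i = 0; i < 2 * n; i += 2 )
		{
			re += a[i] * x[i]   - a[i+1] * x[i+1];	// Re(a * x)
			im += a[i] * x[i+1] + a[i+1] * x[i];	// Im(a * x)
		}

		y[2*j]   = re;	// same slots the four xmm stores write to
		y[2*j+1] = im;
	}
}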