From 09fcd3a34135ad86f4b17f8e15893fe99b9f0171 Mon Sep 17 00:00:00 2001
From: wernsaar
Date: Mon, 11 Aug 2014 14:19:25 +0200
Subject: [PATCH] add optimized zgemv_t kernel for bulldozer

---
 kernel/x86_64/KERNEL.BULLDOZER             |   2 +-
 kernel/x86_64/zgemv_t.c                    |   8 +-
 kernel/x86_64/zgemv_t_microk_bulldozer-2.c | 151 +++++++++++++--------
 3 files changed, 101 insertions(+), 60 deletions(-)

diff --git a/kernel/x86_64/KERNEL.BULLDOZER b/kernel/x86_64/KERNEL.BULLDOZER
index 893f13064..19bf7fd32 100644
--- a/kernel/x86_64/KERNEL.BULLDOZER
+++ b/kernel/x86_64/KERNEL.BULLDOZER
@@ -2,7 +2,7 @@ SGEMVNKERNEL = sgemv_n.c
 SGEMVTKERNEL = sgemv_t.c
 
 ZGEMVNKERNEL = zgemv_n_dup.S
-ZGEMVTKERNEL = zgemv_t.S
+ZGEMVTKERNEL = zgemv_t.c
 
 DGEMVNKERNEL = dgemv_n_bulldozer.S
 DGEMVTKERNEL = dgemv_t_bulldozer.S

diff --git a/kernel/x86_64/zgemv_t.c b/kernel/x86_64/zgemv_t.c
index a2dc45c45..b54d5f4e2 100644
--- a/kernel/x86_64/zgemv_t.c
+++ b/kernel/x86_64/zgemv_t.c
@@ -28,11 +28,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "common.h"
 
-/*
-#if defined(HASWELL)
-#include "zgemv_t_microk_haswell-2.c"
+
+#if defined(BULLDOZER)
+#include "zgemv_t_microk_bulldozer-2.c"
 #endif
-*/
+
 
 #define NBMAX 1028
 
diff --git a/kernel/x86_64/zgemv_t_microk_bulldozer-2.c b/kernel/x86_64/zgemv_t_microk_bulldozer-2.c
index efb6d784e..65d5a10a2 100644
--- a/kernel/x86_64/zgemv_t_microk_bulldozer-2.c
+++ b/kernel/x86_64/zgemv_t_microk_bulldozer-2.c
@@ -37,77 +37,118 @@ static void zgemv_kernel_16x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y)
 	(
 	"vzeroupper			 \n\t"
 
-	"vxorpd		%%ymm8 , %%ymm8 , %%ymm8 	\n\t" // temp
-	"vxorpd		%%ymm9 , %%ymm9 , %%ymm9 	\n\t" // temp
-	"vxorpd		%%ymm10, %%ymm10, %%ymm10	\n\t" // temp
-	"vxorpd		%%ymm11, %%ymm11, %%ymm11	\n\t" // temp
-	"vxorpd		%%ymm12, %%ymm12, %%ymm12	\n\t" // temp
-	"vxorpd		%%ymm13, %%ymm13, %%ymm13	\n\t"
-	"vxorpd		%%ymm14, %%ymm14, %%ymm14	\n\t"
-	"vxorpd		%%ymm15, %%ymm15, %%ymm15	\n\t"
+	"vxorpd		%%xmm8 , %%xmm8 , %%xmm8 	\n\t" // temp
+	"vxorpd		%%xmm9 , %%xmm9 , %%xmm9 	\n\t" // temp
+	"vxorpd		%%xmm10, %%xmm10, %%xmm10	\n\t" // temp
+	"vxorpd		%%xmm11, %%xmm11, %%xmm11	\n\t" // temp
+	"vxorpd		%%xmm12, %%xmm12, %%xmm12	\n\t" // temp
+	"vxorpd		%%xmm13, %%xmm13, %%xmm13	\n\t"
+	"vxorpd		%%xmm14, %%xmm14, %%xmm14	\n\t"
+	"vxorpd		%%xmm15, %%xmm15, %%xmm15	\n\t"
 
 	".align 16				 \n\t"
 	".L01LOOP%=:				 \n\t"
 
 	"vmovddup	(%2,%0,8), %%xmm0	 \n\t" // real value from x0
 	"vmovddup	8(%2,%0,8), %%xmm1	 \n\t" // imag value from x0
-	"vmovddup	16(%2,%0,8), %%xmm2	 \n\t" // real value from x1
-	"vmovddup	24(%2,%0,8), %%xmm3	 \n\t" // imag value from x1
-	"vinsertf128	$1, %%xmm2, %%ymm0 , %%ymm0 \n\t" // real values from x0 and x1
-	"vinsertf128	$1, %%xmm3, %%ymm1 , %%ymm1 \n\t" // imag values from x0 and x1
-	"vmovups	(%4,%0,8), %%ymm4	 \n\t" // 2 complex values from a0
-	"vmovups	(%5,%0,8), %%ymm5	 \n\t" // 2 complex values from a1
-	"vmovups	(%6,%0,8), %%ymm6	 \n\t" // 2 complex values from a2
-	"vmovups	(%7,%0,8), %%ymm7	 \n\t" // 2 complex values from a3
+	"prefetcht0	192(%4,%0,8)		 \n\t"
+	"vmovups	(%4,%0,8), %%xmm4	 \n\t" // 1 complex value from a0
+	"prefetcht0	192(%5,%0,8)		 \n\t"
+	"vmovups	(%5,%0,8), %%xmm5	 \n\t" // 1 complex value from a1
+	"prefetcht0	192(%6,%0,8)		 \n\t"
+	"vmovups	(%6,%0,8), %%xmm6	 \n\t" // 1 complex value from a2
+	"prefetcht0	192(%7,%0,8)		 \n\t"
+	"vmovups	(%7,%0,8), %%xmm7	 \n\t" // 1 complex value from a3
 
-	"vfmaddpd	%%ymm8 , %%ymm4 , %%ymm0, %%ymm8  \n\t" // ar0*xr0,al0*xr0,ar1*xr1,al1*xr1
-	"vfmaddpd	%%ymm9 , %%ymm4 , %%ymm1, %%ymm9  \n\t" // ar0*xl0,al0*xl0,ar1*xl1,al1*xl1
"vfmaddpd %%ymm10, %%ymm5 , %%ymm0, %%ymm10 \n\t" // ar0*xr0,al0*xr0,ar1*xr1,al1*xr1 - "vfmaddpd %%ymm11, %%ymm5 , %%ymm1, %%ymm11 \n\t" // ar0*xl0,al0*xl0,ar1*xl1,al1*xl1 - "vfmaddpd %%ymm12, %%ymm6 , %%ymm0, %%ymm12 \n\t" // ar0*xr0,al0*xr0,ar1*xr1,al1*xr1 - "vfmaddpd %%ymm13, %%ymm6 , %%ymm1, %%ymm13 \n\t" // ar0*xl0,al0*xl0,ar1*xl1,al1*xl1 - "vfmaddpd %%ymm14, %%ymm7 , %%ymm0, %%ymm14 \n\t" // ar0*xr0,al0*xr0,ar1*xr1,al1*xr1 - "vfmaddpd %%ymm15, %%ymm7 , %%ymm1, %%ymm15 \n\t" // ar0*xl0,al0*xl0,ar1*xl1,al1*xl1 + "vfmaddpd %%xmm8 , %%xmm4 , %%xmm0, %%xmm8 \n\t" // ar0*xr0,al0*xr0 + "vfmaddpd %%xmm9 , %%xmm4 , %%xmm1, %%xmm9 \n\t" // ar0*xl0,al0*xl0 + "vfmaddpd %%xmm10, %%xmm5 , %%xmm0, %%xmm10 \n\t" // ar0*xr0,al0*xr0 + "vfmaddpd %%xmm11, %%xmm5 , %%xmm1, %%xmm11 \n\t" // ar0*xl0,al0*xl0 + "vfmaddpd %%xmm12, %%xmm6 , %%xmm0, %%xmm12 \n\t" // ar0*xr0,al0*xr0 + "vfmaddpd %%xmm13, %%xmm6 , %%xmm1, %%xmm13 \n\t" // ar0*xl0,al0*xl0 + "vfmaddpd %%xmm14, %%xmm7 , %%xmm0, %%xmm14 \n\t" // ar0*xr0,al0*xr0 + "vfmaddpd %%xmm15, %%xmm7 , %%xmm1, %%xmm15 \n\t" // ar0*xl0,al0*xl0 + "vmovddup 16(%2,%0,8), %%xmm0 \n\t" // real value from x0 + "vmovddup 24(%2,%0,8), %%xmm1 \n\t" // imag value from x0 - "addq $4 , %0 \n\t" - "subq $2 , %1 \n\t" + "vmovups 16(%4,%0,8), %%xmm4 \n\t" // 1 complex values from a0 + "vmovups 16(%5,%0,8), %%xmm5 \n\t" // 1 complex values from a1 + "vmovups 16(%6,%0,8), %%xmm6 \n\t" // 1 complex values from a2 + "vmovups 16(%7,%0,8), %%xmm7 \n\t" // 1 complex values from a3 + + "vfmaddpd %%xmm8 , %%xmm4 , %%xmm0, %%xmm8 \n\t" // ar0*xr0,al0*xr0 + "vfmaddpd %%xmm9 , %%xmm4 , %%xmm1, %%xmm9 \n\t" // ar0*xl0,al0*xl0 + "vfmaddpd %%xmm10, %%xmm5 , %%xmm0, %%xmm10 \n\t" // ar0*xr0,al0*xr0 + "vfmaddpd %%xmm11, %%xmm5 , %%xmm1, %%xmm11 \n\t" // ar0*xl0,al0*xl0 + "vfmaddpd %%xmm12, %%xmm6 , %%xmm0, %%xmm12 \n\t" // ar0*xr0,al0*xr0 + "vfmaddpd %%xmm13, %%xmm6 , %%xmm1, %%xmm13 \n\t" // ar0*xl0,al0*xl0 + "vfmaddpd %%xmm14, %%xmm7 , %%xmm0, %%xmm14 \n\t" // ar0*xr0,al0*xr0 + "vfmaddpd %%xmm15, %%xmm7 , %%xmm1, %%xmm15 \n\t" // ar0*xl0,al0*xl0 + + "vmovddup 32(%2,%0,8), %%xmm0 \n\t" // real value from x0 + "vmovddup 40(%2,%0,8), %%xmm1 \n\t" // imag value from x0 + + "vmovups 32(%4,%0,8), %%xmm4 \n\t" // 1 complex values from a0 + "vmovups 32(%5,%0,8), %%xmm5 \n\t" // 1 complex values from a1 + "vmovups 32(%6,%0,8), %%xmm6 \n\t" // 1 complex values from a2 + "vmovups 32(%7,%0,8), %%xmm7 \n\t" // 1 complex values from a3 + + "vfmaddpd %%xmm8 , %%xmm4 , %%xmm0, %%xmm8 \n\t" // ar0*xr0,al0*xr0 + "vfmaddpd %%xmm9 , %%xmm4 , %%xmm1, %%xmm9 \n\t" // ar0*xl0,al0*xl0 + "vfmaddpd %%xmm10, %%xmm5 , %%xmm0, %%xmm10 \n\t" // ar0*xr0,al0*xr0 + "vfmaddpd %%xmm11, %%xmm5 , %%xmm1, %%xmm11 \n\t" // ar0*xl0,al0*xl0 + "vfmaddpd %%xmm12, %%xmm6 , %%xmm0, %%xmm12 \n\t" // ar0*xr0,al0*xr0 + "vfmaddpd %%xmm13, %%xmm6 , %%xmm1, %%xmm13 \n\t" // ar0*xl0,al0*xl0 + "vfmaddpd %%xmm14, %%xmm7 , %%xmm0, %%xmm14 \n\t" // ar0*xr0,al0*xr0 + "vfmaddpd %%xmm15, %%xmm7 , %%xmm1, %%xmm15 \n\t" // ar0*xl0,al0*xl0 + + "vmovddup 48(%2,%0,8), %%xmm0 \n\t" // real value from x0 + "vmovddup 56(%2,%0,8), %%xmm1 \n\t" // imag value from x0 + + "vmovups 48(%4,%0,8), %%xmm4 \n\t" // 1 complex values from a0 + "vmovups 48(%5,%0,8), %%xmm5 \n\t" // 1 complex values from a1 + "vmovups 48(%6,%0,8), %%xmm6 \n\t" // 1 complex values from a2 + "vmovups 48(%7,%0,8), %%xmm7 \n\t" // 1 complex values from a3 + + "vfmaddpd %%xmm8 , %%xmm4 , %%xmm0, %%xmm8 \n\t" // ar0*xr0,al0*xr0 + "vfmaddpd %%xmm9 , %%xmm4 , %%xmm1, %%xmm9 \n\t" // 
+	"vfmaddpd	%%xmm10, %%xmm5 , %%xmm0, %%xmm10 \n\t" // ar0*xr0,al0*xr0
+	"vfmaddpd	%%xmm11, %%xmm5 , %%xmm1, %%xmm11 \n\t" // ar0*xl0,al0*xl0
+	"vfmaddpd	%%xmm12, %%xmm6 , %%xmm0, %%xmm12 \n\t" // ar0*xr0,al0*xr0
+	"vfmaddpd	%%xmm13, %%xmm6 , %%xmm1, %%xmm13 \n\t" // ar0*xl0,al0*xl0
+	"vfmaddpd	%%xmm14, %%xmm7 , %%xmm0, %%xmm14 \n\t" // ar0*xr0,al0*xr0
+	"vfmaddpd	%%xmm15, %%xmm7 , %%xmm1, %%xmm15 \n\t" // ar0*xl0,al0*xl0
+
+	"addq		$8 , %0			 \n\t"
+	"subq		$4 , %1			 \n\t"
 	"jnz		.L01LOOP%=		 \n\t"
 
 #if ( !defined(CONJ) && !defined(XCONJ) ) || ( defined(CONJ) && defined(XCONJ) )
-	"vpermilpd	$0x5 , %%ymm9 , %%ymm9		\n\t"
-	"vpermilpd	$0x5 , %%ymm11, %%ymm11		\n\t"
-	"vpermilpd	$0x5 , %%ymm13, %%ymm13		\n\t"
-	"vpermilpd	$0x5 , %%ymm15, %%ymm15		\n\t"
-	"vaddsubpd	%%ymm9 , %%ymm8, %%ymm8		\n\t"
-	"vaddsubpd	%%ymm11, %%ymm10, %%ymm10	\n\t"
-	"vaddsubpd	%%ymm13, %%ymm12, %%ymm12	\n\t"
-	"vaddsubpd	%%ymm15, %%ymm14, %%ymm14	\n\t"
+	"vpermilpd	$0x1 , %%xmm9 , %%xmm9		\n\t"
+	"vpermilpd	$0x1 , %%xmm11, %%xmm11		\n\t"
+	"vpermilpd	$0x1 , %%xmm13, %%xmm13		\n\t"
+	"vpermilpd	$0x1 , %%xmm15, %%xmm15		\n\t"
+	"vaddsubpd	%%xmm9 , %%xmm8, %%xmm8		\n\t"
+	"vaddsubpd	%%xmm11, %%xmm10, %%xmm10	\n\t"
+	"vaddsubpd	%%xmm13, %%xmm12, %%xmm12	\n\t"
+	"vaddsubpd	%%xmm15, %%xmm14, %%xmm14	\n\t"
 #else
-	"vpermilpd	$0x5 , %%ymm8 , %%ymm8		\n\t"
-	"vpermilpd	$0x5 , %%ymm10, %%ymm10		\n\t"
-	"vpermilpd	$0x5 , %%ymm12, %%ymm12		\n\t"
-	"vpermilpd	$0x5 , %%ymm14, %%ymm14		\n\t"
-	"vaddsubpd	%%ymm8 , %%ymm9 , %%ymm8	\n\t"
-	"vaddsubpd	%%ymm10, %%ymm11, %%ymm10	\n\t"
-	"vaddsubpd	%%ymm12, %%ymm13, %%ymm12	\n\t"
-	"vaddsubpd	%%ymm14, %%ymm15, %%ymm14	\n\t"
-	"vpermilpd	$0x5 , %%ymm8 , %%ymm8		\n\t"
-	"vpermilpd	$0x5 , %%ymm10, %%ymm10		\n\t"
-	"vpermilpd	$0x5 , %%ymm12, %%ymm12		\n\t"
-	"vpermilpd	$0x5 , %%ymm14, %%ymm14		\n\t"
+	"vpermilpd	$0x1 , %%xmm8 , %%xmm8		\n\t"
+	"vpermilpd	$0x1 , %%xmm10, %%xmm10		\n\t"
+	"vpermilpd	$0x1 , %%xmm12, %%xmm12		\n\t"
+	"vpermilpd	$0x1 , %%xmm14, %%xmm14		\n\t"
+	"vaddsubpd	%%xmm8 , %%xmm9 , %%xmm8	\n\t"
+	"vaddsubpd	%%xmm10, %%xmm11, %%xmm10	\n\t"
+	"vaddsubpd	%%xmm12, %%xmm13, %%xmm12	\n\t"
+	"vaddsubpd	%%xmm14, %%xmm15, %%xmm14	\n\t"
+	"vpermilpd	$0x1 , %%xmm8 , %%xmm8		\n\t"
+	"vpermilpd	$0x1 , %%xmm10, %%xmm10		\n\t"
+	"vpermilpd	$0x1 , %%xmm12, %%xmm12		\n\t"
+	"vpermilpd	$0x1 , %%xmm14, %%xmm14		\n\t"
 #endif
 
-	"vextractf128	$1, %%ymm8 , %%xmm9		\n\t"
-	"vextractf128	$1, %%ymm10, %%xmm11		\n\t"
-	"vextractf128	$1, %%ymm12, %%xmm13		\n\t"
-	"vextractf128	$1, %%ymm14, %%xmm15		\n\t"
-
-	"vaddpd		%%xmm8 , %%xmm9 , %%xmm8	\n\t"
-	"vaddpd		%%xmm10, %%xmm11, %%xmm10	\n\t"
-	"vaddpd		%%xmm12, %%xmm13, %%xmm12	\n\t"
-	"vaddpd		%%xmm14, %%xmm15, %%xmm14	\n\t"
 
 	"vmovups	%%xmm8 , (%3)			\n\t"
 	"vmovups	%%xmm10, 16(%3)			\n\t"
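
Review note: as a cross-check of the new kernel's arithmetic, here is a minimal
scalar sketch of what zgemv_kernel_16x4() computes per call for the
non-conjugated case. The vfmaddpd pairs accumulate a*x_real and a*x_imag in
separate registers, and vpermilpd $0x1 followed by vaddsubpd folds each pair
into one complex sum per column. The function name zgemv_t_ref_4 and the plain
double types are illustrative assumptions for this sketch, not part of the
patch:

    #include <stddef.h>

    /* Scalar reference for the kernel's 4-column complex dot product
       (non-conjugated case). ap[0..3] point at four matrix columns of n
       interleaved complex doubles; y receives four complex sums. The real
       kernel requires n to be a multiple of 4, since the loop is unrolled
       four complex elements deep. */
    static void zgemv_t_ref_4(size_t n, double *ap[4], double *x, double *y)
    {
        for (int col = 0; col < 4; col++) {
            double sum_r = 0.0, sum_i = 0.0;
            const double *a = ap[col];
            for (size_t i = 0; i < n; i++) {
                double ar = a[2*i], ai = a[2*i+1]; /* 1 complex value from a */
                double xr = x[2*i], xi = x[2*i+1]; /* 1 complex value from x */
                /* xmm{8,10,12,14} += a*xr and xmm{9,11,13,15} += a*xi in the
                   kernel; vpermilpd+vaddsubpd produce exactly these terms: */
                sum_r += ar * xr - ai * xi;
                sum_i += ai * xr + ar * xi;
            }
            y[2*col]   = sum_r; /* vmovups %%xmm8 , (%3) and friends */
            y[2*col+1] = sum_i;
        }
    }

Note also that the kernel signature carries no alpha, so scaling is left to the
zgemv_t.c driver, and that the loop now advances by four complex elements per
iteration (addq $8 / subq $4) instead of two (addq $4 / subq $2).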