From 6821677489deadeed9d12691f3ef674a6dbf93c9 Mon Sep 17 00:00:00 2001 From: wernsaar Date: Fri, 26 Apr 2013 20:05:42 +0200 Subject: [PATCH 01/15] minor improvements and code cleanup --- kernel/x86_64/dgemm_kernel_4x4_bulldozer.S | 259 +++++++-------------- 1 file changed, 87 insertions(+), 172 deletions(-) diff --git a/kernel/x86_64/dgemm_kernel_4x4_bulldozer.S b/kernel/x86_64/dgemm_kernel_4x4_bulldozer.S index 05bad596e..0b32e275d 100644 --- a/kernel/x86_64/dgemm_kernel_4x4_bulldozer.S +++ b/kernel/x86_64/dgemm_kernel_4x4_bulldozer.S @@ -56,7 +56,7 @@ * 53 GFLOPS with 4 threads on 2 modules * 46 GFLOPS with 2 threads on 2 modules * 28 GFLOPS with 2 threads on 1 module -* 23,6 GFLOPS with 1 thread on 1 module +* 23,1 GFLOPS with 1 thread on 1 module *********************************************************************/ #define ASSEMBLER @@ -114,132 +114,132 @@ #define B_PR1 512 #define KERNEL1(xx) \ - vmovddup -15 * SIZE(BO, %rax, 4), %xmm3 ;\ vmovups -14 * SIZE(AO, %rax, 4),%xmm2 ;\ - vfmaddpd %xmm8,%xmm6,%xmm7,%xmm8 ;\ - vfmaddpd %xmm12,%xmm2,%xmm7,%xmm12 ;\ + vfmaddpd %xmm8,%xmm6,%xmm7,%xmm8 ;\ vmovddup -14 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd %xmm12,%xmm2,%xmm7,%xmm12 ;\ vmovddup -13 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm9,%xmm6,%xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm2,%xmm3,%xmm13 ;\ - vmovddup -8 * SIZE(BO, %rax, 4), %xmm7 ;\ - vfmaddpd %xmm10,%xmm6,%xmm5,%xmm10 ;\ - vfmaddpd %xmm11,%xmm6,%xmm4,%xmm11 ;\ + vfmaddpd %xmm9,%xmm6,%xmm3,%xmm9 ;\ vmovddup -12 * SIZE(BO, %rax, 4), %xmm1 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovups -8 * SIZE(AO, %rax, 4),%xmm6 ;\ + vfmaddpd %xmm13,%xmm2,%xmm3,%xmm13 ;\ + vmovddup -8 * SIZE(BO, %rax, 4), %xmm7 ;\ + vfmaddpd %xmm10,%xmm6,%xmm5,%xmm10 ;\ + vmovddup -11 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm11,%xmm6,%xmm4,%xmm11 ;\ + vmovups -12 * SIZE(AO, %rax, 4),%xmm0 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + vmovups -8 * SIZE(AO, %rax, 4),%xmm6 ;\ + vfmaddpd 
%xmm14,%xmm2, %xmm5,%xmm14 ;\ #define KERNEL2(xx) \ - vmovups -12 * SIZE(AO, %rax, 4), %xmm2 ;\ - vmovups -10 * SIZE(AO, %rax, 4),%xmm0 ;\ - vmovddup -11 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm8, %xmm2, %xmm1,%xmm8 ;\ - vfmaddpd %xmm12,%xmm0, %xmm1,%xmm12 ;\ + vmovups -10 * SIZE(AO, %rax, 4), %xmm2 ;\ + vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ + vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ vmovddup -10 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm9, %xmm2, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm0, %xmm3,%xmm13 ;\ + vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ vmovddup -9 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm10,%xmm2, %xmm5,%xmm10 ;\ - vfmaddpd %xmm14,%xmm0, %xmm5,%xmm14 ;\ - vfmaddpd %xmm11,%xmm2, %xmm4,%xmm11 ;\ - vfmaddpd %xmm15,%xmm0, %xmm4,%xmm15 ;\ + vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovddup -7 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ #define KERNEL3(xx) \ - vmovups -6 * SIZE(AO, %rax, 4),%xmm2 ;\ - vmovddup -7 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm8, %xmm6, %xmm7, %xmm8 ;\ - vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ + vmovups -6 * SIZE(AO, %rax, 4),%xmm2 ;\ + vfmaddpd %xmm8, %xmm6, %xmm7, %xmm8 ;\ vmovddup -6 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ vmovddup -5 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm9, %xmm6, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vfmaddpd %xmm9, %xmm6, %xmm3,%xmm9 ;\ vmovddup (BO, %rax, 4), %xmm7 ;\ - vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ vmovddup -4 * SIZE(BO, %rax, 4), %xmm1 ;\ - vfmaddpd %xmm11,%xmm6, %xmm4, %xmm11 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ + vmovddup -3 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm11,%xmm6, %xmm4, %xmm11 ;\ + vmovups -4 * SIZE(AO, %rax, 4),%xmm0 ;\ + vfmaddpd 
%xmm14,%xmm2, %xmm5,%xmm14 ;\ vmovups (AO, %rax, 4), %xmm6 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ #define KERNEL4(xx) \ - vmovups -4 * SIZE(AO, %rax, 4), %xmm2 ;\ - vmovups -2 * SIZE(AO, %rax, 4),%xmm0 ;\ - vmovddup -3 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm8,%xmm2, %xmm1,%xmm8 ;\ - vfmaddpd %xmm12,%xmm0, %xmm1 ,%xmm12;\ + vmovups -2 * SIZE(AO, %rax, 4), %xmm2 ;\ + vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ + vfmaddpd %xmm12,%xmm2, %xmm1 ,%xmm12;\ vmovddup -2 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm9,%xmm2, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm0, %xmm3,%xmm13 ;\ + vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ vmovddup -1 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm10,%xmm2, %xmm5,%xmm10 ;\ - vfmaddpd %xmm14,%xmm0, %xmm5,%xmm14 ;\ - vfmaddpd %xmm11,%xmm2, %xmm4,%xmm11 ;\ - vfmaddpd %xmm15,%xmm0, %xmm4,%xmm15 ;\ + vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovddup 1 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ #define KERNEL5(xx) \ vmovups 2 * SIZE(AO, %rax, 4),%xmm2 ;\ - vmovddup 1 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm8,%xmm6, %xmm7,%xmm8 ;\ - vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ + vfmaddpd %xmm8,%xmm6, %xmm7,%xmm8 ;\ vmovddup 2 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ vmovddup 3 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\ vmovddup 8 * SIZE(BO, %rax, 4), %xmm7 ;\ - vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup 4 * SIZE(BO, %rax, 4), %xmm1 ;\ - vfmaddpd %xmm11,%xmm6, %xmm4,%xmm11 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vmovddup 4 * SIZE(BO, %rax, 4), %xmm1 ;\ + vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ + vmovddup 5 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm11,%xmm6, %xmm4,%xmm11 ;\ + 
vmovups 4 * SIZE(AO, %rax, 4),%xmm0 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ vmovups 8 * SIZE(AO, %rax, 4), %xmm6 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ #define KERNEL6(xx) \ - vmovups 4 * SIZE(AO, %rax, 4), %xmm2 ;\ - vmovups 6 * SIZE(AO, %rax, 4),%xmm0 ;\ - vmovddup 5 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm8,%xmm2, %xmm1,%xmm8 ;\ - vfmaddpd %xmm12,%xmm0, %xmm1,%xmm12 ;\ + vmovups 6 * SIZE(AO, %rax, 4), %xmm2 ;\ + vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ + vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ vmovddup 6 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm9,%xmm2, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm0, %xmm3,%xmm13 ;\ + vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ vmovddup 7 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm10,%xmm2, %xmm5,%xmm10 ;\ - vfmaddpd %xmm14,%xmm0, %xmm5,%xmm14 ;\ - vfmaddpd %xmm11,%xmm2, %xmm4,%xmm11 ;\ - vfmaddpd %xmm15,%xmm0, %xmm4,%xmm15 ;\ + vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovddup 9 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ #define KERNEL7(xx) \ vmovups 10 * SIZE(AO, %rax, 4),%xmm2 ;\ - vmovddup 9 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm8,%xmm6, %xmm7,%xmm8 ;\ - vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ + vfmaddpd %xmm8,%xmm6, %xmm7,%xmm8 ;\ vmovddup 10 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ vmovddup 11 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\ vmovddup 16 * SIZE(BO, %rax, 4), %xmm7 ;\ - vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ vmovddup 12 * SIZE(BO, %rax, 4), %xmm1 ;\ - vfmaddpd %xmm11,%xmm6, %xmm4,%xmm11 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ + vmovddup 13 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm11,%xmm6, 
%xmm4,%xmm11 ;\ + vmovups 12 * SIZE(AO, %rax, 4), %xmm0 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ vmovups 16 * SIZE(AO, %rax, 4),%xmm6 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ #define KERNEL8(xx) \ - vmovups 12 * SIZE(AO, %rax, 4), %xmm2 ;\ - vmovups 14 * SIZE(AO, %rax, 4), %xmm0 ;\ - vmovddup 13 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm8,%xmm2, %xmm1,%xmm8 ;\ - vfmaddpd %xmm12,%xmm0, %xmm1,%xmm12 ;\ + vmovups 14 * SIZE(AO, %rax, 4), %xmm2 ;\ + vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ + vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ vmovddup 14 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm9,%xmm2, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13, %xmm0, %xmm3,%xmm13 ;\ + vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ + vfmaddpd %xmm13, %xmm2, %xmm3,%xmm13 ;\ vmovddup 15 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm10,%xmm2, %xmm5,%xmm10 ;\ - vfmaddpd %xmm14,%xmm0, %xmm5,%xmm14 ;\ - vfmaddpd %xmm11,%xmm2, %xmm4,%xmm11 ;\ - vfmaddpd %xmm15,%xmm0, %xmm4,%xmm15 ;\ + vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovddup 17 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ #define KERNEL_SUB1(xx) \ vmovups -16 * SIZE(AO),%xmm0 ;\ @@ -456,97 +456,13 @@ vmovups -16 * SIZE(AO, %rax, 4),%xmm6 vmovddup -16 * SIZE(BO, %rax, 4), %xmm7 + vmovddup -15 * SIZE(BO, %rax, 4), %xmm3 .align 16 .L12: -#ifndef SMP - - prefetcht0 A_PR1(AO,%rax,4) - prefetcht0 B_PR1(BO,%rax,4) - KERNEL1(16 * 0) - KERNEL2(16 * 0) - prefetcht0 A_PR1+64(AO,%rax,4) - prefetcht0 B_PR1+64(BO,%rax,4) - KERNEL3(16 * 0) - KERNEL4(16 * 0) - prefetcht0 A_PR1+128(AO,%rax,4) - prefetcht0 B_PR1+128(BO,%rax,4) - KERNEL5(16 * 0) - KERNEL6(16 * 0) - prefetcht0 A_PR1+192(AO,%rax,4) - prefetcht0 B_PR1+192(BO,%rax,4) - KERNEL7(16 * 0) - KERNEL8(16 * 0) - - addq $8 * SIZE, %rax - je .L15 - - prefetcht0 A_PR1(AO,%rax,4) - prefetcht0 B_PR1(BO,%rax,4) - KERNEL1(16 * 0) - KERNEL2(16 * 0) - prefetcht0 A_PR1+64(AO,%rax,4) - prefetcht0 B_PR1+64(BO,%rax,4) - 
KERNEL3(16 * 0) - KERNEL4(16 * 0) - prefetcht0 A_PR1+128(AO,%rax,4) - prefetcht0 B_PR1+128(BO,%rax,4) - KERNEL5(16 * 0) - KERNEL6(16 * 0) - prefetcht0 A_PR1+192(AO,%rax,4) - prefetcht0 B_PR1+192(BO,%rax,4) - KERNEL7(16 * 0) - KERNEL8(16 * 0) - - addq $8 * SIZE, %rax - je .L15 - - - prefetcht0 A_PR1(AO,%rax,4) - prefetcht0 B_PR1(BO,%rax,4) - KERNEL1(16 * 0) - KERNEL2(16 * 0) - prefetcht0 A_PR1+64(AO,%rax,4) - prefetcht0 B_PR1+64(BO,%rax,4) - KERNEL3(16 * 0) - KERNEL4(16 * 0) - prefetcht0 A_PR1+128(AO,%rax,4) - prefetcht0 B_PR1+128(BO,%rax,4) - KERNEL5(16 * 0) - KERNEL6(16 * 0) - prefetcht0 A_PR1+192(AO,%rax,4) - prefetcht0 B_PR1+192(BO,%rax,4) - KERNEL7(16 * 0) - KERNEL8(16 * 0) - - addq $8 * SIZE, %rax - je .L15 - - prefetcht0 A_PR1(AO,%rax,4) - prefetcht0 B_PR1(BO,%rax,4) - KERNEL1(16 * 0) - KERNEL2(16 * 0) - prefetcht0 A_PR1+64(AO,%rax,4) - prefetcht0 B_PR1+64(BO,%rax,4) - KERNEL3(16 * 0) - KERNEL4(16 * 0) - prefetcht0 A_PR1+128(AO,%rax,4) - prefetcht0 B_PR1+128(BO,%rax,4) - KERNEL5(16 * 0) - KERNEL6(16 * 0) - prefetcht0 A_PR1+192(AO,%rax,4) - prefetcht0 B_PR1+192(BO,%rax,4) - KERNEL7(16 * 0) - KERNEL8(16 * 0) - - addq $8 * SIZE, %rax - jnz .L12 - - .align 16 -#else -#ifdef OPTMODULE +#if defined(OPTBYMODULE) || !defined(SMP) prefetcht0 A_PR1(AO,%rax,4) prefetcht0 B_PR1(BO,%rax,4) @@ -586,7 +502,6 @@ .align 16 #endif -#endif .L15: From 19ad2fb128112a794f1d84f7c6f8070a991a61bc Mon Sep 17 00:00:00 2001 From: wernsaar Date: Sat, 27 Apr 2013 13:40:49 +0200 Subject: [PATCH 02/15] prefetch improved. 
Defined 2 different kernels for inner loop --- kernel/x86_64/dgemm_kernel_4x4_bulldozer.S | 202 +++++++++++++++++---- 1 file changed, 171 insertions(+), 31 deletions(-) diff --git a/kernel/x86_64/dgemm_kernel_4x4_bulldozer.S b/kernel/x86_64/dgemm_kernel_4x4_bulldozer.S index 0b32e275d..91cd49291 100644 --- a/kernel/x86_64/dgemm_kernel_4x4_bulldozer.S +++ b/kernel/x86_64/dgemm_kernel_4x4_bulldozer.S @@ -46,6 +46,11 @@ * moved vmovddup ALPHA, %xmm7 down * define A_PR1 192 * define B_PR1 512 +* +* 2013/04/27 Saar +* define A_PR1 224 +* define B_PR1 224 +* created 2 different Kernels **********************************************************************/ /********************************************************************* @@ -110,8 +115,11 @@ #define movapd movaps #define movupd movups -#define A_PR1 192 -#define B_PR1 512 +#define A_PR1 224 +#define B_PR1 224 + + +#if defined(OPTBYMODULE) || !defined(SMP) #define KERNEL1(xx) \ vmovups -14 * SIZE(AO, %rax, 4),%xmm2 ;\ @@ -122,24 +130,24 @@ vfmaddpd %xmm9,%xmm6,%xmm3,%xmm9 ;\ vmovddup -12 * SIZE(BO, %rax, 4), %xmm1 ;\ vfmaddpd %xmm13,%xmm2,%xmm3,%xmm13 ;\ - vmovddup -8 * SIZE(BO, %rax, 4), %xmm7 ;\ - vfmaddpd %xmm10,%xmm6,%xmm5,%xmm10 ;\ - vmovddup -11 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm11,%xmm6,%xmm4,%xmm11 ;\ - vmovups -12 * SIZE(AO, %rax, 4),%xmm0 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - vmovups -8 * SIZE(AO, %rax, 4),%xmm6 ;\ vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovddup -11 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + vmovups -12 * SIZE(AO, %rax, 4),%xmm0 ;\ + vfmaddpd %xmm10,%xmm6,%xmm5,%xmm10 ;\ + vfmaddpd %xmm11,%xmm6,%xmm4,%xmm11 ;\ #define KERNEL2(xx) \ vmovups -10 * SIZE(AO, %rax, 4), %xmm2 ;\ vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ + vmovups -8 * SIZE(AO, %rax, 4),%xmm6 ;\ vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ vmovddup -10 * SIZE(BO, %rax, 4), %xmm5 ;\ vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ vmovddup -9 * SIZE(BO, %rax, 4), 
%xmm4 ;\ vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ + vmovddup -8 * SIZE(BO, %rax, 4), %xmm7 ;\ vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ vmovddup -7 * SIZE(BO, %rax, 4), %xmm3 ;\ vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ @@ -152,26 +160,26 @@ vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ vmovddup -5 * SIZE(BO, %rax, 4), %xmm4 ;\ vfmaddpd %xmm9, %xmm6, %xmm3,%xmm9 ;\ - vmovddup (BO, %rax, 4), %xmm7 ;\ vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ vmovddup -4 * SIZE(BO, %rax, 4), %xmm1 ;\ - vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ - vmovddup -3 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm11,%xmm6, %xmm4, %xmm11 ;\ - vmovups -4 * SIZE(AO, %rax, 4),%xmm0 ;\ vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovups (AO, %rax, 4), %xmm6 ;\ + vmovddup -3 * SIZE(BO, %rax, 4), %xmm3 ;\ vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + vmovups -4 * SIZE(AO, %rax, 4),%xmm0 ;\ + vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ + vfmaddpd %xmm11,%xmm6, %xmm4, %xmm11 ;\ #define KERNEL4(xx) \ vmovups -2 * SIZE(AO, %rax, 4), %xmm2 ;\ vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ + vmovups (AO, %rax, 4), %xmm6 ;\ vfmaddpd %xmm12,%xmm2, %xmm1 ,%xmm12;\ vmovddup -2 * SIZE(BO, %rax, 4), %xmm5 ;\ vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ vmovddup -1 * SIZE(BO, %rax, 4), %xmm4 ;\ vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ + vmovddup (BO, %rax, 4), %xmm7 ;\ vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ vmovddup 1 * SIZE(BO, %rax, 4), %xmm3 ;\ vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ @@ -184,26 +192,26 @@ vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ vmovddup 3 * SIZE(BO, %rax, 4), %xmm4 ;\ vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\ - vmovddup 8 * SIZE(BO, %rax, 4), %xmm7 ;\ vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ vmovddup 4 * SIZE(BO, %rax, 4), %xmm1 ;\ - vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ - vmovddup 5 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm11,%xmm6, %xmm4,%xmm11 ;\ - vmovups 4 * SIZE(AO, %rax, 4),%xmm0 ;\ vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovups 8 * SIZE(AO, %rax, 4), %xmm6 ;\ + vmovddup 5 * SIZE(BO, %rax, 4), %xmm3 ;\ 
vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + vmovups 4 * SIZE(AO, %rax, 4),%xmm0 ;\ + vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ + vfmaddpd %xmm11,%xmm6, %xmm4,%xmm11 ;\ #define KERNEL6(xx) \ vmovups 6 * SIZE(AO, %rax, 4), %xmm2 ;\ vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ + vmovups 8 * SIZE(AO, %rax, 4), %xmm6 ;\ vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ vmovddup 6 * SIZE(BO, %rax, 4), %xmm5 ;\ vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ vmovddup 7 * SIZE(BO, %rax, 4), %xmm4 ;\ vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ + vmovddup 8 * SIZE(BO, %rax, 4), %xmm7 ;\ vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ vmovddup 9 * SIZE(BO, %rax, 4), %xmm3 ;\ vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ @@ -216,31 +224,163 @@ vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ vmovddup 11 * SIZE(BO, %rax, 4), %xmm4 ;\ vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\ - vmovddup 16 * SIZE(BO, %rax, 4), %xmm7 ;\ vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ vmovddup 12 * SIZE(BO, %rax, 4), %xmm1 ;\ - vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ - vmovddup 13 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm11,%xmm6, %xmm4,%xmm11 ;\ - vmovups 12 * SIZE(AO, %rax, 4), %xmm0 ;\ vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovups 16 * SIZE(AO, %rax, 4),%xmm6 ;\ + vmovddup 13 * SIZE(BO, %rax, 4), %xmm3 ;\ vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + vmovups 12 * SIZE(AO, %rax, 4), %xmm0 ;\ + vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ + vfmaddpd %xmm11,%xmm6, %xmm4,%xmm11 ;\ #define KERNEL8(xx) \ vmovups 14 * SIZE(AO, %rax, 4), %xmm2 ;\ vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ + vmovups 16 * SIZE(AO, %rax, 4),%xmm6 ;\ vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ vmovddup 14 * SIZE(BO, %rax, 4), %xmm5 ;\ vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ vfmaddpd %xmm13, %xmm2, %xmm3,%xmm13 ;\ vmovddup 15 * SIZE(BO, %rax, 4), %xmm4 ;\ vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ + vmovddup 16 * SIZE(BO, %rax, 4), %xmm7 ;\ vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ vmovddup 17 * SIZE(BO, %rax, 4), %xmm3 ;\ vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ vfmaddpd %xmm15,%xmm2, 
%xmm4,%xmm15 ;\ +#else + +#define KERNEL1(xx) \ + vmovups -14 * SIZE(AO, %rax, 4),%xmm2 ;\ + vfmaddpd %xmm8,%xmm6,%xmm7,%xmm8 ;\ + vmovddup -14 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd %xmm12,%xmm2,%xmm7,%xmm12 ;\ + vmovddup -13 * SIZE(BO, %rax, 4), %xmm4 ;\ + vfmaddpd %xmm9,%xmm6,%xmm3,%xmm9 ;\ + vmovddup -12 * SIZE(BO, %rax, 4), %xmm1 ;\ + vfmaddpd %xmm13,%xmm2,%xmm3,%xmm13 ;\ + vmovddup -8 * SIZE(BO, %rax, 4), %xmm7 ;\ + vfmaddpd %xmm10,%xmm6,%xmm5,%xmm10 ;\ + vmovddup -11 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm11,%xmm6,%xmm4,%xmm11 ;\ + vmovups -12 * SIZE(AO, %rax, 4),%xmm0 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + vmovups -8 * SIZE(AO, %rax, 4),%xmm6 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + +#define KERNEL2(xx) \ + vmovups -10 * SIZE(AO, %rax, 4), %xmm2 ;\ + vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ + vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ + vmovddup -10 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vmovddup -9 * SIZE(BO, %rax, 4), %xmm4 ;\ + vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovddup -7 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + +#define KERNEL3(xx) \ + vmovups -6 * SIZE(AO, %rax, 4),%xmm2 ;\ + vfmaddpd %xmm8, %xmm6, %xmm7, %xmm8 ;\ + vmovddup -6 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ + vmovddup -5 * SIZE(BO, %rax, 4), %xmm4 ;\ + vfmaddpd %xmm9, %xmm6, %xmm3,%xmm9 ;\ + vmovddup (BO, %rax, 4), %xmm7 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vmovddup -4 * SIZE(BO, %rax, 4), %xmm1 ;\ + vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ + vmovddup -3 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm11,%xmm6, %xmm4, %xmm11 ;\ + vmovups -4 * SIZE(AO, %rax, 4),%xmm0 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovups (AO, %rax, 4), %xmm6 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + +#define KERNEL4(xx) \ + vmovups -2 * SIZE(AO, %rax, 4), %xmm2 ;\ + 
vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ + vfmaddpd %xmm12,%xmm2, %xmm1 ,%xmm12;\ + vmovddup -2 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vmovddup -1 * SIZE(BO, %rax, 4), %xmm4 ;\ + vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovddup 1 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + +#define KERNEL5(xx) \ + vmovups 2 * SIZE(AO, %rax, 4),%xmm2 ;\ + vfmaddpd %xmm8,%xmm6, %xmm7,%xmm8 ;\ + vmovddup 2 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ + vmovddup 3 * SIZE(BO, %rax, 4), %xmm4 ;\ + vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\ + vmovddup 8 * SIZE(BO, %rax, 4), %xmm7 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vmovddup 4 * SIZE(BO, %rax, 4), %xmm1 ;\ + vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ + vmovddup 5 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm11,%xmm6, %xmm4,%xmm11 ;\ + vmovups 4 * SIZE(AO, %rax, 4),%xmm0 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovups 8 * SIZE(AO, %rax, 4), %xmm6 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + +#define KERNEL6(xx) \ + vmovups 6 * SIZE(AO, %rax, 4), %xmm2 ;\ + vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ + vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ + vmovddup 6 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vmovddup 7 * SIZE(BO, %rax, 4), %xmm4 ;\ + vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovddup 9 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + +#define KERNEL7(xx) \ + vmovups 10 * SIZE(AO, %rax, 4),%xmm2 ;\ + vfmaddpd %xmm8,%xmm6, %xmm7,%xmm8 ;\ + vmovddup 10 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ + vmovddup 11 * SIZE(BO, %rax, 4), %xmm4 ;\ + vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\ + vmovddup 16 * SIZE(BO, %rax, 4), %xmm7 ;\ + vfmaddpd 
%xmm13,%xmm2, %xmm3,%xmm13 ;\ + vmovddup 12 * SIZE(BO, %rax, 4), %xmm1 ;\ + vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ + vmovddup 13 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm11,%xmm6, %xmm4,%xmm11 ;\ + vmovups 12 * SIZE(AO, %rax, 4), %xmm0 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovups 16 * SIZE(AO, %rax, 4),%xmm6 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + +#define KERNEL8(xx) \ + vmovups 14 * SIZE(AO, %rax, 4), %xmm2 ;\ + vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ + vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ + vmovddup 14 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ + vfmaddpd %xmm13, %xmm2, %xmm3,%xmm13 ;\ + vmovddup 15 * SIZE(BO, %rax, 4), %xmm4 ;\ + vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovddup 17 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + +#endif + #define KERNEL_SUB1(xx) \ vmovups -16 * SIZE(AO),%xmm0 ;\ vmovups -14 * SIZE(AO),%xmm2 ;\ @@ -425,7 +565,7 @@ vxorpd %xmm14, %xmm14,%xmm14 vxorpd %xmm15, %xmm15,%xmm15 - // prefetchw (CO1) + prefetchw (CO1) // prefetchw (CO1,LDC) // prefetchw (CO2) // prefetchw (CO2,LDC) @@ -458,7 +598,7 @@ vmovddup -16 * SIZE(BO, %rax, 4), %xmm7 vmovddup -15 * SIZE(BO, %rax, 4), %xmm3 - .align 16 + .align 32 .L12: @@ -471,8 +611,8 @@ prefetcht0 A_PR1+64(AO,%rax,4) prefetcht0 B_PR1+64(BO,%rax,4) KERNEL3(16 * 0) - KERNEL4(16 * 0) - prefetcht0 A_PR1+128(AO,%rax,4) + KERNEL4(16 * 0) + prefetcht0 A_PR1+128(AO,%rax,4) prefetcht0 B_PR1+128(BO,%rax,4) KERNEL5(16 * 0) KERNEL6(16 * 0) From 4cb454cdf2b1a49644203503eb6bb872fecaf123 Mon Sep 17 00:00:00 2001 From: wernsaar Date: Sat, 27 Apr 2013 14:30:00 +0200 Subject: [PATCH 03/15] added trsm_kernel_LT_4x4_bulldozer.S --- kernel/x86_64/KERNEL.BULLDOZER | 4 +- kernel/x86_64/trsm_kernel_LT_4x4_bulldozer.S | 3263 ++++++++++++++++++ 2 files changed, 3265 insertions(+), 2 deletions(-) create mode 100644 kernel/x86_64/trsm_kernel_LT_4x4_bulldozer.S diff --git 
a/kernel/x86_64/KERNEL.BULLDOZER b/kernel/x86_64/KERNEL.BULLDOZER index 2ac035fe0..e0b8a71e4 100644 --- a/kernel/x86_64/KERNEL.BULLDOZER +++ b/kernel/x86_64/KERNEL.BULLDOZER @@ -44,8 +44,8 @@ STRSMKERNEL_RN = trsm_kernel_LT_8x4_sse.S STRSMKERNEL_RT = trsm_kernel_RT_8x4_sse.S DTRSMKERNEL_LN = trsm_kernel_LN_4x4_barcelona.S -DTRSMKERNEL_LT = trsm_kernel_LT_4x4_barcelona.S -DTRSMKERNEL_RN = trsm_kernel_LT_4x4_barcelona.S +DTRSMKERNEL_LT = trsm_kernel_LT_4x4_bulldozer.S +DTRSMKERNEL_RN = trsm_kernel_LT_4x4_bulldozer.S DTRSMKERNEL_RT = trsm_kernel_RT_4x4_barcelona.S CTRSMKERNEL_LN = ztrsm_kernel_LN_4x2_sse.S diff --git a/kernel/x86_64/trsm_kernel_LT_4x4_bulldozer.S b/kernel/x86_64/trsm_kernel_LT_4x4_bulldozer.S new file mode 100644 index 000000000..5f3f8f7f8 --- /dev/null +++ b/kernel/x86_64/trsm_kernel_LT_4x4_bulldozer.S @@ -0,0 +1,3263 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. 
IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#define ASSEMBLER +#include "common.h" + +#define OLD_M %rdi +#define OLD_N %rsi +#define M %r13 +#define N %r14 +#define K %rdx + +#define A %rcx +#define B %r8 +#define C %r9 +#define LDC %r10 + +#define I %r11 +#define AO %rdi +#define BO %rsi +#define CO1 %r15 +#define CO2 %r12 +#define BB %rbp +#define J %rbx + +#ifndef WINDOWS_ABI + +#define STACKSIZE 96 + +#define OFFSET 48(%rsp) +#define AORIG 56(%rsp) +#define KK 64(%rsp) +#define KKK 72(%rsp) + +#else + +#define STACKSIZE 256 + +#define OLD_A 40 + STACKSIZE(%rsp) +#define OLD_B 48 + STACKSIZE(%rsp) +#define OLD_C 56 + STACKSIZE(%rsp) +#define OLD_LDC 64 + STACKSIZE(%rsp) +#define OLD_OFFSET 72 + STACKSIZE(%rsp) + +#define OFFSET 224(%rsp) +#define AORIG 232(%rsp) +#define KK 240(%rsp) +#define KKK 248(%rsp) + +#endif + + +#define movlpd movsd +#define movapd movups +#define movupd movups + +#define A_PR1 224 +#define B_PR1 224 + +#define KERNEL1(xx) \ + vmovups -14 * SIZE(AO, %rax, 4),%xmm2 ;\ + vfmaddpd %xmm8,%xmm6,%xmm7,%xmm8 ;\ + vmovddup -14 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd 
%xmm12,%xmm2,%xmm7,%xmm12 ;\ + vmovddup -13 * SIZE(BO, %rax, 4), %xmm4 ;\ + vfmaddpd %xmm9,%xmm6,%xmm3,%xmm9 ;\ + vmovddup -12 * SIZE(BO, %rax, 4), %xmm1 ;\ + vfmaddpd %xmm13,%xmm2,%xmm3,%xmm13 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovddup -11 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + vmovups -12 * SIZE(AO, %rax, 4),%xmm0 ;\ + vfmaddpd %xmm10,%xmm6,%xmm5,%xmm10 ;\ + vfmaddpd %xmm11,%xmm6,%xmm4,%xmm11 ;\ + +#define KERNEL2(xx) \ + vmovups -10 * SIZE(AO, %rax, 4), %xmm2 ;\ + vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ + vmovups -8 * SIZE(AO, %rax, 4),%xmm6 ;\ + vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ + vmovddup -10 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vmovddup -9 * SIZE(BO, %rax, 4), %xmm4 ;\ + vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ + vmovddup -8 * SIZE(BO, %rax, 4), %xmm7 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovddup -7 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + +#define KERNEL3(xx) \ + vmovups -6 * SIZE(AO, %rax, 4),%xmm2 ;\ + vfmaddpd %xmm8, %xmm6, %xmm7, %xmm8 ;\ + vmovddup -6 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ + vmovddup -5 * SIZE(BO, %rax, 4), %xmm4 ;\ + vfmaddpd %xmm9, %xmm6, %xmm3,%xmm9 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vmovddup -4 * SIZE(BO, %rax, 4), %xmm1 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovddup -3 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + vmovups -4 * SIZE(AO, %rax, 4),%xmm0 ;\ + vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ + vfmaddpd %xmm11,%xmm6, %xmm4, %xmm11 ;\ + +#define KERNEL4(xx) \ + vmovups -2 * SIZE(AO, %rax, 4), %xmm2 ;\ + vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ + vmovups (AO, %rax, 4), %xmm6 ;\ + vfmaddpd %xmm12,%xmm2, %xmm1 ,%xmm12;\ + vmovddup -2 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vmovddup -1 * 
SIZE(BO, %rax, 4), %xmm4 ;\ + vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ + vmovddup (BO, %rax, 4), %xmm7 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovddup 1 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + +#define KERNEL5(xx) \ + vmovups 2 * SIZE(AO, %rax, 4),%xmm2 ;\ + vfmaddpd %xmm8,%xmm6, %xmm7,%xmm8 ;\ + vmovddup 2 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ + vmovddup 3 * SIZE(BO, %rax, 4), %xmm4 ;\ + vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vmovddup 4 * SIZE(BO, %rax, 4), %xmm1 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovddup 5 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + vmovups 4 * SIZE(AO, %rax, 4),%xmm0 ;\ + vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ + vfmaddpd %xmm11,%xmm6, %xmm4,%xmm11 ;\ + +#define KERNEL6(xx) \ + vmovups 6 * SIZE(AO, %rax, 4), %xmm2 ;\ + vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ + vmovups 8 * SIZE(AO, %rax, 4), %xmm6 ;\ + vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ + vmovddup 6 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vmovddup 7 * SIZE(BO, %rax, 4), %xmm4 ;\ + vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ + vmovddup 8 * SIZE(BO, %rax, 4), %xmm7 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovddup 9 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + +#define KERNEL7(xx) \ + vmovups 10 * SIZE(AO, %rax, 4),%xmm2 ;\ + vfmaddpd %xmm8,%xmm6, %xmm7,%xmm8 ;\ + vmovddup 10 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ + vmovddup 11 * SIZE(BO, %rax, 4), %xmm4 ;\ + vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vmovddup 12 * SIZE(BO, %rax, 4), %xmm1 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovddup 13 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + vmovups 12 * SIZE(AO, %rax, 4), 
%xmm0 ;\ + vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ + vfmaddpd %xmm11,%xmm6, %xmm4,%xmm11 ;\ + +#define KERNEL8(xx) \ + vmovups 14 * SIZE(AO, %rax, 4), %xmm2 ;\ + vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ + vmovups 16 * SIZE(AO, %rax, 4),%xmm6 ;\ + vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ + vmovddup 14 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ + vfmaddpd %xmm13, %xmm2, %xmm3,%xmm13 ;\ + vmovddup 15 * SIZE(BO, %rax, 4), %xmm4 ;\ + vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ + vmovddup 16 * SIZE(BO, %rax, 4), %xmm7 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovddup 17 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + +#define KERNEL_SUB1(xx) \ + vmovups -16 * SIZE(AO),%xmm0 ;\ + vmovups -14 * SIZE(AO),%xmm2 ;\ + vmovddup -16 * SIZE(BO), %xmm1 ;\ + vmovddup -15 * SIZE(BO), %xmm3 ;\ + vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ + vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ + vfmaddpd %xmm12, %xmm2, %xmm1,%xmm12 ;\ + vfmaddpd %xmm13, %xmm2, %xmm3,%xmm13 ;\ + vmovddup -14 * SIZE(BO), %xmm1 ;\ + vmovddup -13 * SIZE(BO), %xmm3 ;\ + vfmaddpd %xmm10, %xmm0, %xmm1,%xmm10 ;\ + vfmaddpd %xmm11, %xmm0, %xmm3,%xmm11 ;\ + vfmaddpd %xmm14, %xmm2, %xmm1,%xmm14 ;\ + vfmaddpd %xmm15, %xmm2, %xmm3,%xmm15 ;\ + + +#define KERNEL_SUB2(xx) \ + vmovups -12 * SIZE(AO), %xmm0 ;\ + vmovups -10 * SIZE(AO), %xmm2 ;\ + vmovddup -12 * SIZE(BO), %xmm1 ;\ + vmovddup -11 * SIZE(BO), %xmm3 ;\ + vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ + vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ + vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vmovddup -10 * SIZE(BO), %xmm1 ;\ + vmovddup -9 * SIZE(BO), %xmm3 ;\ + vfmaddpd %xmm10,%xmm0, %xmm1,%xmm10 ;\ + vfmaddpd %xmm11,%xmm0, %xmm3,%xmm11 ;\ + vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\ + vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\ + +#define KERNEL_SUB3(xx) \ + vmovups -8 * SIZE(AO),%xmm0 ;\ + vmovups -6 * SIZE(AO),%xmm2 ;\ + vmovddup -8 * SIZE(BO), %xmm1 ;\ + vmovddup -7 * 
SIZE(BO), %xmm3 ;\ + vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ + vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ + vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vmovddup -6 * SIZE(BO), %xmm1 ;\ + vmovddup -5 * SIZE(BO), %xmm3 ;\ + vfmaddpd %xmm10,%xmm0, %xmm1,%xmm10 ;\ + vfmaddpd %xmm11,%xmm0, %xmm3,%xmm11 ;\ + vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\ + vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\ + +#define KERNEL_SUB4(xx) \ + vmovups -4 * SIZE(AO), %xmm0 ;\ + vmovups -2 * SIZE(AO), %xmm2 ;\ + vmovddup -4 * SIZE(BO), %xmm1 ;\ + vmovddup -3 * SIZE(BO), %xmm3 ;\ + vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ + vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ + vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vmovddup -2 * SIZE(BO), %xmm1 ;\ + vmovddup -1 * SIZE(BO), %xmm3 ;\ + vfmaddpd %xmm10,%xmm0, %xmm1,%xmm10 ;\ + vfmaddpd %xmm11,%xmm0, %xmm3,%xmm11 ;\ + vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\ + vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\ + vmovups (AO), %xmm0 ;\ + vmovddup (BO), %xmm1 ;\ + vmovddup 1 * SIZE(BO), %xmm3 ;\ + vmovaps %xmm0, %xmm2 + + + + + PROLOGUE + PROFCODE + + subq $STACKSIZE, %rsp + movq %rbx, (%rsp) + movq %rbp, 8(%rsp) + movq %r12, 16(%rsp) + movq %r13, 24(%rsp) + movq %r14, 32(%rsp) + movq %r15, 40(%rsp) + +#ifdef WINDOWS_ABI + movq %rdi, 48(%rsp) + movq %rsi, 56(%rsp) + movups %xmm6, 64(%rsp) + movups %xmm7, 80(%rsp) + movups %xmm8, 96(%rsp) + movups %xmm9, 112(%rsp) + movups %xmm10, 128(%rsp) + movups %xmm11, 144(%rsp) + movups %xmm12, 160(%rsp) + movups %xmm13, 176(%rsp) + movups %xmm14, 192(%rsp) + movups %xmm15, 208(%rsp) + + movq ARG1, OLD_M + movq ARG2, OLD_N + movq ARG3, K + movq OLD_A, A + movq OLD_B, B + movq OLD_C, C + movq OLD_LDC, LDC + movsd OLD_OFFSET, %xmm12 +#else + movq STACKSIZE + 8(%rsp), LDC + movsd STACKSIZE + 16(%rsp), %xmm12 +#endif + + movq OLD_M, M + movq OLD_N, N + + subq $-16 * SIZE, A + subq $-16 * SIZE, B + + movsd %xmm12, OFFSET + movsd %xmm12, KK + + leaq (, LDC, SIZE), LDC + +#ifdef LN 
+ leaq (, M, SIZE), %rax + addq %rax, C + imulq K, %rax + addq %rax, A +#endif + +#ifdef RT + leaq (, N, SIZE), %rax + imulq K, %rax + addq %rax, B + movq N, %rax + imulq LDC, %rax + addq %rax, C +#endif + +#ifdef RN + negq KK +#endif + +#ifdef RT + movq N, %rax + subq OFFSET, %rax + movq %rax, KK +#endif + + movq N, J + sarq $2, J # j = (n >> 2) + jle .L40 + +.L01: +#if defined(LT) || defined(RN) + movq A, AO +#else + movq A, AORIG +#endif + +#ifdef RT + movq K, %rax + salq $2 + BASE_SHIFT, %rax + subq %rax, B + + leaq (, LDC, 4), %rax + subq %rax, C +#endif + + movq C, CO1 # coffset1 = c + leaq (C, LDC, 1), CO2 # coffset2 = c + ldc +#ifndef RT + leaq (C, LDC, 4), C +#endif + +#ifdef LN + movq OFFSET, %rax + addq M, %rax + movq %rax, KK +#endif + + movq K, %rax + salq $BASE_SHIFT + 2, %rax + leaq (B, %rax), BB + +#if defined(LT) + movq OFFSET, %rax + movq %rax, KK +#endif + + movq M, I + sarq $2, I # i = (m >> 2) + jle .L20 + ALIGN_4 + +.L11: +#ifdef LN + movq K, %rax + salq $2 + BASE_SHIFT, %rax + subq %rax, AORIG +#endif + +#if defined(LN) || defined(RT) + movq KK, %rax + movq AORIG, AO + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 4), AO +#endif + + movq B, BO + +#if defined(LN) || defined(RT) + movq KK, %rax + leaq (, %rax, SIZE), %rax + leaq (BO, %rax, 4), BO +#endif + + vxorpd %xmm8, %xmm8,%xmm8 + vxorpd %xmm9, %xmm9,%xmm9 + vxorpd %xmm10, %xmm10,%xmm10 + vxorpd %xmm11, %xmm11,%xmm11 + vxorpd %xmm12, %xmm12,%xmm12 + vxorpd %xmm13, %xmm13,%xmm13 + vxorpd %xmm14, %xmm14,%xmm14 + vxorpd %xmm15, %xmm15,%xmm15 + +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + + andq $-8, %rax + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 4), AO + leaq (BO, %rax, 4), BO + negq %rax + NOBRANCH + je .L15 + + vmovups -16 * SIZE(AO, %rax, 4),%xmm6 + vmovddup -16 * SIZE(BO, %rax, 4), %xmm7 + vmovddup -15 * SIZE(BO, %rax, 4), %xmm3 + + + ALIGN_4 + +.L12: + prefetcht0 A_PR1(AO,%rax,4) + prefetcht0 B_PR1(BO,%rax,4) + KERNEL1(16 * 0) + 
KERNEL2(16 * 0) + prefetcht0 A_PR1+64(AO,%rax,4) + prefetcht0 B_PR1+64(BO,%rax,4) + KERNEL3(16 * 0) + KERNEL4(16 * 0) + prefetcht0 A_PR1+128(AO,%rax,4) + prefetcht0 B_PR1+128(BO,%rax,4) + KERNEL5(16 * 0) + KERNEL6(16 * 0) + prefetcht0 A_PR1+192(AO,%rax,4) + prefetcht0 B_PR1+192(BO,%rax,4) + KERNEL7(16 * 0) + KERNEL8(16 * 0) + + addq $8 * SIZE, %rax + je .L15 + jmp .L12 + .align 16 + +.L15: + // prefetch -8 * SIZE(BB) + subq $-16 * SIZE, BB + +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + testq $4, %rax + je .L16 + xorq %rax, %rax + ALIGN_4 + + KERNEL_SUB1(16 * 0) + KERNEL_SUB2(16 * 0) + KERNEL_SUB3(16 * 0) + KERNEL_SUB4(16 * 0) + + subq $-16 * SIZE, BO + subq $-16 * SIZE, AO + ALIGN_4 + +.L16: +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $3, %rax # if (k & 1) + je .L19 + + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 4), AO + leaq (BO, %rax, 4), BO + negq %rax + ALIGN_4 + +.L17: + mulpd %xmm1, %xmm0 + mulpd -14 * SIZE(AO, %rax, 4), %xmm1 + addpd %xmm0, %xmm8 + movapd %xmm2, %xmm0 + addpd %xmm1, %xmm12 + movddup -14 * SIZE(BO, %rax, 4), %xmm1 + mulpd %xmm3, %xmm2 + mulpd -14 * SIZE(AO, %rax, 4), %xmm3 + addpd %xmm2, %xmm9 + movapd %xmm0, %xmm2 + addpd %xmm3, %xmm13 + movddup -13 * SIZE(BO, %rax, 4), %xmm3 + mulpd %xmm1, %xmm0 + mulpd -14 * SIZE(AO, %rax, 4), %xmm1 + addpd %xmm0, %xmm10 + movapd -12 * SIZE(AO, %rax, 4), %xmm0 + addpd %xmm1, %xmm14 + movddup -12 * SIZE(BO, %rax, 4), %xmm1 + mulpd %xmm3, %xmm2 + mulpd -14 * SIZE(AO, %rax, 4), %xmm3 + addpd %xmm2, %xmm11 + addpd %xmm3, %xmm15 + movddup -11 * SIZE(BO, %rax, 4), %xmm3 + movapd %xmm0, %xmm2 + + addq $SIZE, %rax + jl .L17 + ALIGN_4 + +.L19: +#if defined(LN) || defined(RT) + movq KK, %rax +#ifdef LN + subq $4, %rax +#else + subq $4, %rax +#endif + + leaq (, %rax, SIZE), %rax + + movq AORIG, AO + leaq (AO, %rax, 4), AO + leaq (B, %rax, 4), BO +#endif + +#if defined(LN) || defined(LT) + movapd %xmm8, %xmm0 + 
unpcklpd %xmm9, %xmm8 + unpckhpd %xmm9, %xmm0 + + movapd %xmm10, %xmm2 + unpcklpd %xmm11, %xmm10 + unpckhpd %xmm11, %xmm2 + + movapd %xmm12, %xmm4 + unpcklpd %xmm13, %xmm12 + unpckhpd %xmm13, %xmm4 + + movapd %xmm14, %xmm6 + unpcklpd %xmm15, %xmm14 + unpckhpd %xmm15, %xmm6 + + movapd -16 * SIZE(BO), %xmm9 + movapd -14 * SIZE(BO), %xmm11 + movapd -12 * SIZE(BO), %xmm13 + movapd -10 * SIZE(BO), %xmm15 + movapd -8 * SIZE(BO), %xmm1 + movapd -6 * SIZE(BO), %xmm3 + movapd -4 * SIZE(BO), %xmm5 + movapd -2 * SIZE(BO), %xmm7 + + subpd %xmm8, %xmm9 + subpd %xmm10, %xmm11 + subpd %xmm0, %xmm13 + subpd %xmm2, %xmm15 + subpd %xmm12, %xmm1 + subpd %xmm14, %xmm3 + subpd %xmm4, %xmm5 + subpd %xmm6, %xmm7 +#else + movapd -16 * SIZE(AO), %xmm0 + movapd -14 * SIZE(AO), %xmm1 + movapd -12 * SIZE(AO), %xmm2 + movapd -10 * SIZE(AO), %xmm3 + + movapd -8 * SIZE(AO), %xmm4 + movapd -6 * SIZE(AO), %xmm5 + movapd -4 * SIZE(AO), %xmm6 + movapd -2 * SIZE(AO), %xmm7 + + subpd %xmm8, %xmm0 + subpd %xmm12, %xmm1 + subpd %xmm9, %xmm2 + subpd %xmm13, %xmm3 + subpd %xmm10, %xmm4 + subpd %xmm14, %xmm5 + subpd %xmm11, %xmm6 + subpd %xmm15, %xmm7 +#endif + +#ifdef LN + movddup -1 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm5 + mulpd %xmm8, %xmm7 + + movddup -2 * SIZE(AO), %xmm10 + mulpd %xmm5, %xmm10 + subpd %xmm10, %xmm1 + movddup -2 * SIZE(AO), %xmm10 + mulpd %xmm7, %xmm10 + subpd %xmm10, %xmm3 + + movddup -3 * SIZE(AO), %xmm12 + mulpd %xmm5, %xmm12 + subpd %xmm12, %xmm13 + movddup -3 * SIZE(AO), %xmm12 + mulpd %xmm7, %xmm12 + subpd %xmm12, %xmm15 + + movddup -4 * SIZE(AO), %xmm14 + mulpd %xmm5, %xmm14 + subpd %xmm14, %xmm9 + movddup -4 * SIZE(AO), %xmm14 + mulpd %xmm7, %xmm14 + subpd %xmm14, %xmm11 + + movddup -6 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm1 + mulpd %xmm8, %xmm3 + + movddup -7 * SIZE(AO), %xmm10 + mulpd %xmm1, %xmm10 + subpd %xmm10, %xmm13 + movddup -7 * SIZE(AO), %xmm10 + mulpd %xmm3, %xmm10 + subpd %xmm10, %xmm15 + + movddup -8 * SIZE(AO), %xmm12 + mulpd %xmm1, %xmm12 + subpd %xmm12, %xmm9 + 
movddup -8 * SIZE(AO), %xmm12 + mulpd %xmm3, %xmm12 + subpd %xmm12, %xmm11 + + movddup -11 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm13 + mulpd %xmm8, %xmm15 + + movddup -12 * SIZE(AO), %xmm10 + mulpd %xmm13, %xmm10 + subpd %xmm10, %xmm9 + movddup -12 * SIZE(AO), %xmm10 + mulpd %xmm15, %xmm10 + subpd %xmm10, %xmm11 + + movddup -16 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm9 + mulpd %xmm8, %xmm11 +#endif + +#ifdef LT + movddup -16 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm9 + mulpd %xmm8, %xmm11 + + movddup -15 * SIZE(AO), %xmm10 + mulpd %xmm9, %xmm10 + subpd %xmm10, %xmm13 + + movddup -15 * SIZE(AO), %xmm10 + mulpd %xmm11, %xmm10 + subpd %xmm10, %xmm15 + + movddup -14 * SIZE(AO), %xmm12 + mulpd %xmm9, %xmm12 + subpd %xmm12, %xmm1 + movddup -14 * SIZE(AO), %xmm12 + mulpd %xmm11, %xmm12 + subpd %xmm12, %xmm3 + + movddup -13 * SIZE(AO), %xmm14 + mulpd %xmm9, %xmm14 + subpd %xmm14, %xmm5 + movddup -13 * SIZE(AO), %xmm14 + mulpd %xmm11, %xmm14 + subpd %xmm14, %xmm7 + + movddup -11 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm13 + mulpd %xmm8, %xmm15 + + movddup -10 * SIZE(AO), %xmm10 + mulpd %xmm13, %xmm10 + subpd %xmm10, %xmm1 + movddup -10 * SIZE(AO), %xmm10 + mulpd %xmm15, %xmm10 + subpd %xmm10, %xmm3 + + movddup -9 * SIZE(AO), %xmm12 + mulpd %xmm13, %xmm12 + subpd %xmm12, %xmm5 + movddup -9 * SIZE(AO), %xmm12 + mulpd %xmm15, %xmm12 + subpd %xmm12, %xmm7 + + movddup -6 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm1 + mulpd %xmm8, %xmm3 + + movddup -5 * SIZE(AO), %xmm10 + mulpd %xmm1, %xmm10 + subpd %xmm10, %xmm5 + movddup -5 * SIZE(AO), %xmm10 + mulpd %xmm3, %xmm10 + subpd %xmm10, %xmm7 + + movddup -1 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm5 + mulpd %xmm8, %xmm7 +#endif + +#ifdef RN + movddup -16 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm0 + mulpd %xmm8, %xmm1 + + movddup -15 * SIZE(BO), %xmm9 + mulpd %xmm0, %xmm9 + subpd %xmm9, %xmm2 + movddup -15 * SIZE(BO), %xmm9 + mulpd %xmm1, %xmm9 + subpd %xmm9, %xmm3 + + movddup -14 * SIZE(BO), %xmm10 + mulpd %xmm0, %xmm10 + subpd %xmm10, %xmm4 + movddup -14 * 
SIZE(BO), %xmm10 + mulpd %xmm1, %xmm10 + subpd %xmm10, %xmm5 + + movddup -13 * SIZE(BO), %xmm11 + mulpd %xmm0, %xmm11 + subpd %xmm11, %xmm6 + movddup -13 * SIZE(BO), %xmm11 + mulpd %xmm1, %xmm11 + subpd %xmm11, %xmm7 + + movddup -11 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm2 + mulpd %xmm8, %xmm3 + + movddup -10 * SIZE(BO), %xmm9 + mulpd %xmm2, %xmm9 + subpd %xmm9, %xmm4 + movddup -10 * SIZE(BO), %xmm9 + mulpd %xmm3, %xmm9 + subpd %xmm9, %xmm5 + + movddup -9 * SIZE(BO), %xmm10 + mulpd %xmm2, %xmm10 + subpd %xmm10, %xmm6 + movddup -9 * SIZE(BO), %xmm10 + mulpd %xmm3, %xmm10 + subpd %xmm10, %xmm7 + + movddup -6 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm4 + mulpd %xmm8, %xmm5 + + movddup -5 * SIZE(BO), %xmm9 + mulpd %xmm4, %xmm9 + subpd %xmm9, %xmm6 + movddup -5 * SIZE(BO), %xmm9 + mulpd %xmm5, %xmm9 + subpd %xmm9, %xmm7 + + movddup -1 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm6 + mulpd %xmm8, %xmm7 +#endif + +#ifdef RT + movddup -1 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm6 + mulpd %xmm8, %xmm7 + + movddup -2 * SIZE(BO), %xmm9 + mulpd %xmm6, %xmm9 + subpd %xmm9, %xmm4 + movddup -2 * SIZE(BO), %xmm9 + mulpd %xmm7, %xmm9 + subpd %xmm9, %xmm5 + + movddup -3 * SIZE(BO), %xmm10 + mulpd %xmm6, %xmm10 + subpd %xmm10, %xmm2 + movddup -3 * SIZE(BO), %xmm10 + mulpd %xmm7, %xmm10 + subpd %xmm10, %xmm3 + + movddup -4 * SIZE(BO), %xmm11 + mulpd %xmm6, %xmm11 + subpd %xmm11, %xmm0 + movddup -4 * SIZE(BO), %xmm11 + mulpd %xmm7, %xmm11 + subpd %xmm11, %xmm1 + + movddup -6 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm4 + mulpd %xmm8, %xmm5 + + movddup -7 * SIZE(BO), %xmm9 + mulpd %xmm4, %xmm9 + subpd %xmm9, %xmm2 + movddup -7 * SIZE(BO), %xmm9 + mulpd %xmm5, %xmm9 + subpd %xmm9, %xmm3 + + movddup -8 * SIZE(BO), %xmm10 + mulpd %xmm4, %xmm10 + subpd %xmm10, %xmm0 + movddup -8 * SIZE(BO), %xmm10 + mulpd %xmm5, %xmm10 + subpd %xmm10, %xmm1 + + movddup -11 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm2 + mulpd %xmm8, %xmm3 + + movddup -12 * SIZE(BO), %xmm9 + mulpd %xmm2, %xmm9 + subpd %xmm9, %xmm0 + movddup -12 * SIZE(BO), 
%xmm9 + mulpd %xmm3, %xmm9 + subpd %xmm9, %xmm1 + + movddup -16 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm0 + mulpd %xmm8, %xmm1 +#endif + +#ifdef LN + subq $4 * SIZE, CO1 + subq $4 * SIZE, CO2 +#endif + +#if defined(LN) || defined(LT) + movlpd %xmm9, 0 * SIZE(CO1) + movlpd %xmm13, 1 * SIZE(CO1) + movlpd %xmm1, 2 * SIZE(CO1) + movlpd %xmm5, 3 * SIZE(CO1) + + movhpd %xmm9, 0 * SIZE(CO2) + movhpd %xmm13, 1 * SIZE(CO2) + movhpd %xmm1, 2 * SIZE(CO2) + movhpd %xmm5, 3 * SIZE(CO2) + + movlpd %xmm11, 0 * SIZE(CO1, LDC, 2) + movlpd %xmm15, 1 * SIZE(CO1, LDC, 2) + movlpd %xmm3, 2 * SIZE(CO1, LDC, 2) + movlpd %xmm7, 3 * SIZE(CO1, LDC, 2) + + movhpd %xmm11, 0 * SIZE(CO2, LDC, 2) + movhpd %xmm15, 1 * SIZE(CO2, LDC, 2) + movhpd %xmm3, 2 * SIZE(CO2, LDC, 2) + movhpd %xmm7, 3 * SIZE(CO2, LDC, 2) +#else + movlpd %xmm0, 0 * SIZE(CO1) + movhpd %xmm0, 1 * SIZE(CO1) + movlpd %xmm1, 2 * SIZE(CO1) + movhpd %xmm1, 3 * SIZE(CO1) + + movlpd %xmm2, 0 * SIZE(CO2) + movhpd %xmm2, 1 * SIZE(CO2) + movlpd %xmm3, 2 * SIZE(CO2) + movhpd %xmm3, 3 * SIZE(CO2) + + movlpd %xmm4, 0 * SIZE(CO1, LDC, 2) + movhpd %xmm4, 1 * SIZE(CO1, LDC, 2) + movlpd %xmm5, 2 * SIZE(CO1, LDC, 2) + movhpd %xmm5, 3 * SIZE(CO1, LDC, 2) + + movlpd %xmm6, 0 * SIZE(CO2, LDC, 2) + movhpd %xmm6, 1 * SIZE(CO2, LDC, 2) + movlpd %xmm7, 2 * SIZE(CO2, LDC, 2) + movhpd %xmm7, 3 * SIZE(CO2, LDC, 2) +#endif + +#if defined(LN) || defined(LT) + movaps %xmm9, -16 * SIZE(BO) + movaps %xmm11, -14 * SIZE(BO) + movaps %xmm13, -12 * SIZE(BO) + movaps %xmm15, -10 * SIZE(BO) + movaps %xmm1, -8 * SIZE(BO) + movaps %xmm3, -6 * SIZE(BO) + movaps %xmm5, -4 * SIZE(BO) + movaps %xmm7, -2 * SIZE(BO) +#else + movaps %xmm0, -16 * SIZE(AO) + movaps %xmm1, -14 * SIZE(AO) + movaps %xmm2, -12 * SIZE(AO) + movaps %xmm3, -10 * SIZE(AO) + movaps %xmm4, -8 * SIZE(AO) + movaps %xmm5, -6 * SIZE(AO) + movaps %xmm6, -4 * SIZE(AO) + movaps %xmm7, -2 * SIZE(AO) +#endif + +#ifndef LN + addq $4 * SIZE, CO1 + addq $4 * SIZE, CO2 +#endif + +#if defined(LT) || defined(RN) + movq 
K, %rax + subq KK, %rax + leaq (,%rax, SIZE), %rax + leaq (AO, %rax, 4), AO + leaq (BO, %rax, 4), BO +#endif + +#ifdef LN + subq $4, KK +#endif + +#ifdef LT + addq $4, KK +#endif + +#ifdef RT + movq K, %rax + salq $2 + BASE_SHIFT, %rax + addq %rax, AORIG +#endif + + decq I # i -- + jg .L11 + ALIGN_4 + +.L20: + testq $3, M + je .L39 + + testq $2, M + je .L30 + ALIGN_4 + +.L21: +#ifdef LN + movq K, %rax + salq $1 + BASE_SHIFT, %rax + subq %rax, AORIG +#endif + +#if defined(LN) || defined(RT) + movq KK, %rax + movq AORIG, AO + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 2), AO +#endif + + movq B, BO + +#if defined(LN) || defined(RT) + movq KK, %rax + leaq (, %rax, SIZE), %rax + leaq (BO, %rax, 4), BO +#endif + + movapd -16 * SIZE(AO), %xmm0 + pxor %xmm8, %xmm8 + movapd -12 * SIZE(AO), %xmm2 + pxor %xmm9, %xmm9 + movddup -16 * SIZE(BO), %xmm1 + pxor %xmm10, %xmm10 + movddup -15 * SIZE(BO), %xmm5 + pxor %xmm11, %xmm11 + movddup -8 * SIZE(BO), %xmm3 + +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $-4, %rax + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 2), AO + leaq (BO, %rax, 4), BO + negq %rax + NOBRANCH + je .L26 + ALIGN_4 + +.L22: + mulpd %xmm0, %xmm1 + addpd %xmm1, %xmm8 + movddup -14 * SIZE(BO, %rax, 4), %xmm1 + mulpd %xmm0, %xmm5 + addpd %xmm5, %xmm9 + movddup -13 * SIZE(BO, %rax, 4), %xmm5 + mulpd %xmm0, %xmm1 + addpd %xmm1, %xmm10 + movddup -12 * SIZE(BO, %rax, 4), %xmm1 + mulpd %xmm0, %xmm5 + movapd -14 * SIZE(AO, %rax, 2), %xmm0 + addpd %xmm5, %xmm11 + movddup -11 * SIZE(BO, %rax, 4), %xmm5 + mulpd %xmm0, %xmm1 + addpd %xmm1, %xmm8 + movddup -10 * SIZE(BO, %rax, 4), %xmm1 + mulpd %xmm0, %xmm5 + addpd %xmm5, %xmm9 + movddup -9 * SIZE(BO, %rax, 4), %xmm5 + mulpd %xmm0, %xmm1 + addpd %xmm1, %xmm10 + movddup (BO, %rax, 4), %xmm1 + mulpd %xmm0, %xmm5 + movapd -8 * SIZE(AO, %rax, 2), %xmm0 + addpd %xmm5, %xmm11 + movddup -7 * SIZE(BO, %rax, 4), %xmm5 + mulpd %xmm2, %xmm3 + addpd %xmm3, %xmm8 + movddup -6 * 
SIZE(BO, %rax, 4), %xmm3 + mulpd %xmm2, %xmm5 + addpd %xmm5, %xmm9 + movddup -5 * SIZE(BO, %rax, 4), %xmm5 + mulpd %xmm2, %xmm3 + addpd %xmm3, %xmm10 + movddup -4 * SIZE(BO, %rax, 4), %xmm3 + mulpd %xmm2, %xmm5 + movapd -10 * SIZE(AO, %rax, 2), %xmm2 + addpd %xmm5, %xmm11 + movddup -3 * SIZE(BO, %rax, 4), %xmm5 + mulpd %xmm2, %xmm3 + addpd %xmm3, %xmm8 + movddup -2 * SIZE(BO, %rax, 4), %xmm3 + mulpd %xmm2, %xmm5 + addpd %xmm5, %xmm9 + movddup -1 * SIZE(BO, %rax, 4), %xmm5 + mulpd %xmm2, %xmm3 + addpd %xmm3, %xmm10 + movddup 8 * SIZE(BO, %rax, 4), %xmm3 + mulpd %xmm2, %xmm5 + movapd -4 * SIZE(AO, %rax, 2), %xmm2 + addpd %xmm5, %xmm11 + movddup 1 * SIZE(BO, %rax, 4), %xmm5 + + addq $4 * SIZE, %rax + BRANCH + jl .L22 + ALIGN_4 + +.L26: +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $3, %rax # if (k & 1) + je .L29 + + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 2), AO + leaq (BO, %rax, 4), BO + negq %rax + ALIGN_4 + +.L27: + mulpd %xmm0, %xmm1 + addpd %xmm1, %xmm8 + movddup -14 * SIZE(BO, %rax, 4), %xmm1 + mulpd %xmm0, %xmm5 + addpd %xmm5, %xmm9 + movddup -13 * SIZE(BO, %rax, 4), %xmm5 + mulpd %xmm0, %xmm1 + addpd %xmm1, %xmm10 + movddup -12 * SIZE(BO, %rax, 4), %xmm1 + mulpd %xmm0, %xmm5 + movapd -14 * SIZE(AO, %rax, 2), %xmm0 + addpd %xmm5, %xmm11 + movddup -11 * SIZE(BO, %rax, 4), %xmm5 + + addq $SIZE, %rax + jl .L27 + ALIGN_4 + +.L29: +#if defined(LN) || defined(RT) + movq KK, %rax +#ifdef LN + subq $2, %rax +#else + subq $4, %rax +#endif + + leaq (, %rax, SIZE), %rax + + movq AORIG, AO + leaq (AO, %rax, 2), AO + leaq (B, %rax, 4), BO +#endif + +#if defined(LN) || defined(LT) + movapd %xmm8, %xmm0 + unpcklpd %xmm9, %xmm8 + unpckhpd %xmm9, %xmm0 + + movapd %xmm10, %xmm2 + unpcklpd %xmm11, %xmm10 + unpckhpd %xmm11, %xmm2 + + movapd -16 * SIZE(BO), %xmm9 + movapd -14 * SIZE(BO), %xmm11 + movapd -12 * SIZE(BO), %xmm13 + movapd -10 * SIZE(BO), %xmm15 + + subpd %xmm8, %xmm9 + subpd %xmm10, %xmm11 + subpd %xmm0, %xmm13 
+ subpd %xmm2, %xmm15 +#else + movapd -16 * SIZE(AO), %xmm0 + movapd -14 * SIZE(AO), %xmm2 + movapd -12 * SIZE(AO), %xmm4 + movapd -10 * SIZE(AO), %xmm6 + + subpd %xmm8, %xmm0 + subpd %xmm9, %xmm2 + subpd %xmm10, %xmm4 + subpd %xmm11, %xmm6 +#endif + +#ifdef LN + movddup -13 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm13 + mulpd %xmm8, %xmm15 + + movddup -14 * SIZE(AO), %xmm10 + mulpd %xmm13, %xmm10 + subpd %xmm10, %xmm9 + movddup -14 * SIZE(AO), %xmm10 + mulpd %xmm15, %xmm10 + subpd %xmm10, %xmm11 + + movddup -16 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm9 + mulpd %xmm8, %xmm11 +#endif + +#ifdef LT + movddup -16 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm9 + mulpd %xmm8, %xmm11 + + movddup -15 * SIZE(AO), %xmm10 + mulpd %xmm9, %xmm10 + subpd %xmm10, %xmm13 + movddup -15 * SIZE(AO), %xmm10 + mulpd %xmm11, %xmm10 + subpd %xmm10, %xmm15 + + movddup -13 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm13 + mulpd %xmm8, %xmm15 +#endif + +#ifdef RN + movddup -16 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm0 + + movddup -15 * SIZE(BO), %xmm9 + mulpd %xmm0, %xmm9 + subpd %xmm9, %xmm2 + movddup -14 * SIZE(BO), %xmm10 + mulpd %xmm0, %xmm10 + subpd %xmm10, %xmm4 + movddup -13 * SIZE(BO), %xmm11 + mulpd %xmm0, %xmm11 + subpd %xmm11, %xmm6 + + movddup -11 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm2 + movddup -10 * SIZE(BO), %xmm9 + mulpd %xmm2, %xmm9 + subpd %xmm9, %xmm4 + movddup -9 * SIZE(BO), %xmm10 + mulpd %xmm2, %xmm10 + subpd %xmm10, %xmm6 + + movddup -6 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm4 + + movddup -5 * SIZE(BO), %xmm9 + mulpd %xmm4, %xmm9 + subpd %xmm9, %xmm6 + + movddup -1 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm6 +#endif + +#ifdef RT + movddup -1 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm6 + + movddup -2 * SIZE(BO), %xmm9 + mulpd %xmm6, %xmm9 + subpd %xmm9, %xmm4 + movddup -3 * SIZE(BO), %xmm10 + mulpd %xmm6, %xmm10 + subpd %xmm10, %xmm2 + movddup -4 * SIZE(BO), %xmm11 + mulpd %xmm6, %xmm11 + subpd %xmm11, %xmm0 + + movddup -6 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm4 + movddup -7 * SIZE(BO), %xmm9 + mulpd %xmm4, 
%xmm9 + subpd %xmm9, %xmm2 + movddup -8 * SIZE(BO), %xmm10 + mulpd %xmm4, %xmm10 + subpd %xmm10, %xmm0 + + movddup -11 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm2 + movddup -12 * SIZE(BO), %xmm9 + mulpd %xmm2, %xmm9 + subpd %xmm9, %xmm0 + + movddup -16 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm0 +#endif + +#ifdef LN + subq $2 * SIZE, CO1 + subq $2 * SIZE, CO2 +#endif + +#if defined(LN) || defined(LT) + movlpd %xmm9, 0 * SIZE(CO1) + movlpd %xmm13, 1 * SIZE(CO1) + + movhpd %xmm9, 0 * SIZE(CO2) + movhpd %xmm13, 1 * SIZE(CO2) + + movlpd %xmm11, 0 * SIZE(CO1, LDC, 2) + movlpd %xmm15, 1 * SIZE(CO1, LDC, 2) + + movhpd %xmm11, 0 * SIZE(CO2, LDC, 2) + movhpd %xmm15, 1 * SIZE(CO2, LDC, 2) +#else + movlpd %xmm0, 0 * SIZE(CO1) + movhpd %xmm0, 1 * SIZE(CO1) + + movlpd %xmm2, 0 * SIZE(CO2) + movhpd %xmm2, 1 * SIZE(CO2) + + movlpd %xmm4, 0 * SIZE(CO1, LDC, 2) + movhpd %xmm4, 1 * SIZE(CO1, LDC, 2) + + movlpd %xmm6, 0 * SIZE(CO2, LDC, 2) + movhpd %xmm6, 1 * SIZE(CO2, LDC, 2) +#endif + +#if defined(LN) || defined(LT) + movaps %xmm9, -16 * SIZE(BO) + movaps %xmm11, -14 * SIZE(BO) + movaps %xmm13, -12 * SIZE(BO) + movaps %xmm15, -10 * SIZE(BO) +#else + movaps %xmm0, -16 * SIZE(AO) + movaps %xmm2, -14 * SIZE(AO) + movaps %xmm4, -12 * SIZE(AO) + movaps %xmm6, -10 * SIZE(AO) +#endif + +#ifndef LN + addq $2 * SIZE, CO1 + addq $2 * SIZE, CO2 +#endif + +#if defined(LT) || defined(RN) + movq K, %rax + subq KK, %rax + leaq (,%rax, SIZE), %rax + leaq (AO, %rax, 2), AO + leaq (BO, %rax, 4), BO +#endif + +#ifdef LN + subq $2, KK +#endif + +#ifdef LT + addq $2, KK +#endif + +#ifdef RT + movq K, %rax + salq $1 + BASE_SHIFT, %rax + addq %rax, AORIG +#endif + ALIGN_4 + +.L30: + testq $1, M + je .L39 + +#ifdef LN + movq K, %rax + salq $0 + BASE_SHIFT, %rax + subq %rax, AORIG +#endif + +#if defined(LN) || defined(RT) + movq KK, %rax + movq AORIG, AO + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 1), AO +#endif + + movq B, BO + +#if defined(LN) || defined(RT) + movq KK, %rax + leaq (, %rax, SIZE), %rax + leaq 
(BO, %rax, 4), BO +#endif + + movddup -16 * SIZE(AO), %xmm0 + pxor %xmm8, %xmm8 + movddup -14 * SIZE(AO), %xmm2 + pxor %xmm9, %xmm9 + movddup -15 * SIZE(AO), %xmm4 + pxor %xmm10, %xmm10 + movapd -16 * SIZE(BO), %xmm1 + pxor %xmm11, %xmm11 + movapd -8 * SIZE(BO), %xmm3 + +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $-4, %rax + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 1), AO + leaq (BO, %rax, 4), BO + negq %rax + NOBRANCH + je .L36 + ALIGN_4 + +.L32: + mulpd %xmm0, %xmm1 + mulpd -14 * SIZE(BO, %rax, 4), %xmm0 + addpd %xmm1, %xmm8 + movapd -12 * SIZE(BO, %rax, 4), %xmm1 + addpd %xmm0, %xmm9 + movddup -12 * SIZE(AO, %rax, 1), %xmm0 + mulpd %xmm4, %xmm1 + mulpd -10 * SIZE(BO, %rax, 4), %xmm4 + addpd %xmm1, %xmm10 + movapd (BO, %rax, 4), %xmm1 + addpd %xmm4, %xmm11 + movddup -11 * SIZE(AO, %rax, 1), %xmm4 + mulpd %xmm2, %xmm3 + mulpd -6 * SIZE(BO, %rax, 4), %xmm2 + addpd %xmm3, %xmm8 + movapd -4 * SIZE(BO, %rax, 4), %xmm3 + addpd %xmm2, %xmm9 + movddup -13 * SIZE(AO, %rax, 1), %xmm2 + mulpd %xmm2, %xmm3 + mulpd -2 * SIZE(BO, %rax, 4), %xmm2 + addpd %xmm3, %xmm10 + movapd 8 * SIZE(BO, %rax, 4), %xmm3 + addpd %xmm2, %xmm11 + movddup -10 * SIZE(AO, %rax, 1), %xmm2 + + addq $4 * SIZE, %rax + BRANCH + jl .L32 + ALIGN_4 + +.L36: +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $3, %rax # if (k & 1) + je .L38 + + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 1), AO + leaq (BO, %rax, 4), BO + negq %rax + ALIGN_4 + +.L37: + mulpd %xmm0, %xmm1 + mulpd -14 * SIZE(BO, %rax, 4), %xmm0 + addpd %xmm1, %xmm8 + movapd -12 * SIZE(BO, %rax, 4), %xmm1 + addpd %xmm0, %xmm9 + movddup -15 * SIZE(AO, %rax, 1), %xmm0 + + addq $SIZE, %rax + jl .L37 + ALIGN_4 + +.L38: + addpd %xmm10, %xmm8 + addpd %xmm11, %xmm9 + +#if defined(LN) || defined(RT) + movq KK, %rax +#ifdef LN + subq $1, %rax +#else + subq $4, %rax +#endif + + leaq (, %rax, SIZE), %rax + + movq AORIG, AO + leaq (AO, %rax, 1), AO 
+ leaq (B, %rax, 4), BO +#endif + +#if defined(LN) || defined(LT) + movapd -16 * SIZE(BO), %xmm2 + movapd -14 * SIZE(BO), %xmm3 + + subpd %xmm8, %xmm2 + subpd %xmm9, %xmm3 +#else + movapd -16 * SIZE(AO), %xmm2 + movapd -14 * SIZE(AO), %xmm3 + + subpd %xmm8, %xmm2 + subpd %xmm9, %xmm3 +#endif + +#if defined(LN) || defined(LT) + movddup -16 * SIZE(AO), %xmm0 + mulpd %xmm0, %xmm2 + mulpd %xmm0, %xmm3 +#endif + +#ifdef RN + movapd %xmm2, %xmm0 + unpckhpd %xmm0, %xmm0 + + movapd %xmm3, %xmm1 + unpckhpd %xmm1, %xmm1 + + movsd -16 * SIZE(BO), %xmm4 + mulsd %xmm4, %xmm2 + + movsd -15 * SIZE(BO), %xmm5 + mulsd %xmm2, %xmm5 + subsd %xmm5, %xmm0 + movsd -14 * SIZE(BO), %xmm6 + mulsd %xmm2, %xmm6 + subsd %xmm6, %xmm3 + movsd -13 * SIZE(BO), %xmm7 + mulsd %xmm2, %xmm7 + subsd %xmm7, %xmm1 + + movsd -11 * SIZE(BO), %xmm4 + mulsd %xmm4, %xmm0 + + movsd -10 * SIZE(BO), %xmm5 + mulsd %xmm0, %xmm5 + subsd %xmm5, %xmm3 + movsd -9 * SIZE(BO), %xmm6 + mulsd %xmm0, %xmm6 + subsd %xmm6, %xmm1 + + movsd -6 * SIZE(BO), %xmm4 + mulsd %xmm4, %xmm3 + + movsd -5 * SIZE(BO), %xmm5 + mulsd %xmm3, %xmm5 + subsd %xmm5, %xmm1 + + movsd -1 * SIZE(BO), %xmm4 + mulsd %xmm4, %xmm1 + + unpcklpd %xmm0, %xmm2 + unpcklpd %xmm1, %xmm3 +#endif + +#ifdef RT + movapd %xmm2, %xmm0 + unpckhpd %xmm0, %xmm0 + + movapd %xmm3, %xmm1 + unpckhpd %xmm1, %xmm1 + + movsd -1 * SIZE(BO), %xmm4 + mulsd %xmm4, %xmm1 + + movsd -2 * SIZE(BO), %xmm5 + mulsd %xmm1, %xmm5 + subsd %xmm5, %xmm3 + movsd -3 * SIZE(BO), %xmm6 + mulsd %xmm1, %xmm6 + subsd %xmm6, %xmm0 + movsd -4 * SIZE(BO), %xmm7 + mulsd %xmm1, %xmm7 + subsd %xmm7, %xmm2 + + movsd -6 * SIZE(BO), %xmm4 + mulsd %xmm4, %xmm3 + + movsd -7 * SIZE(BO), %xmm5 + mulsd %xmm3, %xmm5 + subsd %xmm5, %xmm0 + movsd -8 * SIZE(BO), %xmm6 + mulsd %xmm3, %xmm6 + subsd %xmm6, %xmm2 + + movsd -11 * SIZE(BO), %xmm4 + mulsd %xmm4, %xmm0 + + movsd -12 * SIZE(BO), %xmm5 + mulsd %xmm0, %xmm5 + subsd %xmm5, %xmm2 + + movsd -16 * SIZE(BO), %xmm4 + mulsd %xmm4, %xmm2 + + unpcklpd %xmm0, %xmm2 + 
unpcklpd %xmm1, %xmm3 + +#endif + +#ifdef LN + subq $1 * SIZE, CO1 + subq $1 * SIZE, CO2 +#endif + +#if defined(LN) || defined(LT) + movlpd %xmm2, 0 * SIZE(CO1) + movhpd %xmm2, 0 * SIZE(CO2) + movlpd %xmm3, 0 * SIZE(CO1, LDC, 2) + movhpd %xmm3, 0 * SIZE(CO2, LDC, 2) +#else + movlpd %xmm2, 0 * SIZE(CO1) + movhpd %xmm2, 0 * SIZE(CO2) + movlpd %xmm3, 0 * SIZE(CO1, LDC, 2) + movhpd %xmm3, 0 * SIZE(CO2, LDC, 2) +#endif + +#if defined(LN) || defined(LT) + movaps %xmm2, -16 * SIZE(BO) + movaps %xmm3, -14 * SIZE(BO) +#else + movaps %xmm2, -16 * SIZE(AO) + movaps %xmm3, -14 * SIZE(AO) +#endif + +#ifndef LN + addq $1 * SIZE, CO1 + addq $1 * SIZE, CO2 +#endif + +#if defined(LT) || defined(RN) + movq K, %rax + subq KK, %rax + leaq (,%rax, SIZE), %rax + leaq (AO, %rax, 1), AO + leaq (BO, %rax, 4), BO +#endif + +#ifdef LN + subq $1, KK +#endif + +#ifdef LT + addq $1, KK +#endif + +#ifdef RT + movq K, %rax + salq $0 + BASE_SHIFT, %rax + addq %rax, AORIG +#endif + ALIGN_4 + +.L39: +#ifdef LN + leaq (, K, SIZE), %rax + leaq (B, %rax, 4), B +#endif + +#if defined(LT) || defined(RN) + movq BO, B +#endif + +#ifdef RN + addq $4, KK +#endif + +#ifdef RT + subq $4, KK +#endif + + decq J # j -- + jg .L01 + ALIGN_4 + +.L40: + testq $2, N + je .L80 + +#if defined(LT) || defined(RN) + movq A, AO +#else + movq A, AORIG +#endif + +#ifdef RT + movq K, %rax + salq $1 + BASE_SHIFT, %rax + subq %rax, B + + leaq (, LDC, 2), %rax + subq %rax, C +#endif + + movq C, CO1 # coffset1 = c + leaq (C, LDC, 1), CO2 # coffset2 = c + ldc +#ifndef RT + leaq (C, LDC, 2), C +#endif + +#ifdef LN + movq OFFSET, %rax + addq M, %rax + movq %rax, KK +#endif + +#if defined(LT) + movq OFFSET, %rax + movq %rax, KK +#endif + + movq M, I + sarq $2, I # i = (m >> 2) + jle .L60 + ALIGN_4 + +.L51: +#ifdef LN + movq K, %rax + salq $2 + BASE_SHIFT, %rax + subq %rax, AORIG +#endif + +#if defined(LN) || defined(RT) + movq KK, %rax + movq AORIG, AO + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 4), AO +#endif + + movq B, BO + +#if 
defined(LN) || defined(RT) + movq KK, %rax + leaq (, %rax, SIZE), %rax + leaq (BO, %rax, 2), BO +#endif + + movddup -16 * SIZE(BO), %xmm1 + movddup -15 * SIZE(BO), %xmm5 + pxor %xmm8, %xmm8 + movddup -12 * SIZE(BO), %xmm3 + pxor %xmm9, %xmm9 + movapd -16 * SIZE(AO), %xmm0 + pxor %xmm12, %xmm12 + movapd -8 * SIZE(AO), %xmm4 + pxor %xmm13, %xmm13 + + movapd %xmm0, %xmm2 + + +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $-4, %rax + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 4), AO + leaq (BO, %rax, 2), BO + negq %rax + NOBRANCH + je .L56 + ALIGN_4 + +.L52: + mulpd %xmm1, %xmm0 + mulpd -14 * SIZE(AO, %rax, 4), %xmm1 + addpd %xmm0, %xmm8 + movapd -12 * SIZE(AO, %rax, 4), %xmm0 + addpd %xmm1, %xmm12 + movddup -14 * SIZE(BO, %rax, 2), %xmm1 + mulpd %xmm5, %xmm2 + mulpd -14 * SIZE(AO, %rax, 4), %xmm5 + addpd %xmm2, %xmm9 + addpd %xmm5, %xmm13 + movddup -13 * SIZE(BO, %rax, 2), %xmm5 + movapd %xmm0, %xmm2 + mulpd %xmm1, %xmm0 + mulpd -10 * SIZE(AO, %rax, 4), %xmm1 + addpd %xmm0, %xmm8 + movapd (AO, %rax, 4), %xmm0 + addpd %xmm1, %xmm12 + movddup -8 * SIZE(BO, %rax, 2), %xmm1 + mulpd %xmm5, %xmm2 + mulpd -10 * SIZE(AO, %rax, 4), %xmm5 + addpd %xmm2, %xmm9 + addpd %xmm5, %xmm13 + movddup -11 * SIZE(BO, %rax, 2), %xmm5 + movapd %xmm4, %xmm2 + mulpd %xmm3, %xmm4 + mulpd -6 * SIZE(AO, %rax, 4), %xmm3 + addpd %xmm4, %xmm8 + movapd -4 * SIZE(AO, %rax, 4), %xmm4 + addpd %xmm3, %xmm12 + movddup -10 * SIZE(BO, %rax, 2), %xmm3 + mulpd %xmm5, %xmm2 + mulpd -6 * SIZE(AO, %rax, 4), %xmm5 + addpd %xmm2, %xmm9 + addpd %xmm5, %xmm13 + movddup -9 * SIZE(BO, %rax, 2), %xmm5 + movapd %xmm4, %xmm2 + mulpd %xmm3, %xmm4 + mulpd -2 * SIZE(AO, %rax, 4), %xmm3 + addpd %xmm4, %xmm8 + movapd 8 * SIZE(AO, %rax, 4), %xmm4 + addpd %xmm3, %xmm12 + movddup -4 * SIZE(BO, %rax, 2), %xmm3 + mulpd %xmm5, %xmm2 + mulpd -2 * SIZE(AO, %rax, 4), %xmm5 + addpd %xmm2, %xmm9 + addpd %xmm5, %xmm13 + movddup -7 * SIZE(BO, %rax, 2), %xmm5 + movapd %xmm0, %xmm2 + + 
addq $4 * SIZE, %rax + BRANCH + jl .L52 + ALIGN_4 + +.L56: +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $3, %rax # if (k & 1) + je .L59 + + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 4), AO + leaq (BO, %rax, 2), BO + negq %rax + ALIGN_4 + +.L57: + mulpd %xmm1, %xmm0 + mulpd -14 * SIZE(AO, %rax, 4), %xmm1 + addpd %xmm0, %xmm8 + movapd -12 * SIZE(AO, %rax, 4), %xmm0 + addpd %xmm1, %xmm12 + movddup -14 * SIZE(BO, %rax, 2), %xmm1 + mulpd %xmm5, %xmm2 + mulpd -14 * SIZE(AO, %rax, 4), %xmm5 + addpd %xmm2, %xmm9 + addpd %xmm5, %xmm13 + movddup -13 * SIZE(BO, %rax, 2), %xmm5 + movapd %xmm0, %xmm2 + + addq $SIZE, %rax + jl .L57 + ALIGN_4 + +.L59: +#if defined(LN) || defined(RT) + movq KK, %rax +#ifdef LN + subq $4, %rax +#else + subq $2, %rax +#endif + + leaq (, %rax, SIZE), %rax + + movq AORIG, AO + leaq (AO, %rax, 4), AO + leaq (B, %rax, 2), BO +#endif + +#if defined(LN) || defined(LT) + movapd %xmm8, %xmm0 + unpcklpd %xmm9, %xmm8 + unpckhpd %xmm9, %xmm0 + + movapd %xmm12, %xmm4 + unpcklpd %xmm13, %xmm12 + unpckhpd %xmm13, %xmm4 + + movapd -16 * SIZE(BO), %xmm9 + movapd -14 * SIZE(BO), %xmm13 + movapd -12 * SIZE(BO), %xmm1 + movapd -10 * SIZE(BO), %xmm5 + + subpd %xmm8, %xmm9 + subpd %xmm0, %xmm13 + subpd %xmm12, %xmm1 + subpd %xmm4, %xmm5 +#else + movapd -16 * SIZE(AO), %xmm0 + movapd -14 * SIZE(AO), %xmm1 + movapd -12 * SIZE(AO), %xmm2 + movapd -10 * SIZE(AO), %xmm3 + + subpd %xmm8, %xmm0 + subpd %xmm12, %xmm1 + subpd %xmm9, %xmm2 + subpd %xmm13, %xmm3 +#endif + +#ifdef LN + movddup -1 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm5 + movddup -2 * SIZE(AO), %xmm10 + mulpd %xmm5, %xmm10 + subpd %xmm10, %xmm1 + movddup -3 * SIZE(AO), %xmm12 + mulpd %xmm5, %xmm12 + subpd %xmm12, %xmm13 + movddup -4 * SIZE(AO), %xmm14 + mulpd %xmm5, %xmm14 + subpd %xmm14, %xmm9 + + movddup -6 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm1 + movddup -7 * SIZE(AO), %xmm10 + mulpd %xmm1, %xmm10 + subpd %xmm10, %xmm13 + movddup -8 * SIZE(AO), %xmm12 + 
mulpd %xmm1, %xmm12 + subpd %xmm12, %xmm9 + + movddup -11 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm13 + movddup -12 * SIZE(AO), %xmm10 + mulpd %xmm13, %xmm10 + subpd %xmm10, %xmm9 + + movddup -16 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm9 +#endif + +#ifdef LT + movddup -16 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm9 + movddup -15 * SIZE(AO), %xmm10 + mulpd %xmm9, %xmm10 + subpd %xmm10, %xmm13 + movddup -14 * SIZE(AO), %xmm12 + mulpd %xmm9, %xmm12 + subpd %xmm12, %xmm1 + movddup -13 * SIZE(AO), %xmm14 + mulpd %xmm9, %xmm14 + subpd %xmm14, %xmm5 + + + movddup -11 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm13 + + movddup -10 * SIZE(AO), %xmm10 + mulpd %xmm13, %xmm10 + subpd %xmm10, %xmm1 + movddup -9 * SIZE(AO), %xmm12 + mulpd %xmm13, %xmm12 + subpd %xmm12, %xmm5 + + movddup -6 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm1 + movddup -5 * SIZE(AO), %xmm10 + mulpd %xmm1, %xmm10 + subpd %xmm10, %xmm5 + + movddup -1 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm5 +#endif + +#ifdef RN + movddup -16 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm0 + mulpd %xmm8, %xmm1 + + movddup -15 * SIZE(BO), %xmm9 + mulpd %xmm0, %xmm9 + subpd %xmm9, %xmm2 + movddup -15 * SIZE(BO), %xmm9 + mulpd %xmm1, %xmm9 + subpd %xmm9, %xmm3 + + movddup -13 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm2 + mulpd %xmm8, %xmm3 +#endif + +#ifdef RT + movddup -13 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm2 + mulpd %xmm8, %xmm3 + + movddup -14 * SIZE(BO), %xmm9 + mulpd %xmm2, %xmm9 + subpd %xmm9, %xmm0 + movddup -14 * SIZE(BO), %xmm9 + mulpd %xmm3, %xmm9 + subpd %xmm9, %xmm1 + + movddup -16 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm0 + mulpd %xmm8, %xmm1 +#endif + +#ifdef LN + subq $4 * SIZE, CO1 + subq $4 * SIZE, CO2 +#endif + +#if defined(LN) || defined(LT) + movlpd %xmm9, 0 * SIZE(CO1) + movlpd %xmm13, 1 * SIZE(CO1) + movlpd %xmm1, 2 * SIZE(CO1) + movlpd %xmm5, 3 * SIZE(CO1) + + movhpd %xmm9, 0 * SIZE(CO2) + movhpd %xmm13, 1 * SIZE(CO2) + movhpd %xmm1, 2 * SIZE(CO2) + movhpd %xmm5, 3 * SIZE(CO2) +#else + movlpd %xmm0, 0 * SIZE(CO1) + movhpd %xmm0, 1 * SIZE(CO1) + 
movlpd %xmm1, 2 * SIZE(CO1) + movhpd %xmm1, 3 * SIZE(CO1) + + movlpd %xmm2, 0 * SIZE(CO2) + movhpd %xmm2, 1 * SIZE(CO2) + movlpd %xmm3, 2 * SIZE(CO2) + movhpd %xmm3, 3 * SIZE(CO2) +#endif + +#if defined(LN) || defined(LT) + movaps %xmm9, -16 * SIZE(BO) + movaps %xmm13,-14 * SIZE(BO) + movaps %xmm1, -12 * SIZE(BO) + movaps %xmm5, -10 * SIZE(BO) +#else + movaps %xmm0, -16 * SIZE(AO) + movaps %xmm1, -14 * SIZE(AO) + movaps %xmm2, -12 * SIZE(AO) + movaps %xmm3, -10 * SIZE(AO) +#endif + +#ifndef LN + addq $4 * SIZE, CO1 + addq $4 * SIZE, CO2 +#endif + +#if defined(LT) || defined(RN) + movq K, %rax + subq KK, %rax + leaq (,%rax, SIZE), %rax + leaq (AO, %rax, 4), AO + leaq (BO, %rax, 2), BO +#endif + +#ifdef LN + subq $4, KK +#endif + +#ifdef LT + addq $4, KK +#endif + +#ifdef RT + movq K, %rax + salq $2 + BASE_SHIFT, %rax + addq %rax, AORIG +#endif + + decq I # i -- + jg .L51 + ALIGN_4 + +.L60: + testq $2, M + je .L70 + +#ifdef LN + movq K, %rax + salq $1 + BASE_SHIFT, %rax + subq %rax, AORIG +#endif + +#if defined(LN) || defined(RT) + movq KK, %rax + movq AORIG, AO + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 2), AO +#endif + + movq B, BO + +#if defined(LN) || defined(RT) + movq KK, %rax + leaq (, %rax, SIZE), %rax + leaq (BO, %rax, 2), BO +#endif + + movapd -16 * SIZE(AO), %xmm0 + pxor %xmm8, %xmm8 + movapd -12 * SIZE(AO), %xmm2 + pxor %xmm9, %xmm9 + movddup -16 * SIZE(BO), %xmm1 + pxor %xmm10, %xmm10 + movddup -15 * SIZE(BO), %xmm3 + pxor %xmm11, %xmm11 + +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $-4, %rax + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 2), AO + leaq (BO, %rax, 2), BO + negq %rax + NOBRANCH + je .L66 + ALIGN_4 + +.L62: + mulpd %xmm0, %xmm1 + addpd %xmm1, %xmm8 + movddup -14 * SIZE(BO, %rax, 2), %xmm1 + mulpd %xmm0, %xmm3 + movapd -14 * SIZE(AO, %rax, 2), %xmm0 + addpd %xmm3, %xmm9 + movddup -13 * SIZE(BO, %rax, 2), %xmm3 + mulpd %xmm0, %xmm1 + addpd %xmm1, %xmm10 + movddup -12 * SIZE(BO, %rax, 
2), %xmm1 + mulpd %xmm0, %xmm3 + movapd -8 * SIZE(AO, %rax, 2), %xmm0 + addpd %xmm3, %xmm11 + movddup -11 * SIZE(BO, %rax, 2), %xmm3 + mulpd %xmm2, %xmm1 + addpd %xmm1, %xmm8 + movddup -10 * SIZE(BO, %rax, 2), %xmm1 + mulpd %xmm2, %xmm3 + movapd -10 * SIZE(AO, %rax, 2), %xmm2 + addpd %xmm3, %xmm9 + movddup -9 * SIZE(BO, %rax, 2), %xmm3 + mulpd %xmm2, %xmm1 + addpd %xmm1, %xmm10 + movddup -8 * SIZE(BO, %rax, 2), %xmm1 + mulpd %xmm2, %xmm3 + movapd -4 * SIZE(AO, %rax, 2), %xmm2 + addpd %xmm3, %xmm11 + movddup -7 * SIZE(BO, %rax, 2), %xmm3 + + addq $4 * SIZE, %rax + BRANCH + jl .L62 + ALIGN_4 + +.L66: +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $3, %rax # if (k & 1) + je .L69 + + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 2), AO + leaq (BO, %rax, 2), BO + negq %rax + ALIGN_4 + +.L67: + mulpd %xmm0, %xmm1 + addpd %xmm1, %xmm8 + movddup -14 * SIZE(BO, %rax, 2), %xmm1 + mulpd %xmm0, %xmm3 + movapd -14 * SIZE(AO, %rax, 2), %xmm0 + addpd %xmm3, %xmm9 + movddup -13 * SIZE(BO, %rax, 2), %xmm3 + + addq $SIZE, %rax + jl .L67 + ALIGN_4 + +.L69: + addpd %xmm10, %xmm8 + addpd %xmm11, %xmm9 + +#if defined(LN) || defined(RT) + movq KK, %rax +#ifdef LN + subq $2, %rax +#else + subq $2, %rax +#endif + + leaq (, %rax, SIZE), %rax + + movq AORIG, AO + leaq (AO, %rax, 2), AO + leaq (B, %rax, 2), BO +#endif + +#if defined(LN) || defined(LT) + movapd %xmm8, %xmm0 + unpcklpd %xmm9, %xmm8 + unpckhpd %xmm9, %xmm0 + + movapd -16 * SIZE(BO), %xmm9 + movapd -14 * SIZE(BO), %xmm13 + + subpd %xmm8, %xmm9 + subpd %xmm0, %xmm13 +#else + movapd -16 * SIZE(AO), %xmm0 + movapd -14 * SIZE(AO), %xmm2 + + subpd %xmm8, %xmm0 + subpd %xmm9, %xmm2 +#endif + + +#ifdef LN + movddup -13 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm13 + + movddup -14 * SIZE(AO), %xmm10 + mulpd %xmm13, %xmm10 + subpd %xmm10, %xmm9 + + movddup -16 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm9 +#endif + +#ifdef LT + movddup -16 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm9 + + movddup -15 * 
SIZE(AO), %xmm10 + mulpd %xmm9, %xmm10 + subpd %xmm10, %xmm13 + + movddup -13 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm13 +#endif + +#ifdef RN + movddup -16 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm0 + + movddup -15 * SIZE(BO), %xmm9 + mulpd %xmm0, %xmm9 + subpd %xmm9, %xmm2 + + movddup -13 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm2 +#endif + +#ifdef RT + movddup -13 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm2 + + movddup -14 * SIZE(BO), %xmm9 + mulpd %xmm2, %xmm9 + subpd %xmm9, %xmm0 + + movddup -16 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm0 +#endif + +#ifdef LN + subq $2 * SIZE, CO1 + subq $2 * SIZE, CO2 +#endif + +#if defined(LN) || defined(LT) + movlpd %xmm9, 0 * SIZE(CO1) + movlpd %xmm13, 1 * SIZE(CO1) + + movhpd %xmm9, 0 * SIZE(CO2) + movhpd %xmm13, 1 * SIZE(CO2) +#else + movlpd %xmm0, 0 * SIZE(CO1) + movhpd %xmm0, 1 * SIZE(CO1) + + movlpd %xmm2, 0 * SIZE(CO2) + movhpd %xmm2, 1 * SIZE(CO2) +#endif + +#if defined(LN) || defined(LT) + movaps %xmm9, -16 * SIZE(BO) + movaps %xmm13, -14 * SIZE(BO) +#else + movaps %xmm0, -16 * SIZE(AO) + movaps %xmm2, -14 * SIZE(AO) +#endif + +#ifndef LN + addq $2 * SIZE, CO1 + addq $2 * SIZE, CO2 +#endif + +#if defined(LT) || defined(RN) + movq K, %rax + subq KK, %rax + leaq (,%rax, SIZE), %rax + leaq (AO, %rax, 2), AO + leaq (BO, %rax, 2), BO +#endif + +#ifdef LN + subq $2, KK +#endif + +#ifdef LT + addq $2, KK +#endif + +#ifdef RT + movq K, %rax + salq $1 + BASE_SHIFT, %rax + addq %rax, AORIG +#endif + ALIGN_4 + +.L70: + testq $1, M + je .L79 + ALIGN_4 + +.L71: +#ifdef LN + movq K, %rax + salq $0 + BASE_SHIFT, %rax + subq %rax, AORIG +#endif + +#if defined(LN) || defined(RT) + movq KK, %rax + movq AORIG, AO + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 1), AO +#endif + + movq B, BO + +#if defined(LN) || defined(RT) + movq KK, %rax + salq $1 + BASE_SHIFT, %rax + leaq (BO, %rax, 1), BO +#endif + + movddup -16 * SIZE(AO), %xmm0 + pxor %xmm8, %xmm8 + movddup -15 * SIZE(AO), %xmm1 + pxor %xmm9, %xmm9 + movddup -14 * SIZE(AO), %xmm2 + pxor %xmm10, %xmm10 
+ movddup -13 * SIZE(AO), %xmm3 + pxor %xmm11, %xmm11 + +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $-4, %rax + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 1), AO + leaq (BO, %rax, 2), BO + negq %rax + NOBRANCH + je .L76 + ALIGN_4 + +.L72: + mulpd -16 * SIZE(BO, %rax, 2), %xmm0 + addpd %xmm0, %xmm8 + movddup -12 * SIZE(AO, %rax, 1), %xmm0 + + mulpd -14 * SIZE(BO, %rax, 2), %xmm1 + addpd %xmm1, %xmm9 + movddup -11 * SIZE(AO, %rax, 1), %xmm1 + + mulpd -12 * SIZE(BO, %rax, 2), %xmm2 + addpd %xmm2, %xmm10 + movddup -10 * SIZE(AO, %rax, 1), %xmm2 + + mulpd -10 * SIZE(BO, %rax, 2), %xmm3 + addpd %xmm3, %xmm11 + movddup -9 * SIZE(AO, %rax, 1), %xmm3 + + addq $4 * SIZE, %rax + BRANCH + jl .L72 + ALIGN_4 + +.L76: +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $3, %rax # if (k & 1) + je .L78 + + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 1), AO + leaq (BO, %rax, 2), BO + negq %rax + ALIGN_4 + +.L77: + mulpd -16 * SIZE(BO, %rax, 2), %xmm0 + addpd %xmm0, %xmm8 + movddup -15 * SIZE(AO, %rax, 1), %xmm0 + + addq $SIZE, %rax + jl .L77 + ALIGN_4 + +.L78: + addpd %xmm9, %xmm8 + addpd %xmm11, %xmm10 + addpd %xmm10, %xmm8 + +#if defined(LN) || defined(RT) + movq KK, %rax +#ifdef LN + subq $1, %rax +#else + subq $2, %rax +#endif + + leaq (, %rax, SIZE), %rax + + movq AORIG, AO + leaq (AO, %rax, 1), AO + leaq (B, %rax, 2), BO +#endif + +#if defined(LN) || defined(LT) + movapd -16 * SIZE(BO), %xmm2 +#else + movapd -16 * SIZE(AO), %xmm2 +#endif + + subpd %xmm8, %xmm2 + +#if defined(LN) || defined(LT) + movddup -16 * SIZE(AO), %xmm0 + + mulpd %xmm0, %xmm2 +#endif + +#ifdef RN + movapd %xmm2, %xmm0 + unpckhpd %xmm0, %xmm0 + + mulsd -16 * SIZE(BO), %xmm2 + movsd -15 * SIZE(BO), %xmm4 + mulsd %xmm2, %xmm4 + subsd %xmm4, %xmm0 + + mulsd -13 * SIZE(BO), %xmm0 + unpcklpd %xmm0, %xmm2 +#endif + +#ifdef RT + movapd %xmm2, %xmm0 + unpckhpd %xmm0, %xmm0 + + mulsd -13 * SIZE(BO), %xmm0 + + 
movlpd -14 * SIZE(BO), %xmm4 + mulsd %xmm0, %xmm4 + subsd %xmm4, %xmm2 + + mulsd -16 * SIZE(BO), %xmm2 + unpcklpd %xmm0, %xmm2 +#endif + +#ifdef LN + subq $1 * SIZE, CO1 + subq $1 * SIZE, CO2 +#endif + + movlpd %xmm2, 0 * SIZE(CO1) + movhpd %xmm2, 0 * SIZE(CO2) + +#if defined(LN) || defined(LT) + movaps %xmm2, -16 * SIZE(BO) +#else + movaps %xmm2, -16 * SIZE(AO) +#endif + +#ifndef LN + addq $1 * SIZE, CO1 + addq $1 * SIZE, CO2 +#endif + +#if defined(LT) || defined(RN) + movq K, %rax + subq KK, %rax + leaq (,%rax, SIZE), %rax + leaq (AO, %rax, 1), AO + leaq (BO, %rax, 2), BO +#endif + +#ifdef LN + subq $1, KK +#endif + +#ifdef LT + addq $1, KK +#endif + +#ifdef RT + movq K, %rax + salq $0 + BASE_SHIFT, %rax + addq %rax, AORIG +#endif + ALIGN_4 + +.L79: +#ifdef LN + leaq (, K, SIZE), %rax + leaq (B, %rax, 2), B +#endif + +#if defined(LT) || defined(RN) + movq BO, B +#endif + +#ifdef RN + addq $2, KK +#endif + +#ifdef RT + subq $2, KK +#endif + ALIGN_4 + +.L80: + testq $1, N + je .L999 + +#if defined(LT) || defined(RN) + movq A, AO +#else + movq A, AORIG +#endif + +#ifdef RT + movq K, %rax + salq $0 + BASE_SHIFT, %rax + subq %rax, B + + subq LDC, C +#endif + + movq C, CO1 # coffset1 = c +#ifndef RT + addq LDC, C +#endif + +#ifdef LN + movq OFFSET, %rax + addq M, %rax + movq %rax, KK +#endif + +#ifdef LT + movq OFFSET, %rax + movq %rax, KK +#endif + + movq M, I + sarq $2, I # i = (m >> 2) + jle .L100 + ALIGN_4 + +.L91: +#ifdef LN + movq K, %rax + salq $2 + BASE_SHIFT, %rax + subq %rax, AORIG +#endif + +#if defined(LN) || defined(RT) + movq KK, %rax + movq AORIG, AO + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 4), AO +#endif + + movq B, BO + +#if defined(LN) || defined(RT) + movq KK, %rax + leaq (BO, %rax, SIZE), BO +#endif + + movapd -16 * SIZE(AO), %xmm0 + pxor %xmm8, %xmm8 + movapd -8 * SIZE(AO), %xmm2 + pxor %xmm9, %xmm9 + movddup -16 * SIZE(BO), %xmm1 + pxor %xmm10, %xmm10 + movddup -15 * SIZE(BO), %xmm5 + pxor %xmm11, %xmm11 + movddup -14 * SIZE(BO), %xmm3 + + 
+#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $-4, %rax + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 4), AO + leaq (BO, %rax, 1), BO + negq %rax + NOBRANCH + je .L96 + ALIGN_4 + +.L92: + mulpd %xmm1, %xmm0 + mulpd -14 * SIZE(AO, %rax, 4), %xmm1 + addpd %xmm0, %xmm8 + movapd -12 * SIZE(AO, %rax, 4), %xmm0 + addpd %xmm1, %xmm9 + movddup -12 * SIZE(BO, %rax, 1), %xmm1 + mulpd %xmm5, %xmm0 + mulpd -10 * SIZE(AO, %rax, 4), %xmm5 + addpd %xmm0, %xmm10 + movapd (AO, %rax, 4), %xmm0 + addpd %xmm5, %xmm11 + movddup -13 * SIZE(BO, %rax, 1), %xmm5 + mulpd %xmm3, %xmm2 + mulpd -6 * SIZE(AO, %rax, 4), %xmm3 + addpd %xmm2, %xmm8 + movapd -4 * SIZE(AO, %rax, 4), %xmm2 + addpd %xmm3, %xmm9 + movddup -10 * SIZE(BO, %rax, 1), %xmm3 + mulpd %xmm5, %xmm2 + mulpd -2 * SIZE(AO, %rax, 4), %xmm5 + addpd %xmm2, %xmm10 + movapd 8 * SIZE(AO, %rax, 4), %xmm2 + addpd %xmm5, %xmm11 + movddup -11 * SIZE(BO, %rax, 1), %xmm5 + + addq $4 * SIZE, %rax + BRANCH + jl .L92 + ALIGN_4 + +.L96: +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $3, %rax # if (k & 1) + je .L99 + + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 4), AO + leaq (BO, %rax, 1), BO + negq %rax + ALIGN_4 + +.L97: + mulpd %xmm1, %xmm0 + mulpd -14 * SIZE(AO, %rax, 4), %xmm1 + addpd %xmm0, %xmm8 + movapd -12 * SIZE(AO, %rax, 4), %xmm0 + addpd %xmm1, %xmm9 + movddup -15 * SIZE(BO, %rax, 1), %xmm1 + + addq $SIZE, %rax + jl .L97 + ALIGN_4 +.L99: + addpd %xmm10, %xmm8 + addpd %xmm11, %xmm9 + +#if defined(LN) || defined(RT) + movq KK, %rax +#ifdef LN + subq $4, %rax +#else + subq $1, %rax +#endif + + leaq (, %rax, SIZE), %rax + + movq AORIG, AO + leaq (AO, %rax, 4), AO + leaq (B, %rax, 1), BO +#endif + +#if defined(LN) || defined(LT) + movapd -16 * SIZE(BO), %xmm10 + movapd -14 * SIZE(BO), %xmm11 + + subpd %xmm8, %xmm10 + subpd %xmm9, %xmm11 +#else + movapd -16 * SIZE(AO), %xmm10 + movapd -14 * SIZE(AO), %xmm11 + + subpd %xmm8, %xmm10 + 
subpd %xmm9, %xmm11 +#endif + +#ifdef LN + movapd %xmm10, %xmm8 + unpckhpd %xmm8, %xmm8 + + movapd %xmm11, %xmm9 + unpckhpd %xmm9, %xmm9 + + movsd -1 * SIZE(AO), %xmm12 + mulsd %xmm12, %xmm9 + + movsd -2 * SIZE(AO), %xmm13 + mulsd %xmm9, %xmm13 + subsd %xmm13, %xmm11 + movsd -3 * SIZE(AO), %xmm14 + mulsd %xmm9, %xmm14 + subsd %xmm14, %xmm8 + movsd -4 * SIZE(AO), %xmm15 + mulsd %xmm9, %xmm15 + subsd %xmm15, %xmm10 + + movsd -6 * SIZE(AO), %xmm12 + mulsd %xmm12, %xmm11 + + movsd -7 * SIZE(AO), %xmm13 + mulsd %xmm11, %xmm13 + subsd %xmm13, %xmm8 + movsd -8 * SIZE(AO), %xmm14 + mulsd %xmm11, %xmm14 + subsd %xmm14, %xmm10 + + movsd -11 * SIZE(AO), %xmm12 + mulsd %xmm12, %xmm8 + + movsd -12 * SIZE(AO), %xmm13 + mulsd %xmm8, %xmm13 + subsd %xmm13, %xmm10 + + movsd -16 * SIZE(AO), %xmm12 + mulsd %xmm12, %xmm10 + + unpcklpd %xmm8, %xmm10 + unpcklpd %xmm9, %xmm11 +#endif + +#ifdef LT + movapd %xmm10, %xmm8 + unpckhpd %xmm8, %xmm8 + + movapd %xmm11, %xmm9 + unpckhpd %xmm9, %xmm9 + + movsd -16 * SIZE(AO), %xmm12 + mulsd %xmm12, %xmm10 + + movsd -15 * SIZE(AO), %xmm13 + mulsd %xmm10, %xmm13 + subsd %xmm13, %xmm8 + movsd -14 * SIZE(AO), %xmm14 + mulsd %xmm10, %xmm14 + subsd %xmm14, %xmm11 + movsd -13 * SIZE(AO), %xmm15 + mulsd %xmm10, %xmm15 + subsd %xmm15, %xmm9 + + movsd -11 * SIZE(AO), %xmm12 + mulsd %xmm12, %xmm8 + + movsd -10 * SIZE(AO), %xmm13 + mulsd %xmm8, %xmm13 + subsd %xmm13, %xmm11 + movsd -9 * SIZE(AO), %xmm14 + mulsd %xmm8, %xmm14 + subsd %xmm14, %xmm9 + + movsd -6 * SIZE(AO), %xmm12 + mulsd %xmm12, %xmm11 + + movsd -5 * SIZE(AO), %xmm13 + mulsd %xmm11, %xmm13 + subsd %xmm13, %xmm9 + + movsd -1 * SIZE(AO), %xmm12 + mulsd %xmm12, %xmm9 + + unpcklpd %xmm8, %xmm10 + unpcklpd %xmm9, %xmm11 +#endif + +#ifdef RN + movddup -16 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm10 + mulpd %xmm8, %xmm11 +#endif + +#ifdef RT + movddup -16 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm10 + mulpd %xmm8, %xmm11 +#endif + +#ifdef LN + subq $4 * SIZE, CO1 +#endif + + movlpd %xmm10, 0 * SIZE(CO1) + 
movhpd %xmm10, 1 * SIZE(CO1) + movlpd %xmm11, 2 * SIZE(CO1) + movhpd %xmm11, 3 * SIZE(CO1) + +#if defined(LN) || defined(LT) + movaps %xmm10, -16 * SIZE(BO) + movaps %xmm11, -14 * SIZE(BO) +#else + movaps %xmm10, -16 * SIZE(AO) + movaps %xmm11, -14 * SIZE(AO) +#endif + +#ifndef LN + addq $4 * SIZE, CO1 +#endif + +#if defined(LT) || defined(RN) + movq K, %rax + subq KK, %rax + leaq (,%rax, SIZE), %rax + leaq (AO, %rax, 4), AO + addq %rax, BO +#endif + +#ifdef LN + subq $4, KK +#endif + +#ifdef LT + addq $4, KK +#endif + +#ifdef RT + movq K, %rax + salq $2 + BASE_SHIFT, %rax + addq %rax, AORIG +#endif + + decq I # i -- + jg .L91 + ALIGN_4 + +.L100: + testq $2, M + je .L110 + +#ifdef LN + movq K, %rax + salq $1 + BASE_SHIFT, %rax + subq %rax, AORIG +#endif + +#if defined(LN) || defined(RT) + movq KK, %rax + movq AORIG, AO + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 2), AO +#endif + + movq B, BO + +#if defined(LN) || defined(RT) + movq KK, %rax + leaq (BO, %rax, SIZE), BO +#endif + + movddup -16 * SIZE(BO), %xmm0 + pxor %xmm8, %xmm8 + movddup -15 * SIZE(BO), %xmm1 + pxor %xmm9, %xmm9 + movddup -14 * SIZE(BO), %xmm2 + pxor %xmm10, %xmm10 + movddup -13 * SIZE(BO), %xmm3 + pxor %xmm11, %xmm11 + +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $-4, %rax + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 2), AO + leaq (BO, %rax, 1), BO + negq %rax + NOBRANCH + je .L106 + ALIGN_4 + +.L102: + mulpd -16 * SIZE(AO, %rax, 2), %xmm0 + addpd %xmm0, %xmm8 + movddup -12 * SIZE(BO, %rax, 1), %xmm0 + + mulpd -14 * SIZE(AO, %rax, 2), %xmm1 + addpd %xmm1, %xmm9 + movddup -11 * SIZE(BO, %rax, 1), %xmm1 + + mulpd -12 * SIZE(AO, %rax, 2), %xmm2 + addpd %xmm2, %xmm10 + movddup -10 * SIZE(BO, %rax, 1), %xmm2 + + mulpd -10 * SIZE(AO, %rax, 2), %xmm3 + addpd %xmm3, %xmm11 + movddup -9 * SIZE(BO, %rax, 1), %xmm3 + + addq $4 * SIZE, %rax + BRANCH + jl .L102 + ALIGN_4 + +.L106: +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + 
subq KK, %rax +#endif + andq $3, %rax # if (k & 1) + je .L109 + + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 2), AO + leaq (BO, %rax, 1), BO + negq %rax + ALIGN_4 + +.L107: + movddup -16 * SIZE(BO, %rax, 1), %xmm0 + mulpd -16 * SIZE(AO, %rax, 2), %xmm0 + addpd %xmm0, %xmm8 + + addq $SIZE, %rax + jl .L107 + ALIGN_4 + +.L109: + addpd %xmm9, %xmm8 + addpd %xmm11, %xmm10 + addpd %xmm10, %xmm8 + +#if defined(LN) || defined(RT) + movq KK, %rax +#ifdef LN + subq $2, %rax +#else + subq $1, %rax +#endif + + leaq (, %rax, SIZE), %rax + + movq AORIG, AO + leaq (AO, %rax, 2), AO + leaq (B, %rax, 1), BO +#endif + +#if defined(LN) || defined(LT) + movapd -16 * SIZE(BO), %xmm10 + subpd %xmm8, %xmm10 +#else + movapd -16 * SIZE(AO), %xmm10 + subpd %xmm8, %xmm10 +#endif + +#ifdef LN + movapd %xmm10, %xmm8 + unpckhpd %xmm8, %xmm8 + + movsd -13 * SIZE(AO), %xmm12 + mulsd %xmm12, %xmm8 + + movsd -14 * SIZE(AO), %xmm13 + mulsd %xmm8, %xmm13 + subsd %xmm13, %xmm10 + + movsd -16 * SIZE(AO), %xmm12 + mulsd %xmm12, %xmm10 + + unpcklpd %xmm8, %xmm10 +#endif + +#ifdef LT + movapd %xmm10, %xmm8 + unpckhpd %xmm8, %xmm8 + + movsd -16 * SIZE(AO), %xmm12 + mulsd %xmm12, %xmm10 + + movsd -15 * SIZE(AO), %xmm13 + mulsd %xmm10, %xmm13 + subsd %xmm13, %xmm8 + + movsd -13 * SIZE(AO), %xmm12 + mulsd %xmm12, %xmm8 + + unpcklpd %xmm8, %xmm10 +#endif + +#ifdef RN + movddup -16 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm10 +#endif + +#ifdef RT + movddup -16 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm10 +#endif + +#ifdef LN + subq $2 * SIZE, CO1 +#endif + +#if defined(LN) || defined(LT) + movlpd %xmm10, 0 * SIZE(CO1) + movhpd %xmm10, 1 * SIZE(CO1) +#else + movlpd %xmm10, 0 * SIZE(CO1) + movhpd %xmm10, 1 * SIZE(CO1) +#endif + +#if defined(LN) || defined(LT) + movaps %xmm10, -16 * SIZE(BO) +#else + movaps %xmm10, -16 * SIZE(AO) +#endif + +#ifndef LN + addq $2 * SIZE, CO1 +#endif + +#if defined(LT) || defined(RN) + movq K, %rax + subq KK, %rax + leaq (,%rax, SIZE), %rax + leaq (AO, %rax, 2), AO + addq %rax, BO +#endif + 
+#ifdef LN + subq $2, KK +#endif + +#ifdef LT + addq $2, KK +#endif + +#ifdef RT + movq K, %rax + salq $1 + BASE_SHIFT, %rax + addq %rax, AORIG +#endif + ALIGN_4 + +.L110: + testq $1, M + je .L119 + ALIGN_4 + +.L111: +#ifdef LN + movq K, %rax + salq $0 + BASE_SHIFT, %rax + subq %rax, AORIG +#endif + +#if defined(LN) || defined(RT) + movq KK, %rax + movq AORIG, AO + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 1), AO +#endif + + movq B, BO + +#if defined(LN) || defined(RT) + movq KK, %rax + leaq (BO, %rax, SIZE), BO +#endif + + movapd -16 * SIZE(AO), %xmm0 + pxor %xmm8, %xmm8 + movapd -14 * SIZE(AO), %xmm1 + pxor %xmm9, %xmm9 + +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $-4, %rax + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 1), AO + leaq (BO, %rax, 1), BO + negq %rax + NOBRANCH + je .L116 + ALIGN_4 + +.L112: + mulpd -16 * SIZE(BO, %rax, 1), %xmm0 + addpd %xmm0, %xmm8 + movapd -12 * SIZE(AO, %rax, 1), %xmm0 + + mulpd -14 * SIZE(BO, %rax, 1), %xmm1 + addpd %xmm1, %xmm9 + movapd -10 * SIZE(AO, %rax, 1), %xmm1 + + addq $4 * SIZE, %rax + BRANCH + jl .L112 + ALIGN_4 + +.L116: +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $3, %rax # if (k & 1) + je .L118 + + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 1), AO + leaq (BO, %rax, 1), BO + negq %rax + ALIGN_4 + +.L117: + mulsd -16 * SIZE(BO, %rax, 1), %xmm0 + addsd %xmm0, %xmm8 + movsd -15 * SIZE(AO, %rax, 1), %xmm0 + + addq $SIZE, %rax + jl .L117 + ALIGN_4 + +.L118: + addpd %xmm9, %xmm8 + haddpd %xmm8, %xmm8 + +#if defined(LN) || defined(RT) + movq KK, %rax +#ifdef LN + subq $1, %rax +#else + subq $1, %rax +#endif + + leaq (, %rax, SIZE), %rax + + movq AORIG, AO + leaq (AO, %rax, 1), AO + leaq (B, %rax, 1), BO +#endif + +#if defined(LN) || defined(LT) + movsd -16 * SIZE(BO), %xmm10 + subsd %xmm8, %xmm10 +#else + movsd -16 * SIZE(AO), %xmm10 + subsd %xmm8, %xmm10 +#endif + +#if defined(LN) || defined(LT) + movsd -16 
* SIZE(AO), %xmm12 + mulsd %xmm12, %xmm10 +#endif + +#if defined(RN) || defined(RT) + movsd -16 * SIZE(BO), %xmm8 + mulsd %xmm8, %xmm10 +#endif + +#ifdef LN + subq $1 * SIZE, CO1 +#endif + + movsd %xmm10, 0 * SIZE(CO1) + +#if defined(LN) || defined(LT) + movlpd %xmm10, -16 * SIZE(BO) +#else + movlpd %xmm10, -16 * SIZE(AO) +#endif + +#ifndef LN + addq $1 * SIZE, CO1 +#endif + +#if defined(LT) || defined(RN) + movq K, %rax + subq KK, %rax + leaq (,%rax, SIZE), %rax + addq %rax, AO + addq %rax, BO +#endif + +#ifdef LN + subq $1, KK +#endif + +#ifdef LT + addq $1, KK +#endif + +#ifdef RT + movq K, %rax + salq $0 + BASE_SHIFT, %rax + addq %rax, AORIG +#endif + ALIGN_4 + +.L119: +#ifdef LN + leaq (B, K, SIZE), B +#endif + +#if defined(LT) || defined(RN) + movq BO, B +#endif + +#ifdef RN + addq $1, KK +#endif + +#ifdef RT + subq $1, KK +#endif + ALIGN_4 + + +.L999: + movq (%rsp), %rbx + movq 8(%rsp), %rbp + movq 16(%rsp), %r12 + movq 24(%rsp), %r13 + movq 32(%rsp), %r14 + movq 40(%rsp), %r15 + +#ifdef WINDOWS_ABI + movq 48(%rsp), %rdi + movq 56(%rsp), %rsi + movups 64(%rsp), %xmm6 + movups 80(%rsp), %xmm7 + movups 96(%rsp), %xmm8 + movups 112(%rsp), %xmm9 + movups 128(%rsp), %xmm10 + movups 144(%rsp), %xmm11 + movups 160(%rsp), %xmm12 + movups 176(%rsp), %xmm13 + movups 192(%rsp), %xmm14 + movups 208(%rsp), %xmm15 +#endif + + addq $STACKSIZE, %rsp + ret + + EPILOGUE From 7ac306e0da1d3ba76b902cc62f2c2cfba6bcfada Mon Sep 17 00:00:00 2001 From: wernsaar Date: Sat, 27 Apr 2013 16:48:48 +0200 Subject: [PATCH 04/15] added trsm_kernel_RT_4x4_bulldozer.S --- kernel/x86_64/KERNEL.BULLDOZER | 2 +- kernel/x86_64/trsm_kernel_RT_4x4_bulldozer.S | 3292 ++++++++++++++++++ 2 files changed, 3293 insertions(+), 1 deletion(-) create mode 100644 kernel/x86_64/trsm_kernel_RT_4x4_bulldozer.S diff --git a/kernel/x86_64/KERNEL.BULLDOZER b/kernel/x86_64/KERNEL.BULLDOZER index e0b8a71e4..a41c94aca 100644 --- a/kernel/x86_64/KERNEL.BULLDOZER +++ b/kernel/x86_64/KERNEL.BULLDOZER @@ -46,7 +46,7 @@ 
STRSMKERNEL_RT = trsm_kernel_RT_8x4_sse.S DTRSMKERNEL_LN = trsm_kernel_LN_4x4_barcelona.S DTRSMKERNEL_LT = trsm_kernel_LT_4x4_bulldozer.S DTRSMKERNEL_RN = trsm_kernel_LT_4x4_bulldozer.S -DTRSMKERNEL_RT = trsm_kernel_RT_4x4_barcelona.S +DTRSMKERNEL_RT = trsm_kernel_RT_4x4_bulldozer.S CTRSMKERNEL_LN = ztrsm_kernel_LN_4x2_sse.S CTRSMKERNEL_LT = ztrsm_kernel_LT_4x2_sse.S diff --git a/kernel/x86_64/trsm_kernel_RT_4x4_bulldozer.S b/kernel/x86_64/trsm_kernel_RT_4x4_bulldozer.S new file mode 100644 index 000000000..e1880851a --- /dev/null +++ b/kernel/x86_64/trsm_kernel_RT_4x4_bulldozer.S @@ -0,0 +1,3292 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. 
IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#define ASSEMBLER +#include "common.h" + +#define OLD_M %rdi +#define OLD_N %rsi +#define M %r13 +#define N %r14 +#define K %rdx + +#define A %rcx +#define B %r8 +#define C %r9 +#define LDC %r10 + +#define I %r11 +#define AO %rdi +#define BO %rsi +#define CO1 %r15 +#define CO2 %r12 +#define BB %rbp +#define J %rbx + +#ifndef WINDOWS_ABI + +#define STACKSIZE 96 + +#define OFFSET 48(%rsp) +#define AORIG 56(%rsp) +#define KK 64(%rsp) +#define KKK 72(%rsp) + +#else + +#define STACKSIZE 256 + +#define OLD_A 40 + STACKSIZE(%rsp) +#define OLD_B 48 + STACKSIZE(%rsp) +#define OLD_C 56 + STACKSIZE(%rsp) +#define OLD_LDC 64 + STACKSIZE(%rsp) +#define OLD_OFFSET 72 + STACKSIZE(%rsp) + +#define OFFSET 224(%rsp) +#define AORIG 232(%rsp) +#define KK 240(%rsp) +#define KKK 248(%rsp) + +#endif + + +#define movlpd movsd +#define movapd movups +#define movupd movups + +#define A_PR1 224 +#define B_PR1 224 + +#define KERNEL1(xx) \ + vmovups -14 * SIZE(AO, %rax, 4),%xmm2 ;\ + vfmaddpd %xmm8,%xmm6,%xmm7,%xmm8 ;\ + vmovddup -14 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd 
%xmm12,%xmm2,%xmm7,%xmm12 ;\ + vmovddup -13 * SIZE(BO, %rax, 4), %xmm4 ;\ + vfmaddpd %xmm9,%xmm6,%xmm3,%xmm9 ;\ + vmovddup -12 * SIZE(BO, %rax, 4), %xmm1 ;\ + vfmaddpd %xmm13,%xmm2,%xmm3,%xmm13 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovddup -11 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + vmovups -12 * SIZE(AO, %rax, 4),%xmm0 ;\ + vfmaddpd %xmm10,%xmm6,%xmm5,%xmm10 ;\ + vfmaddpd %xmm11,%xmm6,%xmm4,%xmm11 ;\ + +#define KERNEL2(xx) \ + vmovups -10 * SIZE(AO, %rax, 4), %xmm2 ;\ + vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ + vmovups -8 * SIZE(AO, %rax, 4),%xmm6 ;\ + vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ + vmovddup -10 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vmovddup -9 * SIZE(BO, %rax, 4), %xmm4 ;\ + vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ + vmovddup -8 * SIZE(BO, %rax, 4), %xmm7 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovddup -7 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + +#define KERNEL3(xx) \ + vmovups -6 * SIZE(AO, %rax, 4),%xmm2 ;\ + vfmaddpd %xmm8, %xmm6, %xmm7, %xmm8 ;\ + vmovddup -6 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ + vmovddup -5 * SIZE(BO, %rax, 4), %xmm4 ;\ + vfmaddpd %xmm9, %xmm6, %xmm3,%xmm9 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vmovddup -4 * SIZE(BO, %rax, 4), %xmm1 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovddup -3 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + vmovups -4 * SIZE(AO, %rax, 4),%xmm0 ;\ + vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ + vfmaddpd %xmm11,%xmm6, %xmm4, %xmm11 ;\ + +#define KERNEL4(xx) \ + vmovups -2 * SIZE(AO, %rax, 4), %xmm2 ;\ + vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ + vmovups (AO, %rax, 4), %xmm6 ;\ + vfmaddpd %xmm12,%xmm2, %xmm1 ,%xmm12;\ + vmovddup -2 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vmovddup -1 * 
SIZE(BO, %rax, 4), %xmm4 ;\ + vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ + vmovddup (BO, %rax, 4), %xmm7 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovddup 1 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + +#define KERNEL5(xx) \ + vmovups 2 * SIZE(AO, %rax, 4),%xmm2 ;\ + vfmaddpd %xmm8,%xmm6, %xmm7,%xmm8 ;\ + vmovddup 2 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ + vmovddup 3 * SIZE(BO, %rax, 4), %xmm4 ;\ + vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vmovddup 4 * SIZE(BO, %rax, 4), %xmm1 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovddup 5 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + vmovups 4 * SIZE(AO, %rax, 4),%xmm0 ;\ + vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ + vfmaddpd %xmm11,%xmm6, %xmm4,%xmm11 ;\ + +#define KERNEL6(xx) \ + vmovups 6 * SIZE(AO, %rax, 4), %xmm2 ;\ + vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ + vmovups 8 * SIZE(AO, %rax, 4), %xmm6 ;\ + vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ + vmovddup 6 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vmovddup 7 * SIZE(BO, %rax, 4), %xmm4 ;\ + vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ + vmovddup 8 * SIZE(BO, %rax, 4), %xmm7 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovddup 9 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + +#define KERNEL7(xx) \ + vmovups 10 * SIZE(AO, %rax, 4),%xmm2 ;\ + vfmaddpd %xmm8,%xmm6, %xmm7,%xmm8 ;\ + vmovddup 10 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ + vmovddup 11 * SIZE(BO, %rax, 4), %xmm4 ;\ + vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vmovddup 12 * SIZE(BO, %rax, 4), %xmm1 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovddup 13 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + vmovups 12 * SIZE(AO, %rax, 4), 
%xmm0 ;\ + vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ + vfmaddpd %xmm11,%xmm6, %xmm4,%xmm11 ;\ + +#define KERNEL8(xx) \ + vmovups 14 * SIZE(AO, %rax, 4), %xmm2 ;\ + vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ + vmovups 16 * SIZE(AO, %rax, 4),%xmm6 ;\ + vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ + vmovddup 14 * SIZE(BO, %rax, 4), %xmm5 ;\ + vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ + vfmaddpd %xmm13, %xmm2, %xmm3,%xmm13 ;\ + vmovddup 15 * SIZE(BO, %rax, 4), %xmm4 ;\ + vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ + vmovddup 16 * SIZE(BO, %rax, 4), %xmm7 ;\ + vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ + vmovddup 17 * SIZE(BO, %rax, 4), %xmm3 ;\ + vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ + vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ + +#define KERNEL_SUB1(xx) \ + vmovups -16 * SIZE(AO),%xmm0 ;\ + vmovups -14 * SIZE(AO),%xmm2 ;\ + vmovddup -16 * SIZE(BO), %xmm1 ;\ + vmovddup -15 * SIZE(BO), %xmm3 ;\ + vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ + vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ + vfmaddpd %xmm12, %xmm2, %xmm1,%xmm12 ;\ + vfmaddpd %xmm13, %xmm2, %xmm3,%xmm13 ;\ + vmovddup -14 * SIZE(BO), %xmm1 ;\ + vmovddup -13 * SIZE(BO), %xmm3 ;\ + vfmaddpd %xmm10, %xmm0, %xmm1,%xmm10 ;\ + vfmaddpd %xmm11, %xmm0, %xmm3,%xmm11 ;\ + vfmaddpd %xmm14, %xmm2, %xmm1,%xmm14 ;\ + vfmaddpd %xmm15, %xmm2, %xmm3,%xmm15 ;\ + +#define KERNEL_SUB2(xx) \ + vmovups -12 * SIZE(AO), %xmm0 ;\ + vmovups -10 * SIZE(AO), %xmm2 ;\ + vmovddup -12 * SIZE(BO), %xmm1 ;\ + vmovddup -11 * SIZE(BO), %xmm3 ;\ + vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ + vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ + vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vmovddup -10 * SIZE(BO), %xmm1 ;\ + vmovddup -9 * SIZE(BO), %xmm3 ;\ + vfmaddpd %xmm10,%xmm0, %xmm1,%xmm10 ;\ + vfmaddpd %xmm11,%xmm0, %xmm3,%xmm11 ;\ + vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\ + vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\ + +#define KERNEL_SUB3(xx) \ + vmovups -8 * SIZE(AO),%xmm0 ;\ + vmovups -6 * SIZE(AO),%xmm2 ;\ + vmovddup -8 * SIZE(BO), %xmm1 ;\ + vmovddup -7 * SIZE(BO), 
%xmm3 ;\ + vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ + vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ + vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vmovddup -6 * SIZE(BO), %xmm1 ;\ + vmovddup -5 * SIZE(BO), %xmm3 ;\ + vfmaddpd %xmm10,%xmm0, %xmm1,%xmm10 ;\ + vfmaddpd %xmm11,%xmm0, %xmm3,%xmm11 ;\ + vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\ + vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\ + +#define KERNEL_SUB4(xx) \ + vmovups -4 * SIZE(AO), %xmm0 ;\ + vmovups -2 * SIZE(AO), %xmm2 ;\ + vmovddup -4 * SIZE(BO), %xmm1 ;\ + vmovddup -3 * SIZE(BO), %xmm3 ;\ + vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ + vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ + vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ + vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ + vmovddup -2 * SIZE(BO), %xmm1 ;\ + vmovddup -1 * SIZE(BO), %xmm3 ;\ + vfmaddpd %xmm10,%xmm0, %xmm1,%xmm10 ;\ + vfmaddpd %xmm11,%xmm0, %xmm3,%xmm11 ;\ + vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\ + vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\ + vmovups (AO), %xmm0 ;\ + vmovddup (BO), %xmm1 ;\ + vmovddup 1 * SIZE(BO), %xmm3 ;\ + vmovaps %xmm0, %xmm2 + + + + PROLOGUE + PROFCODE + + subq $STACKSIZE, %rsp + movq %rbx, (%rsp) + movq %rbp, 8(%rsp) + movq %r12, 16(%rsp) + movq %r13, 24(%rsp) + movq %r14, 32(%rsp) + movq %r15, 40(%rsp) + +#ifdef WINDOWS_ABI + movq %rdi, 48(%rsp) + movq %rsi, 56(%rsp) + movups %xmm6, 64(%rsp) + movups %xmm7, 80(%rsp) + movups %xmm8, 96(%rsp) + movups %xmm9, 112(%rsp) + movups %xmm10, 128(%rsp) + movups %xmm11, 144(%rsp) + movups %xmm12, 160(%rsp) + movups %xmm13, 176(%rsp) + movups %xmm14, 192(%rsp) + movups %xmm15, 208(%rsp) + + movq ARG1, OLD_M + movq ARG2, OLD_N + movq ARG3, K + movq OLD_A, A + movq OLD_B, B + movq OLD_C, C + movq OLD_LDC, LDC + movsd OLD_OFFSET, %xmm12 +#else + movq STACKSIZE + 8(%rsp), LDC + movsd STACKSIZE + 16(%rsp), %xmm12 +#endif + + movq OLD_M, M + movq OLD_N, N + + subq $-16 * SIZE, A + subq $-16 * SIZE, B + + movsd %xmm12, OFFSET + movsd %xmm12, KK + + leaq (, LDC, SIZE), LDC + +#ifdef LN + leaq (, M, 
SIZE), %rax + addq %rax, C + imulq K, %rax + addq %rax, A +#endif + +#ifdef RT + leaq (, N, SIZE), %rax + imulq K, %rax + addq %rax, B + movq N, %rax + imulq LDC, %rax + addq %rax, C +#endif + +#ifdef RN + negq KK +#endif + +#ifdef RT + movq N, %rax + subq OFFSET, %rax + movq %rax, KK +#endif + + testq $1, N + je .L40 + +#if defined(LT) || defined(RN) + movq A, AO +#else + movq A, AORIG +#endif + +#ifdef RT + movq K, %rax + salq $0 + BASE_SHIFT, %rax + subq %rax, B + + subq LDC, C +#endif + + movq C, CO1 # coffset1 = c +#ifndef RT + addq LDC, C +#endif + +#ifdef LN + movq OFFSET, %rax + addq M, %rax + movq %rax, KK +#endif + +#ifdef LT + movq OFFSET, %rax + movq %rax, KK +#endif + + movq M, I + sarq $2, I # i = (m >> 2) + jle .L100 + ALIGN_4 + +.L91: +#ifdef LN + movq K, %rax + salq $2 + BASE_SHIFT, %rax + subq %rax, AORIG +#endif + +#if defined(LN) || defined(RT) + movq KK, %rax + movq AORIG, AO + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 4), AO +#endif + + movq B, BO + +#if defined(LN) || defined(RT) + movq KK, %rax + leaq (BO, %rax, SIZE), BO +#endif + + movapd -16 * SIZE(AO), %xmm0 + pxor %xmm8, %xmm8 + movapd -8 * SIZE(AO), %xmm2 + pxor %xmm9, %xmm9 + movddup -16 * SIZE(BO), %xmm1 + pxor %xmm10, %xmm10 + movddup -15 * SIZE(BO), %xmm5 + pxor %xmm11, %xmm11 + movddup -14 * SIZE(BO), %xmm3 + +#ifndef LN + prefetchw 3 * SIZE(CO1) +#else + prefetchw -8 * SIZE(CO1) +#endif + +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $-4, %rax + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 4), AO + leaq (BO, %rax, 1), BO + negq %rax + NOBRANCH + je .L96 + ALIGN_4 + +.L92: + mulpd %xmm1, %xmm0 + mulpd -14 * SIZE(AO, %rax, 4), %xmm1 + addpd %xmm0, %xmm8 + movapd -12 * SIZE(AO, %rax, 4), %xmm0 + addpd %xmm1, %xmm9 + movddup -12 * SIZE(BO, %rax, 1), %xmm1 + mulpd %xmm5, %xmm0 + mulpd -10 * SIZE(AO, %rax, 4), %xmm5 + addpd %xmm0, %xmm10 + movapd (AO, %rax, 4), %xmm0 + addpd %xmm5, %xmm11 + movddup -13 * SIZE(BO, %rax, 1), %xmm5 + 
mulpd %xmm3, %xmm2 + mulpd -6 * SIZE(AO, %rax, 4), %xmm3 + addpd %xmm2, %xmm8 + movapd -4 * SIZE(AO, %rax, 4), %xmm2 + addpd %xmm3, %xmm9 + movddup -10 * SIZE(BO, %rax, 1), %xmm3 + mulpd %xmm5, %xmm2 + mulpd -2 * SIZE(AO, %rax, 4), %xmm5 + addpd %xmm2, %xmm10 + movapd 8 * SIZE(AO, %rax, 4), %xmm2 + addpd %xmm5, %xmm11 + movddup -11 * SIZE(BO, %rax, 1), %xmm5 + + addq $4 * SIZE, %rax + BRANCH + jl .L92 + ALIGN_4 + +.L96: +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $3, %rax # if (k & 1) + je .L99 + + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 4), AO + leaq (BO, %rax, 1), BO + negq %rax + ALIGN_4 + +.L97: + mulpd %xmm1, %xmm0 + mulpd -14 * SIZE(AO, %rax, 4), %xmm1 + addpd %xmm0, %xmm8 + movapd -12 * SIZE(AO, %rax, 4), %xmm0 + addpd %xmm1, %xmm9 + movddup -15 * SIZE(BO, %rax, 1), %xmm1 + + addq $SIZE, %rax + jl .L97 + ALIGN_4 +.L99: + addpd %xmm10, %xmm8 + addpd %xmm11, %xmm9 + +#if defined(LN) || defined(RT) + movq KK, %rax +#ifdef LN + subq $4, %rax +#else + subq $1, %rax +#endif + + leaq (, %rax, SIZE), %rax + + movq AORIG, AO + leaq (AO, %rax, 4), AO + leaq (B, %rax, 1), BO +#endif + +#if defined(LN) || defined(LT) + movapd -16 * SIZE(BO), %xmm10 + movapd -14 * SIZE(BO), %xmm11 + + subpd %xmm8, %xmm10 + subpd %xmm9, %xmm11 +#else + movapd -16 * SIZE(AO), %xmm10 + movapd -14 * SIZE(AO), %xmm11 + + subpd %xmm8, %xmm10 + subpd %xmm9, %xmm11 +#endif + +#ifdef LN + movapd %xmm10, %xmm8 + unpckhpd %xmm8, %xmm8 + + movapd %xmm11, %xmm9 + unpckhpd %xmm9, %xmm9 + + movsd -1 * SIZE(AO), %xmm12 + mulsd %xmm12, %xmm9 + + movsd -2 * SIZE(AO), %xmm13 + mulsd %xmm9, %xmm13 + subsd %xmm13, %xmm11 + movsd -3 * SIZE(AO), %xmm14 + mulsd %xmm9, %xmm14 + subsd %xmm14, %xmm8 + movsd -4 * SIZE(AO), %xmm15 + mulsd %xmm9, %xmm15 + subsd %xmm15, %xmm10 + + movsd -6 * SIZE(AO), %xmm12 + mulsd %xmm12, %xmm11 + + movsd -7 * SIZE(AO), %xmm13 + mulsd %xmm11, %xmm13 + subsd %xmm13, %xmm8 + movsd -8 * SIZE(AO), %xmm14 + mulsd %xmm11, %xmm14 
+ subsd %xmm14, %xmm10 + + movsd -11 * SIZE(AO), %xmm12 + mulsd %xmm12, %xmm8 + + movsd -12 * SIZE(AO), %xmm13 + mulsd %xmm8, %xmm13 + subsd %xmm13, %xmm10 + + movsd -16 * SIZE(AO), %xmm12 + mulsd %xmm12, %xmm10 + + unpcklpd %xmm8, %xmm10 + unpcklpd %xmm9, %xmm11 +#endif + +#ifdef LT + movapd %xmm10, %xmm8 + unpckhpd %xmm8, %xmm8 + + movapd %xmm11, %xmm9 + unpckhpd %xmm9, %xmm9 + + movsd -16 * SIZE(AO), %xmm12 + mulsd %xmm12, %xmm10 + + movsd -15 * SIZE(AO), %xmm13 + mulsd %xmm10, %xmm13 + subsd %xmm13, %xmm8 + movsd -14 * SIZE(AO), %xmm14 + mulsd %xmm10, %xmm14 + subsd %xmm14, %xmm11 + movsd -13 * SIZE(AO), %xmm15 + mulsd %xmm10, %xmm15 + subsd %xmm15, %xmm9 + + movsd -11 * SIZE(AO), %xmm12 + mulsd %xmm12, %xmm8 + + movsd -10 * SIZE(AO), %xmm13 + mulsd %xmm8, %xmm13 + subsd %xmm13, %xmm11 + movsd -9 * SIZE(AO), %xmm14 + mulsd %xmm8, %xmm14 + subsd %xmm14, %xmm9 + + movsd -6 * SIZE(AO), %xmm12 + mulsd %xmm12, %xmm11 + + movsd -5 * SIZE(AO), %xmm13 + mulsd %xmm11, %xmm13 + subsd %xmm13, %xmm9 + + movsd -1 * SIZE(AO), %xmm12 + mulsd %xmm12, %xmm9 + + unpcklpd %xmm8, %xmm10 + unpcklpd %xmm9, %xmm11 +#endif + +#ifdef RN + movddup -16 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm10 + mulpd %xmm8, %xmm11 +#endif + +#ifdef RT + movddup -16 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm10 + mulpd %xmm8, %xmm11 +#endif + +#ifdef LN + subq $4 * SIZE, CO1 +#endif + + movlpd %xmm10, 0 * SIZE(CO1) + movhpd %xmm10, 1 * SIZE(CO1) + movlpd %xmm11, 2 * SIZE(CO1) + movhpd %xmm11, 3 * SIZE(CO1) + +#if defined(LN) || defined(LT) + movaps %xmm10, -16 * SIZE(BO) + movaps %xmm11, -14 * SIZE(BO) +#else + movaps %xmm10, -16 * SIZE(AO) + movaps %xmm11, -14 * SIZE(AO) +#endif + +#ifndef LN + addq $4 * SIZE, CO1 +#endif + +#if defined(LT) || defined(RN) + movq K, %rax + subq KK, %rax + leaq (,%rax, SIZE), %rax + leaq (AO, %rax, 4), AO + addq %rax, BO +#endif + +#ifdef LN + subq $4, KK +#endif + +#ifdef LT + addq $4, KK +#endif + +#ifdef RT + movq K, %rax + salq $2 + BASE_SHIFT, %rax + addq %rax, AORIG +#endif 
+ + decq I # i -- + jg .L91 + ALIGN_4 + +.L100: + testq $2, M + je .L110 + +#ifdef LN + movq K, %rax + salq $1 + BASE_SHIFT, %rax + subq %rax, AORIG +#endif + +#if defined(LN) || defined(RT) + movq KK, %rax + movq AORIG, AO + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 2), AO +#endif + + movq B, BO + +#if defined(LN) || defined(RT) + movq KK, %rax + leaq (BO, %rax, SIZE), BO +#endif + + movddup -16 * SIZE(BO), %xmm0 + pxor %xmm8, %xmm8 + movddup -15 * SIZE(BO), %xmm1 + pxor %xmm9, %xmm9 + movddup -14 * SIZE(BO), %xmm2 + pxor %xmm10, %xmm10 + movddup -13 * SIZE(BO), %xmm3 + pxor %xmm11, %xmm11 + +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $-4, %rax + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 2), AO + leaq (BO, %rax, 1), BO + negq %rax + NOBRANCH + je .L106 + ALIGN_4 + +.L102: + mulpd -16 * SIZE(AO, %rax, 2), %xmm0 + addpd %xmm0, %xmm8 + movddup -12 * SIZE(BO, %rax, 1), %xmm0 + + mulpd -14 * SIZE(AO, %rax, 2), %xmm1 + addpd %xmm1, %xmm9 + movddup -11 * SIZE(BO, %rax, 1), %xmm1 + + mulpd -12 * SIZE(AO, %rax, 2), %xmm2 + addpd %xmm2, %xmm10 + movddup -10 * SIZE(BO, %rax, 1), %xmm2 + + mulpd -10 * SIZE(AO, %rax, 2), %xmm3 + addpd %xmm3, %xmm11 + movddup -9 * SIZE(BO, %rax, 1), %xmm3 + + addq $4 * SIZE, %rax + BRANCH + jl .L102 + ALIGN_4 + +.L106: +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $3, %rax # if (k & 1) + je .L109 + + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 2), AO + leaq (BO, %rax, 1), BO + negq %rax + ALIGN_4 + +.L107: + movddup -16 * SIZE(BO, %rax, 1), %xmm0 + mulpd -16 * SIZE(AO, %rax, 2), %xmm0 + addpd %xmm0, %xmm8 + + addq $SIZE, %rax + jl .L107 + ALIGN_4 + +.L109: + addpd %xmm9, %xmm8 + addpd %xmm11, %xmm10 + addpd %xmm10, %xmm8 + +#if defined(LN) || defined(RT) + movq KK, %rax +#ifdef LN + subq $2, %rax +#else + subq $1, %rax +#endif + + leaq (, %rax, SIZE), %rax + + movq AORIG, AO + leaq (AO, %rax, 2), AO + leaq (B, %rax, 1), BO +#endif 
+ +#if defined(LN) || defined(LT) + movapd -16 * SIZE(BO), %xmm10 + subpd %xmm8, %xmm10 +#else + movapd -16 * SIZE(AO), %xmm10 + subpd %xmm8, %xmm10 +#endif + +#ifdef LN + movapd %xmm10, %xmm8 + unpckhpd %xmm8, %xmm8 + + movsd -13 * SIZE(AO), %xmm12 + mulsd %xmm12, %xmm8 + + movsd -14 * SIZE(AO), %xmm13 + mulsd %xmm8, %xmm13 + subsd %xmm13, %xmm10 + + movsd -16 * SIZE(AO), %xmm12 + mulsd %xmm12, %xmm10 + + unpcklpd %xmm8, %xmm10 +#endif + +#ifdef LT + movapd %xmm10, %xmm8 + unpckhpd %xmm8, %xmm8 + + movsd -16 * SIZE(AO), %xmm12 + mulsd %xmm12, %xmm10 + + movsd -15 * SIZE(AO), %xmm13 + mulsd %xmm10, %xmm13 + subsd %xmm13, %xmm8 + + movsd -13 * SIZE(AO), %xmm12 + mulsd %xmm12, %xmm8 + + unpcklpd %xmm8, %xmm10 +#endif + +#ifdef RN + movddup -16 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm10 +#endif + +#ifdef RT + movddup -16 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm10 +#endif + +#ifdef LN + subq $2 * SIZE, CO1 +#endif + +#if defined(LN) || defined(LT) + movlpd %xmm10, 0 * SIZE(CO1) + movhpd %xmm10, 1 * SIZE(CO1) +#else + movlpd %xmm10, 0 * SIZE(CO1) + movhpd %xmm10, 1 * SIZE(CO1) +#endif + +#if defined(LN) || defined(LT) + movaps %xmm10, -16 * SIZE(BO) +#else + movaps %xmm10, -16 * SIZE(AO) +#endif + +#ifndef LN + addq $2 * SIZE, CO1 +#endif + +#if defined(LT) || defined(RN) + movq K, %rax + subq KK, %rax + leaq (,%rax, SIZE), %rax + leaq (AO, %rax, 2), AO + addq %rax, BO +#endif + +#ifdef LN + subq $2, KK +#endif + +#ifdef LT + addq $2, KK +#endif + +#ifdef RT + movq K, %rax + salq $1 + BASE_SHIFT, %rax + addq %rax, AORIG +#endif + ALIGN_4 + +.L110: + testq $1, M + je .L119 + +#ifdef LN + movq K, %rax + salq $0 + BASE_SHIFT, %rax + subq %rax, AORIG +#endif + +#if defined(LN) || defined(RT) + movq KK, %rax + movq AORIG, AO + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 1), AO +#endif + + movq B, BO + +#if defined(LN) || defined(RT) + movq KK, %rax + leaq (BO, %rax, SIZE), BO +#endif + + movapd -16 * SIZE(AO), %xmm0 + pxor %xmm8, %xmm8 + movapd -14 * SIZE(AO), %xmm1 + pxor %xmm9, 
%xmm9 + +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $-4, %rax + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 1), AO + leaq (BO, %rax, 1), BO + negq %rax + NOBRANCH + je .L116 + ALIGN_4 + +.L112: + mulpd -16 * SIZE(BO, %rax, 1), %xmm0 + addpd %xmm0, %xmm8 + movapd -12 * SIZE(AO, %rax, 1), %xmm0 + + mulpd -14 * SIZE(BO, %rax, 1), %xmm1 + addpd %xmm1, %xmm9 + movapd -10 * SIZE(AO, %rax, 1), %xmm1 + + addq $4 * SIZE, %rax + BRANCH + jl .L112 + ALIGN_4 + +.L116: +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $3, %rax # if (k & 1) + je .L118 + + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 1), AO + leaq (BO, %rax, 1), BO + negq %rax + ALIGN_4 + +.L117: + mulsd -16 * SIZE(BO, %rax, 1), %xmm0 + addsd %xmm0, %xmm8 + movsd -15 * SIZE(AO, %rax, 1), %xmm0 + + addq $SIZE, %rax + jl .L117 + ALIGN_4 + +.L118: + addpd %xmm9, %xmm8 + haddpd %xmm8, %xmm8 + +#if defined(LN) || defined(RT) + movq KK, %rax +#ifdef LN + subq $1, %rax +#else + subq $1, %rax +#endif + + leaq (, %rax, SIZE), %rax + + movq AORIG, AO + leaq (AO, %rax, 1), AO + leaq (B, %rax, 1), BO +#endif + +#if defined(LN) || defined(LT) + movsd -16 * SIZE(BO), %xmm10 + subsd %xmm8, %xmm10 +#else + movsd -16 * SIZE(AO), %xmm10 + subsd %xmm8, %xmm10 +#endif + +#if defined(LN) || defined(LT) + movsd -16 * SIZE(AO), %xmm12 + mulsd %xmm12, %xmm10 +#endif + +#if defined(RN) || defined(RT) + movsd -16 * SIZE(BO), %xmm8 + mulsd %xmm8, %xmm10 +#endif + +#ifdef LN + subq $1 * SIZE, CO1 +#endif + + movsd %xmm10, 0 * SIZE(CO1) + +#if defined(LN) || defined(LT) + movlpd %xmm10, -16 * SIZE(BO) +#else + movlpd %xmm10, -16 * SIZE(AO) +#endif + +#ifndef LN + addq $1 * SIZE, CO1 +#endif + +#if defined(LT) || defined(RN) + movq K, %rax + subq KK, %rax + leaq (,%rax, SIZE), %rax + addq %rax, AO + addq %rax, BO +#endif + +#ifdef LN + subq $1, KK +#endif + +#ifdef LT + addq $1, KK +#endif + +#ifdef RT + movq K, %rax + salq $0 + 
BASE_SHIFT, %rax + addq %rax, AORIG +#endif + ALIGN_4 + +.L119: +#ifdef LN + leaq (B, K, SIZE), B +#endif + +#if defined(LT) || defined(RN) + movq BO, B +#endif + +#ifdef RN + addq $1, KK +#endif + +#ifdef RT + subq $1, KK +#endif + ALIGN_4 + +.L40: + testq $2, N + je .L80 + +#if defined(LT) || defined(RN) + movq A, AO +#else + movq A, AORIG +#endif + +#ifdef RT + movq K, %rax + salq $1 + BASE_SHIFT, %rax + subq %rax, B + + leaq (, LDC, 2), %rax + subq %rax, C +#endif + + movq C, CO1 # coffset1 = c + leaq (C, LDC, 1), CO2 # coffset2 = c + ldc +#ifndef RT + leaq (C, LDC, 2), C +#endif + +#ifdef LN + movq OFFSET, %rax + addq M, %rax + movq %rax, KK +#endif + +#if defined(LT) + movq OFFSET, %rax + movq %rax, KK +#endif + + movq M, I + sarq $2, I # i = (m >> 2) + jle .L60 + ALIGN_4 + +.L51: +#ifdef LN + movq K, %rax + salq $2 + BASE_SHIFT, %rax + subq %rax, AORIG +#endif + +#if defined(LN) || defined(RT) + movq KK, %rax + movq AORIG, AO + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 4), AO +#endif + + movq B, BO + +#if defined(LN) || defined(RT) + movq KK, %rax + leaq (, %rax, SIZE), %rax + leaq (BO, %rax, 2), BO +#endif + + movddup -16 * SIZE(BO), %xmm1 + movddup -15 * SIZE(BO), %xmm5 + pxor %xmm8, %xmm8 + movddup -12 * SIZE(BO), %xmm3 + pxor %xmm9, %xmm9 + movapd -16 * SIZE(AO), %xmm0 + pxor %xmm12, %xmm12 + movapd -8 * SIZE(AO), %xmm4 + pxor %xmm13, %xmm13 + +#ifndef LN + prefetchw 3 * SIZE(CO1) + movapd %xmm0, %xmm2 + prefetchw 5 * SIZE(CO2) +#else + prefetchw -4 * SIZE(CO1) + movapd %xmm0, %xmm2 + prefetchw -4 * SIZE(CO2) +#endif + + +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $-4, %rax + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 4), AO + leaq (BO, %rax, 2), BO + negq %rax + NOBRANCH + je .L56 + ALIGN_4 + +.L52: + mulpd %xmm1, %xmm0 + mulpd -14 * SIZE(AO, %rax, 4), %xmm1 + addpd %xmm0, %xmm8 + movapd -12 * SIZE(AO, %rax, 4), %xmm0 + addpd %xmm1, %xmm12 + movddup -14 * SIZE(BO, %rax, 2), %xmm1 + mulpd %xmm5, 
%xmm2 + mulpd -14 * SIZE(AO, %rax, 4), %xmm5 + addpd %xmm2, %xmm9 + addpd %xmm5, %xmm13 + movddup -13 * SIZE(BO, %rax, 2), %xmm5 + movapd %xmm0, %xmm2 + mulpd %xmm1, %xmm0 + mulpd -10 * SIZE(AO, %rax, 4), %xmm1 + addpd %xmm0, %xmm8 + movapd (AO, %rax, 4), %xmm0 + addpd %xmm1, %xmm12 + movddup -8 * SIZE(BO, %rax, 2), %xmm1 + mulpd %xmm5, %xmm2 + mulpd -10 * SIZE(AO, %rax, 4), %xmm5 + addpd %xmm2, %xmm9 + addpd %xmm5, %xmm13 + movddup -11 * SIZE(BO, %rax, 2), %xmm5 + movapd %xmm4, %xmm2 + mulpd %xmm3, %xmm4 + mulpd -6 * SIZE(AO, %rax, 4), %xmm3 + addpd %xmm4, %xmm8 + movapd -4 * SIZE(AO, %rax, 4), %xmm4 + addpd %xmm3, %xmm12 + movddup -10 * SIZE(BO, %rax, 2), %xmm3 + mulpd %xmm5, %xmm2 + mulpd -6 * SIZE(AO, %rax, 4), %xmm5 + addpd %xmm2, %xmm9 + addpd %xmm5, %xmm13 + movddup -9 * SIZE(BO, %rax, 2), %xmm5 + movapd %xmm4, %xmm2 + mulpd %xmm3, %xmm4 + mulpd -2 * SIZE(AO, %rax, 4), %xmm3 + addpd %xmm4, %xmm8 + movapd 8 * SIZE(AO, %rax, 4), %xmm4 + addpd %xmm3, %xmm12 + movddup -4 * SIZE(BO, %rax, 2), %xmm3 + mulpd %xmm5, %xmm2 + mulpd -2 * SIZE(AO, %rax, 4), %xmm5 + addpd %xmm2, %xmm9 + addpd %xmm5, %xmm13 + movddup -7 * SIZE(BO, %rax, 2), %xmm5 + movapd %xmm0, %xmm2 + + addq $4 * SIZE, %rax + BRANCH + jl .L52 + ALIGN_4 + +.L56: +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $3, %rax # if (k & 1) + je .L59 + + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 4), AO + leaq (BO, %rax, 2), BO + negq %rax + ALIGN_4 + +.L57: + mulpd %xmm1, %xmm0 + mulpd -14 * SIZE(AO, %rax, 4), %xmm1 + addpd %xmm0, %xmm8 + movapd -12 * SIZE(AO, %rax, 4), %xmm0 + addpd %xmm1, %xmm12 + movddup -14 * SIZE(BO, %rax, 2), %xmm1 + mulpd %xmm5, %xmm2 + mulpd -14 * SIZE(AO, %rax, 4), %xmm5 + addpd %xmm2, %xmm9 + addpd %xmm5, %xmm13 + movddup -13 * SIZE(BO, %rax, 2), %xmm5 + movapd %xmm0, %xmm2 + + addq $SIZE, %rax + jl .L57 + ALIGN_4 + +.L59: +#if defined(LN) || defined(RT) + movq KK, %rax +#ifdef LN + subq $4, %rax +#else + subq $2, %rax +#endif + + 
leaq (, %rax, SIZE), %rax + + movq AORIG, AO + leaq (AO, %rax, 4), AO + leaq (B, %rax, 2), BO +#endif + +#if defined(LN) || defined(LT) + movapd %xmm8, %xmm0 + unpcklpd %xmm9, %xmm8 + unpckhpd %xmm9, %xmm0 + + movapd %xmm12, %xmm4 + unpcklpd %xmm13, %xmm12 + unpckhpd %xmm13, %xmm4 + + movapd -16 * SIZE(BO), %xmm9 + movapd -14 * SIZE(BO), %xmm13 + movapd -12 * SIZE(BO), %xmm1 + movapd -10 * SIZE(BO), %xmm5 + + subpd %xmm8, %xmm9 + subpd %xmm0, %xmm13 + subpd %xmm12, %xmm1 + subpd %xmm4, %xmm5 +#else + movapd -16 * SIZE(AO), %xmm0 + movapd -14 * SIZE(AO), %xmm1 + movapd -12 * SIZE(AO), %xmm2 + movapd -10 * SIZE(AO), %xmm3 + + subpd %xmm8, %xmm0 + subpd %xmm12, %xmm1 + subpd %xmm9, %xmm2 + subpd %xmm13, %xmm3 +#endif + +#ifdef LN + movddup -1 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm5 + movddup -2 * SIZE(AO), %xmm10 + mulpd %xmm5, %xmm10 + subpd %xmm10, %xmm1 + movddup -3 * SIZE(AO), %xmm12 + mulpd %xmm5, %xmm12 + subpd %xmm12, %xmm13 + movddup -4 * SIZE(AO), %xmm14 + mulpd %xmm5, %xmm14 + subpd %xmm14, %xmm9 + + movddup -6 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm1 + movddup -7 * SIZE(AO), %xmm10 + mulpd %xmm1, %xmm10 + subpd %xmm10, %xmm13 + movddup -8 * SIZE(AO), %xmm12 + mulpd %xmm1, %xmm12 + subpd %xmm12, %xmm9 + + movddup -11 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm13 + movddup -12 * SIZE(AO), %xmm10 + mulpd %xmm13, %xmm10 + subpd %xmm10, %xmm9 + + movddup -16 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm9 +#endif + +#ifdef LT + movddup -16 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm9 + movddup -15 * SIZE(AO), %xmm10 + mulpd %xmm9, %xmm10 + subpd %xmm10, %xmm13 + movddup -14 * SIZE(AO), %xmm12 + mulpd %xmm9, %xmm12 + subpd %xmm12, %xmm1 + movddup -13 * SIZE(AO), %xmm14 + mulpd %xmm9, %xmm14 + subpd %xmm14, %xmm5 + + + movddup -11 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm13 + + movddup -10 * SIZE(AO), %xmm10 + mulpd %xmm13, %xmm10 + subpd %xmm10, %xmm1 + movddup -9 * SIZE(AO), %xmm12 + mulpd %xmm13, %xmm12 + subpd %xmm12, %xmm5 + + movddup -6 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm1 + movddup 
-5 * SIZE(AO), %xmm10 + mulpd %xmm1, %xmm10 + subpd %xmm10, %xmm5 + + movddup -1 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm5 +#endif + +#ifdef RN + movddup -16 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm0 + mulpd %xmm8, %xmm1 + + movddup -15 * SIZE(BO), %xmm9 + mulpd %xmm0, %xmm9 + subpd %xmm9, %xmm2 + movddup -15 * SIZE(BO), %xmm9 + mulpd %xmm1, %xmm9 + subpd %xmm9, %xmm3 + + movddup -13 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm2 + mulpd %xmm8, %xmm3 +#endif + +#ifdef RT + movddup -13 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm2 + mulpd %xmm8, %xmm3 + + movddup -14 * SIZE(BO), %xmm9 + mulpd %xmm2, %xmm9 + subpd %xmm9, %xmm0 + movddup -14 * SIZE(BO), %xmm9 + mulpd %xmm3, %xmm9 + subpd %xmm9, %xmm1 + + movddup -16 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm0 + mulpd %xmm8, %xmm1 +#endif + +#ifdef LN + subq $4 * SIZE, CO1 + subq $4 * SIZE, CO2 +#endif + +#if defined(LN) || defined(LT) + movlpd %xmm9, 0 * SIZE(CO1) + movlpd %xmm13, 1 * SIZE(CO1) + movlpd %xmm1, 2 * SIZE(CO1) + movlpd %xmm5, 3 * SIZE(CO1) + + movhpd %xmm9, 0 * SIZE(CO2) + movhpd %xmm13, 1 * SIZE(CO2) + movhpd %xmm1, 2 * SIZE(CO2) + movhpd %xmm5, 3 * SIZE(CO2) +#else + movlpd %xmm0, 0 * SIZE(CO1) + movhpd %xmm0, 1 * SIZE(CO1) + movlpd %xmm1, 2 * SIZE(CO1) + movhpd %xmm1, 3 * SIZE(CO1) + + movlpd %xmm2, 0 * SIZE(CO2) + movhpd %xmm2, 1 * SIZE(CO2) + movlpd %xmm3, 2 * SIZE(CO2) + movhpd %xmm3, 3 * SIZE(CO2) +#endif + +#if defined(LN) || defined(LT) + movaps %xmm9, -16 * SIZE(BO) + movaps %xmm13,-14 * SIZE(BO) + movaps %xmm1, -12 * SIZE(BO) + movaps %xmm5, -10 * SIZE(BO) +#else + movaps %xmm0, -16 * SIZE(AO) + movaps %xmm1, -14 * SIZE(AO) + movaps %xmm2, -12 * SIZE(AO) + movaps %xmm3, -10 * SIZE(AO) +#endif + +#ifndef LN + addq $4 * SIZE, CO1 + addq $4 * SIZE, CO2 +#endif + +#if defined(LT) || defined(RN) + movq K, %rax + subq KK, %rax + leaq (,%rax, SIZE), %rax + leaq (AO, %rax, 4), AO + leaq (BO, %rax, 2), BO +#endif + +#ifdef LN + subq $4, KK +#endif + +#ifdef LT + addq $4, KK +#endif + +#ifdef RT + movq K, %rax + salq $2 + 
BASE_SHIFT, %rax + addq %rax, AORIG +#endif + + decq I # i -- + jg .L51 + ALIGN_4 + +.L60: + testq $2, M + je .L70 + +#ifdef LN + movq K, %rax + salq $1 + BASE_SHIFT, %rax + subq %rax, AORIG +#endif + +#if defined(LN) || defined(RT) + movq KK, %rax + movq AORIG, AO + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 2), AO +#endif + + movq B, BO + +#if defined(LN) || defined(RT) + movq KK, %rax + leaq (, %rax, SIZE), %rax + leaq (BO, %rax, 2), BO +#endif + + movapd -16 * SIZE(AO), %xmm0 + pxor %xmm8, %xmm8 + movapd -12 * SIZE(AO), %xmm2 + pxor %xmm9, %xmm9 + movddup -16 * SIZE(BO), %xmm1 + pxor %xmm10, %xmm10 + movddup -15 * SIZE(BO), %xmm3 + pxor %xmm11, %xmm11 + +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $-4, %rax + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 2), AO + leaq (BO, %rax, 2), BO + negq %rax + NOBRANCH + je .L66 + ALIGN_4 + +.L62: + mulpd %xmm0, %xmm1 + addpd %xmm1, %xmm8 + movddup -14 * SIZE(BO, %rax, 2), %xmm1 + mulpd %xmm0, %xmm3 + movapd -14 * SIZE(AO, %rax, 2), %xmm0 + addpd %xmm3, %xmm9 + movddup -13 * SIZE(BO, %rax, 2), %xmm3 + mulpd %xmm0, %xmm1 + addpd %xmm1, %xmm10 + movddup -12 * SIZE(BO, %rax, 2), %xmm1 + mulpd %xmm0, %xmm3 + movapd -8 * SIZE(AO, %rax, 2), %xmm0 + addpd %xmm3, %xmm11 + movddup -11 * SIZE(BO, %rax, 2), %xmm3 + mulpd %xmm2, %xmm1 + addpd %xmm1, %xmm8 + movddup -10 * SIZE(BO, %rax, 2), %xmm1 + mulpd %xmm2, %xmm3 + movapd -10 * SIZE(AO, %rax, 2), %xmm2 + addpd %xmm3, %xmm9 + movddup -9 * SIZE(BO, %rax, 2), %xmm3 + mulpd %xmm2, %xmm1 + addpd %xmm1, %xmm10 + movddup -8 * SIZE(BO, %rax, 2), %xmm1 + mulpd %xmm2, %xmm3 + movapd -4 * SIZE(AO, %rax, 2), %xmm2 + addpd %xmm3, %xmm11 + movddup -7 * SIZE(BO, %rax, 2), %xmm3 + + addq $4 * SIZE, %rax + BRANCH + jl .L62 + ALIGN_4 + +.L66: +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $3, %rax # if (k & 1) + je .L69 + + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 2), AO + leaq (BO, %rax, 
2), BO + negq %rax + ALIGN_4 + +.L67: + mulpd %xmm0, %xmm1 + addpd %xmm1, %xmm8 + movddup -14 * SIZE(BO, %rax, 2), %xmm1 + mulpd %xmm0, %xmm3 + movapd -14 * SIZE(AO, %rax, 2), %xmm0 + addpd %xmm3, %xmm9 + movddup -13 * SIZE(BO, %rax, 2), %xmm3 + + addq $SIZE, %rax + jl .L67 + ALIGN_4 + +.L69: + addpd %xmm10, %xmm8 + addpd %xmm11, %xmm9 + +#if defined(LN) || defined(RT) + movq KK, %rax +#ifdef LN + subq $2, %rax +#else + subq $2, %rax +#endif + + leaq (, %rax, SIZE), %rax + + movq AORIG, AO + leaq (AO, %rax, 2), AO + leaq (B, %rax, 2), BO +#endif + +#if defined(LN) || defined(LT) + movapd %xmm8, %xmm0 + unpcklpd %xmm9, %xmm8 + unpckhpd %xmm9, %xmm0 + + movapd -16 * SIZE(BO), %xmm9 + movapd -14 * SIZE(BO), %xmm13 + + subpd %xmm8, %xmm9 + subpd %xmm0, %xmm13 +#else + movapd -16 * SIZE(AO), %xmm0 + movapd -14 * SIZE(AO), %xmm2 + + subpd %xmm8, %xmm0 + subpd %xmm9, %xmm2 +#endif + + +#ifdef LN + movddup -13 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm13 + + movddup -14 * SIZE(AO), %xmm10 + mulpd %xmm13, %xmm10 + subpd %xmm10, %xmm9 + + movddup -16 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm9 +#endif + +#ifdef LT + movddup -16 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm9 + + movddup -15 * SIZE(AO), %xmm10 + mulpd %xmm9, %xmm10 + subpd %xmm10, %xmm13 + + movddup -13 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm13 +#endif + +#ifdef RN + movddup -16 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm0 + + movddup -15 * SIZE(BO), %xmm9 + mulpd %xmm0, %xmm9 + subpd %xmm9, %xmm2 + + movddup -13 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm2 +#endif + +#ifdef RT + movddup -13 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm2 + + movddup -14 * SIZE(BO), %xmm9 + mulpd %xmm2, %xmm9 + subpd %xmm9, %xmm0 + + movddup -16 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm0 +#endif + +#ifdef LN + subq $2 * SIZE, CO1 + subq $2 * SIZE, CO2 +#endif + +#if defined(LN) || defined(LT) + movlpd %xmm9, 0 * SIZE(CO1) + movlpd %xmm13, 1 * SIZE(CO1) + + movhpd %xmm9, 0 * SIZE(CO2) + movhpd %xmm13, 1 * SIZE(CO2) +#else + movlpd %xmm0, 0 * SIZE(CO1) + movhpd %xmm0, 1 * 
SIZE(CO1) + + movlpd %xmm2, 0 * SIZE(CO2) + movhpd %xmm2, 1 * SIZE(CO2) +#endif + +#if defined(LN) || defined(LT) + movaps %xmm9, -16 * SIZE(BO) + movaps %xmm13, -14 * SIZE(BO) +#else + movaps %xmm0, -16 * SIZE(AO) + movaps %xmm2, -14 * SIZE(AO) +#endif + +#ifndef LN + addq $2 * SIZE, CO1 + addq $2 * SIZE, CO2 +#endif + +#if defined(LT) || defined(RN) + movq K, %rax + subq KK, %rax + leaq (,%rax, SIZE), %rax + leaq (AO, %rax, 2), AO + leaq (BO, %rax, 2), BO +#endif + +#ifdef LN + subq $2, KK +#endif + +#ifdef LT + addq $2, KK +#endif + +#ifdef RT + movq K, %rax + salq $1 + BASE_SHIFT, %rax + addq %rax, AORIG +#endif + ALIGN_4 + +.L70: + testq $1, M + je .L79 + ALIGN_4 + +.L71: +#ifdef LN + movq K, %rax + salq $0 + BASE_SHIFT, %rax + subq %rax, AORIG +#endif + +#if defined(LN) || defined(RT) + movq KK, %rax + movq AORIG, AO + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 1), AO +#endif + + movq B, BO + +#if defined(LN) || defined(RT) + movq KK, %rax + salq $1 + BASE_SHIFT, %rax + leaq (BO, %rax, 1), BO +#endif + + movddup -16 * SIZE(AO), %xmm0 + pxor %xmm8, %xmm8 + movddup -15 * SIZE(AO), %xmm1 + pxor %xmm9, %xmm9 + movddup -14 * SIZE(AO), %xmm2 + pxor %xmm10, %xmm10 + movddup -13 * SIZE(AO), %xmm3 + pxor %xmm11, %xmm11 + +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $-4, %rax + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 1), AO + leaq (BO, %rax, 2), BO + negq %rax + NOBRANCH + je .L76 + ALIGN_4 + +.L72: + mulpd -16 * SIZE(BO, %rax, 2), %xmm0 + addpd %xmm0, %xmm8 + movddup -12 * SIZE(AO, %rax, 1), %xmm0 + + mulpd -14 * SIZE(BO, %rax, 2), %xmm1 + addpd %xmm1, %xmm9 + movddup -11 * SIZE(AO, %rax, 1), %xmm1 + + mulpd -12 * SIZE(BO, %rax, 2), %xmm2 + addpd %xmm2, %xmm10 + movddup -10 * SIZE(AO, %rax, 1), %xmm2 + + mulpd -10 * SIZE(BO, %rax, 2), %xmm3 + addpd %xmm3, %xmm11 + movddup -9 * SIZE(AO, %rax, 1), %xmm3 + + addq $4 * SIZE, %rax + BRANCH + jl .L72 + ALIGN_4 + +.L76: +#if defined(LT) || defined(RN) + movq KK, 
%rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $3, %rax # if (k & 1) + je .L78 + + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 1), AO + leaq (BO, %rax, 2), BO + negq %rax + ALIGN_4 + +.L77: + mulpd -16 * SIZE(BO, %rax, 2), %xmm0 + addpd %xmm0, %xmm8 + movddup -15 * SIZE(AO, %rax, 1), %xmm0 + + addq $SIZE, %rax + jl .L77 + ALIGN_4 + +.L78: + addpd %xmm9, %xmm8 + addpd %xmm11, %xmm10 + addpd %xmm10, %xmm8 + +#if defined(LN) || defined(RT) + movq KK, %rax +#ifdef LN + subq $1, %rax +#else + subq $2, %rax +#endif + + leaq (, %rax, SIZE), %rax + + movq AORIG, AO + leaq (AO, %rax, 1), AO + leaq (B, %rax, 2), BO +#endif + +#if defined(LN) || defined(LT) + movapd -16 * SIZE(BO), %xmm2 +#else + movapd -16 * SIZE(AO), %xmm2 +#endif + + subpd %xmm8, %xmm2 + +#if defined(LN) || defined(LT) + movddup -16 * SIZE(AO), %xmm0 + + mulpd %xmm0, %xmm2 +#endif + +#ifdef RN + movapd %xmm2, %xmm0 + unpckhpd %xmm0, %xmm0 + + mulsd -16 * SIZE(BO), %xmm2 + movsd -15 * SIZE(BO), %xmm4 + mulsd %xmm2, %xmm4 + subsd %xmm4, %xmm0 + + mulsd -13 * SIZE(BO), %xmm0 + unpcklpd %xmm0, %xmm2 +#endif + +#ifdef RT + movapd %xmm2, %xmm0 + unpckhpd %xmm0, %xmm0 + + mulsd -13 * SIZE(BO), %xmm0 + + movlpd -14 * SIZE(BO), %xmm4 + mulsd %xmm0, %xmm4 + subsd %xmm4, %xmm2 + + mulsd -16 * SIZE(BO), %xmm2 + unpcklpd %xmm0, %xmm2 +#endif + +#ifdef LN + subq $1 * SIZE, CO1 + subq $1 * SIZE, CO2 +#endif + + movlpd %xmm2, 0 * SIZE(CO1) + movhpd %xmm2, 0 * SIZE(CO2) + +#if defined(LN) || defined(LT) + movaps %xmm2, -16 * SIZE(BO) +#else + movaps %xmm2, -16 * SIZE(AO) +#endif + +#ifndef LN + addq $1 * SIZE, CO1 + addq $1 * SIZE, CO2 +#endif + +#if defined(LT) || defined(RN) + movq K, %rax + subq KK, %rax + leaq (,%rax, SIZE), %rax + leaq (AO, %rax, 1), AO + leaq (BO, %rax, 2), BO +#endif + +#ifdef LN + subq $1, KK +#endif + +#ifdef LT + addq $1, KK +#endif + +#ifdef RT + movq K, %rax + salq $0 + BASE_SHIFT, %rax + addq %rax, AORIG +#endif + ALIGN_4 + +.L79: +#ifdef LN + leaq (, K, SIZE), %rax + leaq (B, %rax, 
2), B +#endif + +#if defined(LT) || defined(RN) + movq BO, B +#endif + +#ifdef RN + addq $2, KK +#endif + +#ifdef RT + subq $2, KK +#endif + ALIGN_4 + +.L80: + movq N, J + sarq $2, J # j = (n >> 2) + jle .L999 + +.L01: +#if defined(LT) || defined(RN) + movq A, AO +#else + movq A, AORIG +#endif + +#ifdef RT + movq K, %rax + salq $2 + BASE_SHIFT, %rax + subq %rax, B + + leaq (, LDC, 4), %rax + subq %rax, C +#endif + + movq C, CO1 # coffset1 = c + leaq (C, LDC, 1), CO2 # coffset2 = c + ldc +#ifndef RT + leaq (C, LDC, 4), C +#endif + +#ifdef LN + movq OFFSET, %rax + addq M, %rax + movq %rax, KK +#endif + + movq K, %rax + salq $BASE_SHIFT + 2, %rax + movq B, BB + subq %rax, BB + +#if defined(LT) + movq OFFSET, %rax + movq %rax, KK +#endif + + movq M, I + sarq $2, I # i = (m >> 2) + jle .L20 + ALIGN_4 + +.L11: +#ifdef LN + movq K, %rax + salq $2 + BASE_SHIFT, %rax + subq %rax, AORIG +#endif + +#if defined(LN) || defined(RT) + movq KK, %rax + movq AORIG, AO + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 4), AO +#endif + + movq B, BO + +#if defined(LN) || defined(RT) + movq KK, %rax + leaq (, %rax, SIZE), %rax + leaq (BO, %rax, 4), BO +#endif + + movapd -16 * SIZE(AO), %xmm0 + movddup -16 * SIZE(BO), %xmm1 + pxor %xmm8, %xmm8 + movddup -15 * SIZE(BO), %xmm3 + pxor %xmm9, %xmm9 + movapd -8 * SIZE(AO), %xmm4 + pxor %xmm10, %xmm10 + movddup -8 * SIZE(BO), %xmm5 + pxor %xmm11, %xmm11 + +#ifndef LN + prefetchw 3 * SIZE(CO1) + pxor %xmm12, %xmm12 + prefetchw 5 * SIZE(CO2) + pxor %xmm13, %xmm13 + prefetchw 3 * SIZE(CO1, LDC, 2) + pxor %xmm14, %xmm14 + prefetchw 5 * SIZE(CO2, LDC, 2) + pxor %xmm15, %xmm15 + movapd %xmm0, %xmm2 +#else + prefetchw -8 * SIZE(CO1) + pxor %xmm12, %xmm12 + prefetchw -8 * SIZE(CO2) + pxor %xmm13, %xmm13 + prefetchw -8 * SIZE(CO1, LDC, 2) + pxor %xmm14, %xmm14 + prefetchw -8 * SIZE(CO2, LDC, 2) + pxor %xmm15, %xmm15 + movapd %xmm0, %xmm2 +#endif + + subq $-16 * SIZE, BB + +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax 
+#endif + + andq $-8, %rax + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 4), AO + leaq (BO, %rax, 4), BO + negq %rax + NOBRANCH + je .L15 + vmovups -16 * SIZE(AO, %rax, 4),%xmm6 + vmovddup -16 * SIZE(BO, %rax, 4), %xmm7 + vmovddup -15 * SIZE(BO, %rax, 4), %xmm3 + + ALIGN_4 + +.L12: + + prefetcht0 A_PR1(AO,%rax,4) + prefetcht0 B_PR1(BO,%rax,4) + KERNEL1(16 * 0) + KERNEL2(16 * 0) + prefetcht0 A_PR1+64(AO,%rax,4) + prefetcht0 B_PR1+64(BO,%rax,4) + KERNEL3(16 * 0) + KERNEL4(16 * 0) + prefetcht0 A_PR1+128(AO,%rax,4) + prefetcht0 B_PR1+128(BO,%rax,4) + KERNEL5(16 * 0) + KERNEL6(16 * 0) + prefetcht0 A_PR1+192(AO,%rax,4) + prefetcht0 B_PR1+192(BO,%rax,4) + KERNEL7(16 * 0) + KERNEL8(16 * 0) + + addq $8 * SIZE, %rax + jl .L12 + ALIGN_4 + +.L15: +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + testq $4, %rax + je .L16 + xorq %rax, %rax + ALIGN_4 + + KERNEL_SUB1(16 * 0) + KERNEL_SUB2(16 * 0) + KERNEL_SUB3(16 * 0) + KERNEL_SUB4(16 * 0) + + subq $-16 * SIZE, BO + subq $-16 * SIZE, AO + ALIGN_4 + +.L16: +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $3, %rax # if (k & 1) + je .L19 + + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 4), AO + leaq (BO, %rax, 4), BO + negq %rax + ALIGN_4 + +.L17: + mulpd %xmm1, %xmm0 + mulpd -14 * SIZE(AO, %rax, 4), %xmm1 + addpd %xmm0, %xmm8 + movapd %xmm2, %xmm0 + addpd %xmm1, %xmm12 + movddup -14 * SIZE(BO, %rax, 4), %xmm1 + mulpd %xmm3, %xmm2 + mulpd -14 * SIZE(AO, %rax, 4), %xmm3 + addpd %xmm2, %xmm9 + movapd %xmm0, %xmm2 + addpd %xmm3, %xmm13 + movddup -13 * SIZE(BO, %rax, 4), %xmm3 + mulpd %xmm1, %xmm0 + mulpd -14 * SIZE(AO, %rax, 4), %xmm1 + addpd %xmm0, %xmm10 + movapd -12 * SIZE(AO, %rax, 4), %xmm0 + addpd %xmm1, %xmm14 + movddup -12 * SIZE(BO, %rax, 4), %xmm1 + mulpd %xmm3, %xmm2 + mulpd -14 * SIZE(AO, %rax, 4), %xmm3 + addpd %xmm2, %xmm11 + addpd %xmm3, %xmm15 + movddup -11 * SIZE(BO, %rax, 4), %xmm3 + movapd %xmm0, %xmm2 + + addq $SIZE, %rax 
+ jl .L17 + ALIGN_4 + +.L19: +#if defined(LN) || defined(RT) + movq KK, %rax +#ifdef LN + subq $4, %rax +#else + subq $4, %rax +#endif + + leaq (, %rax, SIZE), %rax + + movq AORIG, AO + leaq (AO, %rax, 4), AO + leaq (B, %rax, 4), BO +#endif + +#if defined(LN) || defined(LT) + movapd %xmm8, %xmm0 + unpcklpd %xmm9, %xmm8 + unpckhpd %xmm9, %xmm0 + + movapd %xmm10, %xmm2 + unpcklpd %xmm11, %xmm10 + unpckhpd %xmm11, %xmm2 + + movapd %xmm12, %xmm4 + unpcklpd %xmm13, %xmm12 + unpckhpd %xmm13, %xmm4 + + movapd %xmm14, %xmm6 + unpcklpd %xmm15, %xmm14 + unpckhpd %xmm15, %xmm6 + + movapd -16 * SIZE(BO), %xmm9 + movapd -14 * SIZE(BO), %xmm11 + movapd -12 * SIZE(BO), %xmm13 + movapd -10 * SIZE(BO), %xmm15 + movapd -8 * SIZE(BO), %xmm1 + movapd -6 * SIZE(BO), %xmm3 + movapd -4 * SIZE(BO), %xmm5 + movapd -2 * SIZE(BO), %xmm7 + + subpd %xmm8, %xmm9 + subpd %xmm10, %xmm11 + subpd %xmm0, %xmm13 + subpd %xmm2, %xmm15 + subpd %xmm12, %xmm1 + subpd %xmm14, %xmm3 + subpd %xmm4, %xmm5 + subpd %xmm6, %xmm7 +#else + movapd -16 * SIZE(AO), %xmm0 + movapd -14 * SIZE(AO), %xmm1 + movapd -12 * SIZE(AO), %xmm2 + movapd -10 * SIZE(AO), %xmm3 + + movapd -8 * SIZE(AO), %xmm4 + movapd -6 * SIZE(AO), %xmm5 + movapd -4 * SIZE(AO), %xmm6 + movapd -2 * SIZE(AO), %xmm7 + + subpd %xmm8, %xmm0 + subpd %xmm12, %xmm1 + subpd %xmm9, %xmm2 + subpd %xmm13, %xmm3 + subpd %xmm10, %xmm4 + subpd %xmm14, %xmm5 + subpd %xmm11, %xmm6 + subpd %xmm15, %xmm7 +#endif + +#ifdef LN + movddup -1 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm5 + mulpd %xmm8, %xmm7 + + movddup -2 * SIZE(AO), %xmm10 + mulpd %xmm5, %xmm10 + subpd %xmm10, %xmm1 + movddup -2 * SIZE(AO), %xmm10 + mulpd %xmm7, %xmm10 + subpd %xmm10, %xmm3 + + movddup -3 * SIZE(AO), %xmm12 + mulpd %xmm5, %xmm12 + subpd %xmm12, %xmm13 + movddup -3 * SIZE(AO), %xmm12 + mulpd %xmm7, %xmm12 + subpd %xmm12, %xmm15 + + movddup -4 * SIZE(AO), %xmm14 + mulpd %xmm5, %xmm14 + subpd %xmm14, %xmm9 + movddup -4 * SIZE(AO), %xmm14 + mulpd %xmm7, %xmm14 + subpd %xmm14, %xmm11 + + movddup 
-6 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm1 + mulpd %xmm8, %xmm3 + + movddup -7 * SIZE(AO), %xmm10 + mulpd %xmm1, %xmm10 + subpd %xmm10, %xmm13 + movddup -7 * SIZE(AO), %xmm10 + mulpd %xmm3, %xmm10 + subpd %xmm10, %xmm15 + + movddup -8 * SIZE(AO), %xmm12 + mulpd %xmm1, %xmm12 + subpd %xmm12, %xmm9 + movddup -8 * SIZE(AO), %xmm12 + mulpd %xmm3, %xmm12 + subpd %xmm12, %xmm11 + + movddup -11 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm13 + mulpd %xmm8, %xmm15 + + movddup -12 * SIZE(AO), %xmm10 + mulpd %xmm13, %xmm10 + subpd %xmm10, %xmm9 + movddup -12 * SIZE(AO), %xmm10 + mulpd %xmm15, %xmm10 + subpd %xmm10, %xmm11 + + movddup -16 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm9 + mulpd %xmm8, %xmm11 +#endif + +#ifdef LT + movddup -16 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm9 + mulpd %xmm8, %xmm11 + + movddup -15 * SIZE(AO), %xmm10 + mulpd %xmm9, %xmm10 + subpd %xmm10, %xmm13 + + movddup -15 * SIZE(AO), %xmm10 + mulpd %xmm11, %xmm10 + subpd %xmm10, %xmm15 + + movddup -14 * SIZE(AO), %xmm12 + mulpd %xmm9, %xmm12 + subpd %xmm12, %xmm1 + movddup -14 * SIZE(AO), %xmm12 + mulpd %xmm11, %xmm12 + subpd %xmm12, %xmm3 + + movddup -13 * SIZE(AO), %xmm14 + mulpd %xmm9, %xmm14 + subpd %xmm14, %xmm5 + movddup -13 * SIZE(AO), %xmm14 + mulpd %xmm11, %xmm14 + subpd %xmm14, %xmm7 + + movddup -11 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm13 + mulpd %xmm8, %xmm15 + + movddup -10 * SIZE(AO), %xmm10 + mulpd %xmm13, %xmm10 + subpd %xmm10, %xmm1 + movddup -10 * SIZE(AO), %xmm10 + mulpd %xmm15, %xmm10 + subpd %xmm10, %xmm3 + + movddup -9 * SIZE(AO), %xmm12 + mulpd %xmm13, %xmm12 + subpd %xmm12, %xmm5 + movddup -9 * SIZE(AO), %xmm12 + mulpd %xmm15, %xmm12 + subpd %xmm12, %xmm7 + + movddup -6 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm1 + mulpd %xmm8, %xmm3 + + movddup -5 * SIZE(AO), %xmm10 + mulpd %xmm1, %xmm10 + subpd %xmm10, %xmm5 + movddup -5 * SIZE(AO), %xmm10 + mulpd %xmm3, %xmm10 + subpd %xmm10, %xmm7 + + movddup -1 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm5 + mulpd %xmm8, %xmm7 +#endif + +#ifdef RN + movddup -16 * 
SIZE(BO), %xmm8 + mulpd %xmm8, %xmm0 + mulpd %xmm8, %xmm1 + + movddup -15 * SIZE(BO), %xmm9 + mulpd %xmm0, %xmm9 + subpd %xmm9, %xmm2 + movddup -15 * SIZE(BO), %xmm9 + mulpd %xmm1, %xmm9 + subpd %xmm9, %xmm3 + + movddup -14 * SIZE(BO), %xmm10 + mulpd %xmm0, %xmm10 + subpd %xmm10, %xmm4 + movddup -14 * SIZE(BO), %xmm10 + mulpd %xmm1, %xmm10 + subpd %xmm10, %xmm5 + + movddup -13 * SIZE(BO), %xmm11 + mulpd %xmm0, %xmm11 + subpd %xmm11, %xmm6 + movddup -13 * SIZE(BO), %xmm11 + mulpd %xmm1, %xmm11 + subpd %xmm11, %xmm7 + + movddup -11 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm2 + mulpd %xmm8, %xmm3 + + movddup -10 * SIZE(BO), %xmm9 + mulpd %xmm2, %xmm9 + subpd %xmm9, %xmm4 + movddup -10 * SIZE(BO), %xmm9 + mulpd %xmm3, %xmm9 + subpd %xmm9, %xmm5 + + movddup -9 * SIZE(BO), %xmm10 + mulpd %xmm2, %xmm10 + subpd %xmm10, %xmm6 + movddup -9 * SIZE(BO), %xmm10 + mulpd %xmm3, %xmm10 + subpd %xmm10, %xmm7 + + movddup -6 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm4 + mulpd %xmm8, %xmm5 + + movddup -5 * SIZE(BO), %xmm9 + mulpd %xmm4, %xmm9 + subpd %xmm9, %xmm6 + movddup -5 * SIZE(BO), %xmm9 + mulpd %xmm5, %xmm9 + subpd %xmm9, %xmm7 + + movddup -1 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm6 + mulpd %xmm8, %xmm7 +#endif + +#ifdef RT + movddup -1 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm6 + mulpd %xmm8, %xmm7 + + movddup -2 * SIZE(BO), %xmm9 + mulpd %xmm6, %xmm9 + subpd %xmm9, %xmm4 + movddup -2 * SIZE(BO), %xmm9 + mulpd %xmm7, %xmm9 + subpd %xmm9, %xmm5 + + movddup -3 * SIZE(BO), %xmm10 + mulpd %xmm6, %xmm10 + subpd %xmm10, %xmm2 + movddup -3 * SIZE(BO), %xmm10 + mulpd %xmm7, %xmm10 + subpd %xmm10, %xmm3 + + movddup -4 * SIZE(BO), %xmm11 + mulpd %xmm6, %xmm11 + subpd %xmm11, %xmm0 + movddup -4 * SIZE(BO), %xmm11 + mulpd %xmm7, %xmm11 + subpd %xmm11, %xmm1 + + movddup -6 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm4 + mulpd %xmm8, %xmm5 + + movddup -7 * SIZE(BO), %xmm9 + mulpd %xmm4, %xmm9 + subpd %xmm9, %xmm2 + movddup -7 * SIZE(BO), %xmm9 + mulpd %xmm5, %xmm9 + subpd %xmm9, %xmm3 + + movddup -8 * SIZE(BO), 
%xmm10 + mulpd %xmm4, %xmm10 + subpd %xmm10, %xmm0 + movddup -8 * SIZE(BO), %xmm10 + mulpd %xmm5, %xmm10 + subpd %xmm10, %xmm1 + + movddup -11 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm2 + mulpd %xmm8, %xmm3 + + movddup -12 * SIZE(BO), %xmm9 + mulpd %xmm2, %xmm9 + subpd %xmm9, %xmm0 + movddup -12 * SIZE(BO), %xmm9 + mulpd %xmm3, %xmm9 + subpd %xmm9, %xmm1 + + movddup -16 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm0 + mulpd %xmm8, %xmm1 +#endif + +#ifdef LN + subq $4 * SIZE, CO1 + subq $4 * SIZE, CO2 +#endif + +#if defined(LN) || defined(LT) + movlpd %xmm9, 0 * SIZE(CO1) + movlpd %xmm13, 1 * SIZE(CO1) + movlpd %xmm1, 2 * SIZE(CO1) + movlpd %xmm5, 3 * SIZE(CO1) + + movhpd %xmm9, 0 * SIZE(CO2) + movhpd %xmm13, 1 * SIZE(CO2) + movhpd %xmm1, 2 * SIZE(CO2) + movhpd %xmm5, 3 * SIZE(CO2) + + movlpd %xmm11, 0 * SIZE(CO1, LDC, 2) + movlpd %xmm15, 1 * SIZE(CO1, LDC, 2) + movlpd %xmm3, 2 * SIZE(CO1, LDC, 2) + movlpd %xmm7, 3 * SIZE(CO1, LDC, 2) + + movhpd %xmm11, 0 * SIZE(CO2, LDC, 2) + movhpd %xmm15, 1 * SIZE(CO2, LDC, 2) + movhpd %xmm3, 2 * SIZE(CO2, LDC, 2) + movhpd %xmm7, 3 * SIZE(CO2, LDC, 2) +#else + movlpd %xmm0, 0 * SIZE(CO1) + movhpd %xmm0, 1 * SIZE(CO1) + movlpd %xmm1, 2 * SIZE(CO1) + movhpd %xmm1, 3 * SIZE(CO1) + + movlpd %xmm2, 0 * SIZE(CO2) + movhpd %xmm2, 1 * SIZE(CO2) + movlpd %xmm3, 2 * SIZE(CO2) + movhpd %xmm3, 3 * SIZE(CO2) + + movlpd %xmm4, 0 * SIZE(CO1, LDC, 2) + movhpd %xmm4, 1 * SIZE(CO1, LDC, 2) + movlpd %xmm5, 2 * SIZE(CO1, LDC, 2) + movhpd %xmm5, 3 * SIZE(CO1, LDC, 2) + + movlpd %xmm6, 0 * SIZE(CO2, LDC, 2) + movhpd %xmm6, 1 * SIZE(CO2, LDC, 2) + movlpd %xmm7, 2 * SIZE(CO2, LDC, 2) + movhpd %xmm7, 3 * SIZE(CO2, LDC, 2) +#endif + +#if defined(LN) || defined(LT) + movaps %xmm9, -16 * SIZE(BO) + movaps %xmm11, -14 * SIZE(BO) + movaps %xmm13, -12 * SIZE(BO) + movaps %xmm15, -10 * SIZE(BO) + movaps %xmm1, -8 * SIZE(BO) + movaps %xmm3, -6 * SIZE(BO) + movaps %xmm5, -4 * SIZE(BO) + movaps %xmm7, -2 * SIZE(BO) +#else + movaps %xmm0, -16 * SIZE(AO) + movaps %xmm1, -14 * 
SIZE(AO) + movaps %xmm2, -12 * SIZE(AO) + movaps %xmm3, -10 * SIZE(AO) + movaps %xmm4, -8 * SIZE(AO) + movaps %xmm5, -6 * SIZE(AO) + movaps %xmm6, -4 * SIZE(AO) + movaps %xmm7, -2 * SIZE(AO) +#endif + +#ifndef LN + addq $4 * SIZE, CO1 + addq $4 * SIZE, CO2 +#endif + +#if defined(LT) || defined(RN) + movq K, %rax + subq KK, %rax + leaq (,%rax, SIZE), %rax + leaq (AO, %rax, 4), AO + leaq (BO, %rax, 4), BO +#endif + +#ifdef LN + subq $4, KK +#endif + +#ifdef LT + addq $4, KK +#endif + +#ifdef RT + movq K, %rax + salq $2 + BASE_SHIFT, %rax + addq %rax, AORIG +#endif + + decq I # i -- + jg .L11 + ALIGN_4 + +.L20: + testq $3, M + je .L39 + + testq $2, M + je .L30 + ALIGN_4 + +.L21: +#ifdef LN + movq K, %rax + salq $1 + BASE_SHIFT, %rax + subq %rax, AORIG +#endif + +#if defined(LN) || defined(RT) + movq KK, %rax + movq AORIG, AO + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 2), AO +#endif + + movq B, BO + +#if defined(LN) || defined(RT) + movq KK, %rax + leaq (, %rax, SIZE), %rax + leaq (BO, %rax, 4), BO +#endif + + movapd -16 * SIZE(AO), %xmm0 + pxor %xmm8, %xmm8 + movapd -12 * SIZE(AO), %xmm2 + pxor %xmm9, %xmm9 + movddup -16 * SIZE(BO), %xmm1 + pxor %xmm10, %xmm10 + movddup -15 * SIZE(BO), %xmm5 + pxor %xmm11, %xmm11 + movddup -8 * SIZE(BO), %xmm3 + +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $-4, %rax + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 2), AO + leaq (BO, %rax, 4), BO + negq %rax + NOBRANCH + je .L26 + ALIGN_4 + +.L22: + mulpd %xmm0, %xmm1 + addpd %xmm1, %xmm8 + movddup -14 * SIZE(BO, %rax, 4), %xmm1 + mulpd %xmm0, %xmm5 + addpd %xmm5, %xmm9 + movddup -13 * SIZE(BO, %rax, 4), %xmm5 + mulpd %xmm0, %xmm1 + addpd %xmm1, %xmm10 + movddup -12 * SIZE(BO, %rax, 4), %xmm1 + mulpd %xmm0, %xmm5 + movapd -14 * SIZE(AO, %rax, 2), %xmm0 + addpd %xmm5, %xmm11 + movddup -11 * SIZE(BO, %rax, 4), %xmm5 + mulpd %xmm0, %xmm1 + addpd %xmm1, %xmm8 + movddup -10 * SIZE(BO, %rax, 4), %xmm1 + mulpd %xmm0, %xmm5 + addpd %xmm5, 
%xmm9 + movddup -9 * SIZE(BO, %rax, 4), %xmm5 + mulpd %xmm0, %xmm1 + addpd %xmm1, %xmm10 + movddup (BO, %rax, 4), %xmm1 + mulpd %xmm0, %xmm5 + movapd -8 * SIZE(AO, %rax, 2), %xmm0 + addpd %xmm5, %xmm11 + movddup -7 * SIZE(BO, %rax, 4), %xmm5 + mulpd %xmm2, %xmm3 + addpd %xmm3, %xmm8 + movddup -6 * SIZE(BO, %rax, 4), %xmm3 + mulpd %xmm2, %xmm5 + addpd %xmm5, %xmm9 + movddup -5 * SIZE(BO, %rax, 4), %xmm5 + mulpd %xmm2, %xmm3 + addpd %xmm3, %xmm10 + movddup -4 * SIZE(BO, %rax, 4), %xmm3 + mulpd %xmm2, %xmm5 + movapd -10 * SIZE(AO, %rax, 2), %xmm2 + addpd %xmm5, %xmm11 + movddup -3 * SIZE(BO, %rax, 4), %xmm5 + mulpd %xmm2, %xmm3 + addpd %xmm3, %xmm8 + movddup -2 * SIZE(BO, %rax, 4), %xmm3 + mulpd %xmm2, %xmm5 + addpd %xmm5, %xmm9 + movddup -1 * SIZE(BO, %rax, 4), %xmm5 + mulpd %xmm2, %xmm3 + addpd %xmm3, %xmm10 + movddup 8 * SIZE(BO, %rax, 4), %xmm3 + mulpd %xmm2, %xmm5 + movapd -4 * SIZE(AO, %rax, 2), %xmm2 + addpd %xmm5, %xmm11 + movddup 1 * SIZE(BO, %rax, 4), %xmm5 + + addq $4 * SIZE, %rax + BRANCH + jl .L22 + ALIGN_4 + +.L26: +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $3, %rax # if (k & 1) + je .L29 + + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 2), AO + leaq (BO, %rax, 4), BO + negq %rax + ALIGN_4 + +.L27: + mulpd %xmm0, %xmm1 + addpd %xmm1, %xmm8 + movddup -14 * SIZE(BO, %rax, 4), %xmm1 + mulpd %xmm0, %xmm5 + addpd %xmm5, %xmm9 + movddup -13 * SIZE(BO, %rax, 4), %xmm5 + mulpd %xmm0, %xmm1 + addpd %xmm1, %xmm10 + movddup -12 * SIZE(BO, %rax, 4), %xmm1 + mulpd %xmm0, %xmm5 + movapd -14 * SIZE(AO, %rax, 2), %xmm0 + addpd %xmm5, %xmm11 + movddup -11 * SIZE(BO, %rax, 4), %xmm5 + + addq $SIZE, %rax + jl .L27 + ALIGN_4 + +.L29: +#if defined(LN) || defined(RT) + movq KK, %rax +#ifdef LN + subq $2, %rax +#else + subq $4, %rax +#endif + + leaq (, %rax, SIZE), %rax + + movq AORIG, AO + leaq (AO, %rax, 2), AO + leaq (B, %rax, 4), BO +#endif + +#if defined(LN) || defined(LT) + movapd %xmm8, %xmm0 + unpcklpd %xmm9, 
%xmm8 + unpckhpd %xmm9, %xmm0 + + movapd %xmm10, %xmm2 + unpcklpd %xmm11, %xmm10 + unpckhpd %xmm11, %xmm2 + + movapd -16 * SIZE(BO), %xmm9 + movapd -14 * SIZE(BO), %xmm11 + movapd -12 * SIZE(BO), %xmm13 + movapd -10 * SIZE(BO), %xmm15 + + subpd %xmm8, %xmm9 + subpd %xmm10, %xmm11 + subpd %xmm0, %xmm13 + subpd %xmm2, %xmm15 +#else + movapd -16 * SIZE(AO), %xmm0 + movapd -14 * SIZE(AO), %xmm2 + movapd -12 * SIZE(AO), %xmm4 + movapd -10 * SIZE(AO), %xmm6 + + subpd %xmm8, %xmm0 + subpd %xmm9, %xmm2 + subpd %xmm10, %xmm4 + subpd %xmm11, %xmm6 +#endif + +#ifdef LN + movddup -13 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm13 + mulpd %xmm8, %xmm15 + + movddup -14 * SIZE(AO), %xmm10 + mulpd %xmm13, %xmm10 + subpd %xmm10, %xmm9 + movddup -14 * SIZE(AO), %xmm10 + mulpd %xmm15, %xmm10 + subpd %xmm10, %xmm11 + + movddup -16 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm9 + mulpd %xmm8, %xmm11 +#endif + +#ifdef LT + movddup -16 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm9 + mulpd %xmm8, %xmm11 + + movddup -15 * SIZE(AO), %xmm10 + mulpd %xmm9, %xmm10 + subpd %xmm10, %xmm13 + movddup -15 * SIZE(AO), %xmm10 + mulpd %xmm11, %xmm10 + subpd %xmm10, %xmm15 + + movddup -13 * SIZE(AO), %xmm8 + mulpd %xmm8, %xmm13 + mulpd %xmm8, %xmm15 +#endif + +#ifdef RN + movddup -16 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm0 + + movddup -15 * SIZE(BO), %xmm9 + mulpd %xmm0, %xmm9 + subpd %xmm9, %xmm2 + movddup -14 * SIZE(BO), %xmm10 + mulpd %xmm0, %xmm10 + subpd %xmm10, %xmm4 + movddup -13 * SIZE(BO), %xmm11 + mulpd %xmm0, %xmm11 + subpd %xmm11, %xmm6 + + movddup -11 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm2 + movddup -10 * SIZE(BO), %xmm9 + mulpd %xmm2, %xmm9 + subpd %xmm9, %xmm4 + movddup -9 * SIZE(BO), %xmm10 + mulpd %xmm2, %xmm10 + subpd %xmm10, %xmm6 + + movddup -6 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm4 + + movddup -5 * SIZE(BO), %xmm9 + mulpd %xmm4, %xmm9 + subpd %xmm9, %xmm6 + + movddup -1 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm6 +#endif + +#ifdef RT + movddup -1 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm6 + + movddup -2 * 
SIZE(BO), %xmm9 + mulpd %xmm6, %xmm9 + subpd %xmm9, %xmm4 + movddup -3 * SIZE(BO), %xmm10 + mulpd %xmm6, %xmm10 + subpd %xmm10, %xmm2 + movddup -4 * SIZE(BO), %xmm11 + mulpd %xmm6, %xmm11 + subpd %xmm11, %xmm0 + + movddup -6 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm4 + movddup -7 * SIZE(BO), %xmm9 + mulpd %xmm4, %xmm9 + subpd %xmm9, %xmm2 + movddup -8 * SIZE(BO), %xmm10 + mulpd %xmm4, %xmm10 + subpd %xmm10, %xmm0 + + movddup -11 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm2 + movddup -12 * SIZE(BO), %xmm9 + mulpd %xmm2, %xmm9 + subpd %xmm9, %xmm0 + + movddup -16 * SIZE(BO), %xmm8 + mulpd %xmm8, %xmm0 +#endif + +#ifdef LN + subq $2 * SIZE, CO1 + subq $2 * SIZE, CO2 +#endif + +#if defined(LN) || defined(LT) + movlpd %xmm9, 0 * SIZE(CO1) + movlpd %xmm13, 1 * SIZE(CO1) + + movhpd %xmm9, 0 * SIZE(CO2) + movhpd %xmm13, 1 * SIZE(CO2) + + movlpd %xmm11, 0 * SIZE(CO1, LDC, 2) + movlpd %xmm15, 1 * SIZE(CO1, LDC, 2) + + movhpd %xmm11, 0 * SIZE(CO2, LDC, 2) + movhpd %xmm15, 1 * SIZE(CO2, LDC, 2) +#else + movlpd %xmm0, 0 * SIZE(CO1) + movhpd %xmm0, 1 * SIZE(CO1) + + movlpd %xmm2, 0 * SIZE(CO2) + movhpd %xmm2, 1 * SIZE(CO2) + + movlpd %xmm4, 0 * SIZE(CO1, LDC, 2) + movhpd %xmm4, 1 * SIZE(CO1, LDC, 2) + + movlpd %xmm6, 0 * SIZE(CO2, LDC, 2) + movhpd %xmm6, 1 * SIZE(CO2, LDC, 2) +#endif + +#if defined(LN) || defined(LT) + movaps %xmm9, -16 * SIZE(BO) + movaps %xmm11, -14 * SIZE(BO) + movaps %xmm13, -12 * SIZE(BO) + movaps %xmm15, -10 * SIZE(BO) +#else + movaps %xmm0, -16 * SIZE(AO) + movaps %xmm2, -14 * SIZE(AO) + movaps %xmm4, -12 * SIZE(AO) + movaps %xmm6, -10 * SIZE(AO) +#endif + +#ifndef LN + addq $2 * SIZE, CO1 + addq $2 * SIZE, CO2 +#endif + +#if defined(LT) || defined(RN) + movq K, %rax + subq KK, %rax + leaq (,%rax, SIZE), %rax + leaq (AO, %rax, 2), AO + leaq (BO, %rax, 4), BO +#endif + +#ifdef LN + subq $2, KK +#endif + +#ifdef LT + addq $2, KK +#endif + +#ifdef RT + movq K, %rax + salq $1 + BASE_SHIFT, %rax + addq %rax, AORIG +#endif + ALIGN_4 + +.L30: + testq $1, M + je .L39 + 
+#ifdef LN + movq K, %rax + salq $0 + BASE_SHIFT, %rax + subq %rax, AORIG +#endif + +#if defined(LN) || defined(RT) + movq KK, %rax + movq AORIG, AO + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 1), AO +#endif + + movq B, BO + +#if defined(LN) || defined(RT) + movq KK, %rax + leaq (, %rax, SIZE), %rax + leaq (BO, %rax, 4), BO +#endif + + movddup -16 * SIZE(AO), %xmm0 + pxor %xmm8, %xmm8 + movddup -14 * SIZE(AO), %xmm2 + pxor %xmm9, %xmm9 + movddup -15 * SIZE(AO), %xmm4 + pxor %xmm10, %xmm10 + movapd -16 * SIZE(BO), %xmm1 + pxor %xmm11, %xmm11 + movapd -8 * SIZE(BO), %xmm3 + +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $-4, %rax + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 1), AO + leaq (BO, %rax, 4), BO + negq %rax + NOBRANCH + je .L36 + ALIGN_4 + +.L32: + mulpd %xmm0, %xmm1 + mulpd -14 * SIZE(BO, %rax, 4), %xmm0 + addpd %xmm1, %xmm8 + movapd -12 * SIZE(BO, %rax, 4), %xmm1 + addpd %xmm0, %xmm9 + movddup -12 * SIZE(AO, %rax, 1), %xmm0 + mulpd %xmm4, %xmm1 + mulpd -10 * SIZE(BO, %rax, 4), %xmm4 + addpd %xmm1, %xmm10 + movapd (BO, %rax, 4), %xmm1 + addpd %xmm4, %xmm11 + movddup -11 * SIZE(AO, %rax, 1), %xmm4 + mulpd %xmm2, %xmm3 + mulpd -6 * SIZE(BO, %rax, 4), %xmm2 + addpd %xmm3, %xmm8 + movapd -4 * SIZE(BO, %rax, 4), %xmm3 + addpd %xmm2, %xmm9 + movddup -13 * SIZE(AO, %rax, 1), %xmm2 + mulpd %xmm2, %xmm3 + mulpd -2 * SIZE(BO, %rax, 4), %xmm2 + addpd %xmm3, %xmm10 + movapd 8 * SIZE(BO, %rax, 4), %xmm3 + addpd %xmm2, %xmm11 + movddup -10 * SIZE(AO, %rax, 1), %xmm2 + + addq $4 * SIZE, %rax + BRANCH + jl .L32 + ALIGN_4 + +.L36: +#if defined(LT) || defined(RN) + movq KK, %rax +#else + movq K, %rax + subq KK, %rax +#endif + andq $3, %rax # if (k & 1) + je .L38 + + leaq (, %rax, SIZE), %rax + leaq (AO, %rax, 1), AO + leaq (BO, %rax, 4), BO + negq %rax + ALIGN_4 + +.L37: + mulpd %xmm0, %xmm1 + mulpd -14 * SIZE(BO, %rax, 4), %xmm0 + addpd %xmm1, %xmm8 + movapd -12 * SIZE(BO, %rax, 4), %xmm1 + addpd %xmm0, %xmm9 + 
movddup -15 * SIZE(AO, %rax, 1), %xmm0 + + addq $SIZE, %rax + jl .L37 + ALIGN_4 + +.L38: + addpd %xmm10, %xmm8 + addpd %xmm11, %xmm9 + +#if defined(LN) || defined(RT) + movq KK, %rax +#ifdef LN + subq $1, %rax +#else + subq $4, %rax +#endif + + leaq (, %rax, SIZE), %rax + + movq AORIG, AO + leaq (AO, %rax, 1), AO + leaq (B, %rax, 4), BO +#endif + +#if defined(LN) || defined(LT) + movapd -16 * SIZE(BO), %xmm2 + movapd -14 * SIZE(BO), %xmm3 + + subpd %xmm8, %xmm2 + subpd %xmm9, %xmm3 +#else + movapd -16 * SIZE(AO), %xmm2 + movapd -14 * SIZE(AO), %xmm3 + + subpd %xmm8, %xmm2 + subpd %xmm9, %xmm3 +#endif + +#if defined(LN) || defined(LT) + movddup -16 * SIZE(AO), %xmm0 + mulpd %xmm0, %xmm2 + mulpd %xmm0, %xmm3 +#endif + +#ifdef RN + movapd %xmm2, %xmm0 + unpckhpd %xmm0, %xmm0 + + movapd %xmm3, %xmm1 + unpckhpd %xmm1, %xmm1 + + movsd -16 * SIZE(BO), %xmm4 + mulsd %xmm4, %xmm2 + + movsd -15 * SIZE(BO), %xmm5 + mulsd %xmm2, %xmm5 + subsd %xmm5, %xmm0 + movsd -14 * SIZE(BO), %xmm6 + mulsd %xmm2, %xmm6 + subsd %xmm6, %xmm3 + movsd -13 * SIZE(BO), %xmm7 + mulsd %xmm2, %xmm7 + subsd %xmm7, %xmm1 + + movsd -11 * SIZE(BO), %xmm4 + mulsd %xmm4, %xmm0 + + movsd -10 * SIZE(BO), %xmm5 + mulsd %xmm0, %xmm5 + subsd %xmm5, %xmm3 + movsd -9 * SIZE(BO), %xmm6 + mulsd %xmm0, %xmm6 + subsd %xmm6, %xmm1 + + movsd -6 * SIZE(BO), %xmm4 + mulsd %xmm4, %xmm3 + + movsd -5 * SIZE(BO), %xmm5 + mulsd %xmm3, %xmm5 + subsd %xmm5, %xmm1 + + movsd -1 * SIZE(BO), %xmm4 + mulsd %xmm4, %xmm1 + + unpcklpd %xmm0, %xmm2 + unpcklpd %xmm1, %xmm3 +#endif + +#ifdef RT + movapd %xmm2, %xmm0 + unpckhpd %xmm0, %xmm0 + + movapd %xmm3, %xmm1 + unpckhpd %xmm1, %xmm1 + + movsd -1 * SIZE(BO), %xmm4 + mulsd %xmm4, %xmm1 + + movsd -2 * SIZE(BO), %xmm5 + mulsd %xmm1, %xmm5 + subsd %xmm5, %xmm3 + movsd -3 * SIZE(BO), %xmm6 + mulsd %xmm1, %xmm6 + subsd %xmm6, %xmm0 + movsd -4 * SIZE(BO), %xmm7 + mulsd %xmm1, %xmm7 + subsd %xmm7, %xmm2 + + movsd -6 * SIZE(BO), %xmm4 + mulsd %xmm4, %xmm3 + + movsd -7 * SIZE(BO), %xmm5 + mulsd 
%xmm3, %xmm5 + subsd %xmm5, %xmm0 + movsd -8 * SIZE(BO), %xmm6 + mulsd %xmm3, %xmm6 + subsd %xmm6, %xmm2 + + movsd -11 * SIZE(BO), %xmm4 + mulsd %xmm4, %xmm0 + + movsd -12 * SIZE(BO), %xmm5 + mulsd %xmm0, %xmm5 + subsd %xmm5, %xmm2 + + movsd -16 * SIZE(BO), %xmm4 + mulsd %xmm4, %xmm2 + + unpcklpd %xmm0, %xmm2 + unpcklpd %xmm1, %xmm3 + +#endif + +#ifdef LN + subq $1 * SIZE, CO1 + subq $1 * SIZE, CO2 +#endif + +#if defined(LN) || defined(LT) + movlpd %xmm2, 0 * SIZE(CO1) + movhpd %xmm2, 0 * SIZE(CO2) + movlpd %xmm3, 0 * SIZE(CO1, LDC, 2) + movhpd %xmm3, 0 * SIZE(CO2, LDC, 2) +#else + movlpd %xmm2, 0 * SIZE(CO1) + movhpd %xmm2, 0 * SIZE(CO2) + movlpd %xmm3, 0 * SIZE(CO1, LDC, 2) + movhpd %xmm3, 0 * SIZE(CO2, LDC, 2) +#endif + +#if defined(LN) || defined(LT) + movaps %xmm2, -16 * SIZE(BO) + movaps %xmm3, -14 * SIZE(BO) +#else + movaps %xmm2, -16 * SIZE(AO) + movaps %xmm3, -14 * SIZE(AO) +#endif + +#ifndef LN + addq $1 * SIZE, CO1 + addq $1 * SIZE, CO2 +#endif + +#if defined(LT) || defined(RN) + movq K, %rax + subq KK, %rax + leaq (,%rax, SIZE), %rax + leaq (AO, %rax, 1), AO + leaq (BO, %rax, 4), BO +#endif + +#ifdef LN + subq $1, KK +#endif + +#ifdef LT + addq $1, KK +#endif + +#ifdef RT + movq K, %rax + salq $0 + BASE_SHIFT, %rax + addq %rax, AORIG +#endif + ALIGN_4 + +.L39: +#ifdef LN + leaq (, K, SIZE), %rax + leaq (B, %rax, 4), B +#endif + +#if defined(LT) || defined(RN) + movq BO, B +#endif + +#ifdef RN + addq $4, KK +#endif + +#ifdef RT + subq $4, KK +#endif + + decq J # j -- + jg .L01 + ALIGN_4 + +.L999: + movq (%rsp), %rbx + movq 8(%rsp), %rbp + movq 16(%rsp), %r12 + movq 24(%rsp), %r13 + movq 32(%rsp), %r14 + movq 40(%rsp), %r15 + +#ifdef WINDOWS_ABI + movq 48(%rsp), %rdi + movq 56(%rsp), %rsi + movups 64(%rsp), %xmm6 + movups 80(%rsp), %xmm7 + movups 96(%rsp), %xmm8 + movups 112(%rsp), %xmm9 + movups 128(%rsp), %xmm10 + movups 144(%rsp), %xmm11 + movups 160(%rsp), %xmm12 + movups 176(%rsp), %xmm13 + movups 192(%rsp), %xmm14 + movups 208(%rsp), %xmm15 +#endif 
+ + addq $STACKSIZE, %rsp + ret + + EPILOGUE From 60b263f3d27ead24f01a05c2139ab83aeca1e302 Mon Sep 17 00:00:00 2001 From: wernsaar Date: Sat, 27 Apr 2013 17:23:08 +0200 Subject: [PATCH 05/15] removed trsm_kernel_RT_4x4_bulldozer.S. wrong results --- kernel/x86_64/KERNEL.BULLDOZER | 2 +- kernel/x86_64/trsm_kernel_RT_4x4_bulldozer.S | 3292 ------------------ 2 files changed, 1 insertion(+), 3293 deletions(-) delete mode 100644 kernel/x86_64/trsm_kernel_RT_4x4_bulldozer.S diff --git a/kernel/x86_64/KERNEL.BULLDOZER b/kernel/x86_64/KERNEL.BULLDOZER index a41c94aca..e0b8a71e4 100644 --- a/kernel/x86_64/KERNEL.BULLDOZER +++ b/kernel/x86_64/KERNEL.BULLDOZER @@ -46,7 +46,7 @@ STRSMKERNEL_RT = trsm_kernel_RT_8x4_sse.S DTRSMKERNEL_LN = trsm_kernel_LN_4x4_barcelona.S DTRSMKERNEL_LT = trsm_kernel_LT_4x4_bulldozer.S DTRSMKERNEL_RN = trsm_kernel_LT_4x4_bulldozer.S -DTRSMKERNEL_RT = trsm_kernel_RT_4x4_bulldozer.S +DTRSMKERNEL_RT = trsm_kernel_RT_4x4_barcelona.S CTRSMKERNEL_LN = ztrsm_kernel_LN_4x2_sse.S CTRSMKERNEL_LT = ztrsm_kernel_LT_4x2_sse.S diff --git a/kernel/x86_64/trsm_kernel_RT_4x4_bulldozer.S b/kernel/x86_64/trsm_kernel_RT_4x4_bulldozer.S deleted file mode 100644 index e1880851a..000000000 --- a/kernel/x86_64/trsm_kernel_RT_4x4_bulldozer.S +++ /dev/null @@ -1,3292 +0,0 @@ -/*********************************************************************/ -/* Copyright 2009, 2010 The University of Texas at Austin. */ -/* All rights reserved. */ -/* */ -/* Redistribution and use in source and binary forms, with or */ -/* without modification, are permitted provided that the following */ -/* conditions are met: */ -/* */ -/* 1. Redistributions of source code must retain the above */ -/* copyright notice, this list of conditions and the following */ -/* disclaimer. */ -/* */ -/* 2. 
Redistributions in binary form must reproduce the above */ -/* copyright notice, this list of conditions and the following */ -/* disclaimer in the documentation and/or other materials */ -/* provided with the distribution. */ -/* */ -/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ -/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ -/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ -/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ -/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ -/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ -/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ -/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ -/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ -/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ -/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ -/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ -/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ -/* POSSIBILITY OF SUCH DAMAGE. */ -/* */ -/* The views and conclusions contained in the software and */ -/* documentation are those of the authors and should not be */ -/* interpreted as representing official policies, either expressed */ -/* or implied, of The University of Texas at Austin. 
*/ -/*********************************************************************/ - -#define ASSEMBLER -#include "common.h" - -#define OLD_M %rdi -#define OLD_N %rsi -#define M %r13 -#define N %r14 -#define K %rdx - -#define A %rcx -#define B %r8 -#define C %r9 -#define LDC %r10 - -#define I %r11 -#define AO %rdi -#define BO %rsi -#define CO1 %r15 -#define CO2 %r12 -#define BB %rbp -#define J %rbx - -#ifndef WINDOWS_ABI - -#define STACKSIZE 96 - -#define OFFSET 48(%rsp) -#define AORIG 56(%rsp) -#define KK 64(%rsp) -#define KKK 72(%rsp) - -#else - -#define STACKSIZE 256 - -#define OLD_A 40 + STACKSIZE(%rsp) -#define OLD_B 48 + STACKSIZE(%rsp) -#define OLD_C 56 + STACKSIZE(%rsp) -#define OLD_LDC 64 + STACKSIZE(%rsp) -#define OLD_OFFSET 72 + STACKSIZE(%rsp) - -#define OFFSET 224(%rsp) -#define AORIG 232(%rsp) -#define KK 240(%rsp) -#define KKK 248(%rsp) - -#endif - - -#define movlpd movsd -#define movapd movups -#define movupd movups - -#define A_PR1 224 -#define B_PR1 224 - -#define KERNEL1(xx) \ - vmovups -14 * SIZE(AO, %rax, 4),%xmm2 ;\ - vfmaddpd %xmm8,%xmm6,%xmm7,%xmm8 ;\ - vmovddup -14 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm12,%xmm2,%xmm7,%xmm12 ;\ - vmovddup -13 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm9,%xmm6,%xmm3,%xmm9 ;\ - vmovddup -12 * SIZE(BO, %rax, 4), %xmm1 ;\ - vfmaddpd %xmm13,%xmm2,%xmm3,%xmm13 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup -11 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - vmovups -12 * SIZE(AO, %rax, 4),%xmm0 ;\ - vfmaddpd %xmm10,%xmm6,%xmm5,%xmm10 ;\ - vfmaddpd %xmm11,%xmm6,%xmm4,%xmm11 ;\ - -#define KERNEL2(xx) \ - vmovups -10 * SIZE(AO, %rax, 4), %xmm2 ;\ - vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ - vmovups -8 * SIZE(AO, %rax, 4),%xmm6 ;\ - vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ - vmovddup -10 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup -9 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ - 
vmovddup -8 * SIZE(BO, %rax, 4), %xmm7 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup -7 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - -#define KERNEL3(xx) \ - vmovups -6 * SIZE(AO, %rax, 4),%xmm2 ;\ - vfmaddpd %xmm8, %xmm6, %xmm7, %xmm8 ;\ - vmovddup -6 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ - vmovddup -5 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm9, %xmm6, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup -4 * SIZE(BO, %rax, 4), %xmm1 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup -3 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - vmovups -4 * SIZE(AO, %rax, 4),%xmm0 ;\ - vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ - vfmaddpd %xmm11,%xmm6, %xmm4, %xmm11 ;\ - -#define KERNEL4(xx) \ - vmovups -2 * SIZE(AO, %rax, 4), %xmm2 ;\ - vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ - vmovups (AO, %rax, 4), %xmm6 ;\ - vfmaddpd %xmm12,%xmm2, %xmm1 ,%xmm12;\ - vmovddup -2 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup -1 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ - vmovddup (BO, %rax, 4), %xmm7 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup 1 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - -#define KERNEL5(xx) \ - vmovups 2 * SIZE(AO, %rax, 4),%xmm2 ;\ - vfmaddpd %xmm8,%xmm6, %xmm7,%xmm8 ;\ - vmovddup 2 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ - vmovddup 3 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup 4 * SIZE(BO, %rax, 4), %xmm1 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup 5 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - vmovups 4 * SIZE(AO, %rax, 4),%xmm0 ;\ - vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ - vfmaddpd 
%xmm11,%xmm6, %xmm4,%xmm11 ;\ - -#define KERNEL6(xx) \ - vmovups 6 * SIZE(AO, %rax, 4), %xmm2 ;\ - vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ - vmovups 8 * SIZE(AO, %rax, 4), %xmm6 ;\ - vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ - vmovddup 6 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup 7 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ - vmovddup 8 * SIZE(BO, %rax, 4), %xmm7 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup 9 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - -#define KERNEL7(xx) \ - vmovups 10 * SIZE(AO, %rax, 4),%xmm2 ;\ - vfmaddpd %xmm8,%xmm6, %xmm7,%xmm8 ;\ - vmovddup 10 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ - vmovddup 11 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup 12 * SIZE(BO, %rax, 4), %xmm1 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup 13 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - vmovups 12 * SIZE(AO, %rax, 4), %xmm0 ;\ - vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ - vfmaddpd %xmm11,%xmm6, %xmm4,%xmm11 ;\ - -#define KERNEL8(xx) \ - vmovups 14 * SIZE(AO, %rax, 4), %xmm2 ;\ - vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ - vmovups 16 * SIZE(AO, %rax, 4),%xmm6 ;\ - vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ - vmovddup 14 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13, %xmm2, %xmm3,%xmm13 ;\ - vmovddup 15 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ - vmovddup 16 * SIZE(BO, %rax, 4), %xmm7 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup 17 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - -#define KERNEL_SUB1(xx) \ - vmovups -16 * SIZE(AO),%xmm0 ;\ - vmovups -14 * SIZE(AO),%xmm2 ;\ - vmovddup -16 * SIZE(BO), %xmm1 ;\ - vmovddup 
-15 * SIZE(BO), %xmm3 ;\ - vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ - vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm12, %xmm2, %xmm1,%xmm12 ;\ - vfmaddpd %xmm13, %xmm2, %xmm3,%xmm13 ;\ - vmovddup -14 * SIZE(BO), %xmm1 ;\ - vmovddup -13 * SIZE(BO), %xmm3 ;\ - vfmaddpd %xmm10, %xmm0, %xmm1,%xmm10 ;\ - vfmaddpd %xmm11, %xmm0, %xmm3,%xmm11 ;\ - vfmaddpd %xmm14, %xmm2, %xmm1,%xmm14 ;\ - vfmaddpd %xmm15, %xmm2, %xmm3,%xmm15 ;\ - -#define KERNEL_SUB2(xx) \ - vmovups -12 * SIZE(AO), %xmm0 ;\ - vmovups -10 * SIZE(AO), %xmm2 ;\ - vmovddup -12 * SIZE(BO), %xmm1 ;\ - vmovddup -11 * SIZE(BO), %xmm3 ;\ - vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ - vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup -10 * SIZE(BO), %xmm1 ;\ - vmovddup -9 * SIZE(BO), %xmm3 ;\ - vfmaddpd %xmm10,%xmm0, %xmm1,%xmm10 ;\ - vfmaddpd %xmm11,%xmm0, %xmm3,%xmm11 ;\ - vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\ - vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\ - -#define KERNEL_SUB3(xx) \ - vmovups -8 * SIZE(AO),%xmm0 ;\ - vmovups -6 * SIZE(AO),%xmm2 ;\ - vmovddup -8 * SIZE(BO), %xmm1 ;\ - vmovddup -7 * SIZE(BO), %xmm3 ;\ - vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ - vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup -6 * SIZE(BO), %xmm1 ;\ - vmovddup -5 * SIZE(BO), %xmm3 ;\ - vfmaddpd %xmm10,%xmm0, %xmm1,%xmm10 ;\ - vfmaddpd %xmm11,%xmm0, %xmm3,%xmm11 ;\ - vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\ - vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\ - -#define KERNEL_SUB4(xx) \ - vmovups -4 * SIZE(AO), %xmm0 ;\ - vmovups -2 * SIZE(AO), %xmm2 ;\ - vmovddup -4 * SIZE(BO), %xmm1 ;\ - vmovddup -3 * SIZE(BO), %xmm3 ;\ - vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ - vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup -2 * SIZE(BO), %xmm1 ;\ - vmovddup -1 * SIZE(BO), %xmm3 ;\ - vfmaddpd %xmm10,%xmm0, %xmm1,%xmm10 ;\ - 
vfmaddpd %xmm11,%xmm0, %xmm3,%xmm11 ;\ - vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\ - vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\ - vmovups (AO), %xmm0 ;\ - vmovddup (BO), %xmm1 ;\ - vmovddup 1 * SIZE(BO), %xmm3 ;\ - vmovaps %xmm0, %xmm2 - - - - PROLOGUE - PROFCODE - - subq $STACKSIZE, %rsp - movq %rbx, (%rsp) - movq %rbp, 8(%rsp) - movq %r12, 16(%rsp) - movq %r13, 24(%rsp) - movq %r14, 32(%rsp) - movq %r15, 40(%rsp) - -#ifdef WINDOWS_ABI - movq %rdi, 48(%rsp) - movq %rsi, 56(%rsp) - movups %xmm6, 64(%rsp) - movups %xmm7, 80(%rsp) - movups %xmm8, 96(%rsp) - movups %xmm9, 112(%rsp) - movups %xmm10, 128(%rsp) - movups %xmm11, 144(%rsp) - movups %xmm12, 160(%rsp) - movups %xmm13, 176(%rsp) - movups %xmm14, 192(%rsp) - movups %xmm15, 208(%rsp) - - movq ARG1, OLD_M - movq ARG2, OLD_N - movq ARG3, K - movq OLD_A, A - movq OLD_B, B - movq OLD_C, C - movq OLD_LDC, LDC - movsd OLD_OFFSET, %xmm12 -#else - movq STACKSIZE + 8(%rsp), LDC - movsd STACKSIZE + 16(%rsp), %xmm12 -#endif - - movq OLD_M, M - movq OLD_N, N - - subq $-16 * SIZE, A - subq $-16 * SIZE, B - - movsd %xmm12, OFFSET - movsd %xmm12, KK - - leaq (, LDC, SIZE), LDC - -#ifdef LN - leaq (, M, SIZE), %rax - addq %rax, C - imulq K, %rax - addq %rax, A -#endif - -#ifdef RT - leaq (, N, SIZE), %rax - imulq K, %rax - addq %rax, B - movq N, %rax - imulq LDC, %rax - addq %rax, C -#endif - -#ifdef RN - negq KK -#endif - -#ifdef RT - movq N, %rax - subq OFFSET, %rax - movq %rax, KK -#endif - - testq $1, N - je .L40 - -#if defined(LT) || defined(RN) - movq A, AO -#else - movq A, AORIG -#endif - -#ifdef RT - movq K, %rax - salq $0 + BASE_SHIFT, %rax - subq %rax, B - - subq LDC, C -#endif - - movq C, CO1 # coffset1 = c -#ifndef RT - addq LDC, C -#endif - -#ifdef LN - movq OFFSET, %rax - addq M, %rax - movq %rax, KK -#endif - -#ifdef LT - movq OFFSET, %rax - movq %rax, KK -#endif - - movq M, I - sarq $2, I # i = (m >> 2) - jle .L100 - ALIGN_4 - -.L91: -#ifdef LN - movq K, %rax - salq $2 + BASE_SHIFT, %rax - subq %rax, AORIG -#endif - 
-#if defined(LN) || defined(RT) - movq KK, %rax - movq AORIG, AO - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 4), AO -#endif - - movq B, BO - -#if defined(LN) || defined(RT) - movq KK, %rax - leaq (BO, %rax, SIZE), BO -#endif - - movapd -16 * SIZE(AO), %xmm0 - pxor %xmm8, %xmm8 - movapd -8 * SIZE(AO), %xmm2 - pxor %xmm9, %xmm9 - movddup -16 * SIZE(BO), %xmm1 - pxor %xmm10, %xmm10 - movddup -15 * SIZE(BO), %xmm5 - pxor %xmm11, %xmm11 - movddup -14 * SIZE(BO), %xmm3 - -#ifndef LN - prefetchw 3 * SIZE(CO1) -#else - prefetchw -8 * SIZE(CO1) -#endif - -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $-4, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 4), AO - leaq (BO, %rax, 1), BO - negq %rax - NOBRANCH - je .L96 - ALIGN_4 - -.L92: - mulpd %xmm1, %xmm0 - mulpd -14 * SIZE(AO, %rax, 4), %xmm1 - addpd %xmm0, %xmm8 - movapd -12 * SIZE(AO, %rax, 4), %xmm0 - addpd %xmm1, %xmm9 - movddup -12 * SIZE(BO, %rax, 1), %xmm1 - mulpd %xmm5, %xmm0 - mulpd -10 * SIZE(AO, %rax, 4), %xmm5 - addpd %xmm0, %xmm10 - movapd (AO, %rax, 4), %xmm0 - addpd %xmm5, %xmm11 - movddup -13 * SIZE(BO, %rax, 1), %xmm5 - mulpd %xmm3, %xmm2 - mulpd -6 * SIZE(AO, %rax, 4), %xmm3 - addpd %xmm2, %xmm8 - movapd -4 * SIZE(AO, %rax, 4), %xmm2 - addpd %xmm3, %xmm9 - movddup -10 * SIZE(BO, %rax, 1), %xmm3 - mulpd %xmm5, %xmm2 - mulpd -2 * SIZE(AO, %rax, 4), %xmm5 - addpd %xmm2, %xmm10 - movapd 8 * SIZE(AO, %rax, 4), %xmm2 - addpd %xmm5, %xmm11 - movddup -11 * SIZE(BO, %rax, 1), %xmm5 - - addq $4 * SIZE, %rax - BRANCH - jl .L92 - ALIGN_4 - -.L96: -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $3, %rax # if (k & 1) - je .L99 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 4), AO - leaq (BO, %rax, 1), BO - negq %rax - ALIGN_4 - -.L97: - mulpd %xmm1, %xmm0 - mulpd -14 * SIZE(AO, %rax, 4), %xmm1 - addpd %xmm0, %xmm8 - movapd -12 * SIZE(AO, %rax, 4), %xmm0 - addpd %xmm1, %xmm9 - movddup -15 * SIZE(BO, %rax, 1), 
%xmm1 - - addq $SIZE, %rax - jl .L97 - ALIGN_4 -.L99: - addpd %xmm10, %xmm8 - addpd %xmm11, %xmm9 - -#if defined(LN) || defined(RT) - movq KK, %rax -#ifdef LN - subq $4, %rax -#else - subq $1, %rax -#endif - - leaq (, %rax, SIZE), %rax - - movq AORIG, AO - leaq (AO, %rax, 4), AO - leaq (B, %rax, 1), BO -#endif - -#if defined(LN) || defined(LT) - movapd -16 * SIZE(BO), %xmm10 - movapd -14 * SIZE(BO), %xmm11 - - subpd %xmm8, %xmm10 - subpd %xmm9, %xmm11 -#else - movapd -16 * SIZE(AO), %xmm10 - movapd -14 * SIZE(AO), %xmm11 - - subpd %xmm8, %xmm10 - subpd %xmm9, %xmm11 -#endif - -#ifdef LN - movapd %xmm10, %xmm8 - unpckhpd %xmm8, %xmm8 - - movapd %xmm11, %xmm9 - unpckhpd %xmm9, %xmm9 - - movsd -1 * SIZE(AO), %xmm12 - mulsd %xmm12, %xmm9 - - movsd -2 * SIZE(AO), %xmm13 - mulsd %xmm9, %xmm13 - subsd %xmm13, %xmm11 - movsd -3 * SIZE(AO), %xmm14 - mulsd %xmm9, %xmm14 - subsd %xmm14, %xmm8 - movsd -4 * SIZE(AO), %xmm15 - mulsd %xmm9, %xmm15 - subsd %xmm15, %xmm10 - - movsd -6 * SIZE(AO), %xmm12 - mulsd %xmm12, %xmm11 - - movsd -7 * SIZE(AO), %xmm13 - mulsd %xmm11, %xmm13 - subsd %xmm13, %xmm8 - movsd -8 * SIZE(AO), %xmm14 - mulsd %xmm11, %xmm14 - subsd %xmm14, %xmm10 - - movsd -11 * SIZE(AO), %xmm12 - mulsd %xmm12, %xmm8 - - movsd -12 * SIZE(AO), %xmm13 - mulsd %xmm8, %xmm13 - subsd %xmm13, %xmm10 - - movsd -16 * SIZE(AO), %xmm12 - mulsd %xmm12, %xmm10 - - unpcklpd %xmm8, %xmm10 - unpcklpd %xmm9, %xmm11 -#endif - -#ifdef LT - movapd %xmm10, %xmm8 - unpckhpd %xmm8, %xmm8 - - movapd %xmm11, %xmm9 - unpckhpd %xmm9, %xmm9 - - movsd -16 * SIZE(AO), %xmm12 - mulsd %xmm12, %xmm10 - - movsd -15 * SIZE(AO), %xmm13 - mulsd %xmm10, %xmm13 - subsd %xmm13, %xmm8 - movsd -14 * SIZE(AO), %xmm14 - mulsd %xmm10, %xmm14 - subsd %xmm14, %xmm11 - movsd -13 * SIZE(AO), %xmm15 - mulsd %xmm10, %xmm15 - subsd %xmm15, %xmm9 - - movsd -11 * SIZE(AO), %xmm12 - mulsd %xmm12, %xmm8 - - movsd -10 * SIZE(AO), %xmm13 - mulsd %xmm8, %xmm13 - subsd %xmm13, %xmm11 - movsd -9 * SIZE(AO), %xmm14 - mulsd 
%xmm8, %xmm14 - subsd %xmm14, %xmm9 - - movsd -6 * SIZE(AO), %xmm12 - mulsd %xmm12, %xmm11 - - movsd -5 * SIZE(AO), %xmm13 - mulsd %xmm11, %xmm13 - subsd %xmm13, %xmm9 - - movsd -1 * SIZE(AO), %xmm12 - mulsd %xmm12, %xmm9 - - unpcklpd %xmm8, %xmm10 - unpcklpd %xmm9, %xmm11 -#endif - -#ifdef RN - movddup -16 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm10 - mulpd %xmm8, %xmm11 -#endif - -#ifdef RT - movddup -16 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm10 - mulpd %xmm8, %xmm11 -#endif - -#ifdef LN - subq $4 * SIZE, CO1 -#endif - - movlpd %xmm10, 0 * SIZE(CO1) - movhpd %xmm10, 1 * SIZE(CO1) - movlpd %xmm11, 2 * SIZE(CO1) - movhpd %xmm11, 3 * SIZE(CO1) - -#if defined(LN) || defined(LT) - movaps %xmm10, -16 * SIZE(BO) - movaps %xmm11, -14 * SIZE(BO) -#else - movaps %xmm10, -16 * SIZE(AO) - movaps %xmm11, -14 * SIZE(AO) -#endif - -#ifndef LN - addq $4 * SIZE, CO1 -#endif - -#if defined(LT) || defined(RN) - movq K, %rax - subq KK, %rax - leaq (,%rax, SIZE), %rax - leaq (AO, %rax, 4), AO - addq %rax, BO -#endif - -#ifdef LN - subq $4, KK -#endif - -#ifdef LT - addq $4, KK -#endif - -#ifdef RT - movq K, %rax - salq $2 + BASE_SHIFT, %rax - addq %rax, AORIG -#endif - - decq I # i -- - jg .L91 - ALIGN_4 - -.L100: - testq $2, M - je .L110 - -#ifdef LN - movq K, %rax - salq $1 + BASE_SHIFT, %rax - subq %rax, AORIG -#endif - -#if defined(LN) || defined(RT) - movq KK, %rax - movq AORIG, AO - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO -#endif - - movq B, BO - -#if defined(LN) || defined(RT) - movq KK, %rax - leaq (BO, %rax, SIZE), BO -#endif - - movddup -16 * SIZE(BO), %xmm0 - pxor %xmm8, %xmm8 - movddup -15 * SIZE(BO), %xmm1 - pxor %xmm9, %xmm9 - movddup -14 * SIZE(BO), %xmm2 - pxor %xmm10, %xmm10 - movddup -13 * SIZE(BO), %xmm3 - pxor %xmm11, %xmm11 - -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $-4, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (BO, %rax, 1), BO - negq %rax - NOBRANCH - je .L106 - ALIGN_4 - 
-.L102: - mulpd -16 * SIZE(AO, %rax, 2), %xmm0 - addpd %xmm0, %xmm8 - movddup -12 * SIZE(BO, %rax, 1), %xmm0 - - mulpd -14 * SIZE(AO, %rax, 2), %xmm1 - addpd %xmm1, %xmm9 - movddup -11 * SIZE(BO, %rax, 1), %xmm1 - - mulpd -12 * SIZE(AO, %rax, 2), %xmm2 - addpd %xmm2, %xmm10 - movddup -10 * SIZE(BO, %rax, 1), %xmm2 - - mulpd -10 * SIZE(AO, %rax, 2), %xmm3 - addpd %xmm3, %xmm11 - movddup -9 * SIZE(BO, %rax, 1), %xmm3 - - addq $4 * SIZE, %rax - BRANCH - jl .L102 - ALIGN_4 - -.L106: -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $3, %rax # if (k & 1) - je .L109 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (BO, %rax, 1), BO - negq %rax - ALIGN_4 - -.L107: - movddup -16 * SIZE(BO, %rax, 1), %xmm0 - mulpd -16 * SIZE(AO, %rax, 2), %xmm0 - addpd %xmm0, %xmm8 - - addq $SIZE, %rax - jl .L107 - ALIGN_4 - -.L109: - addpd %xmm9, %xmm8 - addpd %xmm11, %xmm10 - addpd %xmm10, %xmm8 - -#if defined(LN) || defined(RT) - movq KK, %rax -#ifdef LN - subq $2, %rax -#else - subq $1, %rax -#endif - - leaq (, %rax, SIZE), %rax - - movq AORIG, AO - leaq (AO, %rax, 2), AO - leaq (B, %rax, 1), BO -#endif - -#if defined(LN) || defined(LT) - movapd -16 * SIZE(BO), %xmm10 - subpd %xmm8, %xmm10 -#else - movapd -16 * SIZE(AO), %xmm10 - subpd %xmm8, %xmm10 -#endif - -#ifdef LN - movapd %xmm10, %xmm8 - unpckhpd %xmm8, %xmm8 - - movsd -13 * SIZE(AO), %xmm12 - mulsd %xmm12, %xmm8 - - movsd -14 * SIZE(AO), %xmm13 - mulsd %xmm8, %xmm13 - subsd %xmm13, %xmm10 - - movsd -16 * SIZE(AO), %xmm12 - mulsd %xmm12, %xmm10 - - unpcklpd %xmm8, %xmm10 -#endif - -#ifdef LT - movapd %xmm10, %xmm8 - unpckhpd %xmm8, %xmm8 - - movsd -16 * SIZE(AO), %xmm12 - mulsd %xmm12, %xmm10 - - movsd -15 * SIZE(AO), %xmm13 - mulsd %xmm10, %xmm13 - subsd %xmm13, %xmm8 - - movsd -13 * SIZE(AO), %xmm12 - mulsd %xmm12, %xmm8 - - unpcklpd %xmm8, %xmm10 -#endif - -#ifdef RN - movddup -16 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm10 -#endif - -#ifdef RT - movddup -16 * 
SIZE(BO), %xmm8 - mulpd %xmm8, %xmm10 -#endif - -#ifdef LN - subq $2 * SIZE, CO1 -#endif - -#if defined(LN) || defined(LT) - movlpd %xmm10, 0 * SIZE(CO1) - movhpd %xmm10, 1 * SIZE(CO1) -#else - movlpd %xmm10, 0 * SIZE(CO1) - movhpd %xmm10, 1 * SIZE(CO1) -#endif - -#if defined(LN) || defined(LT) - movaps %xmm10, -16 * SIZE(BO) -#else - movaps %xmm10, -16 * SIZE(AO) -#endif - -#ifndef LN - addq $2 * SIZE, CO1 -#endif - -#if defined(LT) || defined(RN) - movq K, %rax - subq KK, %rax - leaq (,%rax, SIZE), %rax - leaq (AO, %rax, 2), AO - addq %rax, BO -#endif - -#ifdef LN - subq $2, KK -#endif - -#ifdef LT - addq $2, KK -#endif - -#ifdef RT - movq K, %rax - salq $1 + BASE_SHIFT, %rax - addq %rax, AORIG -#endif - ALIGN_4 - -.L110: - testq $1, M - je .L119 - -#ifdef LN - movq K, %rax - salq $0 + BASE_SHIFT, %rax - subq %rax, AORIG -#endif - -#if defined(LN) || defined(RT) - movq KK, %rax - movq AORIG, AO - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO -#endif - - movq B, BO - -#if defined(LN) || defined(RT) - movq KK, %rax - leaq (BO, %rax, SIZE), BO -#endif - - movapd -16 * SIZE(AO), %xmm0 - pxor %xmm8, %xmm8 - movapd -14 * SIZE(AO), %xmm1 - pxor %xmm9, %xmm9 - -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $-4, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (BO, %rax, 1), BO - negq %rax - NOBRANCH - je .L116 - ALIGN_4 - -.L112: - mulpd -16 * SIZE(BO, %rax, 1), %xmm0 - addpd %xmm0, %xmm8 - movapd -12 * SIZE(AO, %rax, 1), %xmm0 - - mulpd -14 * SIZE(BO, %rax, 1), %xmm1 - addpd %xmm1, %xmm9 - movapd -10 * SIZE(AO, %rax, 1), %xmm1 - - addq $4 * SIZE, %rax - BRANCH - jl .L112 - ALIGN_4 - -.L116: -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $3, %rax # if (k & 1) - je .L118 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (BO, %rax, 1), BO - negq %rax - ALIGN_4 - -.L117: - mulsd -16 * SIZE(BO, %rax, 1), %xmm0 - addsd %xmm0, %xmm8 - 
movsd -15 * SIZE(AO, %rax, 1), %xmm0 - - addq $SIZE, %rax - jl .L117 - ALIGN_4 - -.L118: - addpd %xmm9, %xmm8 - haddpd %xmm8, %xmm8 - -#if defined(LN) || defined(RT) - movq KK, %rax -#ifdef LN - subq $1, %rax -#else - subq $1, %rax -#endif - - leaq (, %rax, SIZE), %rax - - movq AORIG, AO - leaq (AO, %rax, 1), AO - leaq (B, %rax, 1), BO -#endif - -#if defined(LN) || defined(LT) - movsd -16 * SIZE(BO), %xmm10 - subsd %xmm8, %xmm10 -#else - movsd -16 * SIZE(AO), %xmm10 - subsd %xmm8, %xmm10 -#endif - -#if defined(LN) || defined(LT) - movsd -16 * SIZE(AO), %xmm12 - mulsd %xmm12, %xmm10 -#endif - -#if defined(RN) || defined(RT) - movsd -16 * SIZE(BO), %xmm8 - mulsd %xmm8, %xmm10 -#endif - -#ifdef LN - subq $1 * SIZE, CO1 -#endif - - movsd %xmm10, 0 * SIZE(CO1) - -#if defined(LN) || defined(LT) - movlpd %xmm10, -16 * SIZE(BO) -#else - movlpd %xmm10, -16 * SIZE(AO) -#endif - -#ifndef LN - addq $1 * SIZE, CO1 -#endif - -#if defined(LT) || defined(RN) - movq K, %rax - subq KK, %rax - leaq (,%rax, SIZE), %rax - addq %rax, AO - addq %rax, BO -#endif - -#ifdef LN - subq $1, KK -#endif - -#ifdef LT - addq $1, KK -#endif - -#ifdef RT - movq K, %rax - salq $0 + BASE_SHIFT, %rax - addq %rax, AORIG -#endif - ALIGN_4 - -.L119: -#ifdef LN - leaq (B, K, SIZE), B -#endif - -#if defined(LT) || defined(RN) - movq BO, B -#endif - -#ifdef RN - addq $1, KK -#endif - -#ifdef RT - subq $1, KK -#endif - ALIGN_4 - -.L40: - testq $2, N - je .L80 - -#if defined(LT) || defined(RN) - movq A, AO -#else - movq A, AORIG -#endif - -#ifdef RT - movq K, %rax - salq $1 + BASE_SHIFT, %rax - subq %rax, B - - leaq (, LDC, 2), %rax - subq %rax, C -#endif - - movq C, CO1 # coffset1 = c - leaq (C, LDC, 1), CO2 # coffset2 = c + ldc -#ifndef RT - leaq (C, LDC, 2), C -#endif - -#ifdef LN - movq OFFSET, %rax - addq M, %rax - movq %rax, KK -#endif - -#if defined(LT) - movq OFFSET, %rax - movq %rax, KK -#endif - - movq M, I - sarq $2, I # i = (m >> 2) - jle .L60 - ALIGN_4 - -.L51: -#ifdef LN - movq K, %rax - salq $2 
+ BASE_SHIFT, %rax - subq %rax, AORIG -#endif - -#if defined(LN) || defined(RT) - movq KK, %rax - movq AORIG, AO - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 4), AO -#endif - - movq B, BO - -#if defined(LN) || defined(RT) - movq KK, %rax - leaq (, %rax, SIZE), %rax - leaq (BO, %rax, 2), BO -#endif - - movddup -16 * SIZE(BO), %xmm1 - movddup -15 * SIZE(BO), %xmm5 - pxor %xmm8, %xmm8 - movddup -12 * SIZE(BO), %xmm3 - pxor %xmm9, %xmm9 - movapd -16 * SIZE(AO), %xmm0 - pxor %xmm12, %xmm12 - movapd -8 * SIZE(AO), %xmm4 - pxor %xmm13, %xmm13 - -#ifndef LN - prefetchw 3 * SIZE(CO1) - movapd %xmm0, %xmm2 - prefetchw 5 * SIZE(CO2) -#else - prefetchw -4 * SIZE(CO1) - movapd %xmm0, %xmm2 - prefetchw -4 * SIZE(CO2) -#endif - - -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $-4, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 4), AO - leaq (BO, %rax, 2), BO - negq %rax - NOBRANCH - je .L56 - ALIGN_4 - -.L52: - mulpd %xmm1, %xmm0 - mulpd -14 * SIZE(AO, %rax, 4), %xmm1 - addpd %xmm0, %xmm8 - movapd -12 * SIZE(AO, %rax, 4), %xmm0 - addpd %xmm1, %xmm12 - movddup -14 * SIZE(BO, %rax, 2), %xmm1 - mulpd %xmm5, %xmm2 - mulpd -14 * SIZE(AO, %rax, 4), %xmm5 - addpd %xmm2, %xmm9 - addpd %xmm5, %xmm13 - movddup -13 * SIZE(BO, %rax, 2), %xmm5 - movapd %xmm0, %xmm2 - mulpd %xmm1, %xmm0 - mulpd -10 * SIZE(AO, %rax, 4), %xmm1 - addpd %xmm0, %xmm8 - movapd (AO, %rax, 4), %xmm0 - addpd %xmm1, %xmm12 - movddup -8 * SIZE(BO, %rax, 2), %xmm1 - mulpd %xmm5, %xmm2 - mulpd -10 * SIZE(AO, %rax, 4), %xmm5 - addpd %xmm2, %xmm9 - addpd %xmm5, %xmm13 - movddup -11 * SIZE(BO, %rax, 2), %xmm5 - movapd %xmm4, %xmm2 - mulpd %xmm3, %xmm4 - mulpd -6 * SIZE(AO, %rax, 4), %xmm3 - addpd %xmm4, %xmm8 - movapd -4 * SIZE(AO, %rax, 4), %xmm4 - addpd %xmm3, %xmm12 - movddup -10 * SIZE(BO, %rax, 2), %xmm3 - mulpd %xmm5, %xmm2 - mulpd -6 * SIZE(AO, %rax, 4), %xmm5 - addpd %xmm2, %xmm9 - addpd %xmm5, %xmm13 - movddup -9 * SIZE(BO, %rax, 2), %xmm5 - movapd %xmm4, 
%xmm2 - mulpd %xmm3, %xmm4 - mulpd -2 * SIZE(AO, %rax, 4), %xmm3 - addpd %xmm4, %xmm8 - movapd 8 * SIZE(AO, %rax, 4), %xmm4 - addpd %xmm3, %xmm12 - movddup -4 * SIZE(BO, %rax, 2), %xmm3 - mulpd %xmm5, %xmm2 - mulpd -2 * SIZE(AO, %rax, 4), %xmm5 - addpd %xmm2, %xmm9 - addpd %xmm5, %xmm13 - movddup -7 * SIZE(BO, %rax, 2), %xmm5 - movapd %xmm0, %xmm2 - - addq $4 * SIZE, %rax - BRANCH - jl .L52 - ALIGN_4 - -.L56: -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $3, %rax # if (k & 1) - je .L59 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 4), AO - leaq (BO, %rax, 2), BO - negq %rax - ALIGN_4 - -.L57: - mulpd %xmm1, %xmm0 - mulpd -14 * SIZE(AO, %rax, 4), %xmm1 - addpd %xmm0, %xmm8 - movapd -12 * SIZE(AO, %rax, 4), %xmm0 - addpd %xmm1, %xmm12 - movddup -14 * SIZE(BO, %rax, 2), %xmm1 - mulpd %xmm5, %xmm2 - mulpd -14 * SIZE(AO, %rax, 4), %xmm5 - addpd %xmm2, %xmm9 - addpd %xmm5, %xmm13 - movddup -13 * SIZE(BO, %rax, 2), %xmm5 - movapd %xmm0, %xmm2 - - addq $SIZE, %rax - jl .L57 - ALIGN_4 - -.L59: -#if defined(LN) || defined(RT) - movq KK, %rax -#ifdef LN - subq $4, %rax -#else - subq $2, %rax -#endif - - leaq (, %rax, SIZE), %rax - - movq AORIG, AO - leaq (AO, %rax, 4), AO - leaq (B, %rax, 2), BO -#endif - -#if defined(LN) || defined(LT) - movapd %xmm8, %xmm0 - unpcklpd %xmm9, %xmm8 - unpckhpd %xmm9, %xmm0 - - movapd %xmm12, %xmm4 - unpcklpd %xmm13, %xmm12 - unpckhpd %xmm13, %xmm4 - - movapd -16 * SIZE(BO), %xmm9 - movapd -14 * SIZE(BO), %xmm13 - movapd -12 * SIZE(BO), %xmm1 - movapd -10 * SIZE(BO), %xmm5 - - subpd %xmm8, %xmm9 - subpd %xmm0, %xmm13 - subpd %xmm12, %xmm1 - subpd %xmm4, %xmm5 -#else - movapd -16 * SIZE(AO), %xmm0 - movapd -14 * SIZE(AO), %xmm1 - movapd -12 * SIZE(AO), %xmm2 - movapd -10 * SIZE(AO), %xmm3 - - subpd %xmm8, %xmm0 - subpd %xmm12, %xmm1 - subpd %xmm9, %xmm2 - subpd %xmm13, %xmm3 -#endif - -#ifdef LN - movddup -1 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm5 - movddup -2 * SIZE(AO), %xmm10 - mulpd 
%xmm5, %xmm10 - subpd %xmm10, %xmm1 - movddup -3 * SIZE(AO), %xmm12 - mulpd %xmm5, %xmm12 - subpd %xmm12, %xmm13 - movddup -4 * SIZE(AO), %xmm14 - mulpd %xmm5, %xmm14 - subpd %xmm14, %xmm9 - - movddup -6 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm1 - movddup -7 * SIZE(AO), %xmm10 - mulpd %xmm1, %xmm10 - subpd %xmm10, %xmm13 - movddup -8 * SIZE(AO), %xmm12 - mulpd %xmm1, %xmm12 - subpd %xmm12, %xmm9 - - movddup -11 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm13 - movddup -12 * SIZE(AO), %xmm10 - mulpd %xmm13, %xmm10 - subpd %xmm10, %xmm9 - - movddup -16 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm9 -#endif - -#ifdef LT - movddup -16 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm9 - movddup -15 * SIZE(AO), %xmm10 - mulpd %xmm9, %xmm10 - subpd %xmm10, %xmm13 - movddup -14 * SIZE(AO), %xmm12 - mulpd %xmm9, %xmm12 - subpd %xmm12, %xmm1 - movddup -13 * SIZE(AO), %xmm14 - mulpd %xmm9, %xmm14 - subpd %xmm14, %xmm5 - - - movddup -11 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm13 - - movddup -10 * SIZE(AO), %xmm10 - mulpd %xmm13, %xmm10 - subpd %xmm10, %xmm1 - movddup -9 * SIZE(AO), %xmm12 - mulpd %xmm13, %xmm12 - subpd %xmm12, %xmm5 - - movddup -6 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm1 - movddup -5 * SIZE(AO), %xmm10 - mulpd %xmm1, %xmm10 - subpd %xmm10, %xmm5 - - movddup -1 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm5 -#endif - -#ifdef RN - movddup -16 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm0 - mulpd %xmm8, %xmm1 - - movddup -15 * SIZE(BO), %xmm9 - mulpd %xmm0, %xmm9 - subpd %xmm9, %xmm2 - movddup -15 * SIZE(BO), %xmm9 - mulpd %xmm1, %xmm9 - subpd %xmm9, %xmm3 - - movddup -13 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm2 - mulpd %xmm8, %xmm3 -#endif - -#ifdef RT - movddup -13 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm2 - mulpd %xmm8, %xmm3 - - movddup -14 * SIZE(BO), %xmm9 - mulpd %xmm2, %xmm9 - subpd %xmm9, %xmm0 - movddup -14 * SIZE(BO), %xmm9 - mulpd %xmm3, %xmm9 - subpd %xmm9, %xmm1 - - movddup -16 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm0 - mulpd %xmm8, %xmm1 -#endif - -#ifdef LN - subq $4 * SIZE, CO1 - subq $4 * SIZE, CO2 
-#endif - -#if defined(LN) || defined(LT) - movlpd %xmm9, 0 * SIZE(CO1) - movlpd %xmm13, 1 * SIZE(CO1) - movlpd %xmm1, 2 * SIZE(CO1) - movlpd %xmm5, 3 * SIZE(CO1) - - movhpd %xmm9, 0 * SIZE(CO2) - movhpd %xmm13, 1 * SIZE(CO2) - movhpd %xmm1, 2 * SIZE(CO2) - movhpd %xmm5, 3 * SIZE(CO2) -#else - movlpd %xmm0, 0 * SIZE(CO1) - movhpd %xmm0, 1 * SIZE(CO1) - movlpd %xmm1, 2 * SIZE(CO1) - movhpd %xmm1, 3 * SIZE(CO1) - - movlpd %xmm2, 0 * SIZE(CO2) - movhpd %xmm2, 1 * SIZE(CO2) - movlpd %xmm3, 2 * SIZE(CO2) - movhpd %xmm3, 3 * SIZE(CO2) -#endif - -#if defined(LN) || defined(LT) - movaps %xmm9, -16 * SIZE(BO) - movaps %xmm13,-14 * SIZE(BO) - movaps %xmm1, -12 * SIZE(BO) - movaps %xmm5, -10 * SIZE(BO) -#else - movaps %xmm0, -16 * SIZE(AO) - movaps %xmm1, -14 * SIZE(AO) - movaps %xmm2, -12 * SIZE(AO) - movaps %xmm3, -10 * SIZE(AO) -#endif - -#ifndef LN - addq $4 * SIZE, CO1 - addq $4 * SIZE, CO2 -#endif - -#if defined(LT) || defined(RN) - movq K, %rax - subq KK, %rax - leaq (,%rax, SIZE), %rax - leaq (AO, %rax, 4), AO - leaq (BO, %rax, 2), BO -#endif - -#ifdef LN - subq $4, KK -#endif - -#ifdef LT - addq $4, KK -#endif - -#ifdef RT - movq K, %rax - salq $2 + BASE_SHIFT, %rax - addq %rax, AORIG -#endif - - decq I # i -- - jg .L51 - ALIGN_4 - -.L60: - testq $2, M - je .L70 - -#ifdef LN - movq K, %rax - salq $1 + BASE_SHIFT, %rax - subq %rax, AORIG -#endif - -#if defined(LN) || defined(RT) - movq KK, %rax - movq AORIG, AO - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO -#endif - - movq B, BO - -#if defined(LN) || defined(RT) - movq KK, %rax - leaq (, %rax, SIZE), %rax - leaq (BO, %rax, 2), BO -#endif - - movapd -16 * SIZE(AO), %xmm0 - pxor %xmm8, %xmm8 - movapd -12 * SIZE(AO), %xmm2 - pxor %xmm9, %xmm9 - movddup -16 * SIZE(BO), %xmm1 - pxor %xmm10, %xmm10 - movddup -15 * SIZE(BO), %xmm3 - pxor %xmm11, %xmm11 - -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $-4, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO - 
leaq (BO, %rax, 2), BO - negq %rax - NOBRANCH - je .L66 - ALIGN_4 - -.L62: - mulpd %xmm0, %xmm1 - addpd %xmm1, %xmm8 - movddup -14 * SIZE(BO, %rax, 2), %xmm1 - mulpd %xmm0, %xmm3 - movapd -14 * SIZE(AO, %rax, 2), %xmm0 - addpd %xmm3, %xmm9 - movddup -13 * SIZE(BO, %rax, 2), %xmm3 - mulpd %xmm0, %xmm1 - addpd %xmm1, %xmm10 - movddup -12 * SIZE(BO, %rax, 2), %xmm1 - mulpd %xmm0, %xmm3 - movapd -8 * SIZE(AO, %rax, 2), %xmm0 - addpd %xmm3, %xmm11 - movddup -11 * SIZE(BO, %rax, 2), %xmm3 - mulpd %xmm2, %xmm1 - addpd %xmm1, %xmm8 - movddup -10 * SIZE(BO, %rax, 2), %xmm1 - mulpd %xmm2, %xmm3 - movapd -10 * SIZE(AO, %rax, 2), %xmm2 - addpd %xmm3, %xmm9 - movddup -9 * SIZE(BO, %rax, 2), %xmm3 - mulpd %xmm2, %xmm1 - addpd %xmm1, %xmm10 - movddup -8 * SIZE(BO, %rax, 2), %xmm1 - mulpd %xmm2, %xmm3 - movapd -4 * SIZE(AO, %rax, 2), %xmm2 - addpd %xmm3, %xmm11 - movddup -7 * SIZE(BO, %rax, 2), %xmm3 - - addq $4 * SIZE, %rax - BRANCH - jl .L62 - ALIGN_4 - -.L66: -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $3, %rax # if (k & 1) - je .L69 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (BO, %rax, 2), BO - negq %rax - ALIGN_4 - -.L67: - mulpd %xmm0, %xmm1 - addpd %xmm1, %xmm8 - movddup -14 * SIZE(BO, %rax, 2), %xmm1 - mulpd %xmm0, %xmm3 - movapd -14 * SIZE(AO, %rax, 2), %xmm0 - addpd %xmm3, %xmm9 - movddup -13 * SIZE(BO, %rax, 2), %xmm3 - - addq $SIZE, %rax - jl .L67 - ALIGN_4 - -.L69: - addpd %xmm10, %xmm8 - addpd %xmm11, %xmm9 - -#if defined(LN) || defined(RT) - movq KK, %rax -#ifdef LN - subq $2, %rax -#else - subq $2, %rax -#endif - - leaq (, %rax, SIZE), %rax - - movq AORIG, AO - leaq (AO, %rax, 2), AO - leaq (B, %rax, 2), BO -#endif - -#if defined(LN) || defined(LT) - movapd %xmm8, %xmm0 - unpcklpd %xmm9, %xmm8 - unpckhpd %xmm9, %xmm0 - - movapd -16 * SIZE(BO), %xmm9 - movapd -14 * SIZE(BO), %xmm13 - - subpd %xmm8, %xmm9 - subpd %xmm0, %xmm13 -#else - movapd -16 * SIZE(AO), %xmm0 - movapd -14 * SIZE(AO), 
%xmm2 - - subpd %xmm8, %xmm0 - subpd %xmm9, %xmm2 -#endif - - -#ifdef LN - movddup -13 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm13 - - movddup -14 * SIZE(AO), %xmm10 - mulpd %xmm13, %xmm10 - subpd %xmm10, %xmm9 - - movddup -16 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm9 -#endif - -#ifdef LT - movddup -16 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm9 - - movddup -15 * SIZE(AO), %xmm10 - mulpd %xmm9, %xmm10 - subpd %xmm10, %xmm13 - - movddup -13 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm13 -#endif - -#ifdef RN - movddup -16 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm0 - - movddup -15 * SIZE(BO), %xmm9 - mulpd %xmm0, %xmm9 - subpd %xmm9, %xmm2 - - movddup -13 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm2 -#endif - -#ifdef RT - movddup -13 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm2 - - movddup -14 * SIZE(BO), %xmm9 - mulpd %xmm2, %xmm9 - subpd %xmm9, %xmm0 - - movddup -16 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm0 -#endif - -#ifdef LN - subq $2 * SIZE, CO1 - subq $2 * SIZE, CO2 -#endif - -#if defined(LN) || defined(LT) - movlpd %xmm9, 0 * SIZE(CO1) - movlpd %xmm13, 1 * SIZE(CO1) - - movhpd %xmm9, 0 * SIZE(CO2) - movhpd %xmm13, 1 * SIZE(CO2) -#else - movlpd %xmm0, 0 * SIZE(CO1) - movhpd %xmm0, 1 * SIZE(CO1) - - movlpd %xmm2, 0 * SIZE(CO2) - movhpd %xmm2, 1 * SIZE(CO2) -#endif - -#if defined(LN) || defined(LT) - movaps %xmm9, -16 * SIZE(BO) - movaps %xmm13, -14 * SIZE(BO) -#else - movaps %xmm0, -16 * SIZE(AO) - movaps %xmm2, -14 * SIZE(AO) -#endif - -#ifndef LN - addq $2 * SIZE, CO1 - addq $2 * SIZE, CO2 -#endif - -#if defined(LT) || defined(RN) - movq K, %rax - subq KK, %rax - leaq (,%rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (BO, %rax, 2), BO -#endif - -#ifdef LN - subq $2, KK -#endif - -#ifdef LT - addq $2, KK -#endif - -#ifdef RT - movq K, %rax - salq $1 + BASE_SHIFT, %rax - addq %rax, AORIG -#endif - ALIGN_4 - -.L70: - testq $1, M - je .L79 - ALIGN_4 - -.L71: -#ifdef LN - movq K, %rax - salq $0 + BASE_SHIFT, %rax - subq %rax, AORIG -#endif - -#if defined(LN) || defined(RT) - movq KK, %rax - movq 
AORIG, AO - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO -#endif - - movq B, BO - -#if defined(LN) || defined(RT) - movq KK, %rax - salq $1 + BASE_SHIFT, %rax - leaq (BO, %rax, 1), BO -#endif - - movddup -16 * SIZE(AO), %xmm0 - pxor %xmm8, %xmm8 - movddup -15 * SIZE(AO), %xmm1 - pxor %xmm9, %xmm9 - movddup -14 * SIZE(AO), %xmm2 - pxor %xmm10, %xmm10 - movddup -13 * SIZE(AO), %xmm3 - pxor %xmm11, %xmm11 - -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $-4, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (BO, %rax, 2), BO - negq %rax - NOBRANCH - je .L76 - ALIGN_4 - -.L72: - mulpd -16 * SIZE(BO, %rax, 2), %xmm0 - addpd %xmm0, %xmm8 - movddup -12 * SIZE(AO, %rax, 1), %xmm0 - - mulpd -14 * SIZE(BO, %rax, 2), %xmm1 - addpd %xmm1, %xmm9 - movddup -11 * SIZE(AO, %rax, 1), %xmm1 - - mulpd -12 * SIZE(BO, %rax, 2), %xmm2 - addpd %xmm2, %xmm10 - movddup -10 * SIZE(AO, %rax, 1), %xmm2 - - mulpd -10 * SIZE(BO, %rax, 2), %xmm3 - addpd %xmm3, %xmm11 - movddup -9 * SIZE(AO, %rax, 1), %xmm3 - - addq $4 * SIZE, %rax - BRANCH - jl .L72 - ALIGN_4 - -.L76: -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $3, %rax # if (k & 1) - je .L78 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (BO, %rax, 2), BO - negq %rax - ALIGN_4 - -.L77: - mulpd -16 * SIZE(BO, %rax, 2), %xmm0 - addpd %xmm0, %xmm8 - movddup -15 * SIZE(AO, %rax, 1), %xmm0 - - addq $SIZE, %rax - jl .L77 - ALIGN_4 - -.L78: - addpd %xmm9, %xmm8 - addpd %xmm11, %xmm10 - addpd %xmm10, %xmm8 - -#if defined(LN) || defined(RT) - movq KK, %rax -#ifdef LN - subq $1, %rax -#else - subq $2, %rax -#endif - - leaq (, %rax, SIZE), %rax - - movq AORIG, AO - leaq (AO, %rax, 1), AO - leaq (B, %rax, 2), BO -#endif - -#if defined(LN) || defined(LT) - movapd -16 * SIZE(BO), %xmm2 -#else - movapd -16 * SIZE(AO), %xmm2 -#endif - - subpd %xmm8, %xmm2 - -#if defined(LN) || defined(LT) - movddup -16 * SIZE(AO), 
%xmm0 - - mulpd %xmm0, %xmm2 -#endif - -#ifdef RN - movapd %xmm2, %xmm0 - unpckhpd %xmm0, %xmm0 - - mulsd -16 * SIZE(BO), %xmm2 - movsd -15 * SIZE(BO), %xmm4 - mulsd %xmm2, %xmm4 - subsd %xmm4, %xmm0 - - mulsd -13 * SIZE(BO), %xmm0 - unpcklpd %xmm0, %xmm2 -#endif - -#ifdef RT - movapd %xmm2, %xmm0 - unpckhpd %xmm0, %xmm0 - - mulsd -13 * SIZE(BO), %xmm0 - - movlpd -14 * SIZE(BO), %xmm4 - mulsd %xmm0, %xmm4 - subsd %xmm4, %xmm2 - - mulsd -16 * SIZE(BO), %xmm2 - unpcklpd %xmm0, %xmm2 -#endif - -#ifdef LN - subq $1 * SIZE, CO1 - subq $1 * SIZE, CO2 -#endif - - movlpd %xmm2, 0 * SIZE(CO1) - movhpd %xmm2, 0 * SIZE(CO2) - -#if defined(LN) || defined(LT) - movaps %xmm2, -16 * SIZE(BO) -#else - movaps %xmm2, -16 * SIZE(AO) -#endif - -#ifndef LN - addq $1 * SIZE, CO1 - addq $1 * SIZE, CO2 -#endif - -#if defined(LT) || defined(RN) - movq K, %rax - subq KK, %rax - leaq (,%rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (BO, %rax, 2), BO -#endif - -#ifdef LN - subq $1, KK -#endif - -#ifdef LT - addq $1, KK -#endif - -#ifdef RT - movq K, %rax - salq $0 + BASE_SHIFT, %rax - addq %rax, AORIG -#endif - ALIGN_4 - -.L79: -#ifdef LN - leaq (, K, SIZE), %rax - leaq (B, %rax, 2), B -#endif - -#if defined(LT) || defined(RN) - movq BO, B -#endif - -#ifdef RN - addq $2, KK -#endif - -#ifdef RT - subq $2, KK -#endif - ALIGN_4 - -.L80: - movq N, J - sarq $2, J # j = (n >> 2) - jle .L999 - -.L01: -#if defined(LT) || defined(RN) - movq A, AO -#else - movq A, AORIG -#endif - -#ifdef RT - movq K, %rax - salq $2 + BASE_SHIFT, %rax - subq %rax, B - - leaq (, LDC, 4), %rax - subq %rax, C -#endif - - movq C, CO1 # coffset1 = c - leaq (C, LDC, 1), CO2 # coffset2 = c + ldc -#ifndef RT - leaq (C, LDC, 4), C -#endif - -#ifdef LN - movq OFFSET, %rax - addq M, %rax - movq %rax, KK -#endif - - movq K, %rax - salq $BASE_SHIFT + 2, %rax - movq B, BB - subq %rax, BB - -#if defined(LT) - movq OFFSET, %rax - movq %rax, KK -#endif - - movq M, I - sarq $2, I # i = (m >> 2) - jle .L20 - ALIGN_4 - -.L11: -#ifdef 
LN - movq K, %rax - salq $2 + BASE_SHIFT, %rax - subq %rax, AORIG -#endif - -#if defined(LN) || defined(RT) - movq KK, %rax - movq AORIG, AO - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 4), AO -#endif - - movq B, BO - -#if defined(LN) || defined(RT) - movq KK, %rax - leaq (, %rax, SIZE), %rax - leaq (BO, %rax, 4), BO -#endif - - movapd -16 * SIZE(AO), %xmm0 - movddup -16 * SIZE(BO), %xmm1 - pxor %xmm8, %xmm8 - movddup -15 * SIZE(BO), %xmm3 - pxor %xmm9, %xmm9 - movapd -8 * SIZE(AO), %xmm4 - pxor %xmm10, %xmm10 - movddup -8 * SIZE(BO), %xmm5 - pxor %xmm11, %xmm11 - -#ifndef LN - prefetchw 3 * SIZE(CO1) - pxor %xmm12, %xmm12 - prefetchw 5 * SIZE(CO2) - pxor %xmm13, %xmm13 - prefetchw 3 * SIZE(CO1, LDC, 2) - pxor %xmm14, %xmm14 - prefetchw 5 * SIZE(CO2, LDC, 2) - pxor %xmm15, %xmm15 - movapd %xmm0, %xmm2 -#else - prefetchw -8 * SIZE(CO1) - pxor %xmm12, %xmm12 - prefetchw -8 * SIZE(CO2) - pxor %xmm13, %xmm13 - prefetchw -8 * SIZE(CO1, LDC, 2) - pxor %xmm14, %xmm14 - prefetchw -8 * SIZE(CO2, LDC, 2) - pxor %xmm15, %xmm15 - movapd %xmm0, %xmm2 -#endif - - subq $-16 * SIZE, BB - -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - - andq $-8, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 4), AO - leaq (BO, %rax, 4), BO - negq %rax - NOBRANCH - je .L15 - vmovups -16 * SIZE(AO, %rax, 4),%xmm6 - vmovddup -16 * SIZE(BO, %rax, 4), %xmm7 - vmovddup -15 * SIZE(BO, %rax, 4), %xmm3 - - ALIGN_4 - -.L12: - - prefetcht0 A_PR1(AO,%rax,4) - prefetcht0 B_PR1(BO,%rax,4) - KERNEL1(16 * 0) - KERNEL2(16 * 0) - prefetcht0 A_PR1+64(AO,%rax,4) - prefetcht0 B_PR1+64(BO,%rax,4) - KERNEL3(16 * 0) - KERNEL4(16 * 0) - prefetcht0 A_PR1+128(AO,%rax,4) - prefetcht0 B_PR1+128(BO,%rax,4) - KERNEL5(16 * 0) - KERNEL6(16 * 0) - prefetcht0 A_PR1+192(AO,%rax,4) - prefetcht0 B_PR1+192(BO,%rax,4) - KERNEL7(16 * 0) - KERNEL8(16 * 0) - - addq $8 * SIZE, %rax - jl .L12 - ALIGN_4 - -.L15: -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq 
KK, %rax -#endif - testq $4, %rax - je .L16 - xorq %rax, %rax - ALIGN_4 - - KERNEL_SUB1(16 * 0) - KERNEL_SUB2(16 * 0) - KERNEL_SUB3(16 * 0) - KERNEL_SUB4(16 * 0) - - subq $-16 * SIZE, BO - subq $-16 * SIZE, AO - ALIGN_4 - -.L16: -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $3, %rax # if (k & 1) - je .L19 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 4), AO - leaq (BO, %rax, 4), BO - negq %rax - ALIGN_4 - -.L17: - mulpd %xmm1, %xmm0 - mulpd -14 * SIZE(AO, %rax, 4), %xmm1 - addpd %xmm0, %xmm8 - movapd %xmm2, %xmm0 - addpd %xmm1, %xmm12 - movddup -14 * SIZE(BO, %rax, 4), %xmm1 - mulpd %xmm3, %xmm2 - mulpd -14 * SIZE(AO, %rax, 4), %xmm3 - addpd %xmm2, %xmm9 - movapd %xmm0, %xmm2 - addpd %xmm3, %xmm13 - movddup -13 * SIZE(BO, %rax, 4), %xmm3 - mulpd %xmm1, %xmm0 - mulpd -14 * SIZE(AO, %rax, 4), %xmm1 - addpd %xmm0, %xmm10 - movapd -12 * SIZE(AO, %rax, 4), %xmm0 - addpd %xmm1, %xmm14 - movddup -12 * SIZE(BO, %rax, 4), %xmm1 - mulpd %xmm3, %xmm2 - mulpd -14 * SIZE(AO, %rax, 4), %xmm3 - addpd %xmm2, %xmm11 - addpd %xmm3, %xmm15 - movddup -11 * SIZE(BO, %rax, 4), %xmm3 - movapd %xmm0, %xmm2 - - addq $SIZE, %rax - jl .L17 - ALIGN_4 - -.L19: -#if defined(LN) || defined(RT) - movq KK, %rax -#ifdef LN - subq $4, %rax -#else - subq $4, %rax -#endif - - leaq (, %rax, SIZE), %rax - - movq AORIG, AO - leaq (AO, %rax, 4), AO - leaq (B, %rax, 4), BO -#endif - -#if defined(LN) || defined(LT) - movapd %xmm8, %xmm0 - unpcklpd %xmm9, %xmm8 - unpckhpd %xmm9, %xmm0 - - movapd %xmm10, %xmm2 - unpcklpd %xmm11, %xmm10 - unpckhpd %xmm11, %xmm2 - - movapd %xmm12, %xmm4 - unpcklpd %xmm13, %xmm12 - unpckhpd %xmm13, %xmm4 - - movapd %xmm14, %xmm6 - unpcklpd %xmm15, %xmm14 - unpckhpd %xmm15, %xmm6 - - movapd -16 * SIZE(BO), %xmm9 - movapd -14 * SIZE(BO), %xmm11 - movapd -12 * SIZE(BO), %xmm13 - movapd -10 * SIZE(BO), %xmm15 - movapd -8 * SIZE(BO), %xmm1 - movapd -6 * SIZE(BO), %xmm3 - movapd -4 * SIZE(BO), %xmm5 - movapd -2 * SIZE(BO), %xmm7 
- - subpd %xmm8, %xmm9 - subpd %xmm10, %xmm11 - subpd %xmm0, %xmm13 - subpd %xmm2, %xmm15 - subpd %xmm12, %xmm1 - subpd %xmm14, %xmm3 - subpd %xmm4, %xmm5 - subpd %xmm6, %xmm7 -#else - movapd -16 * SIZE(AO), %xmm0 - movapd -14 * SIZE(AO), %xmm1 - movapd -12 * SIZE(AO), %xmm2 - movapd -10 * SIZE(AO), %xmm3 - - movapd -8 * SIZE(AO), %xmm4 - movapd -6 * SIZE(AO), %xmm5 - movapd -4 * SIZE(AO), %xmm6 - movapd -2 * SIZE(AO), %xmm7 - - subpd %xmm8, %xmm0 - subpd %xmm12, %xmm1 - subpd %xmm9, %xmm2 - subpd %xmm13, %xmm3 - subpd %xmm10, %xmm4 - subpd %xmm14, %xmm5 - subpd %xmm11, %xmm6 - subpd %xmm15, %xmm7 -#endif - -#ifdef LN - movddup -1 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm5 - mulpd %xmm8, %xmm7 - - movddup -2 * SIZE(AO), %xmm10 - mulpd %xmm5, %xmm10 - subpd %xmm10, %xmm1 - movddup -2 * SIZE(AO), %xmm10 - mulpd %xmm7, %xmm10 - subpd %xmm10, %xmm3 - - movddup -3 * SIZE(AO), %xmm12 - mulpd %xmm5, %xmm12 - subpd %xmm12, %xmm13 - movddup -3 * SIZE(AO), %xmm12 - mulpd %xmm7, %xmm12 - subpd %xmm12, %xmm15 - - movddup -4 * SIZE(AO), %xmm14 - mulpd %xmm5, %xmm14 - subpd %xmm14, %xmm9 - movddup -4 * SIZE(AO), %xmm14 - mulpd %xmm7, %xmm14 - subpd %xmm14, %xmm11 - - movddup -6 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm1 - mulpd %xmm8, %xmm3 - - movddup -7 * SIZE(AO), %xmm10 - mulpd %xmm1, %xmm10 - subpd %xmm10, %xmm13 - movddup -7 * SIZE(AO), %xmm10 - mulpd %xmm3, %xmm10 - subpd %xmm10, %xmm15 - - movddup -8 * SIZE(AO), %xmm12 - mulpd %xmm1, %xmm12 - subpd %xmm12, %xmm9 - movddup -8 * SIZE(AO), %xmm12 - mulpd %xmm3, %xmm12 - subpd %xmm12, %xmm11 - - movddup -11 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm13 - mulpd %xmm8, %xmm15 - - movddup -12 * SIZE(AO), %xmm10 - mulpd %xmm13, %xmm10 - subpd %xmm10, %xmm9 - movddup -12 * SIZE(AO), %xmm10 - mulpd %xmm15, %xmm10 - subpd %xmm10, %xmm11 - - movddup -16 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm9 - mulpd %xmm8, %xmm11 -#endif - -#ifdef LT - movddup -16 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm9 - mulpd %xmm8, %xmm11 - - movddup -15 * SIZE(AO), %xmm10 - 
mulpd %xmm9, %xmm10 - subpd %xmm10, %xmm13 - - movddup -15 * SIZE(AO), %xmm10 - mulpd %xmm11, %xmm10 - subpd %xmm10, %xmm15 - - movddup -14 * SIZE(AO), %xmm12 - mulpd %xmm9, %xmm12 - subpd %xmm12, %xmm1 - movddup -14 * SIZE(AO), %xmm12 - mulpd %xmm11, %xmm12 - subpd %xmm12, %xmm3 - - movddup -13 * SIZE(AO), %xmm14 - mulpd %xmm9, %xmm14 - subpd %xmm14, %xmm5 - movddup -13 * SIZE(AO), %xmm14 - mulpd %xmm11, %xmm14 - subpd %xmm14, %xmm7 - - movddup -11 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm13 - mulpd %xmm8, %xmm15 - - movddup -10 * SIZE(AO), %xmm10 - mulpd %xmm13, %xmm10 - subpd %xmm10, %xmm1 - movddup -10 * SIZE(AO), %xmm10 - mulpd %xmm15, %xmm10 - subpd %xmm10, %xmm3 - - movddup -9 * SIZE(AO), %xmm12 - mulpd %xmm13, %xmm12 - subpd %xmm12, %xmm5 - movddup -9 * SIZE(AO), %xmm12 - mulpd %xmm15, %xmm12 - subpd %xmm12, %xmm7 - - movddup -6 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm1 - mulpd %xmm8, %xmm3 - - movddup -5 * SIZE(AO), %xmm10 - mulpd %xmm1, %xmm10 - subpd %xmm10, %xmm5 - movddup -5 * SIZE(AO), %xmm10 - mulpd %xmm3, %xmm10 - subpd %xmm10, %xmm7 - - movddup -1 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm5 - mulpd %xmm8, %xmm7 -#endif - -#ifdef RN - movddup -16 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm0 - mulpd %xmm8, %xmm1 - - movddup -15 * SIZE(BO), %xmm9 - mulpd %xmm0, %xmm9 - subpd %xmm9, %xmm2 - movddup -15 * SIZE(BO), %xmm9 - mulpd %xmm1, %xmm9 - subpd %xmm9, %xmm3 - - movddup -14 * SIZE(BO), %xmm10 - mulpd %xmm0, %xmm10 - subpd %xmm10, %xmm4 - movddup -14 * SIZE(BO), %xmm10 - mulpd %xmm1, %xmm10 - subpd %xmm10, %xmm5 - - movddup -13 * SIZE(BO), %xmm11 - mulpd %xmm0, %xmm11 - subpd %xmm11, %xmm6 - movddup -13 * SIZE(BO), %xmm11 - mulpd %xmm1, %xmm11 - subpd %xmm11, %xmm7 - - movddup -11 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm2 - mulpd %xmm8, %xmm3 - - movddup -10 * SIZE(BO), %xmm9 - mulpd %xmm2, %xmm9 - subpd %xmm9, %xmm4 - movddup -10 * SIZE(BO), %xmm9 - mulpd %xmm3, %xmm9 - subpd %xmm9, %xmm5 - - movddup -9 * SIZE(BO), %xmm10 - mulpd %xmm2, %xmm10 - subpd %xmm10, %xmm6 - 
movddup -9 * SIZE(BO), %xmm10 - mulpd %xmm3, %xmm10 - subpd %xmm10, %xmm7 - - movddup -6 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm4 - mulpd %xmm8, %xmm5 - - movddup -5 * SIZE(BO), %xmm9 - mulpd %xmm4, %xmm9 - subpd %xmm9, %xmm6 - movddup -5 * SIZE(BO), %xmm9 - mulpd %xmm5, %xmm9 - subpd %xmm9, %xmm7 - - movddup -1 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm6 - mulpd %xmm8, %xmm7 -#endif - -#ifdef RT - movddup -1 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm6 - mulpd %xmm8, %xmm7 - - movddup -2 * SIZE(BO), %xmm9 - mulpd %xmm6, %xmm9 - subpd %xmm9, %xmm4 - movddup -2 * SIZE(BO), %xmm9 - mulpd %xmm7, %xmm9 - subpd %xmm9, %xmm5 - - movddup -3 * SIZE(BO), %xmm10 - mulpd %xmm6, %xmm10 - subpd %xmm10, %xmm2 - movddup -3 * SIZE(BO), %xmm10 - mulpd %xmm7, %xmm10 - subpd %xmm10, %xmm3 - - movddup -4 * SIZE(BO), %xmm11 - mulpd %xmm6, %xmm11 - subpd %xmm11, %xmm0 - movddup -4 * SIZE(BO), %xmm11 - mulpd %xmm7, %xmm11 - subpd %xmm11, %xmm1 - - movddup -6 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm4 - mulpd %xmm8, %xmm5 - - movddup -7 * SIZE(BO), %xmm9 - mulpd %xmm4, %xmm9 - subpd %xmm9, %xmm2 - movddup -7 * SIZE(BO), %xmm9 - mulpd %xmm5, %xmm9 - subpd %xmm9, %xmm3 - - movddup -8 * SIZE(BO), %xmm10 - mulpd %xmm4, %xmm10 - subpd %xmm10, %xmm0 - movddup -8 * SIZE(BO), %xmm10 - mulpd %xmm5, %xmm10 - subpd %xmm10, %xmm1 - - movddup -11 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm2 - mulpd %xmm8, %xmm3 - - movddup -12 * SIZE(BO), %xmm9 - mulpd %xmm2, %xmm9 - subpd %xmm9, %xmm0 - movddup -12 * SIZE(BO), %xmm9 - mulpd %xmm3, %xmm9 - subpd %xmm9, %xmm1 - - movddup -16 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm0 - mulpd %xmm8, %xmm1 -#endif - -#ifdef LN - subq $4 * SIZE, CO1 - subq $4 * SIZE, CO2 -#endif - -#if defined(LN) || defined(LT) - movlpd %xmm9, 0 * SIZE(CO1) - movlpd %xmm13, 1 * SIZE(CO1) - movlpd %xmm1, 2 * SIZE(CO1) - movlpd %xmm5, 3 * SIZE(CO1) - - movhpd %xmm9, 0 * SIZE(CO2) - movhpd %xmm13, 1 * SIZE(CO2) - movhpd %xmm1, 2 * SIZE(CO2) - movhpd %xmm5, 3 * SIZE(CO2) - - movlpd %xmm11, 0 * SIZE(CO1, LDC, 2) - 
movlpd %xmm15, 1 * SIZE(CO1, LDC, 2) - movlpd %xmm3, 2 * SIZE(CO1, LDC, 2) - movlpd %xmm7, 3 * SIZE(CO1, LDC, 2) - - movhpd %xmm11, 0 * SIZE(CO2, LDC, 2) - movhpd %xmm15, 1 * SIZE(CO2, LDC, 2) - movhpd %xmm3, 2 * SIZE(CO2, LDC, 2) - movhpd %xmm7, 3 * SIZE(CO2, LDC, 2) -#else - movlpd %xmm0, 0 * SIZE(CO1) - movhpd %xmm0, 1 * SIZE(CO1) - movlpd %xmm1, 2 * SIZE(CO1) - movhpd %xmm1, 3 * SIZE(CO1) - - movlpd %xmm2, 0 * SIZE(CO2) - movhpd %xmm2, 1 * SIZE(CO2) - movlpd %xmm3, 2 * SIZE(CO2) - movhpd %xmm3, 3 * SIZE(CO2) - - movlpd %xmm4, 0 * SIZE(CO1, LDC, 2) - movhpd %xmm4, 1 * SIZE(CO1, LDC, 2) - movlpd %xmm5, 2 * SIZE(CO1, LDC, 2) - movhpd %xmm5, 3 * SIZE(CO1, LDC, 2) - - movlpd %xmm6, 0 * SIZE(CO2, LDC, 2) - movhpd %xmm6, 1 * SIZE(CO2, LDC, 2) - movlpd %xmm7, 2 * SIZE(CO2, LDC, 2) - movhpd %xmm7, 3 * SIZE(CO2, LDC, 2) -#endif - -#if defined(LN) || defined(LT) - movaps %xmm9, -16 * SIZE(BO) - movaps %xmm11, -14 * SIZE(BO) - movaps %xmm13, -12 * SIZE(BO) - movaps %xmm15, -10 * SIZE(BO) - movaps %xmm1, -8 * SIZE(BO) - movaps %xmm3, -6 * SIZE(BO) - movaps %xmm5, -4 * SIZE(BO) - movaps %xmm7, -2 * SIZE(BO) -#else - movaps %xmm0, -16 * SIZE(AO) - movaps %xmm1, -14 * SIZE(AO) - movaps %xmm2, -12 * SIZE(AO) - movaps %xmm3, -10 * SIZE(AO) - movaps %xmm4, -8 * SIZE(AO) - movaps %xmm5, -6 * SIZE(AO) - movaps %xmm6, -4 * SIZE(AO) - movaps %xmm7, -2 * SIZE(AO) -#endif - -#ifndef LN - addq $4 * SIZE, CO1 - addq $4 * SIZE, CO2 -#endif - -#if defined(LT) || defined(RN) - movq K, %rax - subq KK, %rax - leaq (,%rax, SIZE), %rax - leaq (AO, %rax, 4), AO - leaq (BO, %rax, 4), BO -#endif - -#ifdef LN - subq $4, KK -#endif - -#ifdef LT - addq $4, KK -#endif - -#ifdef RT - movq K, %rax - salq $2 + BASE_SHIFT, %rax - addq %rax, AORIG -#endif - - decq I # i -- - jg .L11 - ALIGN_4 - -.L20: - testq $3, M - je .L39 - - testq $2, M - je .L30 - ALIGN_4 - -.L21: -#ifdef LN - movq K, %rax - salq $1 + BASE_SHIFT, %rax - subq %rax, AORIG -#endif - -#if defined(LN) || defined(RT) - movq KK, %rax - movq 
AORIG, AO - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO -#endif - - movq B, BO - -#if defined(LN) || defined(RT) - movq KK, %rax - leaq (, %rax, SIZE), %rax - leaq (BO, %rax, 4), BO -#endif - - movapd -16 * SIZE(AO), %xmm0 - pxor %xmm8, %xmm8 - movapd -12 * SIZE(AO), %xmm2 - pxor %xmm9, %xmm9 - movddup -16 * SIZE(BO), %xmm1 - pxor %xmm10, %xmm10 - movddup -15 * SIZE(BO), %xmm5 - pxor %xmm11, %xmm11 - movddup -8 * SIZE(BO), %xmm3 - -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $-4, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (BO, %rax, 4), BO - negq %rax - NOBRANCH - je .L26 - ALIGN_4 - -.L22: - mulpd %xmm0, %xmm1 - addpd %xmm1, %xmm8 - movddup -14 * SIZE(BO, %rax, 4), %xmm1 - mulpd %xmm0, %xmm5 - addpd %xmm5, %xmm9 - movddup -13 * SIZE(BO, %rax, 4), %xmm5 - mulpd %xmm0, %xmm1 - addpd %xmm1, %xmm10 - movddup -12 * SIZE(BO, %rax, 4), %xmm1 - mulpd %xmm0, %xmm5 - movapd -14 * SIZE(AO, %rax, 2), %xmm0 - addpd %xmm5, %xmm11 - movddup -11 * SIZE(BO, %rax, 4), %xmm5 - mulpd %xmm0, %xmm1 - addpd %xmm1, %xmm8 - movddup -10 * SIZE(BO, %rax, 4), %xmm1 - mulpd %xmm0, %xmm5 - addpd %xmm5, %xmm9 - movddup -9 * SIZE(BO, %rax, 4), %xmm5 - mulpd %xmm0, %xmm1 - addpd %xmm1, %xmm10 - movddup (BO, %rax, 4), %xmm1 - mulpd %xmm0, %xmm5 - movapd -8 * SIZE(AO, %rax, 2), %xmm0 - addpd %xmm5, %xmm11 - movddup -7 * SIZE(BO, %rax, 4), %xmm5 - mulpd %xmm2, %xmm3 - addpd %xmm3, %xmm8 - movddup -6 * SIZE(BO, %rax, 4), %xmm3 - mulpd %xmm2, %xmm5 - addpd %xmm5, %xmm9 - movddup -5 * SIZE(BO, %rax, 4), %xmm5 - mulpd %xmm2, %xmm3 - addpd %xmm3, %xmm10 - movddup -4 * SIZE(BO, %rax, 4), %xmm3 - mulpd %xmm2, %xmm5 - movapd -10 * SIZE(AO, %rax, 2), %xmm2 - addpd %xmm5, %xmm11 - movddup -3 * SIZE(BO, %rax, 4), %xmm5 - mulpd %xmm2, %xmm3 - addpd %xmm3, %xmm8 - movddup -2 * SIZE(BO, %rax, 4), %xmm3 - mulpd %xmm2, %xmm5 - addpd %xmm5, %xmm9 - movddup -1 * SIZE(BO, %rax, 4), %xmm5 - mulpd %xmm2, %xmm3 - addpd %xmm3, %xmm10 - 
movddup 8 * SIZE(BO, %rax, 4), %xmm3 - mulpd %xmm2, %xmm5 - movapd -4 * SIZE(AO, %rax, 2), %xmm2 - addpd %xmm5, %xmm11 - movddup 1 * SIZE(BO, %rax, 4), %xmm5 - - addq $4 * SIZE, %rax - BRANCH - jl .L22 - ALIGN_4 - -.L26: -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $3, %rax # if (k & 1) - je .L29 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (BO, %rax, 4), BO - negq %rax - ALIGN_4 - -.L27: - mulpd %xmm0, %xmm1 - addpd %xmm1, %xmm8 - movddup -14 * SIZE(BO, %rax, 4), %xmm1 - mulpd %xmm0, %xmm5 - addpd %xmm5, %xmm9 - movddup -13 * SIZE(BO, %rax, 4), %xmm5 - mulpd %xmm0, %xmm1 - addpd %xmm1, %xmm10 - movddup -12 * SIZE(BO, %rax, 4), %xmm1 - mulpd %xmm0, %xmm5 - movapd -14 * SIZE(AO, %rax, 2), %xmm0 - addpd %xmm5, %xmm11 - movddup -11 * SIZE(BO, %rax, 4), %xmm5 - - addq $SIZE, %rax - jl .L27 - ALIGN_4 - -.L29: -#if defined(LN) || defined(RT) - movq KK, %rax -#ifdef LN - subq $2, %rax -#else - subq $4, %rax -#endif - - leaq (, %rax, SIZE), %rax - - movq AORIG, AO - leaq (AO, %rax, 2), AO - leaq (B, %rax, 4), BO -#endif - -#if defined(LN) || defined(LT) - movapd %xmm8, %xmm0 - unpcklpd %xmm9, %xmm8 - unpckhpd %xmm9, %xmm0 - - movapd %xmm10, %xmm2 - unpcklpd %xmm11, %xmm10 - unpckhpd %xmm11, %xmm2 - - movapd -16 * SIZE(BO), %xmm9 - movapd -14 * SIZE(BO), %xmm11 - movapd -12 * SIZE(BO), %xmm13 - movapd -10 * SIZE(BO), %xmm15 - - subpd %xmm8, %xmm9 - subpd %xmm10, %xmm11 - subpd %xmm0, %xmm13 - subpd %xmm2, %xmm15 -#else - movapd -16 * SIZE(AO), %xmm0 - movapd -14 * SIZE(AO), %xmm2 - movapd -12 * SIZE(AO), %xmm4 - movapd -10 * SIZE(AO), %xmm6 - - subpd %xmm8, %xmm0 - subpd %xmm9, %xmm2 - subpd %xmm10, %xmm4 - subpd %xmm11, %xmm6 -#endif - -#ifdef LN - movddup -13 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm13 - mulpd %xmm8, %xmm15 - - movddup -14 * SIZE(AO), %xmm10 - mulpd %xmm13, %xmm10 - subpd %xmm10, %xmm9 - movddup -14 * SIZE(AO), %xmm10 - mulpd %xmm15, %xmm10 - subpd %xmm10, %xmm11 - - movddup -16 * 
SIZE(AO), %xmm8 - mulpd %xmm8, %xmm9 - mulpd %xmm8, %xmm11 -#endif - -#ifdef LT - movddup -16 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm9 - mulpd %xmm8, %xmm11 - - movddup -15 * SIZE(AO), %xmm10 - mulpd %xmm9, %xmm10 - subpd %xmm10, %xmm13 - movddup -15 * SIZE(AO), %xmm10 - mulpd %xmm11, %xmm10 - subpd %xmm10, %xmm15 - - movddup -13 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm13 - mulpd %xmm8, %xmm15 -#endif - -#ifdef RN - movddup -16 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm0 - - movddup -15 * SIZE(BO), %xmm9 - mulpd %xmm0, %xmm9 - subpd %xmm9, %xmm2 - movddup -14 * SIZE(BO), %xmm10 - mulpd %xmm0, %xmm10 - subpd %xmm10, %xmm4 - movddup -13 * SIZE(BO), %xmm11 - mulpd %xmm0, %xmm11 - subpd %xmm11, %xmm6 - - movddup -11 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm2 - movddup -10 * SIZE(BO), %xmm9 - mulpd %xmm2, %xmm9 - subpd %xmm9, %xmm4 - movddup -9 * SIZE(BO), %xmm10 - mulpd %xmm2, %xmm10 - subpd %xmm10, %xmm6 - - movddup -6 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm4 - - movddup -5 * SIZE(BO), %xmm9 - mulpd %xmm4, %xmm9 - subpd %xmm9, %xmm6 - - movddup -1 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm6 -#endif - -#ifdef RT - movddup -1 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm6 - - movddup -2 * SIZE(BO), %xmm9 - mulpd %xmm6, %xmm9 - subpd %xmm9, %xmm4 - movddup -3 * SIZE(BO), %xmm10 - mulpd %xmm6, %xmm10 - subpd %xmm10, %xmm2 - movddup -4 * SIZE(BO), %xmm11 - mulpd %xmm6, %xmm11 - subpd %xmm11, %xmm0 - - movddup -6 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm4 - movddup -7 * SIZE(BO), %xmm9 - mulpd %xmm4, %xmm9 - subpd %xmm9, %xmm2 - movddup -8 * SIZE(BO), %xmm10 - mulpd %xmm4, %xmm10 - subpd %xmm10, %xmm0 - - movddup -11 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm2 - movddup -12 * SIZE(BO), %xmm9 - mulpd %xmm2, %xmm9 - subpd %xmm9, %xmm0 - - movddup -16 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm0 -#endif - -#ifdef LN - subq $2 * SIZE, CO1 - subq $2 * SIZE, CO2 -#endif - -#if defined(LN) || defined(LT) - movlpd %xmm9, 0 * SIZE(CO1) - movlpd %xmm13, 1 * SIZE(CO1) - - movhpd %xmm9, 0 * SIZE(CO2) - movhpd %xmm13, 1 * 
SIZE(CO2) - - movlpd %xmm11, 0 * SIZE(CO1, LDC, 2) - movlpd %xmm15, 1 * SIZE(CO1, LDC, 2) - - movhpd %xmm11, 0 * SIZE(CO2, LDC, 2) - movhpd %xmm15, 1 * SIZE(CO2, LDC, 2) -#else - movlpd %xmm0, 0 * SIZE(CO1) - movhpd %xmm0, 1 * SIZE(CO1) - - movlpd %xmm2, 0 * SIZE(CO2) - movhpd %xmm2, 1 * SIZE(CO2) - - movlpd %xmm4, 0 * SIZE(CO1, LDC, 2) - movhpd %xmm4, 1 * SIZE(CO1, LDC, 2) - - movlpd %xmm6, 0 * SIZE(CO2, LDC, 2) - movhpd %xmm6, 1 * SIZE(CO2, LDC, 2) -#endif - -#if defined(LN) || defined(LT) - movaps %xmm9, -16 * SIZE(BO) - movaps %xmm11, -14 * SIZE(BO) - movaps %xmm13, -12 * SIZE(BO) - movaps %xmm15, -10 * SIZE(BO) -#else - movaps %xmm0, -16 * SIZE(AO) - movaps %xmm2, -14 * SIZE(AO) - movaps %xmm4, -12 * SIZE(AO) - movaps %xmm6, -10 * SIZE(AO) -#endif - -#ifndef LN - addq $2 * SIZE, CO1 - addq $2 * SIZE, CO2 -#endif - -#if defined(LT) || defined(RN) - movq K, %rax - subq KK, %rax - leaq (,%rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (BO, %rax, 4), BO -#endif - -#ifdef LN - subq $2, KK -#endif - -#ifdef LT - addq $2, KK -#endif - -#ifdef RT - movq K, %rax - salq $1 + BASE_SHIFT, %rax - addq %rax, AORIG -#endif - ALIGN_4 - -.L30: - testq $1, M - je .L39 - -#ifdef LN - movq K, %rax - salq $0 + BASE_SHIFT, %rax - subq %rax, AORIG -#endif - -#if defined(LN) || defined(RT) - movq KK, %rax - movq AORIG, AO - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO -#endif - - movq B, BO - -#if defined(LN) || defined(RT) - movq KK, %rax - leaq (, %rax, SIZE), %rax - leaq (BO, %rax, 4), BO -#endif - - movddup -16 * SIZE(AO), %xmm0 - pxor %xmm8, %xmm8 - movddup -14 * SIZE(AO), %xmm2 - pxor %xmm9, %xmm9 - movddup -15 * SIZE(AO), %xmm4 - pxor %xmm10, %xmm10 - movapd -16 * SIZE(BO), %xmm1 - pxor %xmm11, %xmm11 - movapd -8 * SIZE(BO), %xmm3 - -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $-4, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (BO, %rax, 4), BO - negq %rax - NOBRANCH - je .L36 - ALIGN_4 - -.L32: 
- mulpd %xmm0, %xmm1 - mulpd -14 * SIZE(BO, %rax, 4), %xmm0 - addpd %xmm1, %xmm8 - movapd -12 * SIZE(BO, %rax, 4), %xmm1 - addpd %xmm0, %xmm9 - movddup -12 * SIZE(AO, %rax, 1), %xmm0 - mulpd %xmm4, %xmm1 - mulpd -10 * SIZE(BO, %rax, 4), %xmm4 - addpd %xmm1, %xmm10 - movapd (BO, %rax, 4), %xmm1 - addpd %xmm4, %xmm11 - movddup -11 * SIZE(AO, %rax, 1), %xmm4 - mulpd %xmm2, %xmm3 - mulpd -6 * SIZE(BO, %rax, 4), %xmm2 - addpd %xmm3, %xmm8 - movapd -4 * SIZE(BO, %rax, 4), %xmm3 - addpd %xmm2, %xmm9 - movddup -13 * SIZE(AO, %rax, 1), %xmm2 - mulpd %xmm2, %xmm3 - mulpd -2 * SIZE(BO, %rax, 4), %xmm2 - addpd %xmm3, %xmm10 - movapd 8 * SIZE(BO, %rax, 4), %xmm3 - addpd %xmm2, %xmm11 - movddup -10 * SIZE(AO, %rax, 1), %xmm2 - - addq $4 * SIZE, %rax - BRANCH - jl .L32 - ALIGN_4 - -.L36: -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $3, %rax # if (k & 1) - je .L38 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (BO, %rax, 4), BO - negq %rax - ALIGN_4 - -.L37: - mulpd %xmm0, %xmm1 - mulpd -14 * SIZE(BO, %rax, 4), %xmm0 - addpd %xmm1, %xmm8 - movapd -12 * SIZE(BO, %rax, 4), %xmm1 - addpd %xmm0, %xmm9 - movddup -15 * SIZE(AO, %rax, 1), %xmm0 - - addq $SIZE, %rax - jl .L37 - ALIGN_4 - -.L38: - addpd %xmm10, %xmm8 - addpd %xmm11, %xmm9 - -#if defined(LN) || defined(RT) - movq KK, %rax -#ifdef LN - subq $1, %rax -#else - subq $4, %rax -#endif - - leaq (, %rax, SIZE), %rax - - movq AORIG, AO - leaq (AO, %rax, 1), AO - leaq (B, %rax, 4), BO -#endif - -#if defined(LN) || defined(LT) - movapd -16 * SIZE(BO), %xmm2 - movapd -14 * SIZE(BO), %xmm3 - - subpd %xmm8, %xmm2 - subpd %xmm9, %xmm3 -#else - movapd -16 * SIZE(AO), %xmm2 - movapd -14 * SIZE(AO), %xmm3 - - subpd %xmm8, %xmm2 - subpd %xmm9, %xmm3 -#endif - -#if defined(LN) || defined(LT) - movddup -16 * SIZE(AO), %xmm0 - mulpd %xmm0, %xmm2 - mulpd %xmm0, %xmm3 -#endif - -#ifdef RN - movapd %xmm2, %xmm0 - unpckhpd %xmm0, %xmm0 - - movapd %xmm3, %xmm1 - unpckhpd %xmm1, 
%xmm1 - - movsd -16 * SIZE(BO), %xmm4 - mulsd %xmm4, %xmm2 - - movsd -15 * SIZE(BO), %xmm5 - mulsd %xmm2, %xmm5 - subsd %xmm5, %xmm0 - movsd -14 * SIZE(BO), %xmm6 - mulsd %xmm2, %xmm6 - subsd %xmm6, %xmm3 - movsd -13 * SIZE(BO), %xmm7 - mulsd %xmm2, %xmm7 - subsd %xmm7, %xmm1 - - movsd -11 * SIZE(BO), %xmm4 - mulsd %xmm4, %xmm0 - - movsd -10 * SIZE(BO), %xmm5 - mulsd %xmm0, %xmm5 - subsd %xmm5, %xmm3 - movsd -9 * SIZE(BO), %xmm6 - mulsd %xmm0, %xmm6 - subsd %xmm6, %xmm1 - - movsd -6 * SIZE(BO), %xmm4 - mulsd %xmm4, %xmm3 - - movsd -5 * SIZE(BO), %xmm5 - mulsd %xmm3, %xmm5 - subsd %xmm5, %xmm1 - - movsd -1 * SIZE(BO), %xmm4 - mulsd %xmm4, %xmm1 - - unpcklpd %xmm0, %xmm2 - unpcklpd %xmm1, %xmm3 -#endif - -#ifdef RT - movapd %xmm2, %xmm0 - unpckhpd %xmm0, %xmm0 - - movapd %xmm3, %xmm1 - unpckhpd %xmm1, %xmm1 - - movsd -1 * SIZE(BO), %xmm4 - mulsd %xmm4, %xmm1 - - movsd -2 * SIZE(BO), %xmm5 - mulsd %xmm1, %xmm5 - subsd %xmm5, %xmm3 - movsd -3 * SIZE(BO), %xmm6 - mulsd %xmm1, %xmm6 - subsd %xmm6, %xmm0 - movsd -4 * SIZE(BO), %xmm7 - mulsd %xmm1, %xmm7 - subsd %xmm7, %xmm2 - - movsd -6 * SIZE(BO), %xmm4 - mulsd %xmm4, %xmm3 - - movsd -7 * SIZE(BO), %xmm5 - mulsd %xmm3, %xmm5 - subsd %xmm5, %xmm0 - movsd -8 * SIZE(BO), %xmm6 - mulsd %xmm3, %xmm6 - subsd %xmm6, %xmm2 - - movsd -11 * SIZE(BO), %xmm4 - mulsd %xmm4, %xmm0 - - movsd -12 * SIZE(BO), %xmm5 - mulsd %xmm0, %xmm5 - subsd %xmm5, %xmm2 - - movsd -16 * SIZE(BO), %xmm4 - mulsd %xmm4, %xmm2 - - unpcklpd %xmm0, %xmm2 - unpcklpd %xmm1, %xmm3 - -#endif - -#ifdef LN - subq $1 * SIZE, CO1 - subq $1 * SIZE, CO2 -#endif - -#if defined(LN) || defined(LT) - movlpd %xmm2, 0 * SIZE(CO1) - movhpd %xmm2, 0 * SIZE(CO2) - movlpd %xmm3, 0 * SIZE(CO1, LDC, 2) - movhpd %xmm3, 0 * SIZE(CO2, LDC, 2) -#else - movlpd %xmm2, 0 * SIZE(CO1) - movhpd %xmm2, 0 * SIZE(CO2) - movlpd %xmm3, 0 * SIZE(CO1, LDC, 2) - movhpd %xmm3, 0 * SIZE(CO2, LDC, 2) -#endif - -#if defined(LN) || defined(LT) - movaps %xmm2, -16 * SIZE(BO) - movaps %xmm3, -14 * 
SIZE(BO) -#else - movaps %xmm2, -16 * SIZE(AO) - movaps %xmm3, -14 * SIZE(AO) -#endif - -#ifndef LN - addq $1 * SIZE, CO1 - addq $1 * SIZE, CO2 -#endif - -#if defined(LT) || defined(RN) - movq K, %rax - subq KK, %rax - leaq (,%rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (BO, %rax, 4), BO -#endif - -#ifdef LN - subq $1, KK -#endif - -#ifdef LT - addq $1, KK -#endif - -#ifdef RT - movq K, %rax - salq $0 + BASE_SHIFT, %rax - addq %rax, AORIG -#endif - ALIGN_4 - -.L39: -#ifdef LN - leaq (, K, SIZE), %rax - leaq (B, %rax, 4), B -#endif - -#if defined(LT) || defined(RN) - movq BO, B -#endif - -#ifdef RN - addq $4, KK -#endif - -#ifdef RT - subq $4, KK -#endif - - decq J # j -- - jg .L01 - ALIGN_4 - -.L999: - movq (%rsp), %rbx - movq 8(%rsp), %rbp - movq 16(%rsp), %r12 - movq 24(%rsp), %r13 - movq 32(%rsp), %r14 - movq 40(%rsp), %r15 - -#ifdef WINDOWS_ABI - movq 48(%rsp), %rdi - movq 56(%rsp), %rsi - movups 64(%rsp), %xmm6 - movups 80(%rsp), %xmm7 - movups 96(%rsp), %xmm8 - movups 112(%rsp), %xmm9 - movups 128(%rsp), %xmm10 - movups 144(%rsp), %xmm11 - movups 160(%rsp), %xmm12 - movups 176(%rsp), %xmm13 - movups 192(%rsp), %xmm14 - movups 208(%rsp), %xmm15 -#endif - - addq $STACKSIZE, %rsp - ret - - EPILOGUE From 69aa6c8fb1056d41b2217696baae618dd49313ea Mon Sep 17 00:00:00 2001 From: wernsaar Date: Sun, 28 Apr 2013 11:14:23 +0200 Subject: [PATCH 06/15] bad performance with some data --- kernel/x86_64/KERNEL.BULLDOZER | 4 +- kernel/x86_64/trsm_kernel_LT_4x4_bulldozer.S | 3263 ------------------ 2 files changed, 2 insertions(+), 3265 deletions(-) delete mode 100644 kernel/x86_64/trsm_kernel_LT_4x4_bulldozer.S diff --git a/kernel/x86_64/KERNEL.BULLDOZER b/kernel/x86_64/KERNEL.BULLDOZER index e0b8a71e4..2ac035fe0 100644 --- a/kernel/x86_64/KERNEL.BULLDOZER +++ b/kernel/x86_64/KERNEL.BULLDOZER @@ -44,8 +44,8 @@ STRSMKERNEL_RN = trsm_kernel_LT_8x4_sse.S STRSMKERNEL_RT = trsm_kernel_RT_8x4_sse.S DTRSMKERNEL_LN = trsm_kernel_LN_4x4_barcelona.S -DTRSMKERNEL_LT = 
trsm_kernel_LT_4x4_bulldozer.S -DTRSMKERNEL_RN = trsm_kernel_LT_4x4_bulldozer.S +DTRSMKERNEL_LT = trsm_kernel_LT_4x4_barcelona.S +DTRSMKERNEL_RN = trsm_kernel_LT_4x4_barcelona.S DTRSMKERNEL_RT = trsm_kernel_RT_4x4_barcelona.S CTRSMKERNEL_LN = ztrsm_kernel_LN_4x2_sse.S diff --git a/kernel/x86_64/trsm_kernel_LT_4x4_bulldozer.S b/kernel/x86_64/trsm_kernel_LT_4x4_bulldozer.S deleted file mode 100644 index 5f3f8f7f8..000000000 --- a/kernel/x86_64/trsm_kernel_LT_4x4_bulldozer.S +++ /dev/null @@ -1,3263 +0,0 @@ -/*********************************************************************/ -/* Copyright 2009, 2010 The University of Texas at Austin. */ -/* All rights reserved. */ -/* */ -/* Redistribution and use in source and binary forms, with or */ -/* without modification, are permitted provided that the following */ -/* conditions are met: */ -/* */ -/* 1. Redistributions of source code must retain the above */ -/* copyright notice, this list of conditions and the following */ -/* disclaimer. */ -/* */ -/* 2. Redistributions in binary form must reproduce the above */ -/* copyright notice, this list of conditions and the following */ -/* disclaimer in the documentation and/or other materials */ -/* provided with the distribution. */ -/* */ -/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ -/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ -/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ -/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ -/* DISCLAIMED. 
IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ -/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ -/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ -/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ -/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ -/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ -/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ -/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ -/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ -/* POSSIBILITY OF SUCH DAMAGE. */ -/* */ -/* The views and conclusions contained in the software and */ -/* documentation are those of the authors and should not be */ -/* interpreted as representing official policies, either expressed */ -/* or implied, of The University of Texas at Austin. */ -/*********************************************************************/ - -#define ASSEMBLER -#include "common.h" - -#define OLD_M %rdi -#define OLD_N %rsi -#define M %r13 -#define N %r14 -#define K %rdx - -#define A %rcx -#define B %r8 -#define C %r9 -#define LDC %r10 - -#define I %r11 -#define AO %rdi -#define BO %rsi -#define CO1 %r15 -#define CO2 %r12 -#define BB %rbp -#define J %rbx - -#ifndef WINDOWS_ABI - -#define STACKSIZE 96 - -#define OFFSET 48(%rsp) -#define AORIG 56(%rsp) -#define KK 64(%rsp) -#define KKK 72(%rsp) - -#else - -#define STACKSIZE 256 - -#define OLD_A 40 + STACKSIZE(%rsp) -#define OLD_B 48 + STACKSIZE(%rsp) -#define OLD_C 56 + STACKSIZE(%rsp) -#define OLD_LDC 64 + STACKSIZE(%rsp) -#define OLD_OFFSET 72 + STACKSIZE(%rsp) - -#define OFFSET 224(%rsp) -#define AORIG 232(%rsp) -#define KK 240(%rsp) -#define KKK 248(%rsp) - -#endif - - -#define movlpd movsd -#define movapd movups -#define movupd movups - -#define A_PR1 224 -#define B_PR1 224 - -#define KERNEL1(xx) \ - vmovups -14 * SIZE(AO, %rax, 4),%xmm2 ;\ - vfmaddpd %xmm8,%xmm6,%xmm7,%xmm8 ;\ - vmovddup -14 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd 
%xmm12,%xmm2,%xmm7,%xmm12 ;\ - vmovddup -13 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm9,%xmm6,%xmm3,%xmm9 ;\ - vmovddup -12 * SIZE(BO, %rax, 4), %xmm1 ;\ - vfmaddpd %xmm13,%xmm2,%xmm3,%xmm13 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup -11 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - vmovups -12 * SIZE(AO, %rax, 4),%xmm0 ;\ - vfmaddpd %xmm10,%xmm6,%xmm5,%xmm10 ;\ - vfmaddpd %xmm11,%xmm6,%xmm4,%xmm11 ;\ - -#define KERNEL2(xx) \ - vmovups -10 * SIZE(AO, %rax, 4), %xmm2 ;\ - vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ - vmovups -8 * SIZE(AO, %rax, 4),%xmm6 ;\ - vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ - vmovddup -10 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup -9 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ - vmovddup -8 * SIZE(BO, %rax, 4), %xmm7 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup -7 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - -#define KERNEL3(xx) \ - vmovups -6 * SIZE(AO, %rax, 4),%xmm2 ;\ - vfmaddpd %xmm8, %xmm6, %xmm7, %xmm8 ;\ - vmovddup -6 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ - vmovddup -5 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm9, %xmm6, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup -4 * SIZE(BO, %rax, 4), %xmm1 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup -3 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - vmovups -4 * SIZE(AO, %rax, 4),%xmm0 ;\ - vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ - vfmaddpd %xmm11,%xmm6, %xmm4, %xmm11 ;\ - -#define KERNEL4(xx) \ - vmovups -2 * SIZE(AO, %rax, 4), %xmm2 ;\ - vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ - vmovups (AO, %rax, 4), %xmm6 ;\ - vfmaddpd %xmm12,%xmm2, %xmm1 ,%xmm12;\ - vmovddup -2 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup -1 * 
SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ - vmovddup (BO, %rax, 4), %xmm7 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup 1 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - -#define KERNEL5(xx) \ - vmovups 2 * SIZE(AO, %rax, 4),%xmm2 ;\ - vfmaddpd %xmm8,%xmm6, %xmm7,%xmm8 ;\ - vmovddup 2 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ - vmovddup 3 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup 4 * SIZE(BO, %rax, 4), %xmm1 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup 5 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - vmovups 4 * SIZE(AO, %rax, 4),%xmm0 ;\ - vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ - vfmaddpd %xmm11,%xmm6, %xmm4,%xmm11 ;\ - -#define KERNEL6(xx) \ - vmovups 6 * SIZE(AO, %rax, 4), %xmm2 ;\ - vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ - vmovups 8 * SIZE(AO, %rax, 4), %xmm6 ;\ - vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ - vmovddup 6 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup 7 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ - vmovddup 8 * SIZE(BO, %rax, 4), %xmm7 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup 9 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - -#define KERNEL7(xx) \ - vmovups 10 * SIZE(AO, %rax, 4),%xmm2 ;\ - vfmaddpd %xmm8,%xmm6, %xmm7,%xmm8 ;\ - vmovddup 10 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ - vmovddup 11 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup 12 * SIZE(BO, %rax, 4), %xmm1 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup 13 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - vmovups 12 * SIZE(AO, %rax, 4), 
%xmm0 ;\ - vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ - vfmaddpd %xmm11,%xmm6, %xmm4,%xmm11 ;\ - -#define KERNEL8(xx) \ - vmovups 14 * SIZE(AO, %rax, 4), %xmm2 ;\ - vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ - vmovups 16 * SIZE(AO, %rax, 4),%xmm6 ;\ - vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ - vmovddup 14 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13, %xmm2, %xmm3,%xmm13 ;\ - vmovddup 15 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ - vmovddup 16 * SIZE(BO, %rax, 4), %xmm7 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup 17 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - -#define KERNEL_SUB1(xx) \ - vmovups -16 * SIZE(AO),%xmm0 ;\ - vmovups -14 * SIZE(AO),%xmm2 ;\ - vmovddup -16 * SIZE(BO), %xmm1 ;\ - vmovddup -15 * SIZE(BO), %xmm3 ;\ - vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ - vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm12, %xmm2, %xmm1,%xmm12 ;\ - vfmaddpd %xmm13, %xmm2, %xmm3,%xmm13 ;\ - vmovddup -14 * SIZE(BO), %xmm1 ;\ - vmovddup -13 * SIZE(BO), %xmm3 ;\ - vfmaddpd %xmm10, %xmm0, %xmm1,%xmm10 ;\ - vfmaddpd %xmm11, %xmm0, %xmm3,%xmm11 ;\ - vfmaddpd %xmm14, %xmm2, %xmm1,%xmm14 ;\ - vfmaddpd %xmm15, %xmm2, %xmm3,%xmm15 ;\ - - -#define KERNEL_SUB2(xx) \ - vmovups -12 * SIZE(AO), %xmm0 ;\ - vmovups -10 * SIZE(AO), %xmm2 ;\ - vmovddup -12 * SIZE(BO), %xmm1 ;\ - vmovddup -11 * SIZE(BO), %xmm3 ;\ - vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ - vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup -10 * SIZE(BO), %xmm1 ;\ - vmovddup -9 * SIZE(BO), %xmm3 ;\ - vfmaddpd %xmm10,%xmm0, %xmm1,%xmm10 ;\ - vfmaddpd %xmm11,%xmm0, %xmm3,%xmm11 ;\ - vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\ - vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\ - -#define KERNEL_SUB3(xx) \ - vmovups -8 * SIZE(AO),%xmm0 ;\ - vmovups -6 * SIZE(AO),%xmm2 ;\ - vmovddup -8 * SIZE(BO), %xmm1 ;\ - vmovddup -7 * 
SIZE(BO), %xmm3 ;\ - vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ - vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup -6 * SIZE(BO), %xmm1 ;\ - vmovddup -5 * SIZE(BO), %xmm3 ;\ - vfmaddpd %xmm10,%xmm0, %xmm1,%xmm10 ;\ - vfmaddpd %xmm11,%xmm0, %xmm3,%xmm11 ;\ - vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\ - vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\ - -#define KERNEL_SUB4(xx) \ - vmovups -4 * SIZE(AO), %xmm0 ;\ - vmovups -2 * SIZE(AO), %xmm2 ;\ - vmovddup -4 * SIZE(BO), %xmm1 ;\ - vmovddup -3 * SIZE(BO), %xmm3 ;\ - vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ - vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup -2 * SIZE(BO), %xmm1 ;\ - vmovddup -1 * SIZE(BO), %xmm3 ;\ - vfmaddpd %xmm10,%xmm0, %xmm1,%xmm10 ;\ - vfmaddpd %xmm11,%xmm0, %xmm3,%xmm11 ;\ - vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\ - vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\ - vmovups (AO), %xmm0 ;\ - vmovddup (BO), %xmm1 ;\ - vmovddup 1 * SIZE(BO), %xmm3 ;\ - vmovaps %xmm0, %xmm2 - - - - - PROLOGUE - PROFCODE - - subq $STACKSIZE, %rsp - movq %rbx, (%rsp) - movq %rbp, 8(%rsp) - movq %r12, 16(%rsp) - movq %r13, 24(%rsp) - movq %r14, 32(%rsp) - movq %r15, 40(%rsp) - -#ifdef WINDOWS_ABI - movq %rdi, 48(%rsp) - movq %rsi, 56(%rsp) - movups %xmm6, 64(%rsp) - movups %xmm7, 80(%rsp) - movups %xmm8, 96(%rsp) - movups %xmm9, 112(%rsp) - movups %xmm10, 128(%rsp) - movups %xmm11, 144(%rsp) - movups %xmm12, 160(%rsp) - movups %xmm13, 176(%rsp) - movups %xmm14, 192(%rsp) - movups %xmm15, 208(%rsp) - - movq ARG1, OLD_M - movq ARG2, OLD_N - movq ARG3, K - movq OLD_A, A - movq OLD_B, B - movq OLD_C, C - movq OLD_LDC, LDC - movsd OLD_OFFSET, %xmm12 -#else - movq STACKSIZE + 8(%rsp), LDC - movsd STACKSIZE + 16(%rsp), %xmm12 -#endif - - movq OLD_M, M - movq OLD_N, N - - subq $-16 * SIZE, A - subq $-16 * SIZE, B - - movsd %xmm12, OFFSET - movsd %xmm12, KK - - leaq (, LDC, SIZE), LDC - -#ifdef LN 
- leaq (, M, SIZE), %rax - addq %rax, C - imulq K, %rax - addq %rax, A -#endif - -#ifdef RT - leaq (, N, SIZE), %rax - imulq K, %rax - addq %rax, B - movq N, %rax - imulq LDC, %rax - addq %rax, C -#endif - -#ifdef RN - negq KK -#endif - -#ifdef RT - movq N, %rax - subq OFFSET, %rax - movq %rax, KK -#endif - - movq N, J - sarq $2, J # j = (n >> 2) - jle .L40 - -.L01: -#if defined(LT) || defined(RN) - movq A, AO -#else - movq A, AORIG -#endif - -#ifdef RT - movq K, %rax - salq $2 + BASE_SHIFT, %rax - subq %rax, B - - leaq (, LDC, 4), %rax - subq %rax, C -#endif - - movq C, CO1 # coffset1 = c - leaq (C, LDC, 1), CO2 # coffset2 = c + ldc -#ifndef RT - leaq (C, LDC, 4), C -#endif - -#ifdef LN - movq OFFSET, %rax - addq M, %rax - movq %rax, KK -#endif - - movq K, %rax - salq $BASE_SHIFT + 2, %rax - leaq (B, %rax), BB - -#if defined(LT) - movq OFFSET, %rax - movq %rax, KK -#endif - - movq M, I - sarq $2, I # i = (m >> 2) - jle .L20 - ALIGN_4 - -.L11: -#ifdef LN - movq K, %rax - salq $2 + BASE_SHIFT, %rax - subq %rax, AORIG -#endif - -#if defined(LN) || defined(RT) - movq KK, %rax - movq AORIG, AO - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 4), AO -#endif - - movq B, BO - -#if defined(LN) || defined(RT) - movq KK, %rax - leaq (, %rax, SIZE), %rax - leaq (BO, %rax, 4), BO -#endif - - vxorpd %xmm8, %xmm8,%xmm8 - vxorpd %xmm9, %xmm9,%xmm9 - vxorpd %xmm10, %xmm10,%xmm10 - vxorpd %xmm11, %xmm11,%xmm11 - vxorpd %xmm12, %xmm12,%xmm12 - vxorpd %xmm13, %xmm13,%xmm13 - vxorpd %xmm14, %xmm14,%xmm14 - vxorpd %xmm15, %xmm15,%xmm15 - -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - - andq $-8, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 4), AO - leaq (BO, %rax, 4), BO - negq %rax - NOBRANCH - je .L15 - - vmovups -16 * SIZE(AO, %rax, 4),%xmm6 - vmovddup -16 * SIZE(BO, %rax, 4), %xmm7 - vmovddup -15 * SIZE(BO, %rax, 4), %xmm3 - - - ALIGN_4 - -.L12: - prefetcht0 A_PR1(AO,%rax,4) - prefetcht0 B_PR1(BO,%rax,4) - KERNEL1(16 * 0) - 
KERNEL2(16 * 0) - prefetcht0 A_PR1+64(AO,%rax,4) - prefetcht0 B_PR1+64(BO,%rax,4) - KERNEL3(16 * 0) - KERNEL4(16 * 0) - prefetcht0 A_PR1+128(AO,%rax,4) - prefetcht0 B_PR1+128(BO,%rax,4) - KERNEL5(16 * 0) - KERNEL6(16 * 0) - prefetcht0 A_PR1+192(AO,%rax,4) - prefetcht0 B_PR1+192(BO,%rax,4) - KERNEL7(16 * 0) - KERNEL8(16 * 0) - - addq $8 * SIZE, %rax - je .L15 - jmp .L12 - .align 16 - -.L15: - // prefetch -8 * SIZE(BB) - subq $-16 * SIZE, BB - -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - testq $4, %rax - je .L16 - xorq %rax, %rax - ALIGN_4 - - KERNEL_SUB1(16 * 0) - KERNEL_SUB2(16 * 0) - KERNEL_SUB3(16 * 0) - KERNEL_SUB4(16 * 0) - - subq $-16 * SIZE, BO - subq $-16 * SIZE, AO - ALIGN_4 - -.L16: -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $3, %rax # if (k & 1) - je .L19 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 4), AO - leaq (BO, %rax, 4), BO - negq %rax - ALIGN_4 - -.L17: - mulpd %xmm1, %xmm0 - mulpd -14 * SIZE(AO, %rax, 4), %xmm1 - addpd %xmm0, %xmm8 - movapd %xmm2, %xmm0 - addpd %xmm1, %xmm12 - movddup -14 * SIZE(BO, %rax, 4), %xmm1 - mulpd %xmm3, %xmm2 - mulpd -14 * SIZE(AO, %rax, 4), %xmm3 - addpd %xmm2, %xmm9 - movapd %xmm0, %xmm2 - addpd %xmm3, %xmm13 - movddup -13 * SIZE(BO, %rax, 4), %xmm3 - mulpd %xmm1, %xmm0 - mulpd -14 * SIZE(AO, %rax, 4), %xmm1 - addpd %xmm0, %xmm10 - movapd -12 * SIZE(AO, %rax, 4), %xmm0 - addpd %xmm1, %xmm14 - movddup -12 * SIZE(BO, %rax, 4), %xmm1 - mulpd %xmm3, %xmm2 - mulpd -14 * SIZE(AO, %rax, 4), %xmm3 - addpd %xmm2, %xmm11 - addpd %xmm3, %xmm15 - movddup -11 * SIZE(BO, %rax, 4), %xmm3 - movapd %xmm0, %xmm2 - - addq $SIZE, %rax - jl .L17 - ALIGN_4 - -.L19: -#if defined(LN) || defined(RT) - movq KK, %rax -#ifdef LN - subq $4, %rax -#else - subq $4, %rax -#endif - - leaq (, %rax, SIZE), %rax - - movq AORIG, AO - leaq (AO, %rax, 4), AO - leaq (B, %rax, 4), BO -#endif - -#if defined(LN) || defined(LT) - movapd %xmm8, %xmm0 - 
unpcklpd %xmm9, %xmm8 - unpckhpd %xmm9, %xmm0 - - movapd %xmm10, %xmm2 - unpcklpd %xmm11, %xmm10 - unpckhpd %xmm11, %xmm2 - - movapd %xmm12, %xmm4 - unpcklpd %xmm13, %xmm12 - unpckhpd %xmm13, %xmm4 - - movapd %xmm14, %xmm6 - unpcklpd %xmm15, %xmm14 - unpckhpd %xmm15, %xmm6 - - movapd -16 * SIZE(BO), %xmm9 - movapd -14 * SIZE(BO), %xmm11 - movapd -12 * SIZE(BO), %xmm13 - movapd -10 * SIZE(BO), %xmm15 - movapd -8 * SIZE(BO), %xmm1 - movapd -6 * SIZE(BO), %xmm3 - movapd -4 * SIZE(BO), %xmm5 - movapd -2 * SIZE(BO), %xmm7 - - subpd %xmm8, %xmm9 - subpd %xmm10, %xmm11 - subpd %xmm0, %xmm13 - subpd %xmm2, %xmm15 - subpd %xmm12, %xmm1 - subpd %xmm14, %xmm3 - subpd %xmm4, %xmm5 - subpd %xmm6, %xmm7 -#else - movapd -16 * SIZE(AO), %xmm0 - movapd -14 * SIZE(AO), %xmm1 - movapd -12 * SIZE(AO), %xmm2 - movapd -10 * SIZE(AO), %xmm3 - - movapd -8 * SIZE(AO), %xmm4 - movapd -6 * SIZE(AO), %xmm5 - movapd -4 * SIZE(AO), %xmm6 - movapd -2 * SIZE(AO), %xmm7 - - subpd %xmm8, %xmm0 - subpd %xmm12, %xmm1 - subpd %xmm9, %xmm2 - subpd %xmm13, %xmm3 - subpd %xmm10, %xmm4 - subpd %xmm14, %xmm5 - subpd %xmm11, %xmm6 - subpd %xmm15, %xmm7 -#endif - -#ifdef LN - movddup -1 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm5 - mulpd %xmm8, %xmm7 - - movddup -2 * SIZE(AO), %xmm10 - mulpd %xmm5, %xmm10 - subpd %xmm10, %xmm1 - movddup -2 * SIZE(AO), %xmm10 - mulpd %xmm7, %xmm10 - subpd %xmm10, %xmm3 - - movddup -3 * SIZE(AO), %xmm12 - mulpd %xmm5, %xmm12 - subpd %xmm12, %xmm13 - movddup -3 * SIZE(AO), %xmm12 - mulpd %xmm7, %xmm12 - subpd %xmm12, %xmm15 - - movddup -4 * SIZE(AO), %xmm14 - mulpd %xmm5, %xmm14 - subpd %xmm14, %xmm9 - movddup -4 * SIZE(AO), %xmm14 - mulpd %xmm7, %xmm14 - subpd %xmm14, %xmm11 - - movddup -6 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm1 - mulpd %xmm8, %xmm3 - - movddup -7 * SIZE(AO), %xmm10 - mulpd %xmm1, %xmm10 - subpd %xmm10, %xmm13 - movddup -7 * SIZE(AO), %xmm10 - mulpd %xmm3, %xmm10 - subpd %xmm10, %xmm15 - - movddup -8 * SIZE(AO), %xmm12 - mulpd %xmm1, %xmm12 - subpd %xmm12, %xmm9 - 
movddup -8 * SIZE(AO), %xmm12 - mulpd %xmm3, %xmm12 - subpd %xmm12, %xmm11 - - movddup -11 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm13 - mulpd %xmm8, %xmm15 - - movddup -12 * SIZE(AO), %xmm10 - mulpd %xmm13, %xmm10 - subpd %xmm10, %xmm9 - movddup -12 * SIZE(AO), %xmm10 - mulpd %xmm15, %xmm10 - subpd %xmm10, %xmm11 - - movddup -16 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm9 - mulpd %xmm8, %xmm11 -#endif - -#ifdef LT - movddup -16 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm9 - mulpd %xmm8, %xmm11 - - movddup -15 * SIZE(AO), %xmm10 - mulpd %xmm9, %xmm10 - subpd %xmm10, %xmm13 - - movddup -15 * SIZE(AO), %xmm10 - mulpd %xmm11, %xmm10 - subpd %xmm10, %xmm15 - - movddup -14 * SIZE(AO), %xmm12 - mulpd %xmm9, %xmm12 - subpd %xmm12, %xmm1 - movddup -14 * SIZE(AO), %xmm12 - mulpd %xmm11, %xmm12 - subpd %xmm12, %xmm3 - - movddup -13 * SIZE(AO), %xmm14 - mulpd %xmm9, %xmm14 - subpd %xmm14, %xmm5 - movddup -13 * SIZE(AO), %xmm14 - mulpd %xmm11, %xmm14 - subpd %xmm14, %xmm7 - - movddup -11 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm13 - mulpd %xmm8, %xmm15 - - movddup -10 * SIZE(AO), %xmm10 - mulpd %xmm13, %xmm10 - subpd %xmm10, %xmm1 - movddup -10 * SIZE(AO), %xmm10 - mulpd %xmm15, %xmm10 - subpd %xmm10, %xmm3 - - movddup -9 * SIZE(AO), %xmm12 - mulpd %xmm13, %xmm12 - subpd %xmm12, %xmm5 - movddup -9 * SIZE(AO), %xmm12 - mulpd %xmm15, %xmm12 - subpd %xmm12, %xmm7 - - movddup -6 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm1 - mulpd %xmm8, %xmm3 - - movddup -5 * SIZE(AO), %xmm10 - mulpd %xmm1, %xmm10 - subpd %xmm10, %xmm5 - movddup -5 * SIZE(AO), %xmm10 - mulpd %xmm3, %xmm10 - subpd %xmm10, %xmm7 - - movddup -1 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm5 - mulpd %xmm8, %xmm7 -#endif - -#ifdef RN - movddup -16 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm0 - mulpd %xmm8, %xmm1 - - movddup -15 * SIZE(BO), %xmm9 - mulpd %xmm0, %xmm9 - subpd %xmm9, %xmm2 - movddup -15 * SIZE(BO), %xmm9 - mulpd %xmm1, %xmm9 - subpd %xmm9, %xmm3 - - movddup -14 * SIZE(BO), %xmm10 - mulpd %xmm0, %xmm10 - subpd %xmm10, %xmm4 - movddup -14 * 
SIZE(BO), %xmm10 - mulpd %xmm1, %xmm10 - subpd %xmm10, %xmm5 - - movddup -13 * SIZE(BO), %xmm11 - mulpd %xmm0, %xmm11 - subpd %xmm11, %xmm6 - movddup -13 * SIZE(BO), %xmm11 - mulpd %xmm1, %xmm11 - subpd %xmm11, %xmm7 - - movddup -11 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm2 - mulpd %xmm8, %xmm3 - - movddup -10 * SIZE(BO), %xmm9 - mulpd %xmm2, %xmm9 - subpd %xmm9, %xmm4 - movddup -10 * SIZE(BO), %xmm9 - mulpd %xmm3, %xmm9 - subpd %xmm9, %xmm5 - - movddup -9 * SIZE(BO), %xmm10 - mulpd %xmm2, %xmm10 - subpd %xmm10, %xmm6 - movddup -9 * SIZE(BO), %xmm10 - mulpd %xmm3, %xmm10 - subpd %xmm10, %xmm7 - - movddup -6 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm4 - mulpd %xmm8, %xmm5 - - movddup -5 * SIZE(BO), %xmm9 - mulpd %xmm4, %xmm9 - subpd %xmm9, %xmm6 - movddup -5 * SIZE(BO), %xmm9 - mulpd %xmm5, %xmm9 - subpd %xmm9, %xmm7 - - movddup -1 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm6 - mulpd %xmm8, %xmm7 -#endif - -#ifdef RT - movddup -1 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm6 - mulpd %xmm8, %xmm7 - - movddup -2 * SIZE(BO), %xmm9 - mulpd %xmm6, %xmm9 - subpd %xmm9, %xmm4 - movddup -2 * SIZE(BO), %xmm9 - mulpd %xmm7, %xmm9 - subpd %xmm9, %xmm5 - - movddup -3 * SIZE(BO), %xmm10 - mulpd %xmm6, %xmm10 - subpd %xmm10, %xmm2 - movddup -3 * SIZE(BO), %xmm10 - mulpd %xmm7, %xmm10 - subpd %xmm10, %xmm3 - - movddup -4 * SIZE(BO), %xmm11 - mulpd %xmm6, %xmm11 - subpd %xmm11, %xmm0 - movddup -4 * SIZE(BO), %xmm11 - mulpd %xmm7, %xmm11 - subpd %xmm11, %xmm1 - - movddup -6 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm4 - mulpd %xmm8, %xmm5 - - movddup -7 * SIZE(BO), %xmm9 - mulpd %xmm4, %xmm9 - subpd %xmm9, %xmm2 - movddup -7 * SIZE(BO), %xmm9 - mulpd %xmm5, %xmm9 - subpd %xmm9, %xmm3 - - movddup -8 * SIZE(BO), %xmm10 - mulpd %xmm4, %xmm10 - subpd %xmm10, %xmm0 - movddup -8 * SIZE(BO), %xmm10 - mulpd %xmm5, %xmm10 - subpd %xmm10, %xmm1 - - movddup -11 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm2 - mulpd %xmm8, %xmm3 - - movddup -12 * SIZE(BO), %xmm9 - mulpd %xmm2, %xmm9 - subpd %xmm9, %xmm0 - movddup -12 * SIZE(BO), 
%xmm9 - mulpd %xmm3, %xmm9 - subpd %xmm9, %xmm1 - - movddup -16 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm0 - mulpd %xmm8, %xmm1 -#endif - -#ifdef LN - subq $4 * SIZE, CO1 - subq $4 * SIZE, CO2 -#endif - -#if defined(LN) || defined(LT) - movlpd %xmm9, 0 * SIZE(CO1) - movlpd %xmm13, 1 * SIZE(CO1) - movlpd %xmm1, 2 * SIZE(CO1) - movlpd %xmm5, 3 * SIZE(CO1) - - movhpd %xmm9, 0 * SIZE(CO2) - movhpd %xmm13, 1 * SIZE(CO2) - movhpd %xmm1, 2 * SIZE(CO2) - movhpd %xmm5, 3 * SIZE(CO2) - - movlpd %xmm11, 0 * SIZE(CO1, LDC, 2) - movlpd %xmm15, 1 * SIZE(CO1, LDC, 2) - movlpd %xmm3, 2 * SIZE(CO1, LDC, 2) - movlpd %xmm7, 3 * SIZE(CO1, LDC, 2) - - movhpd %xmm11, 0 * SIZE(CO2, LDC, 2) - movhpd %xmm15, 1 * SIZE(CO2, LDC, 2) - movhpd %xmm3, 2 * SIZE(CO2, LDC, 2) - movhpd %xmm7, 3 * SIZE(CO2, LDC, 2) -#else - movlpd %xmm0, 0 * SIZE(CO1) - movhpd %xmm0, 1 * SIZE(CO1) - movlpd %xmm1, 2 * SIZE(CO1) - movhpd %xmm1, 3 * SIZE(CO1) - - movlpd %xmm2, 0 * SIZE(CO2) - movhpd %xmm2, 1 * SIZE(CO2) - movlpd %xmm3, 2 * SIZE(CO2) - movhpd %xmm3, 3 * SIZE(CO2) - - movlpd %xmm4, 0 * SIZE(CO1, LDC, 2) - movhpd %xmm4, 1 * SIZE(CO1, LDC, 2) - movlpd %xmm5, 2 * SIZE(CO1, LDC, 2) - movhpd %xmm5, 3 * SIZE(CO1, LDC, 2) - - movlpd %xmm6, 0 * SIZE(CO2, LDC, 2) - movhpd %xmm6, 1 * SIZE(CO2, LDC, 2) - movlpd %xmm7, 2 * SIZE(CO2, LDC, 2) - movhpd %xmm7, 3 * SIZE(CO2, LDC, 2) -#endif - -#if defined(LN) || defined(LT) - movaps %xmm9, -16 * SIZE(BO) - movaps %xmm11, -14 * SIZE(BO) - movaps %xmm13, -12 * SIZE(BO) - movaps %xmm15, -10 * SIZE(BO) - movaps %xmm1, -8 * SIZE(BO) - movaps %xmm3, -6 * SIZE(BO) - movaps %xmm5, -4 * SIZE(BO) - movaps %xmm7, -2 * SIZE(BO) -#else - movaps %xmm0, -16 * SIZE(AO) - movaps %xmm1, -14 * SIZE(AO) - movaps %xmm2, -12 * SIZE(AO) - movaps %xmm3, -10 * SIZE(AO) - movaps %xmm4, -8 * SIZE(AO) - movaps %xmm5, -6 * SIZE(AO) - movaps %xmm6, -4 * SIZE(AO) - movaps %xmm7, -2 * SIZE(AO) -#endif - -#ifndef LN - addq $4 * SIZE, CO1 - addq $4 * SIZE, CO2 -#endif - -#if defined(LT) || defined(RN) - movq 
K, %rax - subq KK, %rax - leaq (,%rax, SIZE), %rax - leaq (AO, %rax, 4), AO - leaq (BO, %rax, 4), BO -#endif - -#ifdef LN - subq $4, KK -#endif - -#ifdef LT - addq $4, KK -#endif - -#ifdef RT - movq K, %rax - salq $2 + BASE_SHIFT, %rax - addq %rax, AORIG -#endif - - decq I # i -- - jg .L11 - ALIGN_4 - -.L20: - testq $3, M - je .L39 - - testq $2, M - je .L30 - ALIGN_4 - -.L21: -#ifdef LN - movq K, %rax - salq $1 + BASE_SHIFT, %rax - subq %rax, AORIG -#endif - -#if defined(LN) || defined(RT) - movq KK, %rax - movq AORIG, AO - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO -#endif - - movq B, BO - -#if defined(LN) || defined(RT) - movq KK, %rax - leaq (, %rax, SIZE), %rax - leaq (BO, %rax, 4), BO -#endif - - movapd -16 * SIZE(AO), %xmm0 - pxor %xmm8, %xmm8 - movapd -12 * SIZE(AO), %xmm2 - pxor %xmm9, %xmm9 - movddup -16 * SIZE(BO), %xmm1 - pxor %xmm10, %xmm10 - movddup -15 * SIZE(BO), %xmm5 - pxor %xmm11, %xmm11 - movddup -8 * SIZE(BO), %xmm3 - -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $-4, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (BO, %rax, 4), BO - negq %rax - NOBRANCH - je .L26 - ALIGN_4 - -.L22: - mulpd %xmm0, %xmm1 - addpd %xmm1, %xmm8 - movddup -14 * SIZE(BO, %rax, 4), %xmm1 - mulpd %xmm0, %xmm5 - addpd %xmm5, %xmm9 - movddup -13 * SIZE(BO, %rax, 4), %xmm5 - mulpd %xmm0, %xmm1 - addpd %xmm1, %xmm10 - movddup -12 * SIZE(BO, %rax, 4), %xmm1 - mulpd %xmm0, %xmm5 - movapd -14 * SIZE(AO, %rax, 2), %xmm0 - addpd %xmm5, %xmm11 - movddup -11 * SIZE(BO, %rax, 4), %xmm5 - mulpd %xmm0, %xmm1 - addpd %xmm1, %xmm8 - movddup -10 * SIZE(BO, %rax, 4), %xmm1 - mulpd %xmm0, %xmm5 - addpd %xmm5, %xmm9 - movddup -9 * SIZE(BO, %rax, 4), %xmm5 - mulpd %xmm0, %xmm1 - addpd %xmm1, %xmm10 - movddup (BO, %rax, 4), %xmm1 - mulpd %xmm0, %xmm5 - movapd -8 * SIZE(AO, %rax, 2), %xmm0 - addpd %xmm5, %xmm11 - movddup -7 * SIZE(BO, %rax, 4), %xmm5 - mulpd %xmm2, %xmm3 - addpd %xmm3, %xmm8 - movddup -6 * 
SIZE(BO, %rax, 4), %xmm3 - mulpd %xmm2, %xmm5 - addpd %xmm5, %xmm9 - movddup -5 * SIZE(BO, %rax, 4), %xmm5 - mulpd %xmm2, %xmm3 - addpd %xmm3, %xmm10 - movddup -4 * SIZE(BO, %rax, 4), %xmm3 - mulpd %xmm2, %xmm5 - movapd -10 * SIZE(AO, %rax, 2), %xmm2 - addpd %xmm5, %xmm11 - movddup -3 * SIZE(BO, %rax, 4), %xmm5 - mulpd %xmm2, %xmm3 - addpd %xmm3, %xmm8 - movddup -2 * SIZE(BO, %rax, 4), %xmm3 - mulpd %xmm2, %xmm5 - addpd %xmm5, %xmm9 - movddup -1 * SIZE(BO, %rax, 4), %xmm5 - mulpd %xmm2, %xmm3 - addpd %xmm3, %xmm10 - movddup 8 * SIZE(BO, %rax, 4), %xmm3 - mulpd %xmm2, %xmm5 - movapd -4 * SIZE(AO, %rax, 2), %xmm2 - addpd %xmm5, %xmm11 - movddup 1 * SIZE(BO, %rax, 4), %xmm5 - - addq $4 * SIZE, %rax - BRANCH - jl .L22 - ALIGN_4 - -.L26: -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $3, %rax # if (k & 1) - je .L29 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (BO, %rax, 4), BO - negq %rax - ALIGN_4 - -.L27: - mulpd %xmm0, %xmm1 - addpd %xmm1, %xmm8 - movddup -14 * SIZE(BO, %rax, 4), %xmm1 - mulpd %xmm0, %xmm5 - addpd %xmm5, %xmm9 - movddup -13 * SIZE(BO, %rax, 4), %xmm5 - mulpd %xmm0, %xmm1 - addpd %xmm1, %xmm10 - movddup -12 * SIZE(BO, %rax, 4), %xmm1 - mulpd %xmm0, %xmm5 - movapd -14 * SIZE(AO, %rax, 2), %xmm0 - addpd %xmm5, %xmm11 - movddup -11 * SIZE(BO, %rax, 4), %xmm5 - - addq $SIZE, %rax - jl .L27 - ALIGN_4 - -.L29: -#if defined(LN) || defined(RT) - movq KK, %rax -#ifdef LN - subq $2, %rax -#else - subq $4, %rax -#endif - - leaq (, %rax, SIZE), %rax - - movq AORIG, AO - leaq (AO, %rax, 2), AO - leaq (B, %rax, 4), BO -#endif - -#if defined(LN) || defined(LT) - movapd %xmm8, %xmm0 - unpcklpd %xmm9, %xmm8 - unpckhpd %xmm9, %xmm0 - - movapd %xmm10, %xmm2 - unpcklpd %xmm11, %xmm10 - unpckhpd %xmm11, %xmm2 - - movapd -16 * SIZE(BO), %xmm9 - movapd -14 * SIZE(BO), %xmm11 - movapd -12 * SIZE(BO), %xmm13 - movapd -10 * SIZE(BO), %xmm15 - - subpd %xmm8, %xmm9 - subpd %xmm10, %xmm11 - subpd %xmm0, %xmm13 
- subpd %xmm2, %xmm15 -#else - movapd -16 * SIZE(AO), %xmm0 - movapd -14 * SIZE(AO), %xmm2 - movapd -12 * SIZE(AO), %xmm4 - movapd -10 * SIZE(AO), %xmm6 - - subpd %xmm8, %xmm0 - subpd %xmm9, %xmm2 - subpd %xmm10, %xmm4 - subpd %xmm11, %xmm6 -#endif - -#ifdef LN - movddup -13 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm13 - mulpd %xmm8, %xmm15 - - movddup -14 * SIZE(AO), %xmm10 - mulpd %xmm13, %xmm10 - subpd %xmm10, %xmm9 - movddup -14 * SIZE(AO), %xmm10 - mulpd %xmm15, %xmm10 - subpd %xmm10, %xmm11 - - movddup -16 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm9 - mulpd %xmm8, %xmm11 -#endif - -#ifdef LT - movddup -16 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm9 - mulpd %xmm8, %xmm11 - - movddup -15 * SIZE(AO), %xmm10 - mulpd %xmm9, %xmm10 - subpd %xmm10, %xmm13 - movddup -15 * SIZE(AO), %xmm10 - mulpd %xmm11, %xmm10 - subpd %xmm10, %xmm15 - - movddup -13 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm13 - mulpd %xmm8, %xmm15 -#endif - -#ifdef RN - movddup -16 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm0 - - movddup -15 * SIZE(BO), %xmm9 - mulpd %xmm0, %xmm9 - subpd %xmm9, %xmm2 - movddup -14 * SIZE(BO), %xmm10 - mulpd %xmm0, %xmm10 - subpd %xmm10, %xmm4 - movddup -13 * SIZE(BO), %xmm11 - mulpd %xmm0, %xmm11 - subpd %xmm11, %xmm6 - - movddup -11 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm2 - movddup -10 * SIZE(BO), %xmm9 - mulpd %xmm2, %xmm9 - subpd %xmm9, %xmm4 - movddup -9 * SIZE(BO), %xmm10 - mulpd %xmm2, %xmm10 - subpd %xmm10, %xmm6 - - movddup -6 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm4 - - movddup -5 * SIZE(BO), %xmm9 - mulpd %xmm4, %xmm9 - subpd %xmm9, %xmm6 - - movddup -1 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm6 -#endif - -#ifdef RT - movddup -1 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm6 - - movddup -2 * SIZE(BO), %xmm9 - mulpd %xmm6, %xmm9 - subpd %xmm9, %xmm4 - movddup -3 * SIZE(BO), %xmm10 - mulpd %xmm6, %xmm10 - subpd %xmm10, %xmm2 - movddup -4 * SIZE(BO), %xmm11 - mulpd %xmm6, %xmm11 - subpd %xmm11, %xmm0 - - movddup -6 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm4 - movddup -7 * SIZE(BO), %xmm9 - mulpd %xmm4, 
%xmm9 - subpd %xmm9, %xmm2 - movddup -8 * SIZE(BO), %xmm10 - mulpd %xmm4, %xmm10 - subpd %xmm10, %xmm0 - - movddup -11 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm2 - movddup -12 * SIZE(BO), %xmm9 - mulpd %xmm2, %xmm9 - subpd %xmm9, %xmm0 - - movddup -16 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm0 -#endif - -#ifdef LN - subq $2 * SIZE, CO1 - subq $2 * SIZE, CO2 -#endif - -#if defined(LN) || defined(LT) - movlpd %xmm9, 0 * SIZE(CO1) - movlpd %xmm13, 1 * SIZE(CO1) - - movhpd %xmm9, 0 * SIZE(CO2) - movhpd %xmm13, 1 * SIZE(CO2) - - movlpd %xmm11, 0 * SIZE(CO1, LDC, 2) - movlpd %xmm15, 1 * SIZE(CO1, LDC, 2) - - movhpd %xmm11, 0 * SIZE(CO2, LDC, 2) - movhpd %xmm15, 1 * SIZE(CO2, LDC, 2) -#else - movlpd %xmm0, 0 * SIZE(CO1) - movhpd %xmm0, 1 * SIZE(CO1) - - movlpd %xmm2, 0 * SIZE(CO2) - movhpd %xmm2, 1 * SIZE(CO2) - - movlpd %xmm4, 0 * SIZE(CO1, LDC, 2) - movhpd %xmm4, 1 * SIZE(CO1, LDC, 2) - - movlpd %xmm6, 0 * SIZE(CO2, LDC, 2) - movhpd %xmm6, 1 * SIZE(CO2, LDC, 2) -#endif - -#if defined(LN) || defined(LT) - movaps %xmm9, -16 * SIZE(BO) - movaps %xmm11, -14 * SIZE(BO) - movaps %xmm13, -12 * SIZE(BO) - movaps %xmm15, -10 * SIZE(BO) -#else - movaps %xmm0, -16 * SIZE(AO) - movaps %xmm2, -14 * SIZE(AO) - movaps %xmm4, -12 * SIZE(AO) - movaps %xmm6, -10 * SIZE(AO) -#endif - -#ifndef LN - addq $2 * SIZE, CO1 - addq $2 * SIZE, CO2 -#endif - -#if defined(LT) || defined(RN) - movq K, %rax - subq KK, %rax - leaq (,%rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (BO, %rax, 4), BO -#endif - -#ifdef LN - subq $2, KK -#endif - -#ifdef LT - addq $2, KK -#endif - -#ifdef RT - movq K, %rax - salq $1 + BASE_SHIFT, %rax - addq %rax, AORIG -#endif - ALIGN_4 - -.L30: - testq $1, M - je .L39 - -#ifdef LN - movq K, %rax - salq $0 + BASE_SHIFT, %rax - subq %rax, AORIG -#endif - -#if defined(LN) || defined(RT) - movq KK, %rax - movq AORIG, AO - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO -#endif - - movq B, BO - -#if defined(LN) || defined(RT) - movq KK, %rax - leaq (, %rax, SIZE), %rax - leaq 
(BO, %rax, 4), BO -#endif - - movddup -16 * SIZE(AO), %xmm0 - pxor %xmm8, %xmm8 - movddup -14 * SIZE(AO), %xmm2 - pxor %xmm9, %xmm9 - movddup -15 * SIZE(AO), %xmm4 - pxor %xmm10, %xmm10 - movapd -16 * SIZE(BO), %xmm1 - pxor %xmm11, %xmm11 - movapd -8 * SIZE(BO), %xmm3 - -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $-4, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (BO, %rax, 4), BO - negq %rax - NOBRANCH - je .L36 - ALIGN_4 - -.L32: - mulpd %xmm0, %xmm1 - mulpd -14 * SIZE(BO, %rax, 4), %xmm0 - addpd %xmm1, %xmm8 - movapd -12 * SIZE(BO, %rax, 4), %xmm1 - addpd %xmm0, %xmm9 - movddup -12 * SIZE(AO, %rax, 1), %xmm0 - mulpd %xmm4, %xmm1 - mulpd -10 * SIZE(BO, %rax, 4), %xmm4 - addpd %xmm1, %xmm10 - movapd (BO, %rax, 4), %xmm1 - addpd %xmm4, %xmm11 - movddup -11 * SIZE(AO, %rax, 1), %xmm4 - mulpd %xmm2, %xmm3 - mulpd -6 * SIZE(BO, %rax, 4), %xmm2 - addpd %xmm3, %xmm8 - movapd -4 * SIZE(BO, %rax, 4), %xmm3 - addpd %xmm2, %xmm9 - movddup -13 * SIZE(AO, %rax, 1), %xmm2 - mulpd %xmm2, %xmm3 - mulpd -2 * SIZE(BO, %rax, 4), %xmm2 - addpd %xmm3, %xmm10 - movapd 8 * SIZE(BO, %rax, 4), %xmm3 - addpd %xmm2, %xmm11 - movddup -10 * SIZE(AO, %rax, 1), %xmm2 - - addq $4 * SIZE, %rax - BRANCH - jl .L32 - ALIGN_4 - -.L36: -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $3, %rax # if (k & 1) - je .L38 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (BO, %rax, 4), BO - negq %rax - ALIGN_4 - -.L37: - mulpd %xmm0, %xmm1 - mulpd -14 * SIZE(BO, %rax, 4), %xmm0 - addpd %xmm1, %xmm8 - movapd -12 * SIZE(BO, %rax, 4), %xmm1 - addpd %xmm0, %xmm9 - movddup -15 * SIZE(AO, %rax, 1), %xmm0 - - addq $SIZE, %rax - jl .L37 - ALIGN_4 - -.L38: - addpd %xmm10, %xmm8 - addpd %xmm11, %xmm9 - -#if defined(LN) || defined(RT) - movq KK, %rax -#ifdef LN - subq $1, %rax -#else - subq $4, %rax -#endif - - leaq (, %rax, SIZE), %rax - - movq AORIG, AO - leaq (AO, %rax, 1), AO 
- leaq (B, %rax, 4), BO -#endif - -#if defined(LN) || defined(LT) - movapd -16 * SIZE(BO), %xmm2 - movapd -14 * SIZE(BO), %xmm3 - - subpd %xmm8, %xmm2 - subpd %xmm9, %xmm3 -#else - movapd -16 * SIZE(AO), %xmm2 - movapd -14 * SIZE(AO), %xmm3 - - subpd %xmm8, %xmm2 - subpd %xmm9, %xmm3 -#endif - -#if defined(LN) || defined(LT) - movddup -16 * SIZE(AO), %xmm0 - mulpd %xmm0, %xmm2 - mulpd %xmm0, %xmm3 -#endif - -#ifdef RN - movapd %xmm2, %xmm0 - unpckhpd %xmm0, %xmm0 - - movapd %xmm3, %xmm1 - unpckhpd %xmm1, %xmm1 - - movsd -16 * SIZE(BO), %xmm4 - mulsd %xmm4, %xmm2 - - movsd -15 * SIZE(BO), %xmm5 - mulsd %xmm2, %xmm5 - subsd %xmm5, %xmm0 - movsd -14 * SIZE(BO), %xmm6 - mulsd %xmm2, %xmm6 - subsd %xmm6, %xmm3 - movsd -13 * SIZE(BO), %xmm7 - mulsd %xmm2, %xmm7 - subsd %xmm7, %xmm1 - - movsd -11 * SIZE(BO), %xmm4 - mulsd %xmm4, %xmm0 - - movsd -10 * SIZE(BO), %xmm5 - mulsd %xmm0, %xmm5 - subsd %xmm5, %xmm3 - movsd -9 * SIZE(BO), %xmm6 - mulsd %xmm0, %xmm6 - subsd %xmm6, %xmm1 - - movsd -6 * SIZE(BO), %xmm4 - mulsd %xmm4, %xmm3 - - movsd -5 * SIZE(BO), %xmm5 - mulsd %xmm3, %xmm5 - subsd %xmm5, %xmm1 - - movsd -1 * SIZE(BO), %xmm4 - mulsd %xmm4, %xmm1 - - unpcklpd %xmm0, %xmm2 - unpcklpd %xmm1, %xmm3 -#endif - -#ifdef RT - movapd %xmm2, %xmm0 - unpckhpd %xmm0, %xmm0 - - movapd %xmm3, %xmm1 - unpckhpd %xmm1, %xmm1 - - movsd -1 * SIZE(BO), %xmm4 - mulsd %xmm4, %xmm1 - - movsd -2 * SIZE(BO), %xmm5 - mulsd %xmm1, %xmm5 - subsd %xmm5, %xmm3 - movsd -3 * SIZE(BO), %xmm6 - mulsd %xmm1, %xmm6 - subsd %xmm6, %xmm0 - movsd -4 * SIZE(BO), %xmm7 - mulsd %xmm1, %xmm7 - subsd %xmm7, %xmm2 - - movsd -6 * SIZE(BO), %xmm4 - mulsd %xmm4, %xmm3 - - movsd -7 * SIZE(BO), %xmm5 - mulsd %xmm3, %xmm5 - subsd %xmm5, %xmm0 - movsd -8 * SIZE(BO), %xmm6 - mulsd %xmm3, %xmm6 - subsd %xmm6, %xmm2 - - movsd -11 * SIZE(BO), %xmm4 - mulsd %xmm4, %xmm0 - - movsd -12 * SIZE(BO), %xmm5 - mulsd %xmm0, %xmm5 - subsd %xmm5, %xmm2 - - movsd -16 * SIZE(BO), %xmm4 - mulsd %xmm4, %xmm2 - - unpcklpd %xmm0, %xmm2 - 
unpcklpd %xmm1, %xmm3 - -#endif - -#ifdef LN - subq $1 * SIZE, CO1 - subq $1 * SIZE, CO2 -#endif - -#if defined(LN) || defined(LT) - movlpd %xmm2, 0 * SIZE(CO1) - movhpd %xmm2, 0 * SIZE(CO2) - movlpd %xmm3, 0 * SIZE(CO1, LDC, 2) - movhpd %xmm3, 0 * SIZE(CO2, LDC, 2) -#else - movlpd %xmm2, 0 * SIZE(CO1) - movhpd %xmm2, 0 * SIZE(CO2) - movlpd %xmm3, 0 * SIZE(CO1, LDC, 2) - movhpd %xmm3, 0 * SIZE(CO2, LDC, 2) -#endif - -#if defined(LN) || defined(LT) - movaps %xmm2, -16 * SIZE(BO) - movaps %xmm3, -14 * SIZE(BO) -#else - movaps %xmm2, -16 * SIZE(AO) - movaps %xmm3, -14 * SIZE(AO) -#endif - -#ifndef LN - addq $1 * SIZE, CO1 - addq $1 * SIZE, CO2 -#endif - -#if defined(LT) || defined(RN) - movq K, %rax - subq KK, %rax - leaq (,%rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (BO, %rax, 4), BO -#endif - -#ifdef LN - subq $1, KK -#endif - -#ifdef LT - addq $1, KK -#endif - -#ifdef RT - movq K, %rax - salq $0 + BASE_SHIFT, %rax - addq %rax, AORIG -#endif - ALIGN_4 - -.L39: -#ifdef LN - leaq (, K, SIZE), %rax - leaq (B, %rax, 4), B -#endif - -#if defined(LT) || defined(RN) - movq BO, B -#endif - -#ifdef RN - addq $4, KK -#endif - -#ifdef RT - subq $4, KK -#endif - - decq J # j -- - jg .L01 - ALIGN_4 - -.L40: - testq $2, N - je .L80 - -#if defined(LT) || defined(RN) - movq A, AO -#else - movq A, AORIG -#endif - -#ifdef RT - movq K, %rax - salq $1 + BASE_SHIFT, %rax - subq %rax, B - - leaq (, LDC, 2), %rax - subq %rax, C -#endif - - movq C, CO1 # coffset1 = c - leaq (C, LDC, 1), CO2 # coffset2 = c + ldc -#ifndef RT - leaq (C, LDC, 2), C -#endif - -#ifdef LN - movq OFFSET, %rax - addq M, %rax - movq %rax, KK -#endif - -#if defined(LT) - movq OFFSET, %rax - movq %rax, KK -#endif - - movq M, I - sarq $2, I # i = (m >> 2) - jle .L60 - ALIGN_4 - -.L51: -#ifdef LN - movq K, %rax - salq $2 + BASE_SHIFT, %rax - subq %rax, AORIG -#endif - -#if defined(LN) || defined(RT) - movq KK, %rax - movq AORIG, AO - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 4), AO -#endif - - movq B, BO - -#if 
defined(LN) || defined(RT) - movq KK, %rax - leaq (, %rax, SIZE), %rax - leaq (BO, %rax, 2), BO -#endif - - movddup -16 * SIZE(BO), %xmm1 - movddup -15 * SIZE(BO), %xmm5 - pxor %xmm8, %xmm8 - movddup -12 * SIZE(BO), %xmm3 - pxor %xmm9, %xmm9 - movapd -16 * SIZE(AO), %xmm0 - pxor %xmm12, %xmm12 - movapd -8 * SIZE(AO), %xmm4 - pxor %xmm13, %xmm13 - - movapd %xmm0, %xmm2 - - -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $-4, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 4), AO - leaq (BO, %rax, 2), BO - negq %rax - NOBRANCH - je .L56 - ALIGN_4 - -.L52: - mulpd %xmm1, %xmm0 - mulpd -14 * SIZE(AO, %rax, 4), %xmm1 - addpd %xmm0, %xmm8 - movapd -12 * SIZE(AO, %rax, 4), %xmm0 - addpd %xmm1, %xmm12 - movddup -14 * SIZE(BO, %rax, 2), %xmm1 - mulpd %xmm5, %xmm2 - mulpd -14 * SIZE(AO, %rax, 4), %xmm5 - addpd %xmm2, %xmm9 - addpd %xmm5, %xmm13 - movddup -13 * SIZE(BO, %rax, 2), %xmm5 - movapd %xmm0, %xmm2 - mulpd %xmm1, %xmm0 - mulpd -10 * SIZE(AO, %rax, 4), %xmm1 - addpd %xmm0, %xmm8 - movapd (AO, %rax, 4), %xmm0 - addpd %xmm1, %xmm12 - movddup -8 * SIZE(BO, %rax, 2), %xmm1 - mulpd %xmm5, %xmm2 - mulpd -10 * SIZE(AO, %rax, 4), %xmm5 - addpd %xmm2, %xmm9 - addpd %xmm5, %xmm13 - movddup -11 * SIZE(BO, %rax, 2), %xmm5 - movapd %xmm4, %xmm2 - mulpd %xmm3, %xmm4 - mulpd -6 * SIZE(AO, %rax, 4), %xmm3 - addpd %xmm4, %xmm8 - movapd -4 * SIZE(AO, %rax, 4), %xmm4 - addpd %xmm3, %xmm12 - movddup -10 * SIZE(BO, %rax, 2), %xmm3 - mulpd %xmm5, %xmm2 - mulpd -6 * SIZE(AO, %rax, 4), %xmm5 - addpd %xmm2, %xmm9 - addpd %xmm5, %xmm13 - movddup -9 * SIZE(BO, %rax, 2), %xmm5 - movapd %xmm4, %xmm2 - mulpd %xmm3, %xmm4 - mulpd -2 * SIZE(AO, %rax, 4), %xmm3 - addpd %xmm4, %xmm8 - movapd 8 * SIZE(AO, %rax, 4), %xmm4 - addpd %xmm3, %xmm12 - movddup -4 * SIZE(BO, %rax, 2), %xmm3 - mulpd %xmm5, %xmm2 - mulpd -2 * SIZE(AO, %rax, 4), %xmm5 - addpd %xmm2, %xmm9 - addpd %xmm5, %xmm13 - movddup -7 * SIZE(BO, %rax, 2), %xmm5 - movapd %xmm0, %xmm2 - - 
addq $4 * SIZE, %rax - BRANCH - jl .L52 - ALIGN_4 - -.L56: -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $3, %rax # if (k & 1) - je .L59 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 4), AO - leaq (BO, %rax, 2), BO - negq %rax - ALIGN_4 - -.L57: - mulpd %xmm1, %xmm0 - mulpd -14 * SIZE(AO, %rax, 4), %xmm1 - addpd %xmm0, %xmm8 - movapd -12 * SIZE(AO, %rax, 4), %xmm0 - addpd %xmm1, %xmm12 - movddup -14 * SIZE(BO, %rax, 2), %xmm1 - mulpd %xmm5, %xmm2 - mulpd -14 * SIZE(AO, %rax, 4), %xmm5 - addpd %xmm2, %xmm9 - addpd %xmm5, %xmm13 - movddup -13 * SIZE(BO, %rax, 2), %xmm5 - movapd %xmm0, %xmm2 - - addq $SIZE, %rax - jl .L57 - ALIGN_4 - -.L59: -#if defined(LN) || defined(RT) - movq KK, %rax -#ifdef LN - subq $4, %rax -#else - subq $2, %rax -#endif - - leaq (, %rax, SIZE), %rax - - movq AORIG, AO - leaq (AO, %rax, 4), AO - leaq (B, %rax, 2), BO -#endif - -#if defined(LN) || defined(LT) - movapd %xmm8, %xmm0 - unpcklpd %xmm9, %xmm8 - unpckhpd %xmm9, %xmm0 - - movapd %xmm12, %xmm4 - unpcklpd %xmm13, %xmm12 - unpckhpd %xmm13, %xmm4 - - movapd -16 * SIZE(BO), %xmm9 - movapd -14 * SIZE(BO), %xmm13 - movapd -12 * SIZE(BO), %xmm1 - movapd -10 * SIZE(BO), %xmm5 - - subpd %xmm8, %xmm9 - subpd %xmm0, %xmm13 - subpd %xmm12, %xmm1 - subpd %xmm4, %xmm5 -#else - movapd -16 * SIZE(AO), %xmm0 - movapd -14 * SIZE(AO), %xmm1 - movapd -12 * SIZE(AO), %xmm2 - movapd -10 * SIZE(AO), %xmm3 - - subpd %xmm8, %xmm0 - subpd %xmm12, %xmm1 - subpd %xmm9, %xmm2 - subpd %xmm13, %xmm3 -#endif - -#ifdef LN - movddup -1 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm5 - movddup -2 * SIZE(AO), %xmm10 - mulpd %xmm5, %xmm10 - subpd %xmm10, %xmm1 - movddup -3 * SIZE(AO), %xmm12 - mulpd %xmm5, %xmm12 - subpd %xmm12, %xmm13 - movddup -4 * SIZE(AO), %xmm14 - mulpd %xmm5, %xmm14 - subpd %xmm14, %xmm9 - - movddup -6 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm1 - movddup -7 * SIZE(AO), %xmm10 - mulpd %xmm1, %xmm10 - subpd %xmm10, %xmm13 - movddup -8 * SIZE(AO), %xmm12 - 
mulpd %xmm1, %xmm12 - subpd %xmm12, %xmm9 - - movddup -11 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm13 - movddup -12 * SIZE(AO), %xmm10 - mulpd %xmm13, %xmm10 - subpd %xmm10, %xmm9 - - movddup -16 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm9 -#endif - -#ifdef LT - movddup -16 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm9 - movddup -15 * SIZE(AO), %xmm10 - mulpd %xmm9, %xmm10 - subpd %xmm10, %xmm13 - movddup -14 * SIZE(AO), %xmm12 - mulpd %xmm9, %xmm12 - subpd %xmm12, %xmm1 - movddup -13 * SIZE(AO), %xmm14 - mulpd %xmm9, %xmm14 - subpd %xmm14, %xmm5 - - - movddup -11 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm13 - - movddup -10 * SIZE(AO), %xmm10 - mulpd %xmm13, %xmm10 - subpd %xmm10, %xmm1 - movddup -9 * SIZE(AO), %xmm12 - mulpd %xmm13, %xmm12 - subpd %xmm12, %xmm5 - - movddup -6 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm1 - movddup -5 * SIZE(AO), %xmm10 - mulpd %xmm1, %xmm10 - subpd %xmm10, %xmm5 - - movddup -1 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm5 -#endif - -#ifdef RN - movddup -16 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm0 - mulpd %xmm8, %xmm1 - - movddup -15 * SIZE(BO), %xmm9 - mulpd %xmm0, %xmm9 - subpd %xmm9, %xmm2 - movddup -15 * SIZE(BO), %xmm9 - mulpd %xmm1, %xmm9 - subpd %xmm9, %xmm3 - - movddup -13 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm2 - mulpd %xmm8, %xmm3 -#endif - -#ifdef RT - movddup -13 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm2 - mulpd %xmm8, %xmm3 - - movddup -14 * SIZE(BO), %xmm9 - mulpd %xmm2, %xmm9 - subpd %xmm9, %xmm0 - movddup -14 * SIZE(BO), %xmm9 - mulpd %xmm3, %xmm9 - subpd %xmm9, %xmm1 - - movddup -16 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm0 - mulpd %xmm8, %xmm1 -#endif - -#ifdef LN - subq $4 * SIZE, CO1 - subq $4 * SIZE, CO2 -#endif - -#if defined(LN) || defined(LT) - movlpd %xmm9, 0 * SIZE(CO1) - movlpd %xmm13, 1 * SIZE(CO1) - movlpd %xmm1, 2 * SIZE(CO1) - movlpd %xmm5, 3 * SIZE(CO1) - - movhpd %xmm9, 0 * SIZE(CO2) - movhpd %xmm13, 1 * SIZE(CO2) - movhpd %xmm1, 2 * SIZE(CO2) - movhpd %xmm5, 3 * SIZE(CO2) -#else - movlpd %xmm0, 0 * SIZE(CO1) - movhpd %xmm0, 1 * SIZE(CO1) - 
movlpd %xmm1, 2 * SIZE(CO1) - movhpd %xmm1, 3 * SIZE(CO1) - - movlpd %xmm2, 0 * SIZE(CO2) - movhpd %xmm2, 1 * SIZE(CO2) - movlpd %xmm3, 2 * SIZE(CO2) - movhpd %xmm3, 3 * SIZE(CO2) -#endif - -#if defined(LN) || defined(LT) - movaps %xmm9, -16 * SIZE(BO) - movaps %xmm13,-14 * SIZE(BO) - movaps %xmm1, -12 * SIZE(BO) - movaps %xmm5, -10 * SIZE(BO) -#else - movaps %xmm0, -16 * SIZE(AO) - movaps %xmm1, -14 * SIZE(AO) - movaps %xmm2, -12 * SIZE(AO) - movaps %xmm3, -10 * SIZE(AO) -#endif - -#ifndef LN - addq $4 * SIZE, CO1 - addq $4 * SIZE, CO2 -#endif - -#if defined(LT) || defined(RN) - movq K, %rax - subq KK, %rax - leaq (,%rax, SIZE), %rax - leaq (AO, %rax, 4), AO - leaq (BO, %rax, 2), BO -#endif - -#ifdef LN - subq $4, KK -#endif - -#ifdef LT - addq $4, KK -#endif - -#ifdef RT - movq K, %rax - salq $2 + BASE_SHIFT, %rax - addq %rax, AORIG -#endif - - decq I # i -- - jg .L51 - ALIGN_4 - -.L60: - testq $2, M - je .L70 - -#ifdef LN - movq K, %rax - salq $1 + BASE_SHIFT, %rax - subq %rax, AORIG -#endif - -#if defined(LN) || defined(RT) - movq KK, %rax - movq AORIG, AO - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO -#endif - - movq B, BO - -#if defined(LN) || defined(RT) - movq KK, %rax - leaq (, %rax, SIZE), %rax - leaq (BO, %rax, 2), BO -#endif - - movapd -16 * SIZE(AO), %xmm0 - pxor %xmm8, %xmm8 - movapd -12 * SIZE(AO), %xmm2 - pxor %xmm9, %xmm9 - movddup -16 * SIZE(BO), %xmm1 - pxor %xmm10, %xmm10 - movddup -15 * SIZE(BO), %xmm3 - pxor %xmm11, %xmm11 - -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $-4, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (BO, %rax, 2), BO - negq %rax - NOBRANCH - je .L66 - ALIGN_4 - -.L62: - mulpd %xmm0, %xmm1 - addpd %xmm1, %xmm8 - movddup -14 * SIZE(BO, %rax, 2), %xmm1 - mulpd %xmm0, %xmm3 - movapd -14 * SIZE(AO, %rax, 2), %xmm0 - addpd %xmm3, %xmm9 - movddup -13 * SIZE(BO, %rax, 2), %xmm3 - mulpd %xmm0, %xmm1 - addpd %xmm1, %xmm10 - movddup -12 * SIZE(BO, %rax, 
2), %xmm1 - mulpd %xmm0, %xmm3 - movapd -8 * SIZE(AO, %rax, 2), %xmm0 - addpd %xmm3, %xmm11 - movddup -11 * SIZE(BO, %rax, 2), %xmm3 - mulpd %xmm2, %xmm1 - addpd %xmm1, %xmm8 - movddup -10 * SIZE(BO, %rax, 2), %xmm1 - mulpd %xmm2, %xmm3 - movapd -10 * SIZE(AO, %rax, 2), %xmm2 - addpd %xmm3, %xmm9 - movddup -9 * SIZE(BO, %rax, 2), %xmm3 - mulpd %xmm2, %xmm1 - addpd %xmm1, %xmm10 - movddup -8 * SIZE(BO, %rax, 2), %xmm1 - mulpd %xmm2, %xmm3 - movapd -4 * SIZE(AO, %rax, 2), %xmm2 - addpd %xmm3, %xmm11 - movddup -7 * SIZE(BO, %rax, 2), %xmm3 - - addq $4 * SIZE, %rax - BRANCH - jl .L62 - ALIGN_4 - -.L66: -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $3, %rax # if (k & 1) - je .L69 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (BO, %rax, 2), BO - negq %rax - ALIGN_4 - -.L67: - mulpd %xmm0, %xmm1 - addpd %xmm1, %xmm8 - movddup -14 * SIZE(BO, %rax, 2), %xmm1 - mulpd %xmm0, %xmm3 - movapd -14 * SIZE(AO, %rax, 2), %xmm0 - addpd %xmm3, %xmm9 - movddup -13 * SIZE(BO, %rax, 2), %xmm3 - - addq $SIZE, %rax - jl .L67 - ALIGN_4 - -.L69: - addpd %xmm10, %xmm8 - addpd %xmm11, %xmm9 - -#if defined(LN) || defined(RT) - movq KK, %rax -#ifdef LN - subq $2, %rax -#else - subq $2, %rax -#endif - - leaq (, %rax, SIZE), %rax - - movq AORIG, AO - leaq (AO, %rax, 2), AO - leaq (B, %rax, 2), BO -#endif - -#if defined(LN) || defined(LT) - movapd %xmm8, %xmm0 - unpcklpd %xmm9, %xmm8 - unpckhpd %xmm9, %xmm0 - - movapd -16 * SIZE(BO), %xmm9 - movapd -14 * SIZE(BO), %xmm13 - - subpd %xmm8, %xmm9 - subpd %xmm0, %xmm13 -#else - movapd -16 * SIZE(AO), %xmm0 - movapd -14 * SIZE(AO), %xmm2 - - subpd %xmm8, %xmm0 - subpd %xmm9, %xmm2 -#endif - - -#ifdef LN - movddup -13 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm13 - - movddup -14 * SIZE(AO), %xmm10 - mulpd %xmm13, %xmm10 - subpd %xmm10, %xmm9 - - movddup -16 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm9 -#endif - -#ifdef LT - movddup -16 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm9 - - movddup -15 * 
SIZE(AO), %xmm10 - mulpd %xmm9, %xmm10 - subpd %xmm10, %xmm13 - - movddup -13 * SIZE(AO), %xmm8 - mulpd %xmm8, %xmm13 -#endif - -#ifdef RN - movddup -16 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm0 - - movddup -15 * SIZE(BO), %xmm9 - mulpd %xmm0, %xmm9 - subpd %xmm9, %xmm2 - - movddup -13 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm2 -#endif - -#ifdef RT - movddup -13 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm2 - - movddup -14 * SIZE(BO), %xmm9 - mulpd %xmm2, %xmm9 - subpd %xmm9, %xmm0 - - movddup -16 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm0 -#endif - -#ifdef LN - subq $2 * SIZE, CO1 - subq $2 * SIZE, CO2 -#endif - -#if defined(LN) || defined(LT) - movlpd %xmm9, 0 * SIZE(CO1) - movlpd %xmm13, 1 * SIZE(CO1) - - movhpd %xmm9, 0 * SIZE(CO2) - movhpd %xmm13, 1 * SIZE(CO2) -#else - movlpd %xmm0, 0 * SIZE(CO1) - movhpd %xmm0, 1 * SIZE(CO1) - - movlpd %xmm2, 0 * SIZE(CO2) - movhpd %xmm2, 1 * SIZE(CO2) -#endif - -#if defined(LN) || defined(LT) - movaps %xmm9, -16 * SIZE(BO) - movaps %xmm13, -14 * SIZE(BO) -#else - movaps %xmm0, -16 * SIZE(AO) - movaps %xmm2, -14 * SIZE(AO) -#endif - -#ifndef LN - addq $2 * SIZE, CO1 - addq $2 * SIZE, CO2 -#endif - -#if defined(LT) || defined(RN) - movq K, %rax - subq KK, %rax - leaq (,%rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (BO, %rax, 2), BO -#endif - -#ifdef LN - subq $2, KK -#endif - -#ifdef LT - addq $2, KK -#endif - -#ifdef RT - movq K, %rax - salq $1 + BASE_SHIFT, %rax - addq %rax, AORIG -#endif - ALIGN_4 - -.L70: - testq $1, M - je .L79 - ALIGN_4 - -.L71: -#ifdef LN - movq K, %rax - salq $0 + BASE_SHIFT, %rax - subq %rax, AORIG -#endif - -#if defined(LN) || defined(RT) - movq KK, %rax - movq AORIG, AO - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO -#endif - - movq B, BO - -#if defined(LN) || defined(RT) - movq KK, %rax - salq $1 + BASE_SHIFT, %rax - leaq (BO, %rax, 1), BO -#endif - - movddup -16 * SIZE(AO), %xmm0 - pxor %xmm8, %xmm8 - movddup -15 * SIZE(AO), %xmm1 - pxor %xmm9, %xmm9 - movddup -14 * SIZE(AO), %xmm2 - pxor %xmm10, %xmm10 
- movddup -13 * SIZE(AO), %xmm3 - pxor %xmm11, %xmm11 - -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $-4, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (BO, %rax, 2), BO - negq %rax - NOBRANCH - je .L76 - ALIGN_4 - -.L72: - mulpd -16 * SIZE(BO, %rax, 2), %xmm0 - addpd %xmm0, %xmm8 - movddup -12 * SIZE(AO, %rax, 1), %xmm0 - - mulpd -14 * SIZE(BO, %rax, 2), %xmm1 - addpd %xmm1, %xmm9 - movddup -11 * SIZE(AO, %rax, 1), %xmm1 - - mulpd -12 * SIZE(BO, %rax, 2), %xmm2 - addpd %xmm2, %xmm10 - movddup -10 * SIZE(AO, %rax, 1), %xmm2 - - mulpd -10 * SIZE(BO, %rax, 2), %xmm3 - addpd %xmm3, %xmm11 - movddup -9 * SIZE(AO, %rax, 1), %xmm3 - - addq $4 * SIZE, %rax - BRANCH - jl .L72 - ALIGN_4 - -.L76: -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $3, %rax # if (k & 1) - je .L78 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (BO, %rax, 2), BO - negq %rax - ALIGN_4 - -.L77: - mulpd -16 * SIZE(BO, %rax, 2), %xmm0 - addpd %xmm0, %xmm8 - movddup -15 * SIZE(AO, %rax, 1), %xmm0 - - addq $SIZE, %rax - jl .L77 - ALIGN_4 - -.L78: - addpd %xmm9, %xmm8 - addpd %xmm11, %xmm10 - addpd %xmm10, %xmm8 - -#if defined(LN) || defined(RT) - movq KK, %rax -#ifdef LN - subq $1, %rax -#else - subq $2, %rax -#endif - - leaq (, %rax, SIZE), %rax - - movq AORIG, AO - leaq (AO, %rax, 1), AO - leaq (B, %rax, 2), BO -#endif - -#if defined(LN) || defined(LT) - movapd -16 * SIZE(BO), %xmm2 -#else - movapd -16 * SIZE(AO), %xmm2 -#endif - - subpd %xmm8, %xmm2 - -#if defined(LN) || defined(LT) - movddup -16 * SIZE(AO), %xmm0 - - mulpd %xmm0, %xmm2 -#endif - -#ifdef RN - movapd %xmm2, %xmm0 - unpckhpd %xmm0, %xmm0 - - mulsd -16 * SIZE(BO), %xmm2 - movsd -15 * SIZE(BO), %xmm4 - mulsd %xmm2, %xmm4 - subsd %xmm4, %xmm0 - - mulsd -13 * SIZE(BO), %xmm0 - unpcklpd %xmm0, %xmm2 -#endif - -#ifdef RT - movapd %xmm2, %xmm0 - unpckhpd %xmm0, %xmm0 - - mulsd -13 * SIZE(BO), %xmm0 - - 
movlpd -14 * SIZE(BO), %xmm4 - mulsd %xmm0, %xmm4 - subsd %xmm4, %xmm2 - - mulsd -16 * SIZE(BO), %xmm2 - unpcklpd %xmm0, %xmm2 -#endif - -#ifdef LN - subq $1 * SIZE, CO1 - subq $1 * SIZE, CO2 -#endif - - movlpd %xmm2, 0 * SIZE(CO1) - movhpd %xmm2, 0 * SIZE(CO2) - -#if defined(LN) || defined(LT) - movaps %xmm2, -16 * SIZE(BO) -#else - movaps %xmm2, -16 * SIZE(AO) -#endif - -#ifndef LN - addq $1 * SIZE, CO1 - addq $1 * SIZE, CO2 -#endif - -#if defined(LT) || defined(RN) - movq K, %rax - subq KK, %rax - leaq (,%rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (BO, %rax, 2), BO -#endif - -#ifdef LN - subq $1, KK -#endif - -#ifdef LT - addq $1, KK -#endif - -#ifdef RT - movq K, %rax - salq $0 + BASE_SHIFT, %rax - addq %rax, AORIG -#endif - ALIGN_4 - -.L79: -#ifdef LN - leaq (, K, SIZE), %rax - leaq (B, %rax, 2), B -#endif - -#if defined(LT) || defined(RN) - movq BO, B -#endif - -#ifdef RN - addq $2, KK -#endif - -#ifdef RT - subq $2, KK -#endif - ALIGN_4 - -.L80: - testq $1, N - je .L999 - -#if defined(LT) || defined(RN) - movq A, AO -#else - movq A, AORIG -#endif - -#ifdef RT - movq K, %rax - salq $0 + BASE_SHIFT, %rax - subq %rax, B - - subq LDC, C -#endif - - movq C, CO1 # coffset1 = c -#ifndef RT - addq LDC, C -#endif - -#ifdef LN - movq OFFSET, %rax - addq M, %rax - movq %rax, KK -#endif - -#ifdef LT - movq OFFSET, %rax - movq %rax, KK -#endif - - movq M, I - sarq $2, I # i = (m >> 2) - jle .L100 - ALIGN_4 - -.L91: -#ifdef LN - movq K, %rax - salq $2 + BASE_SHIFT, %rax - subq %rax, AORIG -#endif - -#if defined(LN) || defined(RT) - movq KK, %rax - movq AORIG, AO - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 4), AO -#endif - - movq B, BO - -#if defined(LN) || defined(RT) - movq KK, %rax - leaq (BO, %rax, SIZE), BO -#endif - - movapd -16 * SIZE(AO), %xmm0 - pxor %xmm8, %xmm8 - movapd -8 * SIZE(AO), %xmm2 - pxor %xmm9, %xmm9 - movddup -16 * SIZE(BO), %xmm1 - pxor %xmm10, %xmm10 - movddup -15 * SIZE(BO), %xmm5 - pxor %xmm11, %xmm11 - movddup -14 * SIZE(BO), %xmm3 - - 
-#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $-4, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 4), AO - leaq (BO, %rax, 1), BO - negq %rax - NOBRANCH - je .L96 - ALIGN_4 - -.L92: - mulpd %xmm1, %xmm0 - mulpd -14 * SIZE(AO, %rax, 4), %xmm1 - addpd %xmm0, %xmm8 - movapd -12 * SIZE(AO, %rax, 4), %xmm0 - addpd %xmm1, %xmm9 - movddup -12 * SIZE(BO, %rax, 1), %xmm1 - mulpd %xmm5, %xmm0 - mulpd -10 * SIZE(AO, %rax, 4), %xmm5 - addpd %xmm0, %xmm10 - movapd (AO, %rax, 4), %xmm0 - addpd %xmm5, %xmm11 - movddup -13 * SIZE(BO, %rax, 1), %xmm5 - mulpd %xmm3, %xmm2 - mulpd -6 * SIZE(AO, %rax, 4), %xmm3 - addpd %xmm2, %xmm8 - movapd -4 * SIZE(AO, %rax, 4), %xmm2 - addpd %xmm3, %xmm9 - movddup -10 * SIZE(BO, %rax, 1), %xmm3 - mulpd %xmm5, %xmm2 - mulpd -2 * SIZE(AO, %rax, 4), %xmm5 - addpd %xmm2, %xmm10 - movapd 8 * SIZE(AO, %rax, 4), %xmm2 - addpd %xmm5, %xmm11 - movddup -11 * SIZE(BO, %rax, 1), %xmm5 - - addq $4 * SIZE, %rax - BRANCH - jl .L92 - ALIGN_4 - -.L96: -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $3, %rax # if (k & 1) - je .L99 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 4), AO - leaq (BO, %rax, 1), BO - negq %rax - ALIGN_4 - -.L97: - mulpd %xmm1, %xmm0 - mulpd -14 * SIZE(AO, %rax, 4), %xmm1 - addpd %xmm0, %xmm8 - movapd -12 * SIZE(AO, %rax, 4), %xmm0 - addpd %xmm1, %xmm9 - movddup -15 * SIZE(BO, %rax, 1), %xmm1 - - addq $SIZE, %rax - jl .L97 - ALIGN_4 -.L99: - addpd %xmm10, %xmm8 - addpd %xmm11, %xmm9 - -#if defined(LN) || defined(RT) - movq KK, %rax -#ifdef LN - subq $4, %rax -#else - subq $1, %rax -#endif - - leaq (, %rax, SIZE), %rax - - movq AORIG, AO - leaq (AO, %rax, 4), AO - leaq (B, %rax, 1), BO -#endif - -#if defined(LN) || defined(LT) - movapd -16 * SIZE(BO), %xmm10 - movapd -14 * SIZE(BO), %xmm11 - - subpd %xmm8, %xmm10 - subpd %xmm9, %xmm11 -#else - movapd -16 * SIZE(AO), %xmm10 - movapd -14 * SIZE(AO), %xmm11 - - subpd %xmm8, %xmm10 - 
subpd %xmm9, %xmm11 -#endif - -#ifdef LN - movapd %xmm10, %xmm8 - unpckhpd %xmm8, %xmm8 - - movapd %xmm11, %xmm9 - unpckhpd %xmm9, %xmm9 - - movsd -1 * SIZE(AO), %xmm12 - mulsd %xmm12, %xmm9 - - movsd -2 * SIZE(AO), %xmm13 - mulsd %xmm9, %xmm13 - subsd %xmm13, %xmm11 - movsd -3 * SIZE(AO), %xmm14 - mulsd %xmm9, %xmm14 - subsd %xmm14, %xmm8 - movsd -4 * SIZE(AO), %xmm15 - mulsd %xmm9, %xmm15 - subsd %xmm15, %xmm10 - - movsd -6 * SIZE(AO), %xmm12 - mulsd %xmm12, %xmm11 - - movsd -7 * SIZE(AO), %xmm13 - mulsd %xmm11, %xmm13 - subsd %xmm13, %xmm8 - movsd -8 * SIZE(AO), %xmm14 - mulsd %xmm11, %xmm14 - subsd %xmm14, %xmm10 - - movsd -11 * SIZE(AO), %xmm12 - mulsd %xmm12, %xmm8 - - movsd -12 * SIZE(AO), %xmm13 - mulsd %xmm8, %xmm13 - subsd %xmm13, %xmm10 - - movsd -16 * SIZE(AO), %xmm12 - mulsd %xmm12, %xmm10 - - unpcklpd %xmm8, %xmm10 - unpcklpd %xmm9, %xmm11 -#endif - -#ifdef LT - movapd %xmm10, %xmm8 - unpckhpd %xmm8, %xmm8 - - movapd %xmm11, %xmm9 - unpckhpd %xmm9, %xmm9 - - movsd -16 * SIZE(AO), %xmm12 - mulsd %xmm12, %xmm10 - - movsd -15 * SIZE(AO), %xmm13 - mulsd %xmm10, %xmm13 - subsd %xmm13, %xmm8 - movsd -14 * SIZE(AO), %xmm14 - mulsd %xmm10, %xmm14 - subsd %xmm14, %xmm11 - movsd -13 * SIZE(AO), %xmm15 - mulsd %xmm10, %xmm15 - subsd %xmm15, %xmm9 - - movsd -11 * SIZE(AO), %xmm12 - mulsd %xmm12, %xmm8 - - movsd -10 * SIZE(AO), %xmm13 - mulsd %xmm8, %xmm13 - subsd %xmm13, %xmm11 - movsd -9 * SIZE(AO), %xmm14 - mulsd %xmm8, %xmm14 - subsd %xmm14, %xmm9 - - movsd -6 * SIZE(AO), %xmm12 - mulsd %xmm12, %xmm11 - - movsd -5 * SIZE(AO), %xmm13 - mulsd %xmm11, %xmm13 - subsd %xmm13, %xmm9 - - movsd -1 * SIZE(AO), %xmm12 - mulsd %xmm12, %xmm9 - - unpcklpd %xmm8, %xmm10 - unpcklpd %xmm9, %xmm11 -#endif - -#ifdef RN - movddup -16 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm10 - mulpd %xmm8, %xmm11 -#endif - -#ifdef RT - movddup -16 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm10 - mulpd %xmm8, %xmm11 -#endif - -#ifdef LN - subq $4 * SIZE, CO1 -#endif - - movlpd %xmm10, 0 * SIZE(CO1) - 
movhpd %xmm10, 1 * SIZE(CO1) - movlpd %xmm11, 2 * SIZE(CO1) - movhpd %xmm11, 3 * SIZE(CO1) - -#if defined(LN) || defined(LT) - movaps %xmm10, -16 * SIZE(BO) - movaps %xmm11, -14 * SIZE(BO) -#else - movaps %xmm10, -16 * SIZE(AO) - movaps %xmm11, -14 * SIZE(AO) -#endif - -#ifndef LN - addq $4 * SIZE, CO1 -#endif - -#if defined(LT) || defined(RN) - movq K, %rax - subq KK, %rax - leaq (,%rax, SIZE), %rax - leaq (AO, %rax, 4), AO - addq %rax, BO -#endif - -#ifdef LN - subq $4, KK -#endif - -#ifdef LT - addq $4, KK -#endif - -#ifdef RT - movq K, %rax - salq $2 + BASE_SHIFT, %rax - addq %rax, AORIG -#endif - - decq I # i -- - jg .L91 - ALIGN_4 - -.L100: - testq $2, M - je .L110 - -#ifdef LN - movq K, %rax - salq $1 + BASE_SHIFT, %rax - subq %rax, AORIG -#endif - -#if defined(LN) || defined(RT) - movq KK, %rax - movq AORIG, AO - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO -#endif - - movq B, BO - -#if defined(LN) || defined(RT) - movq KK, %rax - leaq (BO, %rax, SIZE), BO -#endif - - movddup -16 * SIZE(BO), %xmm0 - pxor %xmm8, %xmm8 - movddup -15 * SIZE(BO), %xmm1 - pxor %xmm9, %xmm9 - movddup -14 * SIZE(BO), %xmm2 - pxor %xmm10, %xmm10 - movddup -13 * SIZE(BO), %xmm3 - pxor %xmm11, %xmm11 - -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $-4, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (BO, %rax, 1), BO - negq %rax - NOBRANCH - je .L106 - ALIGN_4 - -.L102: - mulpd -16 * SIZE(AO, %rax, 2), %xmm0 - addpd %xmm0, %xmm8 - movddup -12 * SIZE(BO, %rax, 1), %xmm0 - - mulpd -14 * SIZE(AO, %rax, 2), %xmm1 - addpd %xmm1, %xmm9 - movddup -11 * SIZE(BO, %rax, 1), %xmm1 - - mulpd -12 * SIZE(AO, %rax, 2), %xmm2 - addpd %xmm2, %xmm10 - movddup -10 * SIZE(BO, %rax, 1), %xmm2 - - mulpd -10 * SIZE(AO, %rax, 2), %xmm3 - addpd %xmm3, %xmm11 - movddup -9 * SIZE(BO, %rax, 1), %xmm3 - - addq $4 * SIZE, %rax - BRANCH - jl .L102 - ALIGN_4 - -.L106: -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - 
subq KK, %rax -#endif - andq $3, %rax # if (k & 1) - je .L109 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (BO, %rax, 1), BO - negq %rax - ALIGN_4 - -.L107: - movddup -16 * SIZE(BO, %rax, 1), %xmm0 - mulpd -16 * SIZE(AO, %rax, 2), %xmm0 - addpd %xmm0, %xmm8 - - addq $SIZE, %rax - jl .L107 - ALIGN_4 - -.L109: - addpd %xmm9, %xmm8 - addpd %xmm11, %xmm10 - addpd %xmm10, %xmm8 - -#if defined(LN) || defined(RT) - movq KK, %rax -#ifdef LN - subq $2, %rax -#else - subq $1, %rax -#endif - - leaq (, %rax, SIZE), %rax - - movq AORIG, AO - leaq (AO, %rax, 2), AO - leaq (B, %rax, 1), BO -#endif - -#if defined(LN) || defined(LT) - movapd -16 * SIZE(BO), %xmm10 - subpd %xmm8, %xmm10 -#else - movapd -16 * SIZE(AO), %xmm10 - subpd %xmm8, %xmm10 -#endif - -#ifdef LN - movapd %xmm10, %xmm8 - unpckhpd %xmm8, %xmm8 - - movsd -13 * SIZE(AO), %xmm12 - mulsd %xmm12, %xmm8 - - movsd -14 * SIZE(AO), %xmm13 - mulsd %xmm8, %xmm13 - subsd %xmm13, %xmm10 - - movsd -16 * SIZE(AO), %xmm12 - mulsd %xmm12, %xmm10 - - unpcklpd %xmm8, %xmm10 -#endif - -#ifdef LT - movapd %xmm10, %xmm8 - unpckhpd %xmm8, %xmm8 - - movsd -16 * SIZE(AO), %xmm12 - mulsd %xmm12, %xmm10 - - movsd -15 * SIZE(AO), %xmm13 - mulsd %xmm10, %xmm13 - subsd %xmm13, %xmm8 - - movsd -13 * SIZE(AO), %xmm12 - mulsd %xmm12, %xmm8 - - unpcklpd %xmm8, %xmm10 -#endif - -#ifdef RN - movddup -16 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm10 -#endif - -#ifdef RT - movddup -16 * SIZE(BO), %xmm8 - mulpd %xmm8, %xmm10 -#endif - -#ifdef LN - subq $2 * SIZE, CO1 -#endif - -#if defined(LN) || defined(LT) - movlpd %xmm10, 0 * SIZE(CO1) - movhpd %xmm10, 1 * SIZE(CO1) -#else - movlpd %xmm10, 0 * SIZE(CO1) - movhpd %xmm10, 1 * SIZE(CO1) -#endif - -#if defined(LN) || defined(LT) - movaps %xmm10, -16 * SIZE(BO) -#else - movaps %xmm10, -16 * SIZE(AO) -#endif - -#ifndef LN - addq $2 * SIZE, CO1 -#endif - -#if defined(LT) || defined(RN) - movq K, %rax - subq KK, %rax - leaq (,%rax, SIZE), %rax - leaq (AO, %rax, 2), AO - addq %rax, BO -#endif - 
-#ifdef LN - subq $2, KK -#endif - -#ifdef LT - addq $2, KK -#endif - -#ifdef RT - movq K, %rax - salq $1 + BASE_SHIFT, %rax - addq %rax, AORIG -#endif - ALIGN_4 - -.L110: - testq $1, M - je .L119 - ALIGN_4 - -.L111: -#ifdef LN - movq K, %rax - salq $0 + BASE_SHIFT, %rax - subq %rax, AORIG -#endif - -#if defined(LN) || defined(RT) - movq KK, %rax - movq AORIG, AO - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO -#endif - - movq B, BO - -#if defined(LN) || defined(RT) - movq KK, %rax - leaq (BO, %rax, SIZE), BO -#endif - - movapd -16 * SIZE(AO), %xmm0 - pxor %xmm8, %xmm8 - movapd -14 * SIZE(AO), %xmm1 - pxor %xmm9, %xmm9 - -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $-4, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (BO, %rax, 1), BO - negq %rax - NOBRANCH - je .L116 - ALIGN_4 - -.L112: - mulpd -16 * SIZE(BO, %rax, 1), %xmm0 - addpd %xmm0, %xmm8 - movapd -12 * SIZE(AO, %rax, 1), %xmm0 - - mulpd -14 * SIZE(BO, %rax, 1), %xmm1 - addpd %xmm1, %xmm9 - movapd -10 * SIZE(AO, %rax, 1), %xmm1 - - addq $4 * SIZE, %rax - BRANCH - jl .L112 - ALIGN_4 - -.L116: -#if defined(LT) || defined(RN) - movq KK, %rax -#else - movq K, %rax - subq KK, %rax -#endif - andq $3, %rax # if (k & 1) - je .L118 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (BO, %rax, 1), BO - negq %rax - ALIGN_4 - -.L117: - mulsd -16 * SIZE(BO, %rax, 1), %xmm0 - addsd %xmm0, %xmm8 - movsd -15 * SIZE(AO, %rax, 1), %xmm0 - - addq $SIZE, %rax - jl .L117 - ALIGN_4 - -.L118: - addpd %xmm9, %xmm8 - haddpd %xmm8, %xmm8 - -#if defined(LN) || defined(RT) - movq KK, %rax -#ifdef LN - subq $1, %rax -#else - subq $1, %rax -#endif - - leaq (, %rax, SIZE), %rax - - movq AORIG, AO - leaq (AO, %rax, 1), AO - leaq (B, %rax, 1), BO -#endif - -#if defined(LN) || defined(LT) - movsd -16 * SIZE(BO), %xmm10 - subsd %xmm8, %xmm10 -#else - movsd -16 * SIZE(AO), %xmm10 - subsd %xmm8, %xmm10 -#endif - -#if defined(LN) || defined(LT) - movsd -16 
* SIZE(AO), %xmm12 - mulsd %xmm12, %xmm10 -#endif - -#if defined(RN) || defined(RT) - movsd -16 * SIZE(BO), %xmm8 - mulsd %xmm8, %xmm10 -#endif - -#ifdef LN - subq $1 * SIZE, CO1 -#endif - - movsd %xmm10, 0 * SIZE(CO1) - -#if defined(LN) || defined(LT) - movlpd %xmm10, -16 * SIZE(BO) -#else - movlpd %xmm10, -16 * SIZE(AO) -#endif - -#ifndef LN - addq $1 * SIZE, CO1 -#endif - -#if defined(LT) || defined(RN) - movq K, %rax - subq KK, %rax - leaq (,%rax, SIZE), %rax - addq %rax, AO - addq %rax, BO -#endif - -#ifdef LN - subq $1, KK -#endif - -#ifdef LT - addq $1, KK -#endif - -#ifdef RT - movq K, %rax - salq $0 + BASE_SHIFT, %rax - addq %rax, AORIG -#endif - ALIGN_4 - -.L119: -#ifdef LN - leaq (B, K, SIZE), B -#endif - -#if defined(LT) || defined(RN) - movq BO, B -#endif - -#ifdef RN - addq $1, KK -#endif - -#ifdef RT - subq $1, KK -#endif - ALIGN_4 - - -.L999: - movq (%rsp), %rbx - movq 8(%rsp), %rbp - movq 16(%rsp), %r12 - movq 24(%rsp), %r13 - movq 32(%rsp), %r14 - movq 40(%rsp), %r15 - -#ifdef WINDOWS_ABI - movq 48(%rsp), %rdi - movq 56(%rsp), %rsi - movups 64(%rsp), %xmm6 - movups 80(%rsp), %xmm7 - movups 96(%rsp), %xmm8 - movups 112(%rsp), %xmm9 - movups 128(%rsp), %xmm10 - movups 144(%rsp), %xmm11 - movups 160(%rsp), %xmm12 - movups 176(%rsp), %xmm13 - movups 192(%rsp), %xmm14 - movups 208(%rsp), %xmm15 -#endif - - addq $STACKSIZE, %rsp - ret - - EPILOGUE From 731220f8707c97286238ac9f7b9065d28c2ad7fa Mon Sep 17 00:00:00 2001 From: wernsaar Date: Tue, 30 Apr 2013 10:07:17 +0200 Subject: [PATCH 07/15] changed DGEMM_DEFAULT_P and DGEMM_DEFAULT_Q to 248 for BULLDOZER 64bit --- param.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/param.h b/param.h index 5b6a19ad5..d6c3a9b00 100644 --- a/param.h +++ b/param.h @@ -193,14 +193,26 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#else #define SGEMM_DEFAULT_P 448 + +#if defined(BULLDOZER) && defined(ARCH_X86_64) +#define DGEMM_DEFAULT_P 248 +#else #define DGEMM_DEFAULT_P 224 +#endif + #define QGEMM_DEFAULT_P 112 #define CGEMM_DEFAULT_P 224 #define ZGEMM_DEFAULT_P 112 #define XGEMM_DEFAULT_P 56 #define SGEMM_DEFAULT_Q 224 + +#if defined(BULLDOZER) && defined(ARCH_X86_64) +#define DGEMM_DEFAULT_Q 248 +#else #define DGEMM_DEFAULT_Q 224 +#endif + #define QGEMM_DEFAULT_Q 224 #define CGEMM_DEFAULT_Q 224 #define ZGEMM_DEFAULT_Q 224 From 25491e42f9ebc25f9bea03961600b8a505ec8021 Mon Sep 17 00:00:00 2001 From: wernsaar Date: Sat, 8 Jun 2013 09:40:17 +0200 Subject: [PATCH 08/15] New dgemm kernel for BULLDOZER: dgemm_kernel_8x2_bulldozer.S --- driver/level3/level3.c | 15 +- driver/level3/level3_thread.c | 16 +- kernel/x86_64/KERNEL.BULLDOZER | 56 +- kernel/x86_64/dgemm_kernel_4x4_bulldozer.S | 1959 ---------- kernel/x86_64/dgemm_kernel_8x2_bulldozer.S | 3854 ++++++++++++++++++++ param.h | 24 +- 6 files changed, 3925 insertions(+), 1999 deletions(-) delete mode 100644 kernel/x86_64/dgemm_kernel_4x4_bulldozer.S create mode 100644 kernel/x86_64/dgemm_kernel_8x2_bulldozer.S diff --git a/driver/level3/level3.c b/driver/level3/level3.c index 20e811cd0..27e503b0e 100644 --- a/driver/level3/level3.c +++ b/driver/level3/level3.c @@ -332,7 +332,20 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, #else for(jjs = js; jjs < js + min_j; jjs += min_jj){ min_jj = min_j + js - jjs; - if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N; + +#if defined(BULLDOZER) && defined(ARCH_X86_64) && defined(DOUBLE) && !defined(COMPLEX) + if (min_jj >= 12*GEMM_UNROLL_N) min_jj = 12*GEMM_UNROLL_N; + else + if (min_jj >= 6*GEMM_UNROLL_N) min_jj = 6*GEMM_UNROLL_N; + else + if (min_jj >= 3*GEMM_UNROLL_N) min_jj = 3*GEMM_UNROLL_N; + else + if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N; +#else + + if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N; +#endif + START_RPCC(); diff --git a/driver/level3/level3_thread.c 
b/driver/level3/level3_thread.c index 000d42397..87a32898c 100644 --- a/driver/level3/level3_thread.c +++ b/driver/level3/level3_thread.c @@ -360,8 +360,20 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, for(jjs = xxx; jjs < MIN(n_to, xxx + div_n); jjs += min_jj){ min_jj = MIN(n_to, xxx + div_n) - jjs; + +#if defined(BULLDOZER) && defined(ARCH_X86_64) && defined(DOUBLE) && !defined(COMPLEX) + if (min_jj >= 12*GEMM_UNROLL_N) min_jj = 12*GEMM_UNROLL_N; + else + if (min_jj >= 6*GEMM_UNROLL_N) min_jj = 6*GEMM_UNROLL_N; + else + if (min_jj >= 3*GEMM_UNROLL_N) min_jj = 3*GEMM_UNROLL_N; + else + if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N; +#else + if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N; - +#endif + START_RPCC(); OCOPY_OPERATION(min_l, min_jj, b, ldb, ls, jjs, @@ -634,7 +646,7 @@ static int gemm_driver(blas_arg_t *args, BLASLONG *range_m, BLASLONG num_cpu_n ++; } - + for (j = 0; j < num_cpu_m; j++) { for (i = 0; i < num_cpu_m; i++) { for (k = 0; k < DIVIDE_RATE; k++) { diff --git a/kernel/x86_64/KERNEL.BULLDOZER b/kernel/x86_64/KERNEL.BULLDOZER index 2ac035fe0..70ae51f6d 100644 --- a/kernel/x86_64/KERNEL.BULLDOZER +++ b/kernel/x86_64/KERNEL.BULLDOZER @@ -10,13 +10,13 @@ SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX) SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX) SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) -DGEMMKERNEL = dgemm_kernel_4x4_bulldozer.S -DGEMMINCOPY = -DGEMMITCOPY = -DGEMMONCOPY = ../generic/gemm_ncopy_4.c -DGEMMOTCOPY = ../generic/gemm_tcopy_4.c -DGEMMINCOPYOBJ = -DGEMMITCOPYOBJ = +DGEMMKERNEL = dgemm_kernel_8x2_bulldozer.S +DGEMMINCOPY = ../generic/gemm_ncopy_8.c +DGEMMITCOPY = ../generic/gemm_tcopy_8.c +DGEMMONCOPY = ../generic/gemm_ncopy_2.c +DGEMMOTCOPY = ../generic/gemm_tcopy_2.c +DGEMMINCOPYOBJ = dgemm_incopy$(TSUFFIX).$(SUFFIX) +DGEMMITCOPYOBJ = dgemm_itcopy$(TSUFFIX).$(SUFFIX) DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) 
DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) CGEMMKERNEL = zgemm_kernel_4x2_barcelona.S @@ -38,25 +38,27 @@ ZGEMMITCOPYOBJ = ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) -STRSMKERNEL_LN = trsm_kernel_LN_8x4_sse.S -STRSMKERNEL_LT = trsm_kernel_LT_8x4_sse.S -STRSMKERNEL_RN = trsm_kernel_LT_8x4_sse.S -STRSMKERNEL_RT = trsm_kernel_RT_8x4_sse.S - -DTRSMKERNEL_LN = trsm_kernel_LN_4x4_barcelona.S -DTRSMKERNEL_LT = trsm_kernel_LT_4x4_barcelona.S -DTRSMKERNEL_RN = trsm_kernel_LT_4x4_barcelona.S -DTRSMKERNEL_RT = trsm_kernel_RT_4x4_barcelona.S - -CTRSMKERNEL_LN = ztrsm_kernel_LN_4x2_sse.S -CTRSMKERNEL_LT = ztrsm_kernel_LT_4x2_sse.S -CTRSMKERNEL_RN = ztrsm_kernel_LT_4x2_sse.S -CTRSMKERNEL_RT = ztrsm_kernel_RT_4x2_sse.S - -ZTRSMKERNEL_LN = ztrsm_kernel_LN_2x2_sse2.S -ZTRSMKERNEL_LT = ztrsm_kernel_LT_2x2_sse2.S -ZTRSMKERNEL_RN = ztrsm_kernel_LT_2x2_sse2.S -ZTRSMKERNEL_RT = ztrsm_kernel_RT_2x2_sse2.S - CGEMM3MKERNEL = zgemm3m_kernel_8x4_barcelona.S ZGEMM3MKERNEL = zgemm3m_kernel_4x4_barcelona.S + +STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +DTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +DTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + + diff --git a/kernel/x86_64/dgemm_kernel_4x4_bulldozer.S b/kernel/x86_64/dgemm_kernel_4x4_bulldozer.S deleted file mode 100644 index 91cd49291..000000000 --- 
a/kernel/x86_64/dgemm_kernel_4x4_bulldozer.S +++ /dev/null @@ -1,1959 +0,0 @@ -/*********************************************************************/ -/* Copyright 2009, 2010 The University of Texas at Austin. */ -/* All rights reserved. */ -/* */ -/* Redistribution and use in source and binary forms, with or */ -/* without modification, are permitted provided that the following */ -/* conditions are met: */ -/* */ -/* 1. Redistributions of source code must retain the above */ -/* copyright notice, this list of conditions and the following */ -/* disclaimer. */ -/* */ -/* 2. Redistributions in binary form must reproduce the above */ -/* copyright notice, this list of conditions and the following */ -/* disclaimer in the documentation and/or other materials */ -/* provided with the distribution. */ -/* */ -/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ -/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ -/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ -/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ -/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ -/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ -/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ -/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ -/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ -/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ -/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ -/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ -/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ -/* POSSIBILITY OF SUCH DAMAGE. */ -/* */ -/* The views and conclusions contained in the software and */ -/* documentation are those of the authors and should not be */ -/* interpreted as representing official policies, either expressed */ -/* or implied, of The University of Texas at Austin. 
*/ -/*********************************************************************/ - -/********************************************************************* -* Changelog: -* -* 2013/04/15 Saar -* Prefetch for A and B -* unroll of inner Loop -* using generic versions for ncopy and tcopy -* moved vmovddup ALPHA, %xmm7 down -* define A_PR1 192 -* define B_PR1 512 -* -* 2013/04/27 Saar -* define A_PR1 224 -* define B_PR1 224 -* created 2 different Kernels -**********************************************************************/ - -/********************************************************************* -* 2013/04/12 Saar -* Performance: -* 3584x3584 89 GFLOPS with 8 threads on 4 modules -* 76 GFLOPS with 4 threads on 4 modules -* 53 GFLOPS with 4 threads on 2 modules -* 46 GFLOPS with 2 threads on 2 modules -* 28 GFLOPS with 2 threads on 1 module -* 23,1 GFLOPS with 1 thread on 1 module -*********************************************************************/ - -#define ASSEMBLER -#include "common.h" - -#define OLD_M %rdi -#define OLD_N %rsi -#define M %r13 -#define N %r14 -#define K %rdx - -#define A %rcx -#define B %r8 -#define C %r9 -#define LDC %r10 - -#define I %r11 -#define AO %rdi -#define BO %rsi -#define CO1 %r15 -#define CO2 %r12 -#define BB %rbp -#define J %rbx - -#ifndef WINDOWS_ABI - -#define STACKSIZE 96 - -#define ALPHA 48(%rsp) -#define OFFSET 56(%rsp) -#define KK 64(%rsp) -#define KKK 72(%rsp) - -#else - -#define STACKSIZE 256 - -#define OLD_A 40 + STACKSIZE(%rsp) -#define OLD_B 48 + STACKSIZE(%rsp) -#define OLD_C 56 + STACKSIZE(%rsp) -#define OLD_LDC 64 + STACKSIZE(%rsp) -#define OLD_OFFSET 72 + STACKSIZE(%rsp) - -#define ALPHA 224(%rsp) -#define OFFSET 232(%rsp) -#define KK 240(%rsp) -#define KKK 248(%rsp) - -#endif - -#define movapd movaps -#define movupd movups - -#define A_PR1 224 -#define B_PR1 224 - - -#if defined(OPTBYMODULE) || !defined(SMP) - -#define KERNEL1(xx) \ - vmovups -14 * SIZE(AO, %rax, 4),%xmm2 ;\ - vfmaddpd %xmm8,%xmm6,%xmm7,%xmm8 ;\ - 
vmovddup -14 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm12,%xmm2,%xmm7,%xmm12 ;\ - vmovddup -13 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm9,%xmm6,%xmm3,%xmm9 ;\ - vmovddup -12 * SIZE(BO, %rax, 4), %xmm1 ;\ - vfmaddpd %xmm13,%xmm2,%xmm3,%xmm13 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup -11 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - vmovups -12 * SIZE(AO, %rax, 4),%xmm0 ;\ - vfmaddpd %xmm10,%xmm6,%xmm5,%xmm10 ;\ - vfmaddpd %xmm11,%xmm6,%xmm4,%xmm11 ;\ - -#define KERNEL2(xx) \ - vmovups -10 * SIZE(AO, %rax, 4), %xmm2 ;\ - vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ - vmovups -8 * SIZE(AO, %rax, 4),%xmm6 ;\ - vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ - vmovddup -10 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup -9 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ - vmovddup -8 * SIZE(BO, %rax, 4), %xmm7 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup -7 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - -#define KERNEL3(xx) \ - vmovups -6 * SIZE(AO, %rax, 4),%xmm2 ;\ - vfmaddpd %xmm8, %xmm6, %xmm7, %xmm8 ;\ - vmovddup -6 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ - vmovddup -5 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm9, %xmm6, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup -4 * SIZE(BO, %rax, 4), %xmm1 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup -3 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - vmovups -4 * SIZE(AO, %rax, 4),%xmm0 ;\ - vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ - vfmaddpd %xmm11,%xmm6, %xmm4, %xmm11 ;\ - -#define KERNEL4(xx) \ - vmovups -2 * SIZE(AO, %rax, 4), %xmm2 ;\ - vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ - vmovups (AO, %rax, 4), %xmm6 ;\ - vfmaddpd %xmm12,%xmm2, %xmm1 ,%xmm12;\ - vmovddup -2 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ - 
vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup -1 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ - vmovddup (BO, %rax, 4), %xmm7 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup 1 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - -#define KERNEL5(xx) \ - vmovups 2 * SIZE(AO, %rax, 4),%xmm2 ;\ - vfmaddpd %xmm8,%xmm6, %xmm7,%xmm8 ;\ - vmovddup 2 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ - vmovddup 3 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup 4 * SIZE(BO, %rax, 4), %xmm1 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup 5 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - vmovups 4 * SIZE(AO, %rax, 4),%xmm0 ;\ - vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ - vfmaddpd %xmm11,%xmm6, %xmm4,%xmm11 ;\ - -#define KERNEL6(xx) \ - vmovups 6 * SIZE(AO, %rax, 4), %xmm2 ;\ - vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ - vmovups 8 * SIZE(AO, %rax, 4), %xmm6 ;\ - vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ - vmovddup 6 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup 7 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ - vmovddup 8 * SIZE(BO, %rax, 4), %xmm7 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup 9 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - -#define KERNEL7(xx) \ - vmovups 10 * SIZE(AO, %rax, 4),%xmm2 ;\ - vfmaddpd %xmm8,%xmm6, %xmm7,%xmm8 ;\ - vmovddup 10 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ - vmovddup 11 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup 12 * SIZE(BO, %rax, 4), %xmm1 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup 13 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd 
%xmm15,%xmm2, %xmm4,%xmm15 ;\ - vmovups 12 * SIZE(AO, %rax, 4), %xmm0 ;\ - vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ - vfmaddpd %xmm11,%xmm6, %xmm4,%xmm11 ;\ - -#define KERNEL8(xx) \ - vmovups 14 * SIZE(AO, %rax, 4), %xmm2 ;\ - vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ - vmovups 16 * SIZE(AO, %rax, 4),%xmm6 ;\ - vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ - vmovddup 14 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13, %xmm2, %xmm3,%xmm13 ;\ - vmovddup 15 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ - vmovddup 16 * SIZE(BO, %rax, 4), %xmm7 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup 17 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - -#else - -#define KERNEL1(xx) \ - vmovups -14 * SIZE(AO, %rax, 4),%xmm2 ;\ - vfmaddpd %xmm8,%xmm6,%xmm7,%xmm8 ;\ - vmovddup -14 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm12,%xmm2,%xmm7,%xmm12 ;\ - vmovddup -13 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm9,%xmm6,%xmm3,%xmm9 ;\ - vmovddup -12 * SIZE(BO, %rax, 4), %xmm1 ;\ - vfmaddpd %xmm13,%xmm2,%xmm3,%xmm13 ;\ - vmovddup -8 * SIZE(BO, %rax, 4), %xmm7 ;\ - vfmaddpd %xmm10,%xmm6,%xmm5,%xmm10 ;\ - vmovddup -11 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm11,%xmm6,%xmm4,%xmm11 ;\ - vmovups -12 * SIZE(AO, %rax, 4),%xmm0 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - vmovups -8 * SIZE(AO, %rax, 4),%xmm6 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - -#define KERNEL2(xx) \ - vmovups -10 * SIZE(AO, %rax, 4), %xmm2 ;\ - vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ - vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ - vmovddup -10 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup -9 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup -7 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - 
-#define KERNEL3(xx) \ - vmovups -6 * SIZE(AO, %rax, 4),%xmm2 ;\ - vfmaddpd %xmm8, %xmm6, %xmm7, %xmm8 ;\ - vmovddup -6 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ - vmovddup -5 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm9, %xmm6, %xmm3,%xmm9 ;\ - vmovddup (BO, %rax, 4), %xmm7 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup -4 * SIZE(BO, %rax, 4), %xmm1 ;\ - vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ - vmovddup -3 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm11,%xmm6, %xmm4, %xmm11 ;\ - vmovups -4 * SIZE(AO, %rax, 4),%xmm0 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovups (AO, %rax, 4), %xmm6 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - -#define KERNEL4(xx) \ - vmovups -2 * SIZE(AO, %rax, 4), %xmm2 ;\ - vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ - vfmaddpd %xmm12,%xmm2, %xmm1 ,%xmm12;\ - vmovddup -2 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup -1 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup 1 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - -#define KERNEL5(xx) \ - vmovups 2 * SIZE(AO, %rax, 4),%xmm2 ;\ - vfmaddpd %xmm8,%xmm6, %xmm7,%xmm8 ;\ - vmovddup 2 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ - vmovddup 3 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\ - vmovddup 8 * SIZE(BO, %rax, 4), %xmm7 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup 4 * SIZE(BO, %rax, 4), %xmm1 ;\ - vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ - vmovddup 5 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm11,%xmm6, %xmm4,%xmm11 ;\ - vmovups 4 * SIZE(AO, %rax, 4),%xmm0 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovups 8 * SIZE(AO, %rax, 4), %xmm6 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - -#define KERNEL6(xx) \ - vmovups 6 * SIZE(AO, %rax, 4), %xmm2 ;\ - vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ - 
vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ - vmovddup 6 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup 7 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup 9 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - -#define KERNEL7(xx) \ - vmovups 10 * SIZE(AO, %rax, 4),%xmm2 ;\ - vfmaddpd %xmm8,%xmm6, %xmm7,%xmm8 ;\ - vmovddup 10 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm12,%xmm2, %xmm7,%xmm12 ;\ - vmovddup 11 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\ - vmovddup 16 * SIZE(BO, %rax, 4), %xmm7 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup 12 * SIZE(BO, %rax, 4), %xmm1 ;\ - vfmaddpd %xmm10,%xmm6, %xmm5,%xmm10 ;\ - vmovddup 13 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm11,%xmm6, %xmm4,%xmm11 ;\ - vmovups 12 * SIZE(AO, %rax, 4), %xmm0 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovups 16 * SIZE(AO, %rax, 4),%xmm6 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - -#define KERNEL8(xx) \ - vmovups 14 * SIZE(AO, %rax, 4), %xmm2 ;\ - vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 ;\ - vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ - vmovddup 14 * SIZE(BO, %rax, 4), %xmm5 ;\ - vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm13, %xmm2, %xmm3,%xmm13 ;\ - vmovddup 15 * SIZE(BO, %rax, 4), %xmm4 ;\ - vfmaddpd %xmm10,%xmm0, %xmm5,%xmm10 ;\ - vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ - vmovddup 17 * SIZE(BO, %rax, 4), %xmm3 ;\ - vfmaddpd %xmm11,%xmm0, %xmm4,%xmm11 ;\ - vfmaddpd %xmm15,%xmm2, %xmm4,%xmm15 ;\ - -#endif - -#define KERNEL_SUB1(xx) \ - vmovups -16 * SIZE(AO),%xmm0 ;\ - vmovups -14 * SIZE(AO),%xmm2 ;\ - vmovddup -16 * SIZE(BO), %xmm1 ;\ - vmovddup -15 * SIZE(BO), %xmm3 ;\ - vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ - vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm12, %xmm2, %xmm1,%xmm12 ;\ - vfmaddpd %xmm13, %xmm2, %xmm3,%xmm13 ;\ - vmovddup -14 * 
SIZE(BO), %xmm1 ;\ - vmovddup -13 * SIZE(BO), %xmm3 ;\ - vfmaddpd %xmm10, %xmm0, %xmm1,%xmm10 ;\ - vfmaddpd %xmm11, %xmm0, %xmm3,%xmm11 ;\ - vfmaddpd %xmm14, %xmm2, %xmm1,%xmm14 ;\ - vfmaddpd %xmm15, %xmm2, %xmm3,%xmm15 ;\ - - -#define KERNEL_SUB2(xx) \ - vmovups -12 * SIZE(AO), %xmm0 ;\ - vmovups -10 * SIZE(AO), %xmm2 ;\ - vmovddup -12 * SIZE(BO), %xmm1 ;\ - vmovddup -11 * SIZE(BO), %xmm3 ;\ - vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ - vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup -10 * SIZE(BO), %xmm1 ;\ - vmovddup -9 * SIZE(BO), %xmm3 ;\ - vfmaddpd %xmm10,%xmm0, %xmm1,%xmm10 ;\ - vfmaddpd %xmm11,%xmm0, %xmm3,%xmm11 ;\ - vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\ - vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\ - -#define KERNEL_SUB3(xx) \ - vmovups -8 * SIZE(AO),%xmm0 ;\ - vmovups -6 * SIZE(AO),%xmm2 ;\ - vmovddup -8 * SIZE(BO), %xmm1 ;\ - vmovddup -7 * SIZE(BO), %xmm3 ;\ - vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ - vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup -6 * SIZE(BO), %xmm1 ;\ - vmovddup -5 * SIZE(BO), %xmm3 ;\ - vfmaddpd %xmm10,%xmm0, %xmm1,%xmm10 ;\ - vfmaddpd %xmm11,%xmm0, %xmm3,%xmm11 ;\ - vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\ - vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\ - -#define KERNEL_SUB4(xx) \ - vmovups -4 * SIZE(AO), %xmm0 ;\ - vmovups -2 * SIZE(AO), %xmm2 ;\ - vmovddup -4 * SIZE(BO), %xmm1 ;\ - vmovddup -3 * SIZE(BO), %xmm3 ;\ - vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\ - vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\ - vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ - vmovddup -2 * SIZE(BO), %xmm1 ;\ - vmovddup -1 * SIZE(BO), %xmm3 ;\ - vfmaddpd %xmm10,%xmm0, %xmm1,%xmm10 ;\ - vfmaddpd %xmm11,%xmm0, %xmm3,%xmm11 ;\ - vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\ - vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\ - vmovups (AO), %xmm0 ;\ - vmovddup (BO), %xmm1 ;\ - vmovddup 1 * SIZE(BO), %xmm3 
;\ - vmovaps %xmm0, %xmm2 - - PROLOGUE - PROFCODE - - subq $STACKSIZE, %rsp - movq %rbx, (%rsp) - movq %rbp, 8(%rsp) - movq %r12, 16(%rsp) - movq %r13, 24(%rsp) - movq %r14, 32(%rsp) - movq %r15, 40(%rsp) - - vzeroupper - -#ifdef WINDOWS_ABI - movq %rdi, 48(%rsp) - movq %rsi, 56(%rsp) - movups %xmm6, 64(%rsp) - movups %xmm7, 80(%rsp) - movups %xmm8, 96(%rsp) - movups %xmm9, 112(%rsp) - movups %xmm10, 128(%rsp) - movups %xmm11, 144(%rsp) - movups %xmm12, 160(%rsp) - movups %xmm13, 176(%rsp) - movups %xmm14, 192(%rsp) - movups %xmm15, 208(%rsp) - - movq ARG1, OLD_M - movq ARG2, OLD_N - movq ARG3, K - movq OLD_A, A - movq OLD_B, B - movq OLD_C, C - movq OLD_LDC, LDC -#ifdef TRMMKERNEL - movsd OLD_OFFSET, %xmm12 -#endif - vmovaps %xmm3, %xmm0 - -#else - movq STACKSIZE + 8(%rsp), LDC -#ifdef TRMMKERNEL - movsd STACKSIZE + 16(%rsp), %xmm12 -#endif - -#endif - - movq OLD_M, M - movq OLD_N, N - - subq $-16 * SIZE, A - subq $-16 * SIZE, B - - vmovsd %xmm0, ALPHA - - salq $BASE_SHIFT, LDC # LDC << 3 # LDC * 8 - -#ifdef TRMMKERNEL - vmovsd %xmm12, OFFSET - vmovsd %xmm12, KK -#ifndef LEFT - negq KK -#endif -#endif - movq N, J - sarq $2, J # j = (n >> 2) # j = n / 4 - jle .L40 - ALIGN_4 - -.L01: - movq C, CO1 # coffset1 = c - leaq (C, LDC, 2), CO2 # coffset2 = c + ldc - - leaq (C, LDC, 4), C # c += 4 * ldc - -#if defined(TRMMKERNEL) && defined(LEFT) - movq OFFSET, %rax - movq %rax, KK -#endif - - movq A, AO # aoffset = a - - movq K, %rax - salq $BASE_SHIFT + 2, %rax # k << 5 # K * 32 - leaq (B, %rax), BB - - movq M, I - sarq $2, I # i = (m >> 2) - jle .L20 - ALIGN_4 - - .align 16 -.L11: -#if !defined(TRMMKERNEL) || \ - (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq B, BO -#else - movq KK, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 4), AO - leaq (B, %rax, 4), BO -#endif - - - - vxorpd %xmm8, %xmm8,%xmm8 - vxorpd %xmm9, %xmm9,%xmm9 - vxorpd %xmm10, %xmm10,%xmm10 - vxorpd %xmm11, 
%xmm11,%xmm11 - vxorpd %xmm12, %xmm12,%xmm12 - vxorpd %xmm13, %xmm13,%xmm13 - vxorpd %xmm14, %xmm14,%xmm14 - vxorpd %xmm15, %xmm15,%xmm15 - - prefetchw (CO1) - // prefetchw (CO1,LDC) - // prefetchw (CO2) - // prefetchw (CO2,LDC) - -#ifndef TRMMKERNEL - movq K, %rax -#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) - movq K, %rax - subq KK, %rax - movq %rax, KKK -#else - movq KK, %rax -#ifdef LEFT - addq $4, %rax -#else - addq $4, %rax -#endif - movq %rax, KKK -#endif - - andq $-8, %rax - salq $BASE_SHIFT, %rax - leaq (AO, %rax, 4), AO - leaq (BO, %rax, 4), BO - negq %rax - je .L15 - // ALIGN_4 - - vmovups -16 * SIZE(AO, %rax, 4),%xmm6 - vmovddup -16 * SIZE(BO, %rax, 4), %xmm7 - vmovddup -15 * SIZE(BO, %rax, 4), %xmm3 - - .align 32 - -.L12: - -#if defined(OPTBYMODULE) || !defined(SMP) - - prefetcht0 A_PR1(AO,%rax,4) - prefetcht0 B_PR1(BO,%rax,4) - KERNEL1(16 * 0) - KERNEL2(16 * 0) - prefetcht0 A_PR1+64(AO,%rax,4) - prefetcht0 B_PR1+64(BO,%rax,4) - KERNEL3(16 * 0) - KERNEL4(16 * 0) - prefetcht0 A_PR1+128(AO,%rax,4) - prefetcht0 B_PR1+128(BO,%rax,4) - KERNEL5(16 * 0) - KERNEL6(16 * 0) - prefetcht0 A_PR1+192(AO,%rax,4) - prefetcht0 B_PR1+192(BO,%rax,4) - KERNEL7(16 * 0) - KERNEL8(16 * 0) - - addq $8 * SIZE, %rax - je .L15 - jmp .L12 - .align 16 - -#else - KERNEL1(16 * 0) - KERNEL2(16 * 0) - KERNEL3(16 * 0) - KERNEL4(16 * 0) - KERNEL5(16 * 0) - KERNEL6(16 * 0) - KERNEL7(16 * 0) - KERNEL8(16 * 0) - - addq $8 * SIZE, %rax - je .L15 - jmp .L12 - .align 16 - -#endif - - -.L15: - -#ifndef TRMMKERNEL - movq K, %rax -#else - movq KKK, %rax -#endif - testq $4, %rax - je .L16 - ALIGN_4 - - KERNEL_SUB1(16 * 0) - KERNEL_SUB2(16 * 0) - KERNEL_SUB3(16 * 0) - KERNEL_SUB4(16 * 0) - - subq $-16 * SIZE, BO - subq $-16 * SIZE, AO - ALIGN_4 - -.L16: -#ifndef TRMMKERNEL - movq K, %rax -#else - movq KKK, %rax -#endif - vmovddup ALPHA, %xmm7 - andq $3, %rax # if (k & 1) - je .L19 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 4), AO - leaq (BO, %rax, 4), BO - 
negq %rax - ALIGN_4 - -.L17: - vmovups -16 * SIZE(AO, %rax, 4), %xmm0 - vmovups -14 * SIZE(AO, %rax, 4), %xmm2 - vmovddup -16 * SIZE(BO, %rax, 4), %xmm1 - vmovddup -15 * SIZE(BO, %rax, 4), %xmm3 - vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 - vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 - vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 - vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 - vmovddup -14 * SIZE(BO, %rax, 4), %xmm1 - vmovddup -13 * SIZE(BO, %rax, 4), %xmm3 - vfmaddpd %xmm10,%xmm0, %xmm1,%xmm10 - vfmaddpd %xmm11,%xmm0, %xmm3,%xmm11 - vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 - vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 -/* - vmovups -12 * SIZE(AO, %rax, 4), %xmm0 - vmovddup -12 * SIZE(BO, %rax, 4), %xmm1 - vmovddup -11 * SIZE(BO, %rax, 4), %xmm3 - vmovaps %xmm0, %xmm2 -*/ - addq $SIZE, %rax - jl .L17 - ALIGN_4 - -.L19: - // prefetch -8 * SIZE(BB) - subq $-16 * SIZE, BB - -#ifndef TRMMKERNEL - - vfmaddpd (CO1),%xmm7, %xmm8,%xmm8 - vfmaddpd 2 * SIZE(CO1),%xmm7, %xmm12,%xmm12 - vfmaddpd (CO1, LDC),%xmm7, %xmm9,%xmm9 - vfmaddpd 2 * SIZE(CO1, LDC),%xmm7, %xmm13,%xmm13 - vfmaddpd (CO2),%xmm7, %xmm10,%xmm10 - vfmaddpd 2 * SIZE(CO2),%xmm7, %xmm14,%xmm14 - vfmaddpd (CO2, LDC),%xmm7, %xmm11,%xmm11 - vfmaddpd 2 * SIZE(CO2, LDC),%xmm7, %xmm15,%xmm15 - -#else - vmulpd %xmm7, %xmm8,%xmm8 - vmulpd %xmm7, %xmm12,%xmm12 - vmulpd %xmm7, %xmm9,%xmm9 - vmulpd %xmm7, %xmm13,%xmm13 - vmulpd %xmm7, %xmm10,%xmm10 - vmulpd %xmm7, %xmm14,%xmm14 - vmulpd %xmm7, %xmm11,%xmm11 - vmulpd %xmm7, %xmm15,%xmm15 - -#endif - - vmovups %xmm8, (CO1) - vmovups %xmm12, 2 * SIZE(CO1) - vmovups %xmm9, (CO1, LDC) - vmovups %xmm13, 2 * SIZE(CO1, LDC) - vmovups %xmm10, (CO2) - vmovups %xmm14, 2 * SIZE(CO2) - vmovups %xmm11, (CO2, LDC) - vmovups %xmm15, 2 * SIZE(CO2, LDC) - -#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq K, %rax - subq KKK, %rax - leaq (,%rax, SIZE), %rax - leaq (AO, %rax, 4), AO - leaq (BO, %rax, 4), BO -#endif - -#if defined(TRMMKERNEL) && 
defined(LEFT) - addq $4, KK -#endif - - addq $4 * SIZE, CO1 # coffset += 4 - addq $4 * SIZE, CO2 # coffset += 4 - decq I # i -- - BRANCH - jg .L11 - ALIGN_4 - -.L20: - testq $3, M - je .L39 - - testq $2, M - je .L30 - ALIGN_4 - -.L21: -#if !defined(TRMMKERNEL) || \ - (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq B, BO -#else - movq KK, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (B, %rax, 4), BO -#endif - - vmovups -16 * SIZE(AO), %xmm0 - vxorps %xmm8, %xmm8, %xmm8 - vmovups -12 * SIZE(AO), %xmm2 - vxorps %xmm9, %xmm9 ,%xmm9 - vmovddup -16 * SIZE(BO), %xmm1 - vxorps %xmm10, %xmm10, %xmm10 - vmovddup -15 * SIZE(BO), %xmm5 - vxorps %xmm11, %xmm11, %xmm11 - vmovddup -8 * SIZE(BO), %xmm3 - -#ifndef TRMMKERNEL - movq K, %rax -#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) - movq K, %rax - subq KK, %rax - movq %rax, KKK -#else - movq KK, %rax -#ifdef LEFT - addq $2, %rax -#else - addq $4, %rax -#endif - movq %rax, KKK -#endif - - andq $-4, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (BO, %rax, 4), BO - negq %rax - NOBRANCH - je .L26 - ALIGN_4 - -.L22: - vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 - vfmaddpd %xmm9,%xmm0, %xmm5,%xmm9 - vmovddup -14 * SIZE(BO, %rax, 4), %xmm1 - vfmaddpd %xmm10,%xmm0, %xmm1,%xmm10 - vmovddup -13 * SIZE(BO, %rax, 4), %xmm5 - vfmaddpd %xmm11,%xmm0, %xmm5,%xmm11 - vmovups -14 * SIZE(AO, %rax, 2), %xmm0 - vmovddup -12 * SIZE(BO, %rax, 4), %xmm1 - vmovddup -11 * SIZE(BO, %rax, 4), %xmm5 - vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 - vfmaddpd %xmm9,%xmm0, %xmm5,%xmm9 - vmovddup -10 * SIZE(BO, %rax, 4), %xmm1 - vmovddup -9 * SIZE(BO, %rax, 4), %xmm5 - vfmaddpd %xmm10,%xmm0, %xmm1,%xmm10 - vfmaddpd %xmm11,%xmm0, %xmm5,%xmm11 - vmovddup (BO, %rax, 4), %xmm1 - vmovddup -7 * SIZE(BO, %rax, 4), %xmm5 - vmovups -8 * SIZE(AO, %rax, 2), %xmm0 - vfmaddpd %xmm8,%xmm2, %xmm3,%xmm8 - vfmaddpd %xmm9,%xmm2, %xmm5,%xmm9 - 
vmovddup -6 * SIZE(BO, %rax, 4), %xmm3 - vmovddup -5 * SIZE(BO, %rax, 4), %xmm5 - vfmaddpd %xmm10,%xmm2, %xmm3,%xmm10 - vfmaddpd %xmm11,%xmm2, %xmm5,%xmm11 - vmovups -10 * SIZE(AO, %rax, 2), %xmm2 - vmovddup -4 * SIZE(BO, %rax, 4), %xmm3 - vmovddup -3 * SIZE(BO, %rax, 4), %xmm5 - vfmaddpd %xmm8,%xmm2, %xmm3,%xmm8 - vfmaddpd %xmm9,%xmm2, %xmm5,%xmm9 - vmovddup -2 * SIZE(BO, %rax, 4), %xmm3 - vmovddup -1 * SIZE(BO, %rax, 4), %xmm5 - vfmaddpd %xmm10,%xmm2, %xmm3,%xmm10 - vfmaddpd %xmm11,%xmm2, %xmm5,%xmm11 - vmovddup 8 * SIZE(BO, %rax, 4), %xmm3 - vmovups -4 * SIZE(AO, %rax, 2), %xmm2 - vmovddup 1 * SIZE(BO, %rax, 4), %xmm5 - - addq $4 * SIZE, %rax - BRANCH - jl .L22 - ALIGN_4 - -.L26: - vmovddup ALPHA, %xmm7 - -#ifndef TRMMKERNEL - movq K, %rax -#else - movq KKK, %rax -#endif - andq $3, %rax # if (k & 1) - je .L29 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (BO, %rax, 4), BO - negq %rax - ALIGN_4 - -.L27: - vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 - vmovddup -14 * SIZE(BO, %rax, 4), %xmm1 - vfmaddpd %xmm9,%xmm0, %xmm5,%xmm9 - vmovddup -13 * SIZE(BO, %rax, 4), %xmm5 - vfmaddpd %xmm10,%xmm0, %xmm1,%xmm10 - vmovddup -12 * SIZE(BO, %rax, 4), %xmm1 - vfmaddpd %xmm11,%xmm0, %xmm5,%xmm11 - vmovups -14 * SIZE(AO, %rax, 2), %xmm0 - vmovddup -11 * SIZE(BO, %rax, 4), %xmm5 - - addq $SIZE, %rax - jl .L27 - ALIGN_4 - -.L29: -#ifndef TRMMKERNEL - - vfmaddpd (CO1),%xmm7, %xmm8,%xmm8 - vfmaddpd (CO1, LDC),%xmm7, %xmm9,%xmm9 - vfmaddpd (CO2),%xmm7, %xmm10,%xmm10 - vfmaddpd (CO2, LDC),%xmm7, %xmm11,%xmm11 - -#else - vmulpd %xmm7, %xmm8,%xmm8 - vmulpd %xmm7, %xmm9,%xmm9 - vmulpd %xmm7, %xmm10,%xmm10 - vmulpd %xmm7, %xmm11,%xmm11 - -#endif - - vmovups %xmm8, (CO1) - vmovups %xmm9, (CO1, LDC) - - vmovups %xmm10, (CO2) - vmovups %xmm11, (CO2, LDC) - -#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq K, %rax - subq KKK, %rax - leaq (,%rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (BO, %rax, 
4), BO -#endif - -#if defined(TRMMKERNEL) && defined(LEFT) - addq $2, KK -#endif - - addq $2 * SIZE, CO1 - addq $2 * SIZE, CO2 - ALIGN_4 - -.L30: - testq $1, M - je .L39 - -#if !defined(TRMMKERNEL) || \ - (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq B, BO -#else - movq KK, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (B, %rax, 4), BO -#endif - - vmovddup -16 * SIZE(AO), %xmm0 - vxorps %xmm8, %xmm8, %xmm8 - vmovddup -14 * SIZE(AO), %xmm2 - vxorps %xmm9, %xmm9, %xmm9 - vmovddup -15 * SIZE(AO), %xmm4 - vxorps %xmm10, %xmm10,%xmm10 - vmovups -16 * SIZE(BO), %xmm1 - vxorps %xmm11, %xmm11,%xmm11 - vmovups -8 * SIZE(BO), %xmm3 - -#ifndef TRMMKERNEL - movq K, %rax -#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) - movq K, %rax - subq KK, %rax - movq %rax, KKK -#else - movq KK, %rax -#ifdef LEFT - addq $1, %rax -#else - addq $4, %rax -#endif - movq %rax, KKK -#endif - - andq $-4, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (BO, %rax, 4), BO - negq %rax - NOBRANCH - je .L36 - ALIGN_4 - -.L32: - vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 - vfmaddpd %xmm9,-14 * SIZE(BO, %rax, 4), %xmm0,%xmm9 - vmovups -12 * SIZE(BO, %rax, 4), %xmm1 - vmovddup -12 * SIZE(AO, %rax, 1), %xmm0 - vfmaddpd %xmm10,%xmm4, %xmm1,%xmm10 - vfmaddpd %xmm11,-10 * SIZE(BO, %rax, 4), %xmm4,%xmm11 - vmovups (BO, %rax, 4), %xmm1 - vmovddup -11 * SIZE(AO, %rax, 1), %xmm4 - vfmaddpd %xmm8,%xmm2, %xmm3,%xmm8 - vfmaddpd %xmm9,-6 * SIZE(BO, %rax, 4), %xmm2,%xmm9 - vmovups -4 * SIZE(BO, %rax, 4), %xmm3 - vmovddup -13 * SIZE(AO, %rax, 1), %xmm2 - vfmaddpd %xmm10,%xmm2, %xmm3,%xmm10 - vfmaddpd %xmm11,-2 * SIZE(BO, %rax, 4), %xmm2,%xmm11 - vmovups 8 * SIZE(BO, %rax, 4), %xmm3 - vmovddup -10 * SIZE(AO, %rax, 1), %xmm2 - - addq $4 * SIZE, %rax - BRANCH - jl .L32 - ALIGN_4 - -.L36: - vmovddup ALPHA, %xmm7 - -#ifndef TRMMKERNEL - movq K, %rax -#else - movq KKK, %rax -#endif 
- andq $3, %rax # if (k & 1) - je .L38 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (BO, %rax, 4), BO - negq %rax - ALIGN_4 - -.L37: - vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 - vfmaddpd %xmm9,-14 * SIZE(BO, %rax, 4), %xmm0,%xmm9 - vmovups -12 * SIZE(BO, %rax, 4), %xmm1 - vmovddup -15 * SIZE(AO, %rax, 1), %xmm0 - - addq $SIZE, %rax - jl .L37 - ALIGN_4 - -.L38: - vaddpd %xmm10, %xmm8,%xmm8 - vaddpd %xmm11, %xmm9,%xmm9 - -#ifndef TRMMKERNEL - vmovsd (CO1), %xmm0 - vmovhpd (CO1, LDC), %xmm0,%xmm0 - vmovsd (CO2), %xmm1 - vmovhpd (CO2, LDC), %xmm1,%xmm1 - - - vfmaddpd %xmm0, %xmm7,%xmm8,%xmm8 - vfmaddpd %xmm1, %xmm7,%xmm9,%xmm9 -#else - - vmulpd %xmm7, %xmm8,%xmm8 - vmulpd %xmm7, %xmm9,%xmm9 - -#endif - - vmovsd %xmm8, (CO1) - vmovhpd %xmm8, (CO1, LDC) - vmovsd %xmm9, (CO2) - vmovhpd %xmm9, (CO2, LDC) - -#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq K, %rax - subq KKK, %rax - leaq (,%rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (BO, %rax, 4), BO -#endif - -#if defined(TRMMKERNEL) && defined(LEFT) - addq $1, KK -#endif - ALIGN_4 - -.L39: -#if defined(TRMMKERNEL) && !defined(LEFT) - addq $4, KK -#endif - - movq BO, B - - decq J # j -- - jg .L01 - ALIGN_4 - -.L40: # N % 4 - testq $3, N # N % 4 == 3 - je .L999 # Jump to end if N % 4 == 0 - - testq $2, N # N % 4 == 2 - je .L80 - ALIGN_4 - -.L41: # N % 4 > 1 -#if defined(TRMMKERNEL) && defined(LEFT) - movq OFFSET, %rax - movq %rax, KK -#endif - - movq C, CO1 # coffset1 = c - leaq (C, LDC, 1), CO2 # coffset2 = c + ldc - movq A, AO # aoffset = a - - movq K, %rax - salq $BASE_SHIFT + 1, %rax # k << 4 - leaq (B, %rax), BB - - movq M, I - sarq $2, I # i = (m >> 2) - jle .L60 - ALIGN_4 - -.L51: -#if !defined(TRMMKERNEL) || \ - (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq B, BO -#else - movq KK, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 
4), AO - leaq (B, %rax, 2), BO -#endif - - vmovddup -16 * SIZE(BO), %xmm1 - vmovddup -15 * SIZE(BO), %xmm5 - vmovddup -12 * SIZE(BO), %xmm3 - vxorps %xmm8, %xmm8,%xmm8 - vxorps %xmm9, %xmm9,%xmm9 - vxorps %xmm12, %xmm12,%xmm12 - vxorps %xmm13, %xmm13,%xmm13 - vmovups -16 * SIZE(AO), %xmm0 - vmovups -8 * SIZE(AO), %xmm4 - vmovups %xmm0, %xmm2 - subq $-8 * SIZE, BB - -#ifndef TRMMKERNEL - movq K, %rax -#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) - movq K, %rax - subq KK, %rax - movq %rax, KKK -#else - movq KK, %rax -#ifdef LEFT - addq $4, %rax -#else - addq $2, %rax -#endif - movq %rax, KKK -#endif - - - andq $-4, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 4), AO - leaq (BO, %rax, 2), BO - negq %rax - NOBRANCH - je .L56 - ALIGN_4 - -.L52: # Loop for (N % 4) == 2 - vfmaddpd %xmm8,%xmm1, %xmm0,%xmm8 - vfmaddpd %xmm9,%xmm5, %xmm2,%xmm9 - vmovups -14 * SIZE(AO, %rax, 4),%xmm2 - vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 - vmovups -12 * SIZE(AO, %rax, 4), %xmm0 - vmovddup -14 * SIZE(BO, %rax, 2), %xmm1 - vfmaddpd %xmm13,%xmm2, %xmm5,%xmm13 - vmovddup -13 * SIZE(BO, %rax, 2), %xmm5 - vmovups -10 * SIZE(AO, %rax, 4), %xmm2 - vfmaddpd %xmm8,%xmm1, %xmm0,%xmm8 - vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 - vfmaddpd %xmm9,%xmm5, %xmm0,%xmm9 - vmovups (AO, %rax, 4), %xmm0 - vmovddup -8 * SIZE(BO, %rax, 2), %xmm1 - vfmaddpd %xmm13,%xmm2, %xmm5,%xmm13 - vmovddup -11 * SIZE(BO, %rax, 2), %xmm5 - vmovups -6 * SIZE(AO, %rax, 4), %xmm2 - vfmaddpd %xmm8,%xmm3, %xmm4,%xmm8 - vfmaddpd %xmm12,%xmm2, %xmm3,%xmm12 - vfmaddpd %xmm9,%xmm5, %xmm4,%xmm9 - vmovups -4 * SIZE(AO, %rax, 4), %xmm4 - vmovddup -10 * SIZE(BO, %rax, 2), %xmm3 - vfmaddpd %xmm13,%xmm2, %xmm5,%xmm13 - vmovddup -9 * SIZE(BO, %rax, 2), %xmm5 - vmovups -2 * SIZE(AO, %rax, 4), %xmm2 - vfmaddpd %xmm8,%xmm3, %xmm4,%xmm8 - vfmaddpd %xmm12,%xmm2, %xmm3,%xmm12 - vfmaddpd %xmm9,%xmm5, %xmm4,%xmm9 - vfmaddpd %xmm13,%xmm2, %xmm5,%xmm13 - vmovups 8 * SIZE(AO, %rax, 4), %xmm4 - vmovddup -4 * SIZE(BO, 
%rax, 2), %xmm3 - vmovddup -7 * SIZE(BO, %rax, 2), %xmm5 - vmovaps %xmm0, %xmm2 - - addq $4 * SIZE, %rax - BRANCH - jl .L52 - ALIGN_4 - -.L56: - vmovddup ALPHA, %xmm7 - -#ifndef TRMMKERNEL - movq K, %rax -#else - movq KKK, %rax -#endif - andq $3, %rax # if (k & 1) - je .L59 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 4), AO - leaq (BO, %rax, 2), BO - negq %rax - ALIGN_4 - -.L57: - vfmaddpd %xmm8,%xmm1, %xmm0,%xmm8 - vfmaddpd %xmm9,%xmm5, %xmm2,%xmm9 - vmovups -14 * SIZE(AO, %rax, 4),%xmm2 - vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 - vmovups -12 * SIZE(AO, %rax, 4), %xmm0 - vmovddup -14 * SIZE(BO, %rax, 2), %xmm1 - vfmaddpd %xmm13,%xmm2, %xmm5,%xmm13 - vmovddup -13 * SIZE(BO, %rax, 2), %xmm5 - vmovaps %xmm0, %xmm2 - - addq $SIZE, %rax - jl .L57 - ALIGN_4 - -.L59: -#ifndef TRMMKERNEL - vfmaddpd (CO1),%xmm7, %xmm8, %xmm8 - vfmaddpd 2 * SIZE(CO1),%xmm7, %xmm12, %xmm12 - vfmaddpd (CO2),%xmm7, %xmm9, %xmm9 - vfmaddpd 2 * SIZE(CO2),%xmm7, %xmm13, %xmm13 - -#else - vmulpd %xmm7, %xmm8,%xmm8 - vmulpd %xmm7, %xmm9,%xmm9 - vmulpd %xmm7, %xmm12,%xmm12 - vmulpd %xmm7, %xmm13,%xmm13 - -#endif - - vmovups %xmm8, (CO1) - vmovups %xmm12, 2 * SIZE(CO1) - - vmovups %xmm9, (CO2) - vmovups %xmm13, 2 * SIZE(CO2) - -#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq K, %rax - subq KKK, %rax - leaq (,%rax, SIZE), %rax - leaq (AO, %rax, 4), AO - leaq (BO, %rax, 2), BO -#endif - -#if defined(TRMMKERNEL) && defined(LEFT) - addq $4, KK -#endif - - addq $4 * SIZE, CO1 # coffset += 4 - addq $4 * SIZE, CO2 # coffset += 4 - decq I # i -- - jg .L51 - ALIGN_4 - -.L60: - testq $2, M - je .L70 - ALIGN_4 - -.L61: -#if !defined(TRMMKERNEL) || \ - (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq B, BO -#else - movq KK, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (B, %rax, 2), BO -#endif - - vmovups -16 * SIZE(AO), 
%xmm0 - vxorps %xmm8, %xmm8,%xmm8 - vmovups -12 * SIZE(AO), %xmm2 - vxorps %xmm9, %xmm9,%xmm9 - vmovddup -16 * SIZE(BO), %xmm1 - vxorps %xmm10, %xmm10,%xmm10 - vmovddup -15 * SIZE(BO), %xmm3 - vxorps %xmm11, %xmm11,%xmm11 - -#ifndef TRMMKERNEL - movq K, %rax -#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) - movq K, %rax - subq KK, %rax - movq %rax, KKK -#else - movq KK, %rax -#ifdef LEFT - addq $2, %rax -#else - addq $2, %rax -#endif - movq %rax, KKK -#endif - - andq $-4, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (BO, %rax, 2), BO - negq %rax - NOBRANCH - je .L66 - ALIGN_4 - -.L62: - vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 - vmovddup -14 * SIZE(BO, %rax, 2), %xmm1 - vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 - vmovups -14 * SIZE(AO, %rax, 2), %xmm0 - vmovddup -13 * SIZE(BO, %rax, 2), %xmm3 - vfmaddpd %xmm10,%xmm0, %xmm1,%xmm10 - vmovddup -12 * SIZE(BO, %rax, 2), %xmm1 - vfmaddpd %xmm11,%xmm0, %xmm3,%xmm11 - vmovups -8 * SIZE(AO, %rax, 2), %xmm0 - vmovddup -11 * SIZE(BO, %rax, 2), %xmm3 - vfmaddpd %xmm8,%xmm2, %xmm1,%xmm8 - vmovddup -10 * SIZE(BO, %rax, 2), %xmm1 - vfmaddpd %xmm9,%xmm2, %xmm3,%xmm9 - vmovups -10 * SIZE(AO, %rax, 2), %xmm2 - vmovddup -9 * SIZE(BO, %rax, 2), %xmm3 - vfmaddpd %xmm10,%xmm2, %xmm1,%xmm10 - vmovddup -8 * SIZE(BO, %rax, 2), %xmm1 - vfmaddpd %xmm11,%xmm2, %xmm3,%xmm11 - vmovups -4 * SIZE(AO, %rax, 2), %xmm2 - vmovddup -7 * SIZE(BO, %rax, 2), %xmm3 - - addq $4 * SIZE, %rax - BRANCH - jl .L62 - ALIGN_4 - -.L66: - vmovddup ALPHA, %xmm7 - -#ifndef TRMMKERNEL - movq K, %rax -#else - movq KKK, %rax -#endif - andq $3, %rax # if (k & 1) - je .L69 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (BO, %rax, 2), BO - negq %rax - ALIGN_4 - -.L67: - vfmaddpd %xmm8,%xmm0, %xmm1,%xmm8 - vmovddup -14 * SIZE(BO, %rax, 2), %xmm1 - vfmaddpd %xmm9,%xmm0, %xmm3,%xmm9 - vmovups -14 * SIZE(AO, %rax, 2), %xmm0 - vmovddup -13 * SIZE(BO, %rax, 2), %xmm3 - - addq $SIZE, %rax - jl .L67 - ALIGN_4 - -.L69: - vaddpd 
%xmm10, %xmm8,%xmm8 - vaddpd %xmm11, %xmm9,%xmm9 - -#ifndef TRMMKERNEL - - vfmaddpd (CO1),%xmm7, %xmm8,%xmm8 - vfmaddpd (CO2),%xmm7, %xmm9,%xmm9 - -#else - - vmulpd %xmm7, %xmm8,%xmm8 - vmulpd %xmm7, %xmm9,%xmm9 - -#endif - - vmovups %xmm8, (CO1) - vmovups %xmm9, (CO2) - -#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq K, %rax - subq KKK, %rax - leaq (,%rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (BO, %rax, 2), BO -#endif - -#if defined(TRMMKERNEL) && defined(LEFT) - addq $2, KK -#endif - - addq $2 * SIZE, CO1 # coffset += 4 - addq $2 * SIZE, CO2 # coffset += 4 - ALIGN_4 - -.L70: - testq $1, M - je .L79 - ALIGN_4 - -.L71: -#if !defined(TRMMKERNEL) || \ - (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq B, BO -#else - movq KK, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (B, %rax, 2), BO -#endif - - vmovddup -16 * SIZE(AO), %xmm0 - vxorps %xmm8, %xmm8,%xmm8 - vmovddup -15 * SIZE(AO), %xmm1 - vxorps %xmm9, %xmm9,%xmm9 - vmovddup -14 * SIZE(AO), %xmm2 - vxorps %xmm10, %xmm10,%xmm10 - vmovddup -13 * SIZE(AO), %xmm3 - vxorps %xmm11, %xmm11,%xmm11 - -#ifndef TRMMKERNEL - movq K, %rax -#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) - movq K, %rax - subq KK, %rax - movq %rax, KKK -#else - movq KK, %rax -#ifdef LEFT - addq $1, %rax -#else - addq $2, %rax -#endif - movq %rax, KKK -#endif - - andq $-4, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (BO, %rax, 2), BO - negq %rax - NOBRANCH - je .L76 - ALIGN_4 - -.L72: - vfmaddpd %xmm8,-16 * SIZE(BO, %rax, 2), %xmm0,%xmm8 - vmovddup -12 * SIZE(AO, %rax, 1), %xmm0 - - vfmaddpd %xmm9,-14 * SIZE(BO, %rax, 2), %xmm1,%xmm9 - vmovddup -11 * SIZE(AO, %rax, 1), %xmm1 - - vfmaddpd %xmm10,-12 * SIZE(BO, %rax, 2), %xmm2,%xmm10 - vmovddup -10 * SIZE(AO, %rax, 1), %xmm2 - - vfmaddpd %xmm11,-10 * 
SIZE(BO, %rax, 2), %xmm3,%xmm11 - vmovddup -9 * SIZE(AO, %rax, 1), %xmm3 - - addq $4 * SIZE, %rax - BRANCH - jl .L72 - ALIGN_4 - -.L76: - vmovddup ALPHA, %xmm7 - -#ifndef TRMMKERNEL - movq K, %rax -#else - movq KKK, %rax -#endif - andq $3, %rax # if (k & 1) - je .L78 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (BO, %rax, 2), BO - negq %rax - ALIGN_4 - -.L77: - vfmaddpd %xmm8,-16 * SIZE(BO, %rax, 2), %xmm0,%xmm8 - vmovddup -15 * SIZE(AO, %rax, 1), %xmm0 - - addq $SIZE, %rax - jl .L77 - ALIGN_4 - -.L78: - vaddpd %xmm9, %xmm8,%xmm8 - vaddpd %xmm11, %xmm10,%xmm10 - vaddpd %xmm10, %xmm8,%xmm8 - -#ifndef TRMMKERNEL - vmovsd (CO1), %xmm0 - vmovhpd (CO2), %xmm0,%xmm0 -#endif - - vmulpd %xmm7, %xmm8,%xmm8 - -#ifndef TRMMKERNEL - vaddpd %xmm0, %xmm8,%xmm8 -#endif - - vmovsd %xmm8, (CO1) - vmovhpd %xmm8, (CO2) - -#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq K, %rax - subq KKK, %rax - leaq (,%rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (BO, %rax, 2), BO -#endif - -#if defined(TRMMKERNEL) && defined(LEFT) - addq $1, KK -#endif - ALIGN_4 - -.L79: -#if defined(TRMMKERNEL) && !defined(LEFT) - addq $2, KK -#endif - - movq BO, B - - leaq (C, LDC, 2), C - ALIGN_4 - -.L80: - testq $1, N # N % 4 == 1 - je .L999 # Jump to end if N % 4 == 0 - ALIGN_4 - -.L81: -#if defined(TRMMKERNEL) && defined(LEFT) - movq OFFSET, %rax - movq %rax, KK -#endif - - movq C, CO1 # coffset1 = c - movq A, AO # aoffset = a - - movq M, I - sarq $2, I # i = (m >> 2) - jle .L100 - ALIGN_4 - -.L91: -#if !defined(TRMMKERNEL) || \ - (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq B, BO -#else - movq KK, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 4), AO - leaq (B, %rax, 1), BO -#endif - - vmovups -8 * SIZE(AO), %xmm2 - vxorps %xmm8, %xmm8,%xmm8 - vmovups -16 * SIZE(AO), %xmm0 - vxorps %xmm9, %xmm9,%xmm9 - vmovddup 
-16 * SIZE(BO), %xmm1 - vxorps %xmm12, %xmm12,%xmm12 - vmovddup -14 * SIZE(BO), %xmm3 - vxorps %xmm13, %xmm13,%xmm13 - vmovddup -15 * SIZE(BO), %xmm5 - - // prefetchw 3 * SIZE(CO1) - -#ifndef TRMMKERNEL - movq K, %rax -#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) - movq K, %rax - subq KK, %rax - movq %rax, KKK -#else - movq KK, %rax -#ifdef LEFT - addq $4, %rax -#else - addq $1, %rax -#endif - movq %rax, KKK -#endif - - andq $-4, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 4), AO - leaq (BO, %rax, 1), BO - negq %rax - NOBRANCH - je .L96 - ALIGN_4 - -.L92: - vfmaddpd %xmm8,%xmm1, %xmm0,%xmm8 - vfmaddpd %xmm12,-14 * SIZE(AO, %rax, 4), %xmm1,%xmm12 - vmovapd -12 * SIZE(AO, %rax, 4), %xmm0 - vmovddup -12 * SIZE(BO, %rax, 1), %xmm1 - vfmaddpd %xmm9,%xmm5, %xmm0,%xmm9 - vfmaddpd %xmm13,-10 * SIZE(AO, %rax, 4), %xmm5,%xmm13 - vmovapd (AO, %rax, 4), %xmm0 - vmovddup -13 * SIZE(BO, %rax, 1), %xmm5 - vfmaddpd %xmm8,%xmm3, %xmm2,%xmm8 - vfmaddpd %xmm12,-6 * SIZE(AO, %rax, 4), %xmm3,%xmm12 - vmovapd -4 * SIZE(AO, %rax, 4), %xmm2 - vmovddup -10 * SIZE(BO, %rax, 1), %xmm3 - vfmaddpd %xmm9,%xmm5, %xmm2,%xmm9 - vfmaddpd %xmm13,-2 * SIZE(AO, %rax, 4), %xmm5,%xmm13 - vmovapd 8 * SIZE(AO, %rax, 4), %xmm2 - vmovddup -11 * SIZE(BO, %rax, 1), %xmm5 - - addq $4 * SIZE, %rax - BRANCH - jl .L92 - ALIGN_4 - -.L96: - vmovddup ALPHA, %xmm7 - -#ifndef TRMMKERNEL - movq K, %rax -#else - movq KKK, %rax -#endif - andq $3, %rax # if (k & 1) - je .L99 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 4), AO - leaq (BO, %rax, 1), BO - negq %rax - ALIGN_4 - -.L97: - vfmaddpd %xmm8,%xmm1, %xmm0,%xmm8 - vfmaddpd %xmm12,-14 * SIZE(AO, %rax, 4), %xmm1,%xmm12 - vmovups -12 * SIZE(AO, %rax, 4), %xmm0 - vmovddup -15 * SIZE(BO, %rax, 1), %xmm1 - - addq $SIZE, %rax - jl .L97 - ALIGN_4 - -.L99: - vaddpd %xmm9, %xmm8,%xmm8 - vaddpd %xmm13, %xmm12,%xmm12 - -#ifndef TRMMKERNEL - - vfmaddpd (CO1),%xmm7, %xmm8,%xmm8 - vfmaddpd 2 * SIZE(CO1),%xmm7,%xmm12,%xmm12 - -#else - 
vmulpd %xmm7, %xmm8,%xmm8 - vmulpd %xmm7, %xmm12,%xmm12 - -#endif - - vmovups %xmm8, (CO1) - vmovups %xmm12, 2 * SIZE(CO1) - -#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq K, %rax - subq KKK, %rax - leaq (,%rax, SIZE), %rax - leaq (AO, %rax, 4), AO - leaq (BO, %rax, 1), BO -#endif - -#if defined(TRMMKERNEL) && defined(LEFT) - addq $4, KK -#endif - - addq $4 * SIZE, CO1 # coffset += 4 - decq I # i -- - jg .L91 - ALIGN_4 - -.L100: - testq $2, M - je .L110 - ALIGN_4 - -.L101: -#if !defined(TRMMKERNEL) || \ - (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq B, BO -#else - movq KK, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (B, %rax, 1), BO -#endif - - vmovddup -16 * SIZE(BO), %xmm0 - vxorps %xmm8, %xmm8,%xmm8 - vmovddup -15 * SIZE(BO), %xmm1 - vxorps %xmm9, %xmm9,%xmm9 - vmovddup -14 * SIZE(BO), %xmm2 - vxorps %xmm10, %xmm10,%xmm10 - vmovddup -13 * SIZE(BO), %xmm3 - vxorps %xmm11, %xmm11,%xmm11 - -#ifndef TRMMKERNEL - movq K, %rax -#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) - movq K, %rax - subq KK, %rax - movq %rax, KKK -#else - movq KK, %rax -#ifdef LEFT - addq $2, %rax -#else - addq $1, %rax -#endif - movq %rax, KKK -#endif - - andq $-4, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (BO, %rax, 1), BO - negq %rax - NOBRANCH - je .L106 - ALIGN_4 - -.L102: - vfmaddpd %xmm8,-16 * SIZE(AO, %rax, 2), %xmm0,%xmm8 - vmovddup -12 * SIZE(BO, %rax, 1), %xmm0 - - vfmaddpd %xmm9,-14 * SIZE(AO, %rax, 2), %xmm1,%xmm9 - vmovddup -11 * SIZE(BO, %rax, 1), %xmm1 - - vfmaddpd %xmm10,-12 * SIZE(AO, %rax, 2), %xmm2,%xmm10 - vmovddup -10 * SIZE(BO, %rax, 1), %xmm2 - - vfmaddpd %xmm11,-10 * SIZE(AO, %rax, 2), %xmm3,%xmm11 - vmovddup -9 * SIZE(BO, %rax, 1), %xmm3 - - addq $4 * SIZE, %rax - BRANCH - jl .L102 - ALIGN_4 - -.L106: - vmovddup 
ALPHA, %xmm7 - -#ifndef TRMMKERNEL - movq K, %rax -#else - movq KKK, %rax -#endif - andq $3, %rax # if (k & 1) - je .L109 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (BO, %rax, 1), BO - negq %rax - ALIGN_4 - -.L107: - vmovddup -16 * SIZE(BO, %rax, 1), %xmm0 - vfmaddpd %xmm8,-16 * SIZE(AO, %rax, 2), %xmm0,%xmm8 - - addq $SIZE, %rax - jl .L107 - ALIGN_4 - -.L109: - vaddpd %xmm9, %xmm8,%xmm8 - vaddpd %xmm11, %xmm10,%xmm10 - vaddpd %xmm10, %xmm8,%xmm8 - -#ifndef TRMMKERNEL - - vfmaddpd (CO1),%xmm7, %xmm8,%xmm8 -#else - vmulpd %xmm7, %xmm8,%xmm8 - -#endif - - vmovups %xmm8, (CO1) - -#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq K, %rax - subq KKK, %rax - leaq (,%rax, SIZE), %rax - leaq (AO, %rax, 2), AO - leaq (BO, %rax, 1), BO -#endif - -#if defined(TRMMKERNEL) && defined(LEFT) - addq $2, KK -#endif - - addq $2 * SIZE, CO1 # coffset += 4 - - ALIGN_4 - -.L110: - testq $1, M - je .L999 - ALIGN_4 - -.L111: -#if !defined(TRMMKERNEL) || \ - (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ - (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) - movq B, BO -#else - movq KK, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (B, %rax, 1), BO -#endif - - vmovups -16 * SIZE(AO), %xmm0 - vxorps %xmm8, %xmm8,%xmm8 - movups -14 * SIZE(AO), %xmm1 - vxorps %xmm9, %xmm9,%xmm9 - -#ifndef TRMMKERNEL - movq K, %rax -#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) - movq K, %rax - subq KK, %rax - movq %rax, KKK -#else - movq KK, %rax -#ifdef LEFT - addq $1, %rax -#else - addq $1, %rax -#endif - movq %rax, KKK -#endif - - andq $-4, %rax - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (BO, %rax, 1), BO - negq %rax - NOBRANCH - je .L116 - ALIGN_4 - -.L112: - vfmaddpd %xmm8,-16 * SIZE(BO, %rax, 1), %xmm0,%xmm8 - vmovups -12 * SIZE(AO, %rax, 1), %xmm0 - - vfmaddpd %xmm9,-14 * SIZE(BO, %rax, 1), %xmm1,%xmm9 - 
vmovups -10 * SIZE(AO, %rax, 1), %xmm1 - - addq $4 * SIZE, %rax - BRANCH - jl .L112 - ALIGN_4 - -.L116: - vmovddup ALPHA, %xmm7 - -#ifndef TRMMKERNEL - movq K, %rax -#else - movq KKK, %rax -#endif - andq $3, %rax # if (k & 1) - je .L118 - - leaq (, %rax, SIZE), %rax - leaq (AO, %rax, 1), AO - leaq (BO, %rax, 1), BO - negq %rax - ALIGN_4 - -.L117: - vmulsd -16 * SIZE(BO, %rax, 1), %xmm0,%xmm0 - vaddsd %xmm0, %xmm8,%xmm8 - vmovsd -15 * SIZE(AO, %rax, 1), %xmm0 - - addq $SIZE, %rax - jl .L117 - ALIGN_4 - -.L118: - vaddpd %xmm9, %xmm8,%xmm8 - vhaddpd %xmm8, %xmm8,%xmm8 - -#ifndef TRMMKERNEL - vmovsd (CO1), %xmm0 -#endif - - vmulsd %xmm7, %xmm8,%xmm8 - -#ifndef TRMMKERNEL - vaddsd %xmm0, %xmm8,%xmm8 -#endif - - vmovsd %xmm8, (CO1) - ALIGN_4 - -.L999: - movq (%rsp), %rbx - movq 8(%rsp), %rbp - movq 16(%rsp), %r12 - movq 24(%rsp), %r13 - movq 32(%rsp), %r14 - movq 40(%rsp), %r15 - -#ifdef WINDOWS_ABI - movq 48(%rsp), %rdi - movq 56(%rsp), %rsi - movups 64(%rsp), %xmm6 - movups 80(%rsp), %xmm7 - movups 96(%rsp), %xmm8 - movups 112(%rsp), %xmm9 - movups 128(%rsp), %xmm10 - movups 144(%rsp), %xmm11 - movups 160(%rsp), %xmm12 - movups 176(%rsp), %xmm13 - movups 192(%rsp), %xmm14 - movups 208(%rsp), %xmm15 -#endif - - addq $STACKSIZE, %rsp - ret - - EPILOGUE diff --git a/kernel/x86_64/dgemm_kernel_8x2_bulldozer.S b/kernel/x86_64/dgemm_kernel_8x2_bulldozer.S new file mode 100644 index 000000000..dc980cbe7 --- /dev/null +++ b/kernel/x86_64/dgemm_kernel_8x2_bulldozer.S @@ -0,0 +1,3854 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. 
*/ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. 
*/ +/*********************************************************************/ + +/********************************************************************* +* 2013/06/02 Saar +* +* Parameter: +* UNROLL_M 8 +* UNROLL_N 2 +* DGEMM_P 360 +* DGEMM_Q 160 +* +* Performance at m x n without prefetch of BO: +* +* 5760x5760 93.4 GFLOPS with 8 threads on 4 modules (ACML: 90.8 GFLOPS) +* 5760x5760 84.2 GFLOPS with 4 threads on 4 modules (ACML: 82.4 GFLOPS) +* 3840x3840 50.3 GFLOPS with 2 threads on 2 modules (ACML: 49.5 GFLOPS) +* +* 5760x5760 56.4 GFLOPS with 4 threads on 2 modules (ACML: 58.5 GFLOPS) +* 3840x3840 29.0 GFLOPS with 2 threads on 1 modules (ACML: 30.2 GFLOPS) +* 3840x3840 26.1 GFLOPS with 1 threads on 1 modules (ACML: 25.9 GFLOPS) +* +*********************************************************************/ + +/********************************************************************* +* 2013/06/03 Saar +* +* Parameter: +* UNROLL_M 8 +* UNROLL_N 2 +* DGEMM_P 336 +* DGEMM_Q 168 +* NO_WARMUP 1 +* NO_AFFINITY 1 +* GEMM_MULTITHREAD_THRESHOLD 4 +* +* Performance at m x n with prefetch of BO: +* +* 8064x3840 93.7 GFLOPS with 8 threads on 4 modules (ACML: 93.6 GFLOPS) +* 6048x2880 85.1 GFLOPS with 4 threads on 4 modules (ACML: 84.2 GFLOPS) +* 6048x2880 52.0 GFLOPS with 2 threads on 2 modules (ACML: 50.0 GFLOPS) +* +* 6048x2880 56.3 GFLOPS with 4 threads on 2 modules (ACML: 57.6 GFLOPS) +* 4032x1920 29.5 GFLOPS with 2 threads on 1 modules (ACML: 30.5 GFLOPS) +* 4032x1920 26.9 GFLOPS with 1 threads on 1 modules (ACML: 26.1 GFLOPS) +* +*********************************************************************/ + +/********************************************************************* +* 2013/06/04 Saar +* +* Parameter: +* UNROLL_M 8 +* UNROLL_N 2 +* DGEMM_P 384 +* DGEMM_Q 168 +* NO_WARMUP 1 +* NO_AFFINITY 1 +* GEMM_MULTITHREAD_THRESHOLD 4 +* +* Performance at m x n with prefetch of BO: +* +* 6144x5376 94.6 GFLOPS with 8 threads on 4 modules (ACML: 90.5 GFLOPS) +* 6144x5376 86.0 GFLOPS with 
4 threads on 4 modules (ACML: 81.5 GFLOPS) +* 4608x4032 52.0 GFLOPS with 2 threads on 2 modules (ACML: 47.5 GFLOPS) +* +* 6144x5376 57.3 GFLOPS with 4 threads on 2 modules (ACML: 56.5 GFLOPS) +* 4608x4032 29.6 GFLOPS with 2 threads on 1 modules (ACML: 30.2 GFLOPS) +* 4608x4032 26.9 GFLOPS with 1 threads on 1 modules (ACML: 25.6 GFLOPS) +* +*********************************************************************/ + + + +#define ASSEMBLER +#include "common.h" + +#define OLD_M %rdi +#define OLD_N %rsi +#define M %r13 +#define J %r14 +#define OLD_K %rdx + +#define A %rcx +#define B %r8 +#define C %r9 +#define LDC %r10 + +#define I %r11 +#define AO %rdi +#define BO %rsi +#define CO1 %r15 +#define K %r12 +#define BI %rbp +#define SP %rbx + +#define BO1 %rdi +#define BO2 %r15 + +#ifndef WINDOWS_ABI + +#define STACKSIZE 96 + +#else + +#define STACKSIZE 256 + +#define OLD_A 40 + STACKSIZE(%rsp) +#define OLD_B 48 + STACKSIZE(%rsp) +#define OLD_C 56 + STACKSIZE(%rsp) +#define OLD_LDC 64 + STACKSIZE(%rsp) +#define OLD_OFFSET 72 + STACKSIZE(%rsp) + +#endif + +#define L_BUFFER_SIZE 512*8*4 +#define LB2_OFFSET 512*8*2 + +#define Ndiv6 24(%rsp) +#define Nmod6 32(%rsp) +#define N 40(%rsp) +#define ALPHA 48(%rsp) +#define OFFSET 56(%rsp) +#define KK 64(%rsp) +#define KKK 72(%rsp) +#define BUFFER1 128(%rsp) +#define BUFFER2 LB2_OFFSET+128(%rsp) + + + +#define A_PR1 384 +#define B_PR1 192 + +#define KERNEL8x3_1(xx) \ + prefetcht0 A_PR1(AO,%rax,8) ;\ + vmovddup -6 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -16 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup -5 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovddup -4 * SIZE(BO, BI, 8), %xmm3 ;\ + vfmaddpd %xmm6,%xmm3,%xmm0,%xmm6 ;\ + vmovups -14 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddpd %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vfmaddpd %xmm9,%xmm3,%xmm0,%xmm9 ;\ + vmovups -12 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vfmaddpd 
%xmm11,%xmm2,%xmm0,%xmm11 ;\ + vfmaddpd %xmm12,%xmm3,%xmm0,%xmm12 ;\ + vmovups -10 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm13,%xmm1,%xmm0,%xmm13 ;\ + vfmaddpd %xmm14,%xmm2,%xmm0,%xmm14 ;\ + vfmaddpd %xmm15,%xmm3,%xmm0,%xmm15 ;\ + +#define KERNEL8x3_2(xx) \ + prefetcht0 A_PR1+64(AO,%rax,8) ;\ + vmovddup -3 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -8 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup -2 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovddup -1 * SIZE(BO, BI, 8), %xmm3 ;\ + vfmaddpd %xmm6,%xmm3,%xmm0,%xmm6 ;\ + vmovups -6 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddpd %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vfmaddpd %xmm9,%xmm3,%xmm0,%xmm9 ;\ + vmovups -4 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vfmaddpd %xmm11,%xmm2,%xmm0,%xmm11 ;\ + vfmaddpd %xmm12,%xmm3,%xmm0,%xmm12 ;\ + vmovups -2 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm13,%xmm1,%xmm0,%xmm13 ;\ + vfmaddpd %xmm14,%xmm2,%xmm0,%xmm14 ;\ + vfmaddpd %xmm15,%xmm3,%xmm0,%xmm15 ;\ + +#define KERNEL8x3_3(xx) \ + prefetcht0 A_PR1+128(AO,%rax,8) ;\ + vmovddup 0 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups 0 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup 1 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovddup 2 * SIZE(BO, BI, 8), %xmm3 ;\ + vfmaddpd %xmm6,%xmm3,%xmm0,%xmm6 ;\ + vmovups 2 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddpd %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vfmaddpd %xmm9,%xmm3,%xmm0,%xmm9 ;\ + vmovups 4 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vfmaddpd %xmm11,%xmm2,%xmm0,%xmm11 ;\ + vfmaddpd %xmm12,%xmm3,%xmm0,%xmm12 ;\ + vmovups 6 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm13,%xmm1,%xmm0,%xmm13 ;\ + vfmaddpd %xmm14,%xmm2,%xmm0,%xmm14 ;\ + vfmaddpd %xmm15,%xmm3,%xmm0,%xmm15 ;\ + +#define KERNEL8x3_4(xx) \ + prefetcht0 A_PR1+192(AO,%rax,8) ;\ + vmovddup 3 * SIZE(BO, BI, 8), %xmm1 ;\ + 
vmovups 8 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup 4 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovddup 5 * SIZE(BO, BI, 8), %xmm3 ;\ + vfmaddpd %xmm6,%xmm3,%xmm0,%xmm6 ;\ + vmovups 10 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddpd %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vfmaddpd %xmm9,%xmm3,%xmm0,%xmm9 ;\ + vmovups 12 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vfmaddpd %xmm11,%xmm2,%xmm0,%xmm11 ;\ + vfmaddpd %xmm12,%xmm3,%xmm0,%xmm12 ;\ + vmovups 14 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm13,%xmm1,%xmm0,%xmm13 ;\ + vfmaddpd %xmm14,%xmm2,%xmm0,%xmm14 ;\ + vfmaddpd %xmm15,%xmm3,%xmm0,%xmm15 ;\ + addq $12, BI ;\ + addq $32, %rax ;\ + +#define KERNEL8x3_SUB(xx) \ + vmovddup -6 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -16 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup -5 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovddup -4 * SIZE(BO, BI, 8), %xmm3 ;\ + vfmaddpd %xmm6,%xmm3,%xmm0,%xmm6 ;\ + vmovups -14 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddpd %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vfmaddpd %xmm9,%xmm3,%xmm0,%xmm9 ;\ + vmovups -12 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vfmaddpd %xmm11,%xmm2,%xmm0,%xmm11 ;\ + vfmaddpd %xmm12,%xmm3,%xmm0,%xmm12 ;\ + vmovups -10 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm13,%xmm1,%xmm0,%xmm13 ;\ + vfmaddpd %xmm14,%xmm2,%xmm0,%xmm14 ;\ + vfmaddpd %xmm15,%xmm3,%xmm0,%xmm15 ;\ + + +/*******************************************************************************************/ + +#define KERNEL4x3_1(xx) \ + vmovddup -6 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -16 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup -5 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovddup -4 * SIZE(BO, BI, 8), %xmm3 ;\ + vfmaddpd %xmm6,%xmm3,%xmm0,%xmm6 ;\ + vmovups -14 * SIZE(AO, 
%rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddpd %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vfmaddpd %xmm9,%xmm3,%xmm0,%xmm9 ;\ + +#define KERNEL4x3_2(xx) \ + vmovddup -3 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -12 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup -2 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovddup -1 * SIZE(BO, BI, 8), %xmm3 ;\ + vfmaddpd %xmm6,%xmm3,%xmm0,%xmm6 ;\ + vmovups -10 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddpd %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vfmaddpd %xmm9,%xmm3,%xmm0,%xmm9 ;\ + +#define KERNEL4x3_3(xx) \ + vmovddup 0 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -8 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup 1 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovddup 2 * SIZE(BO, BI, 8), %xmm3 ;\ + vfmaddpd %xmm6,%xmm3,%xmm0,%xmm6 ;\ + vmovups -6 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddpd %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vfmaddpd %xmm9,%xmm3,%xmm0,%xmm9 ;\ + +#define KERNEL4x3_4(xx) \ + vmovddup 3 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -4 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup 4 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovddup 5 * SIZE(BO, BI, 8), %xmm3 ;\ + vfmaddpd %xmm6,%xmm3,%xmm0,%xmm6 ;\ + vmovups -2 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddpd %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vfmaddpd %xmm9,%xmm3,%xmm0,%xmm9 ;\ + addq $12, BI ;\ + addq $16, %rax ;\ + +#define KERNEL4x3_SUB(xx) \ + vmovddup -6 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -16 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup -5 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovddup -4 * SIZE(BO, BI, 8), %xmm3 ;\ + vfmaddpd %xmm6,%xmm3,%xmm0,%xmm6 ;\ + vmovups -14 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddpd 
%xmm8,%xmm2,%xmm0,%xmm8 ;\ + vfmaddpd %xmm9,%xmm3,%xmm0,%xmm9 ;\ + + + + + +/*******************************************************************************************/ + +#define KERNEL2x3_1(xx) \ + vmovddup -6 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -16 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup -5 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovddup -4 * SIZE(BO, BI, 8), %xmm3 ;\ + vfmaddpd %xmm6,%xmm3,%xmm0,%xmm6 ;\ + +#define KERNEL2x3_2(xx) \ + vmovddup -3 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -14 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup -2 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovddup -1 * SIZE(BO, BI, 8), %xmm3 ;\ + vfmaddpd %xmm6,%xmm3,%xmm0,%xmm6 ;\ + +#define KERNEL2x3_3(xx) \ + vmovddup 0 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -12 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup 1 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovddup 2 * SIZE(BO, BI, 8), %xmm3 ;\ + vfmaddpd %xmm6,%xmm3,%xmm0,%xmm6 ;\ + +#define KERNEL2x3_4(xx) \ + vmovddup 3 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -10 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup 4 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovddup 5 * SIZE(BO, BI, 8), %xmm3 ;\ + vfmaddpd %xmm6,%xmm3,%xmm0,%xmm6 ;\ + addq $12, BI ;\ + addq $8, %rax ;\ + +#define KERNEL2x3_SUB(xx) \ + vmovddup -6 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -16 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup -5 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovddup -4 * SIZE(BO, BI, 8), %xmm3 ;\ + vfmaddpd %xmm6,%xmm3,%xmm0,%xmm6 ;\ + +/*******************************************************************************************/ + +#define KERNEL1x3_1(xx) \ + vmovsd -6 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovsd -16 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddsd 
%xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovsd -5 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddsd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovsd -4 * SIZE(BO, BI, 8), %xmm3 ;\ + vfmaddsd %xmm6,%xmm3,%xmm0,%xmm6 ;\ + +#define KERNEL1x3_2(xx) \ + vmovsd -3 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovsd -15 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddsd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovsd -2 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddsd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovsd -1 * SIZE(BO, BI, 8), %xmm3 ;\ + vfmaddsd %xmm6,%xmm3,%xmm0,%xmm6 ;\ + +#define KERNEL1x3_3(xx) \ + vmovsd 0 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovsd -14 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddsd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovsd 1 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddsd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovsd 2 * SIZE(BO, BI, 8), %xmm3 ;\ + vfmaddsd %xmm6,%xmm3,%xmm0,%xmm6 ;\ + +#define KERNEL1x3_4(xx) \ + vmovsd 3 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovsd -13 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddsd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovsd 4 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddsd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovsd 5 * SIZE(BO, BI, 8), %xmm3 ;\ + vfmaddsd %xmm6,%xmm3,%xmm0,%xmm6 ;\ + addq $12, BI ;\ + addq $4, %rax ;\ + +#define KERNEL1x3_SUB(xx) \ + vmovsd -6 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovsd -16 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddsd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovsd -5 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddsd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovsd -4 * SIZE(BO, BI, 8), %xmm3 ;\ + vfmaddsd %xmm6,%xmm3,%xmm0,%xmm6 ;\ + + + +/******************************************************************************************* +* 2 lines of N +*******************************************************************************************/ + +#define KERNEL8x2_1(xx) \ + prefetcht0 A_PR1(AO,%rax,8) ;\ + vmovddup -4 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -16 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup -3 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovups -14 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + 
vfmaddpd %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vmovups -12 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vfmaddpd %xmm11,%xmm2,%xmm0,%xmm11 ;\ + vmovups -10 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm13,%xmm1,%xmm0,%xmm13 ;\ + vfmaddpd %xmm14,%xmm2,%xmm0,%xmm14 ;\ + +#define KERNEL8x2_2(xx) \ + prefetcht0 A_PR1+64(AO,%rax,8) ;\ + vmovddup -2 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -8 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup -1 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovups -6 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddpd %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vmovups -4 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vfmaddpd %xmm11,%xmm2,%xmm0,%xmm11 ;\ + vmovups -2 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm13,%xmm1,%xmm0,%xmm13 ;\ + vfmaddpd %xmm14,%xmm2,%xmm0,%xmm14 ;\ + +#define KERNEL8x2_3(xx) \ + prefetcht0 A_PR1+128(AO,%rax,8) ;\ + vmovddup 0 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups 0 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup 1 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovups 2 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddpd %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vmovups 4 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vfmaddpd %xmm11,%xmm2,%xmm0,%xmm11 ;\ + vmovups 6 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm13,%xmm1,%xmm0,%xmm13 ;\ + vfmaddpd %xmm14,%xmm2,%xmm0,%xmm14 ;\ + +#define KERNEL8x2_4(xx) \ + prefetcht0 A_PR1+192(AO,%rax,8) ;\ + vmovddup 2 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups 8 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup 3 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovups 10 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddpd %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vmovups 12 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd 
%xmm10,%xmm1,%xmm0,%xmm10 ;\ + vfmaddpd %xmm11,%xmm2,%xmm0,%xmm11 ;\ + vmovups 14 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm13,%xmm1,%xmm0,%xmm13 ;\ + vfmaddpd %xmm14,%xmm2,%xmm0,%xmm14 ;\ + addq $8, BI ;\ + addq $32, %rax ;\ + +#define KERNEL8x2_SUB(xx) \ + vmovddup -4 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -16 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup -3 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovups -14 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddpd %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vmovups -12 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vfmaddpd %xmm11,%xmm2,%xmm0,%xmm11 ;\ + vmovups -10 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm13,%xmm1,%xmm0,%xmm13 ;\ + vfmaddpd %xmm14,%xmm2,%xmm0,%xmm14 ;\ + + +/*******************************************************************************************/ + +#define KERNEL4x2_1(xx) \ + vmovddup -4 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -16 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup -3 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovups -14 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddpd %xmm8,%xmm2,%xmm0,%xmm8 ;\ + +#define KERNEL4x2_2(xx) \ + vmovddup -2 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -12 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup -1 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovups -10 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddpd %xmm8,%xmm2,%xmm0,%xmm8 ;\ + +#define KERNEL4x2_3(xx) \ + vmovddup 0 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -8 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup 1 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovups -6 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddpd %xmm8,%xmm2,%xmm0,%xmm8 ;\ + +#define 
KERNEL4x2_4(xx) \ + vmovddup 2 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -4 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup 3 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovups -2 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddpd %xmm8,%xmm2,%xmm0,%xmm8 ;\ + addq $8, BI ;\ + addq $16, %rax ;\ + +#define KERNEL4x2_SUB(xx) \ + vmovddup -4 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -16 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup -3 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovups -14 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddpd %xmm8,%xmm2,%xmm0,%xmm8 ;\ + + +/*******************************************************************************************/ + +#define KERNEL2x2_1(xx) \ + vmovddup -4 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -16 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup -3 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + +#define KERNEL2x2_2(xx) \ + vmovddup -2 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -14 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup -1 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + +#define KERNEL2x2_3(xx) \ + vmovddup 0 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -12 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup 1 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + +#define KERNEL2x2_4(xx) \ + vmovddup 2 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -10 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup 3 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + addq $8, BI ;\ + addq $8, %rax ;\ + +#define KERNEL2x2_SUB(xx) \ + vmovddup -4 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -16 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovddup -3 * SIZE(BO, BI, 8), %xmm2 ;\ + 
vfmaddpd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + +/*******************************************************************************************/ + +#define KERNEL1x2_1(xx) \ + vmovsd -4 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovsd -16 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddsd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovsd -3 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddsd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + +#define KERNEL1x2_2(xx) \ + vmovsd -2 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovsd -15 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddsd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovsd -1 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddsd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + +#define KERNEL1x2_3(xx) \ + vmovsd 0 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovsd -14 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddsd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovsd 1 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddsd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + +#define KERNEL1x2_4(xx) \ + vmovsd 2 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovsd -13 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddsd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovsd 3 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddsd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + addq $8, BI ;\ + addq $4, %rax ;\ + +#define KERNEL1x2_SUB(xx) \ + vmovsd -4 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovsd -16 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddsd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovsd -3 * SIZE(BO, BI, 8), %xmm2 ;\ + vfmaddsd %xmm5,%xmm2,%xmm0,%xmm5 ;\ + + + +/******************************************************************************************* +* 1 line of N +*******************************************************************************************/ + +#define KERNEL8x1_1(xx) \ + prefetcht0 A_PR1(AO,%rax,8) ;\ + vmovddup -2 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -16 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovups -14 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vmovups -12 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vmovups -10 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm13,%xmm1,%xmm0,%xmm13 ;\ + +#define KERNEL8x1_2(xx) \ + prefetcht0 
A_PR1+64(AO,%rax,8) ;\ + vmovddup -1 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -8 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovups -6 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vmovups -4 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vmovups -2 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm13,%xmm1,%xmm0,%xmm13 ;\ + +#define KERNEL8x1_3(xx) \ + prefetcht0 A_PR1+128(AO,%rax,8) ;\ + vmovddup 0 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups 0 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovups 2 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vmovups 4 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vmovups 6 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm13,%xmm1,%xmm0,%xmm13 ;\ + +#define KERNEL8x1_4(xx) \ + prefetcht0 A_PR1+192(AO,%rax,8) ;\ + vmovddup 1 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups 8 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovups 10 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vmovups 12 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vmovups 14 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm13,%xmm1,%xmm0,%xmm13 ;\ + addq $4, BI ;\ + addq $32, %rax ;\ + +#define KERNEL8x1_SUB(xx) \ + vmovddup -2 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -16 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovups -14 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vmovups -12 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vmovups -10 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm13,%xmm1,%xmm0,%xmm13 ;\ + + +/*******************************************************************************************/ + +#define KERNEL4x1_1(xx) \ + vmovddup -2 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -16 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovups -14 * SIZE(AO, %rax, 8), %xmm0 ;\ + 
vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + +#define KERNEL4x1_2(xx) \ + vmovddup -1 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -12 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovups -10 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + +#define KERNEL4x1_3(xx) \ + vmovddup 0 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -8 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovups -6 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + +#define KERNEL4x1_4(xx) \ + vmovddup 1 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -4 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovups -2 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + addq $4, BI ;\ + addq $16, %rax ;\ + +#define KERNEL4x1_SUB(xx) \ + vmovddup -2 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -16 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovups -14 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm7,%xmm1,%xmm0,%xmm7 ;\ + + +/*******************************************************************************************/ + +#define KERNEL2x1_1(xx) \ + vmovddup -2 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -16 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + +#define KERNEL2x1_2(xx) \ + vmovddup -1 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -14 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + +#define KERNEL2x1_3(xx) \ + vmovddup 0 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -12 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + +#define KERNEL2x1_4(xx) \ + vmovddup 1 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -10 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + addq $4, BI ;\ + addq $8, %rax ;\ + +#define KERNEL2x1_SUB(xx) \ + vmovddup -2 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovups -16 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddpd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + 
+/*******************************************************************************************/ + +#define KERNEL1x1_1(xx) \ + vmovsd -2 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovsd -16 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddsd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + +#define KERNEL1x1_2(xx) \ + vmovsd -1 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovsd -15 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddsd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + +#define KERNEL1x1_3(xx) \ + vmovsd 0 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovsd -14 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddsd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + +#define KERNEL1x1_4(xx) \ + vmovsd 1 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovsd -13 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddsd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + addq $4, BI ;\ + addq $4, %rax ;\ + +#define KERNEL1x1_SUB(xx) \ + vmovsd -2 * SIZE(BO, BI, 8), %xmm1 ;\ + vmovsd -16 * SIZE(AO, %rax, 8), %xmm0 ;\ + vfmaddsd %xmm4,%xmm1,%xmm0,%xmm4 ;\ + + +/*******************************************************************************************/ + + + + + + PROLOGUE + PROFCODE + + subq $STACKSIZE, %rsp + movq %rbx, (%rsp) + movq %rbp, 8(%rsp) + movq %r12, 16(%rsp) + movq %r13, 24(%rsp) + movq %r14, 32(%rsp) + movq %r15, 40(%rsp) + + vzeroupper + +#ifdef WINDOWS_ABI + movq %rdi, 48(%rsp) + movq %rsi, 56(%rsp) + movups %xmm6, 64(%rsp) + movups %xmm7, 80(%rsp) + movups %xmm8, 96(%rsp) + movups %xmm9, 112(%rsp) + movups %xmm10, 128(%rsp) + movups %xmm11, 144(%rsp) + movups %xmm12, 160(%rsp) + movups %xmm13, 176(%rsp) + movups %xmm14, 192(%rsp) + movups %xmm15, 208(%rsp) + + movq ARG1, OLD_M + movq ARG2, OLD_N + movq ARG3, OLD_K + movq OLD_A, A + movq OLD_B, B + movq OLD_C, C + movq OLD_LDC, LDC +#ifdef TRMMKERNEL + movsd OLD_OFFSET, %xmm12 +#endif + vmovaps %xmm3, %xmm0 + +#else + movq STACKSIZE + 8(%rsp), LDC +#ifdef TRMMKERNEL + movsd STACKSIZE + 16(%rsp), %xmm12 +#endif + +#endif + + movq %rsp, SP # save old stack + subq $128 + L_BUFFER_SIZE, %rsp + andq $-4096, %rsp # align stack + + STACK_TOUCHING + + cmpq $0, OLD_M + je .L999 + + cmpq $0, 
OLD_N + je .L999 + + cmpq $0, OLD_K + je .L999 + + movq OLD_M, M + movq OLD_N, N + movq OLD_K, K + + vmovsd %xmm0, ALPHA + + salq $BASE_SHIFT, LDC + + movq N, %rax + xorq %rdx, %rdx + movq $6, %rdi + divq %rdi // N / 6 + movq %rax, Ndiv6 // N / 6 + movq %rdx, Nmod6 // N % 6 + + + +#ifdef TRMMKERNEL + vmovsd %xmm12, OFFSET + vmovsd %xmm12, KK +#ifndef LEFT + negq KK +#endif +#endif + + movq Ndiv6, J + cmpq $0, J + je .L2_0 + ALIGN_4 + +.L6_01: + // copy to sub buffer + movq K, %rax + salq $1,%rax // K * 2 + movq B, BO1 + leaq (B,%rax,8), BO2 // next offset to BO2 + leaq BUFFER1, BO // first buffer to BO + movq K, %rax + sarq $2, %rax // K / 4 + jz .L6_02a + ALIGN_4 + +.L6_02: + prefetcht0 512(BO1) + prefetcht0 512(BO2) + prefetchw 512(BO) + vmovups (BO1), %xmm0 + vmovups 2*SIZE(BO1), %xmm2 + vmovups 4*SIZE(BO1), %xmm4 + vmovups 6*SIZE(BO1), %xmm6 + vmovsd (BO2), %xmm1 + vmovsd 2*SIZE(BO2), %xmm3 + vmovsd 4*SIZE(BO2), %xmm5 + vmovsd 6*SIZE(BO2), %xmm7 + vmovups %xmm0, (BO) + vmovsd %xmm1, 2*SIZE(BO) + vmovups %xmm2, 3*SIZE(BO) + vmovsd %xmm3, 5*SIZE(BO) + vmovups %xmm4, 6*SIZE(BO) + vmovsd %xmm5, 8*SIZE(BO) + vmovups %xmm6, 9*SIZE(BO) + vmovsd %xmm7,11*SIZE(BO) + addq $8*SIZE,BO1 + addq $8*SIZE,BO2 + addq $12*SIZE,BO + decq %rax + jnz .L6_02 + +.L6_02a: + + movq K, %rax + andq $3, %rax // K % 4 + jz .L6_02c + ALIGN_4 + +.L6_02b: + + vmovups (BO1), %xmm0 + vmovsd (BO2), %xmm1 + vmovups %xmm0, (BO) + vmovsd %xmm1, 2*SIZE(BO) + addq $2*SIZE,BO1 + addq $2*SIZE,BO2 + addq $3*SIZE,BO + decq %rax + jnz .L6_02b + +.L6_02c: + + movq K, %rax + salq $1,%rax // K * 2 + leaq (B,%rax,8), BO1 // next offset to BO1 + leaq (BO1,%rax,8), BO2 // next offset to BO1 + leaq BUFFER2, BO // second buffer to BO + movq K, %rax + sarq $2, %rax // k / 4 + jz .L6_03a + ALIGN_4 + + +.L6_03: + + prefetcht0 512(BO2) + prefetchw 512(BO) + vmovups (BO2), %xmm0 + vmovups 2*SIZE(BO2), %xmm2 + vmovups 4*SIZE(BO2), %xmm4 + vmovups 6*SIZE(BO2), %xmm6 + vmovsd 1*SIZE(BO1), %xmm1 + vmovsd 3*SIZE(BO1), %xmm3 
+ vmovsd 5*SIZE(BO1), %xmm5 + vmovsd 7*SIZE(BO1), %xmm7 + vmovsd %xmm1, 0*SIZE(BO) + vmovups %xmm0, 1*SIZE(BO) + vmovsd %xmm3, 3*SIZE(BO) + vmovups %xmm2, 4*SIZE(BO) + vmovsd %xmm5, 6*SIZE(BO) + vmovups %xmm4, 7*SIZE(BO) + vmovsd %xmm7, 9*SIZE(BO) + vmovups %xmm6,10*SIZE(BO) + addq $8*SIZE,BO1 + addq $8*SIZE,BO2 + addq $12*SIZE,BO + decq %rax + jnz .L6_03 + +.L6_03a: + + movq K, %rax + andq $3, %rax // K % 4 + jz .L6_03c + ALIGN_4 + + +.L6_03b: + + vmovsd 1*SIZE(BO1), %xmm0 + vmovups (BO2), %xmm1 + vmovsd %xmm0, (BO) + vmovups %xmm1, 1*SIZE(BO) + addq $2*SIZE,BO1 + addq $2*SIZE,BO2 + addq $3*SIZE,BO + decq %rax + jnz .L6_03b + + +.L6_03c: + + movq BO2, B // next offset of B + +.L6_10: + movq C, CO1 + leaq (C, LDC, 2), C + leaq (C, LDC, 1), C // c += 3 * ldc + +#if defined(TRMMKERNEL) && defined(LEFT) + movq OFFSET, %rax + movq %rax, KK +#endif + + movq A, AO // aoffset = a + addq $16 * SIZE, AO + + movq M, I + sarq $3, I // i = (m >> 3) + je .L6_20 + + ALIGN_4 + +.L6_11: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $6 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $6 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, 8), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, 8), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $8, %rax // number of values in AO +#else + addq $3, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax // K = K - ( K % 8 ) + je .L6_16 + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + salq $3, %rax // rax = rax * 
8 ; number of values + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L6_12: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL8x3_1(xxx) + KERNEL8x3_2(xxx) + KERNEL8x3_3(xxx) + prefetcht0 B_PR1+64(BO,BI,8) + KERNEL8x3_4(xxx) + + KERNEL8x3_1(xxx) + KERNEL8x3_2(xxx) + prefetcht0 B_PR1+32(BO,BI,8) + KERNEL8x3_3(xxx) + KERNEL8x3_4(xxx) + + je .L6_16 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL8x3_1(xxx) + KERNEL8x3_2(xxx) + KERNEL8x3_3(xxx) + prefetcht0 B_PR1+64(BO,BI,8) + KERNEL8x3_4(xxx) + + KERNEL8x3_1(xxx) + KERNEL8x3_2(xxx) + prefetcht0 B_PR1+32(BO,BI,8) + KERNEL8x3_3(xxx) + KERNEL8x3_4(xxx) + + je .L6_16 + + jmp .L6_12 + ALIGN_4 + +.L6_16: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L6_19 + + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L6_17: + + KERNEL8x3_SUB(xxx) + addq $3, BI + addq $8, %rax + jl .L6_17 + ALIGN_4 + + +.L6_19: + + vmovddup ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddpd (CO1),%xmm0, %xmm4,%xmm4 + vfmaddpd 2 * SIZE(CO1),%xmm0, %xmm7,%xmm7 + vfmaddpd 4 * SIZE(CO1),%xmm0, %xmm10,%xmm10 + vfmaddpd 6 * SIZE(CO1),%xmm0, %xmm13,%xmm13 + + vfmaddpd (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddpd 2 * SIZE(CO1, LDC),%xmm0, %xmm8,%xmm8 + vfmaddpd 4 * SIZE(CO1, LDC),%xmm0, %xmm11,%xmm11 + vfmaddpd 6 * SIZE(CO1, LDC),%xmm0, %xmm14,%xmm14 + + vfmaddpd (CO1, LDC, 2),%xmm0, %xmm6,%xmm6 + vfmaddpd 2 * SIZE(CO1, LDC, 2),%xmm0, %xmm9,%xmm9 + vfmaddpd 4 * SIZE(CO1, LDC, 2),%xmm0, %xmm12,%xmm12 + vfmaddpd 6 * SIZE(CO1, LDC, 2),%xmm0, %xmm15,%xmm15 + +#else + vmulpd %xmm0, %xmm4,%xmm4 + vmulpd %xmm0, %xmm7,%xmm7 + vmulpd %xmm0, %xmm10,%xmm10 + vmulpd %xmm0, %xmm13,%xmm13 + + vmulpd %xmm0, %xmm5,%xmm5 + vmulpd %xmm0, %xmm8,%xmm8 + vmulpd %xmm0, %xmm11,%xmm11 + vmulpd %xmm0, %xmm14,%xmm14 + + vmulpd %xmm0, %xmm6,%xmm6 + 
vmulpd %xmm0, %xmm9,%xmm9 + vmulpd %xmm0, %xmm12,%xmm12 + vmulpd %xmm0, %xmm15,%xmm15 + +#endif + + vmovups %xmm4 , (CO1) + vmovups %xmm7 , 2 * SIZE(CO1) + vmovups %xmm10, 4 * SIZE(CO1) + vmovups %xmm13, 6 * SIZE(CO1) + + vmovups %xmm5 , (CO1, LDC) + vmovups %xmm8 , 2 * SIZE(CO1, LDC) + vmovups %xmm11, 4 * SIZE(CO1, LDC) + vmovups %xmm14, 6 * SIZE(CO1, LDC) + + vmovups %xmm6 , (CO1, LDC, 2) + vmovups %xmm9 , 2 * SIZE(CO1, LDC, 2) + vmovups %xmm12, 4 * SIZE(CO1, LDC, 2) + vmovups %xmm15, 6 * SIZE(CO1, LDC, 2) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, 8), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, 8), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $8, KK +#endif + + addq $8 * SIZE, CO1 # coffset += 8 + decq I # i -- + jg .L6_11 + ALIGN_4 + +/************************************************************************** +* Rest of M +***************************************************************************/ +.L6_20: + // Test rest of M + + testq $7, M + jz .L7_10 // to next 3 lines of N + + testq $4, M + jz .L6_30 + + ALIGN_4 + +.L6_21: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $6 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $6 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, 8), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, 8), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, 
KKK +#else + movq KK, %rax +#ifdef LEFT + addq $4, %rax // number of values in A +#else + addq $3, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L6_26 + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L6_22: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL4x3_1(xxx) + KERNEL4x3_2(xxx) + KERNEL4x3_3(xxx) + prefetcht0 B_PR1+64(BO,BI,8) + KERNEL4x3_4(xxx) + + KERNEL4x3_1(xxx) + KERNEL4x3_2(xxx) + prefetcht0 B_PR1+32(BO,BI,8) + KERNEL4x3_3(xxx) + KERNEL4x3_4(xxx) + + je .L6_26 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL4x3_1(xxx) + KERNEL4x3_2(xxx) + KERNEL4x3_3(xxx) + prefetcht0 B_PR1+64(BO,BI,8) + KERNEL4x3_4(xxx) + + KERNEL4x3_1(xxx) + KERNEL4x3_2(xxx) + prefetcht0 B_PR1+32(BO,BI,8) + KERNEL4x3_3(xxx) + KERNEL4x3_4(xxx) + + je .L6_26 + + jmp .L6_22 + ALIGN_4 + +.L6_26: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L6_29 + + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L6_27: + + KERNEL4x3_SUB(xxx) + addq $3, BI + addq $4, %rax + jl .L6_27 + ALIGN_4 + + +.L6_29: + + vmovddup ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddpd (CO1),%xmm0, %xmm4,%xmm4 + vfmaddpd 2 * SIZE(CO1),%xmm0, %xmm7,%xmm7 + + vfmaddpd (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddpd 2 * SIZE(CO1, LDC),%xmm0, %xmm8,%xmm8 + + vfmaddpd (CO1, LDC, 2),%xmm0, %xmm6,%xmm6 + vfmaddpd 2 * SIZE(CO1, LDC, 2),%xmm0, %xmm9,%xmm9 + +#else + vmulpd %xmm0, %xmm4,%xmm4 + vmulpd %xmm0, %xmm7,%xmm7 + + vmulpd %xmm0, %xmm5,%xmm5 + vmulpd %xmm0, %xmm8,%xmm8 + + vmulpd %xmm0, %xmm6,%xmm6 + vmulpd %xmm0, %xmm9,%xmm9 + +#endif + + vmovups %xmm4 , (CO1) + vmovups %xmm7 , 2 * SIZE(CO1) + + vmovups 
%xmm5 , (CO1, LDC) + vmovups %xmm8 , 2 * SIZE(CO1, LDC) + + vmovups %xmm6 , (CO1, LDC, 2) + vmovups %xmm9 , 2 * SIZE(CO1, LDC, 2) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, 8), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, 8), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $4, KK +#endif + + addq $4 * SIZE, CO1 # coffset += 4 + ALIGN_4 + + +.L6_30: + testq $2, M + jz .L6_40 + + ALIGN_4 + +.L6_31: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $6 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $6 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, 8), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, 8), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $2, %rax // number of values in AO +#else + addq $3, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L6_36 + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L6_32: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL2x3_1(xxx) + KERNEL2x3_2(xxx) + KERNEL2x3_3(xxx) + prefetcht0 B_PR1+64(BO,BI,8) + KERNEL2x3_4(xxx) + + KERNEL2x3_1(xxx) + KERNEL2x3_2(xxx) + prefetcht0 B_PR1+32(BO,BI,8) + 
KERNEL2x3_3(xxx) + KERNEL2x3_4(xxx) + + je .L6_36 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL2x3_1(xxx) + KERNEL2x3_2(xxx) + KERNEL2x3_3(xxx) + prefetcht0 B_PR1+64(BO,BI,8) + KERNEL2x3_4(xxx) + + KERNEL2x3_1(xxx) + KERNEL2x3_2(xxx) + prefetcht0 B_PR1+32(BO,BI,8) + KERNEL2x3_3(xxx) + KERNEL2x3_4(xxx) + + je .L6_36 + + jmp .L6_32 + ALIGN_4 + +.L6_36: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L6_39 + + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L6_37: + + KERNEL2x3_SUB(xxx) + addq $3, BI + addq $2, %rax + jl .L6_37 + ALIGN_4 + + +.L6_39: + + vmovddup ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddpd (CO1),%xmm0, %xmm4,%xmm4 + vfmaddpd (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddpd (CO1, LDC, 2),%xmm0, %xmm6,%xmm6 + +#else + vmulpd %xmm0, %xmm4,%xmm4 + vmulpd %xmm0, %xmm5,%xmm5 + vmulpd %xmm0, %xmm6,%xmm6 + +#endif + + vmovups %xmm4 , (CO1) + vmovups %xmm5 , (CO1, LDC) + vmovups %xmm6 , (CO1, LDC, 2) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, 8), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, 8), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $2, KK +#endif + + addq $2 * SIZE, CO1 # coffset += 2 + ALIGN_4 + +.L6_40: + testq $1, M + jz .L7_10 // to next 3 lines of N + + ALIGN_4 + +.L6_41: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $6 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $6 * SIZE, 
BO + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, 8), BO + leaq (AO, %rax, 8), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $1, %rax // number of values in AO +#else + addq $3, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax + je .L6_46 + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L6_42: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL1x3_1(xxx) + KERNEL1x3_2(xxx) + KERNEL1x3_3(xxx) + prefetcht0 B_PR1+64(BO,BI,8) + KERNEL1x3_4(xxx) + + KERNEL1x3_1(xxx) + KERNEL1x3_2(xxx) + prefetcht0 B_PR1+32(BO,BI,8) + KERNEL1x3_3(xxx) + KERNEL1x3_4(xxx) + + je .L6_46 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL1x3_1(xxx) + KERNEL1x3_2(xxx) + KERNEL1x3_3(xxx) + prefetcht0 B_PR1+64(BO,BI,8) + KERNEL1x3_4(xxx) + + KERNEL1x3_1(xxx) + KERNEL1x3_2(xxx) + prefetcht0 B_PR1+32(BO,BI,8) + KERNEL1x3_3(xxx) + KERNEL1x3_4(xxx) + + je .L6_46 + + jmp .L6_42 + ALIGN_4 + +.L6_46: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L6_49 + + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L6_47: + + KERNEL1x3_SUB(xxx) + addq $3, BI + addq $1, %rax + jl .L6_47 + ALIGN_4 + + +.L6_49: + + vmovddup ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddsd (CO1),%xmm0, %xmm4,%xmm4 + vfmaddsd (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddsd (CO1, LDC, 2),%xmm0, %xmm6,%xmm6 + +#else + vmulsd %xmm0, %xmm4,%xmm4 + vmulsd %xmm0, %xmm5,%xmm5 + vmulsd %xmm0, %xmm6,%xmm6 + +#endif + + vmovsd %xmm4 , (CO1) + vmovsd %xmm5 , (CO1, LDC) + vmovsd %xmm6 , (CO1, LDC, 2) + +#if 
(defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, 8), BO + leaq (AO, %rax, 8), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $1, KK +#endif + + addq $1 * SIZE, CO1 # coffset += 1 + ALIGN_4 + + + + +/***************************************************************************************************************/ + +.L7_10: + movq C, CO1 + leaq (C, LDC, 2), C + leaq (C, LDC, 1), C // c += 3 * ldc + +#if defined(TRMMKERNEL) && defined(LEFT) + movq OFFSET, %rax + movq %rax, KK +#endif + + movq A, AO // aoffset = a + addq $16 * SIZE, AO + + movq M, I + sarq $3, I // i = (m >> 3) + je .L7_20 + ALIGN_4 + +.L7_11: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER2, BO // second buffer to BO + addq $6 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER2, BO // second buffer to BO + addq $6 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, 8), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, 8), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $8, %rax // number of values in AO +#else + addq $3, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L7_16 + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + + ALIGN_4 + +.L7_12: + + prefetcht0 B_PR1(BO,BI,8) + 
KERNEL8x3_1(xxx) + KERNEL8x3_2(xxx) + KERNEL8x3_3(xxx) + prefetcht0 B_PR1+64(BO,BI,8) + KERNEL8x3_4(xxx) + + KERNEL8x3_1(xxx) + KERNEL8x3_2(xxx) + prefetcht0 B_PR1+32(BO,BI,8) + KERNEL8x3_3(xxx) + KERNEL8x3_4(xxx) + + je .L7_16 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL8x3_1(xxx) + KERNEL8x3_2(xxx) + KERNEL8x3_3(xxx) + prefetcht0 B_PR1+64(BO,BI,8) + KERNEL8x3_4(xxx) + + KERNEL8x3_1(xxx) + KERNEL8x3_2(xxx) + prefetcht0 B_PR1+32(BO,BI,8) + KERNEL8x3_3(xxx) + KERNEL8x3_4(xxx) + + je .L7_16 + + jmp .L7_12 + ALIGN_4 + +.L7_16: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L7_19 + + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L7_17: + + KERNEL8x3_SUB(xxx) + addq $3, BI + addq $8, %rax + jl .L7_17 + ALIGN_4 + + +.L7_19: + + vmovddup ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddpd (CO1),%xmm0, %xmm4,%xmm4 + vfmaddpd 2 * SIZE(CO1),%xmm0, %xmm7,%xmm7 + vfmaddpd 4 * SIZE(CO1),%xmm0, %xmm10,%xmm10 + vfmaddpd 6 * SIZE(CO1),%xmm0, %xmm13,%xmm13 + + vfmaddpd (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddpd 2 * SIZE(CO1, LDC),%xmm0, %xmm8,%xmm8 + vfmaddpd 4 * SIZE(CO1, LDC),%xmm0, %xmm11,%xmm11 + vfmaddpd 6 * SIZE(CO1, LDC),%xmm0, %xmm14,%xmm14 + + vfmaddpd (CO1, LDC, 2),%xmm0, %xmm6,%xmm6 + vfmaddpd 2 * SIZE(CO1, LDC, 2),%xmm0, %xmm9,%xmm9 + vfmaddpd 4 * SIZE(CO1, LDC, 2),%xmm0, %xmm12,%xmm12 + vfmaddpd 6 * SIZE(CO1, LDC, 2),%xmm0, %xmm15,%xmm15 + +#else + vmulpd %xmm0, %xmm4,%xmm4 + vmulpd %xmm0, %xmm7,%xmm7 + vmulpd %xmm0, %xmm10,%xmm10 + vmulpd %xmm0, %xmm13,%xmm13 + + vmulpd %xmm0, %xmm5,%xmm5 + vmulpd %xmm0, %xmm8,%xmm8 + vmulpd %xmm0, %xmm11,%xmm11 + vmulpd %xmm0, %xmm14,%xmm14 + + vmulpd %xmm0, %xmm6,%xmm6 + vmulpd %xmm0, %xmm9,%xmm9 + vmulpd %xmm0, %xmm12,%xmm12 + vmulpd %xmm0, %xmm15,%xmm15 + +#endif + + vmovups %xmm4 , (CO1) + vmovups %xmm7 , 2 * 
SIZE(CO1) + vmovups %xmm10, 4 * SIZE(CO1) + vmovups %xmm13, 6 * SIZE(CO1) + + vmovups %xmm5 , (CO1, LDC) + vmovups %xmm8 , 2 * SIZE(CO1, LDC) + vmovups %xmm11, 4 * SIZE(CO1, LDC) + vmovups %xmm14, 6 * SIZE(CO1, LDC) + + vmovups %xmm6 , (CO1, LDC, 2) + vmovups %xmm9 , 2 * SIZE(CO1, LDC, 2) + vmovups %xmm12, 4 * SIZE(CO1, LDC, 2) + vmovups %xmm15, 6 * SIZE(CO1, LDC, 2) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, 8), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, 8), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $8, KK +#endif + + addq $8 * SIZE, CO1 # coffset += 8 + decq I # i -- + jg .L7_11 + ALIGN_4 + +.L7_20: + // Test rest of M + + testq $7, M + jz .L7_60 // to next 6 lines of N + + testq $4, M + jz .L7_30 + + ALIGN_4 + +.L7_21: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER2, BO // second buffer to BO + addq $6 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER2, BO // second buffer to BO + addq $6 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, 8), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, 8), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $4, %rax // number of values in AO +#else + addq $3, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L7_26 + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + salq $2, %rax // rax = 
rax * 4 ; number of values + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L7_22: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL4x3_1(xxx) + KERNEL4x3_2(xxx) + KERNEL4x3_3(xxx) + prefetcht0 B_PR1+64(BO,BI,8) + KERNEL4x3_4(xxx) + + KERNEL4x3_1(xxx) + KERNEL4x3_2(xxx) + prefetcht0 B_PR1+32(BO,BI,8) + KERNEL4x3_3(xxx) + KERNEL4x3_4(xxx) + + je .L7_26 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL4x3_1(xxx) + KERNEL4x3_2(xxx) + KERNEL4x3_3(xxx) + prefetcht0 B_PR1+64(BO,BI,8) + KERNEL4x3_4(xxx) + + KERNEL4x3_1(xxx) + KERNEL4x3_2(xxx) + prefetcht0 B_PR1+32(BO,BI,8) + KERNEL4x3_3(xxx) + KERNEL4x3_4(xxx) + + je .L7_26 + + jmp .L7_22 + ALIGN_4 + +.L7_26: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L7_29 + + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L7_27: + + KERNEL4x3_SUB(xxx) + addq $3, BI + addq $4, %rax + jl .L7_27 + ALIGN_4 + + +.L7_29: + + vmovddup ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddpd (CO1),%xmm0, %xmm4,%xmm4 + vfmaddpd 2 * SIZE(CO1),%xmm0, %xmm7,%xmm7 + + vfmaddpd (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddpd 2 * SIZE(CO1, LDC),%xmm0, %xmm8,%xmm8 + + vfmaddpd (CO1, LDC, 2),%xmm0, %xmm6,%xmm6 + vfmaddpd 2 * SIZE(CO1, LDC, 2),%xmm0, %xmm9,%xmm9 + +#else + vmulpd %xmm0, %xmm4,%xmm4 + vmulpd %xmm0, %xmm7,%xmm7 + + vmulpd %xmm0, %xmm5,%xmm5 + vmulpd %xmm0, %xmm8,%xmm8 + + vmulpd %xmm0, %xmm6,%xmm6 + vmulpd %xmm0, %xmm9,%xmm9 + +#endif + + vmovups %xmm4 , (CO1) + vmovups %xmm7 , 2 * SIZE(CO1) + + vmovups %xmm5 , (CO1, LDC) + vmovups %xmm8 , 2 * SIZE(CO1, LDC) + + vmovups %xmm6 , (CO1, LDC, 2) + vmovups %xmm9 , 2 * SIZE(CO1, LDC, 2) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // 
Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, 8), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, 8), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $4, KK +#endif + + addq $4 * SIZE, CO1 # coffset += 4 + ALIGN_4 + + +.L7_30: + testq $2, M + jz .L7_40 + + ALIGN_4 + +.L7_31: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER2, BO // second buffer to BO + addq $6 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER2, BO // second buffer to BO + addq $6 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, 8), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, 8), AO +#endif + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $2, %rax // number of values in AO +#else + addq $3, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L7_36 + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L7_32: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL2x3_1(xxx) + KERNEL2x3_2(xxx) + KERNEL2x3_3(xxx) + prefetcht0 B_PR1+64(BO,BI,8) + KERNEL2x3_4(xxx) + + KERNEL2x3_1(xxx) + KERNEL2x3_2(xxx) + prefetcht0 B_PR1+32(BO,BI,8) + KERNEL2x3_3(xxx) + KERNEL2x3_4(xxx) + + je .L7_36 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL2x3_1(xxx) + KERNEL2x3_2(xxx) + KERNEL2x3_3(xxx) + prefetcht0 B_PR1+64(BO,BI,8) + KERNEL2x3_4(xxx) + + KERNEL2x3_1(xxx) + KERNEL2x3_2(xxx) + prefetcht0 B_PR1+32(BO,BI,8) + KERNEL2x3_3(xxx) + KERNEL2x3_4(xxx) + + je .L7_36 + + jmp .L7_32 + 
ALIGN_4 + +.L7_36: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L7_39 + + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L7_37: + + KERNEL2x3_SUB(xxx) + addq $3, BI + addq $2, %rax + jl .L7_37 + ALIGN_4 + + +.L7_39: + + vmovddup ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddpd (CO1),%xmm0, %xmm4,%xmm4 + vfmaddpd (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddpd (CO1, LDC, 2),%xmm0, %xmm6,%xmm6 + +#else + vmulpd %xmm0, %xmm4,%xmm4 + vmulpd %xmm0, %xmm5,%xmm5 + vmulpd %xmm0, %xmm6,%xmm6 + +#endif + + vmovups %xmm4 , (CO1) + vmovups %xmm5 , (CO1, LDC) + vmovups %xmm6 , (CO1, LDC, 2) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, 8), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, 8), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $2, KK +#endif + + addq $2 * SIZE, CO1 # coffset += 2 + ALIGN_4 + + + + + +.L7_40: + testq $1, M + jz .L7_60 // to next 6 lines of N + + ALIGN_4 + +.L7_41: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER2, BO // second buffer to BO + addq $6 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER2, BO // second buffer to BO + addq $6 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, 8), BO + leaq (AO, %rax, 8), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + 
movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $1, %rax // number of values in AO +#else + addq $3, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L7_46 + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L7_42: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL1x3_1(xxx) + KERNEL1x3_2(xxx) + KERNEL1x3_3(xxx) + prefetcht0 B_PR1+64(BO,BI,8) + KERNEL1x3_4(xxx) + + KERNEL1x3_1(xxx) + KERNEL1x3_2(xxx) + prefetcht0 B_PR1+32(BO,BI,8) + KERNEL1x3_3(xxx) + KERNEL1x3_4(xxx) + + je .L7_46 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL1x3_1(xxx) + KERNEL1x3_2(xxx) + KERNEL1x3_3(xxx) + prefetcht0 B_PR1+64(BO,BI,8) + KERNEL1x3_4(xxx) + + KERNEL1x3_1(xxx) + KERNEL1x3_2(xxx) + prefetcht0 B_PR1+32(BO,BI,8) + KERNEL1x3_3(xxx) + KERNEL1x3_4(xxx) + + je .L7_46 + + jmp .L7_42 + ALIGN_4 + +.L7_46: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L7_49 + + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L7_47: + + KERNEL1x3_SUB(xxx) + addq $3, BI + addq $1, %rax + jl .L7_47 + ALIGN_4 + + +.L7_49: + + vmovddup ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddsd (CO1),%xmm0, %xmm4,%xmm4 + vfmaddsd (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddsd (CO1, LDC, 2),%xmm0, %xmm6,%xmm6 + +#else + vmulsd %xmm0, %xmm4,%xmm4 + vmulsd %xmm0, %xmm5,%xmm5 + vmulsd %xmm0, %xmm6,%xmm6 + +#endif + + vmovsd %xmm4 , (CO1) + vmovsd %xmm5 , (CO1, LDC) + vmovsd %xmm6 , (CO1, LDC, 2) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, 8), BO + leaq (AO, %rax, 8), AO +#endif + + +#if 
defined(TRMMKERNEL) && defined(LEFT) + addq $1, KK +#endif + + addq $1 * SIZE, CO1 # coffset += 1 + +.L7_60: + + decq J // j -- + jg .L6_01 + + +.L2_0: + cmpq $0, Nmod6 // N % 6 == 0 + je .L999 + +/************************************************************************************************ +* Loop for Nmod6 / 2 > 0 +*************************************************************************************************/ + + movq Nmod6, J + sarq $1, J // j = j / 2 + je .L1_0 + ALIGN_4 + +.L2_01: + // copy to sub buffer + movq B, BO1 + leaq BUFFER1, BO // first buffer to BO + movq K, %rax + ALIGN_4 + +.L2_02b: + + vmovups (BO1), %xmm0 + vmovups %xmm0, (BO) + addq $2*SIZE,BO1 + addq $2*SIZE,BO + decq %rax + jnz .L2_02b + +.L2_02c: + + movq BO1, B // next offset of B + +.L2_10: + movq C, CO1 + leaq (C, LDC, 2), C // c += 2 * ldc + +#if defined(TRMMKERNEL) && defined(LEFT) + movq OFFSET, %rax + movq %rax, KK +#endif + + movq A, AO // aoffset = a + addq $16 * SIZE, AO + + movq M, I + sarq $3, I // i = (m >> 3) + je .L2_20 + + ALIGN_4 + +.L2_11: + +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, 8), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, 8), AO +#endif + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $8, %rax // number of values in AO +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax // K = K - ( K % 8 ) + je .L2_16 + movq %rax, BI // Index for BO + leaq (BI,BI,1), 
BI // BI = BI * 2 ; number of values + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L2_12: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL8x2_1(xxx) + KERNEL8x2_2(xxx) + KERNEL8x2_3(xxx) + KERNEL8x2_4(xxx) + + prefetcht0 B_PR1(BO,BI,8) + KERNEL8x2_1(xxx) + KERNEL8x2_2(xxx) + KERNEL8x2_3(xxx) + KERNEL8x2_4(xxx) + + je .L2_16 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL8x2_1(xxx) + KERNEL8x2_2(xxx) + KERNEL8x2_3(xxx) + KERNEL8x2_4(xxx) + + prefetcht0 B_PR1(BO,BI,8) + KERNEL8x2_1(xxx) + KERNEL8x2_2(xxx) + KERNEL8x2_3(xxx) + KERNEL8x2_4(xxx) + + je .L2_16 + + jmp .L2_12 + ALIGN_4 + +.L2_16: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_19 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L2_17: + + KERNEL8x2_SUB(xxx) + addq $2, BI + addq $8, %rax + jl .L2_17 + ALIGN_4 + + +.L2_19: + + vmovddup ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddpd (CO1),%xmm0, %xmm4,%xmm4 + vfmaddpd 2 * SIZE(CO1),%xmm0, %xmm7,%xmm7 + vfmaddpd 4 * SIZE(CO1),%xmm0, %xmm10,%xmm10 + vfmaddpd 6 * SIZE(CO1),%xmm0, %xmm13,%xmm13 + + vfmaddpd (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddpd 2 * SIZE(CO1, LDC),%xmm0, %xmm8,%xmm8 + vfmaddpd 4 * SIZE(CO1, LDC),%xmm0, %xmm11,%xmm11 + vfmaddpd 6 * SIZE(CO1, LDC),%xmm0, %xmm14,%xmm14 + + +#else + vmulpd %xmm0, %xmm4,%xmm4 + vmulpd %xmm0, %xmm7,%xmm7 + vmulpd %xmm0, %xmm10,%xmm10 + vmulpd %xmm0, %xmm13,%xmm13 + + vmulpd %xmm0, %xmm5,%xmm5 + vmulpd %xmm0, %xmm8,%xmm8 + vmulpd %xmm0, %xmm11,%xmm11 + vmulpd %xmm0, %xmm14,%xmm14 + +#endif + + vmovups %xmm4 , (CO1) + vmovups %xmm7 , 2 * SIZE(CO1) + vmovups %xmm10, 4 * SIZE(CO1) + vmovups %xmm13, 6 * SIZE(CO1) + + vmovups %xmm5 , (CO1, LDC) + vmovups %xmm8 , 2 * SIZE(CO1, LDC) + vmovups %xmm11, 4 * SIZE(CO1, LDC) 
+ vmovups %xmm14, 6 * SIZE(CO1, LDC) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, 8), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, 8), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $8, KK +#endif + + addq $8 * SIZE, CO1 # coffset += 8 + decq I # i -- + jg .L2_11 + ALIGN_4 + +/************************************************************************** +* Rest of M +***************************************************************************/ +.L2_20: + // Test rest of M + + testq $7, M + jz .L2_60 // to next 2 lines of N + + testq $4, M + jz .L2_30 + + ALIGN_4 + +.L2_21: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, 8), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, 8), AO +#endif + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $4, %rax // number of values in AO +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L2_26 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L2_22: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL4x2_1(xxx) + 
KERNEL4x2_2(xxx) + KERNEL4x2_3(xxx) + KERNEL4x2_4(xxx) + + prefetcht0 B_PR1(BO,BI,8) + KERNEL4x2_1(xxx) + KERNEL4x2_2(xxx) + KERNEL4x2_3(xxx) + KERNEL4x2_4(xxx) + + je .L2_26 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL4x2_1(xxx) + KERNEL4x2_2(xxx) + KERNEL4x2_3(xxx) + KERNEL4x2_4(xxx) + + prefetcht0 B_PR1(BO,BI,8) + KERNEL4x2_1(xxx) + KERNEL4x2_2(xxx) + KERNEL4x2_3(xxx) + KERNEL4x2_4(xxx) + + je .L2_26 + + jmp .L2_22 + ALIGN_4 + +.L2_26: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_29 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L2_27: + + KERNEL4x2_SUB(xxx) + addq $2, BI + addq $4, %rax + jl .L2_27 + ALIGN_4 + + +.L2_29: + + vmovddup ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddpd (CO1),%xmm0, %xmm4,%xmm4 + vfmaddpd 2 * SIZE(CO1),%xmm0, %xmm7,%xmm7 + + vfmaddpd (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddpd 2 * SIZE(CO1, LDC),%xmm0, %xmm8,%xmm8 + + +#else + vmulpd %xmm0, %xmm4,%xmm4 + vmulpd %xmm0, %xmm7,%xmm7 + + vmulpd %xmm0, %xmm5,%xmm5 + vmulpd %xmm0, %xmm8,%xmm8 + +#endif + + vmovups %xmm4 , (CO1) + vmovups %xmm7 , 2 * SIZE(CO1) + + vmovups %xmm5 , (CO1, LDC) + vmovups %xmm8 , 2 * SIZE(CO1, LDC) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, 8), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, 8), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $4, KK +#endif + + addq $4 * SIZE, CO1 # coffset += 4 + ALIGN_4 + + +.L2_30: + testq $2, M + jz .L2_40 + + ALIGN_4 + +.L2_31: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + 
(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, 8), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, 8), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $2, %rax // number of values in AO +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L2_36 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L2_32: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL2x2_1(xxx) + KERNEL2x2_2(xxx) + KERNEL2x2_3(xxx) + KERNEL2x2_4(xxx) + + prefetcht0 B_PR1(BO,BI,8) + KERNEL2x2_1(xxx) + KERNEL2x2_2(xxx) + KERNEL2x2_3(xxx) + KERNEL2x2_4(xxx) + + je .L2_36 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL2x2_1(xxx) + KERNEL2x2_2(xxx) + KERNEL2x2_3(xxx) + KERNEL2x2_4(xxx) + + prefetcht0 B_PR1(BO,BI,8) + KERNEL2x2_1(xxx) + KERNEL2x2_2(xxx) + KERNEL2x2_3(xxx) + KERNEL2x2_4(xxx) + + je .L2_36 + + jmp .L2_32 + ALIGN_4 + +.L2_36: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_39 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L2_37: + + KERNEL2x2_SUB(xxx) + addq $2, BI + addq $2, %rax + jl .L2_37 + ALIGN_4 + + +.L2_39: + + vmovddup ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddpd 
(CO1),%xmm0, %xmm4,%xmm4 + vfmaddpd (CO1, LDC),%xmm0, %xmm5,%xmm5 + +#else + vmulpd %xmm0, %xmm4,%xmm4 + vmulpd %xmm0, %xmm5,%xmm5 + +#endif + + vmovups %xmm4 , (CO1) + vmovups %xmm5 , (CO1, LDC) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, 8), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, 8), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $2, KK +#endif + + addq $2 * SIZE, CO1 # coffset += 2 + ALIGN_4 + + +.L2_40: + testq $1, M + jz .L2_60 // to next 2 lines of N + + ALIGN_4 + +.L2_41: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, 8), BO + leaq (AO, %rax, 8), AO +#endif + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $1, %rax // number of values in AO +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax + je .L2_46 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L2_42: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL1x2_1(xxx) + KERNEL1x2_2(xxx) + KERNEL1x2_3(xxx) + KERNEL1x2_4(xxx) + + prefetcht0 B_PR1(BO,BI,8) + KERNEL1x2_1(xxx) + KERNEL1x2_2(xxx) + KERNEL1x2_3(xxx) + KERNEL1x2_4(xxx) + + je .L2_46 + + prefetcht0 
B_PR1(BO,BI,8) + KERNEL1x2_1(xxx) + KERNEL1x2_2(xxx) + KERNEL1x2_3(xxx) + KERNEL1x2_4(xxx) + + prefetcht0 B_PR1(BO,BI,8) + KERNEL1x2_1(xxx) + KERNEL1x2_2(xxx) + KERNEL1x2_3(xxx) + KERNEL1x2_4(xxx) + + je .L2_46 + + jmp .L2_42 + ALIGN_4 + +.L2_46: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_49 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L2_47: + + KERNEL1x2_SUB(xxx) + addq $2, BI + addq $1, %rax + jl .L2_47 + ALIGN_4 + + +.L2_49: + + vmovddup ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddsd (CO1),%xmm0, %xmm4,%xmm4 + vfmaddsd (CO1, LDC),%xmm0, %xmm5,%xmm5 + +#else + vmulsd %xmm0, %xmm4,%xmm4 + vmulsd %xmm0, %xmm5,%xmm5 + +#endif + + vmovsd %xmm4 , (CO1) + vmovsd %xmm5 , (CO1, LDC) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, 8), BO + leaq (AO, %rax, 8), AO +#endif + + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $1, KK +#endif + + addq $1 * SIZE, CO1 # coffset += 1 + ALIGN_4 + + + +.L2_60: + + decq J // j -- + jg .L2_01 // next 2 lines of N + + + +.L1_0: + +/************************************************************************************************ +* Loop for Nmod6 % 2 > 0 +*************************************************************************************************/ + + movq Nmod6, J + andq $1, J // j % 2 + je .L999 + ALIGN_4 + +.L1_01: + // copy to sub buffer + movq B, BO1 + leaq BUFFER1, BO // first buffer to BO + movq K, %rax + ALIGN_4 + +.L1_02b: + + vmovsd (BO1), %xmm0 + vmovsd %xmm0, (BO) + addq $1*SIZE,BO1 + addq $1*SIZE,BO + decq %rax + jnz .L1_02b + +.L1_02c: + + movq BO1, B // next offset of B + +.L1_10: + movq C, CO1 + leaq (C, 
LDC, 1), C // c += 1 * ldc + +#if defined(TRMMKERNEL) && defined(LEFT) + movq OFFSET, %rax + movq %rax, KK +#endif + + movq A, AO // aoffset = a + addq $16 * SIZE, AO + + movq M, I + sarq $3, I // i = (m >> 3) + je .L1_20 + + ALIGN_4 + +.L1_11: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BO, BI, 8), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, 8), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $8, %rax // number of values in AO +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax // K = K - ( K % 8 ) + je .L1_16 + movq %rax, BI // Index for BO + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L1_12: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL8x1_1(xxx) + KERNEL8x1_2(xxx) + KERNEL8x1_3(xxx) + KERNEL8x1_4(xxx) + + KERNEL8x1_1(xxx) + KERNEL8x1_2(xxx) + KERNEL8x1_3(xxx) + KERNEL8x1_4(xxx) + + je .L1_16 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL8x1_1(xxx) + KERNEL8x1_2(xxx) + KERNEL8x1_3(xxx) + KERNEL8x1_4(xxx) + + KERNEL8x1_1(xxx) + KERNEL8x1_2(xxx) + KERNEL8x1_3(xxx) + KERNEL8x1_4(xxx) + + je .L1_16 + + jmp .L1_12 + ALIGN_4 + +.L1_16: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_19 + + movq %rax, BI // Index for BO + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L1_17: + + 
KERNEL8x1_SUB(xxx) + addq $1, BI + addq $8, %rax + jl .L1_17 + ALIGN_4 + + +.L1_19: + + vmovddup ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddpd (CO1),%xmm0, %xmm4,%xmm4 + vfmaddpd 2 * SIZE(CO1),%xmm0, %xmm7,%xmm7 + vfmaddpd 4 * SIZE(CO1),%xmm0, %xmm10,%xmm10 + vfmaddpd 6 * SIZE(CO1),%xmm0, %xmm13,%xmm13 + +#else + vmulpd %xmm0, %xmm4,%xmm4 + vmulpd %xmm0, %xmm7,%xmm7 + vmulpd %xmm0, %xmm10,%xmm10 + vmulpd %xmm0, %xmm13,%xmm13 + +#endif + + vmovups %xmm4 , (CO1) + vmovups %xmm7 , 2 * SIZE(CO1) + vmovups %xmm10, 4 * SIZE(CO1) + vmovups %xmm13, 6 * SIZE(CO1) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BO, BI, 8), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, 8), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $8, KK +#endif + addq $8 * SIZE, CO1 # coffset += 8 + decq I # i -- + jg .L1_11 + ALIGN_4 + +/************************************************************************** +* Rest of M +***************************************************************************/ +.L1_20: + // Test rest of M + + testq $7, M + jz .L999 + + testq $4, M + jz .L1_30 + + ALIGN_4 + +.L1_21: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BO, BI, 8), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, 8), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $4, %rax // number of values in AO +#else 
+ addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L1_26 + movq %rax, BI // Index for BO + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L1_22: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL4x1_1(xxx) + KERNEL4x1_2(xxx) + KERNEL4x1_3(xxx) + KERNEL4x1_4(xxx) + + KERNEL4x1_1(xxx) + KERNEL4x1_2(xxx) + KERNEL4x1_3(xxx) + KERNEL4x1_4(xxx) + + je .L1_26 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL4x1_1(xxx) + KERNEL4x1_2(xxx) + KERNEL4x1_3(xxx) + KERNEL4x1_4(xxx) + + KERNEL4x1_1(xxx) + KERNEL4x1_2(xxx) + KERNEL4x1_3(xxx) + KERNEL4x1_4(xxx) + + je .L1_26 + + jmp .L1_22 + ALIGN_4 + +.L1_26: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_29 + + movq %rax, BI // Index for BO + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L1_27: + + KERNEL4x1_SUB(xxx) + addq $1, BI + addq $4, %rax + jl .L1_27 + ALIGN_4 + + +.L1_29: + + vmovddup ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddpd (CO1),%xmm0, %xmm4,%xmm4 + vfmaddpd 2 * SIZE(CO1),%xmm0, %xmm7,%xmm7 + +#else + vmulpd %xmm0, %xmm4,%xmm4 + vmulpd %xmm0, %xmm7,%xmm7 + +#endif + + vmovups %xmm4 , (CO1) + vmovups %xmm7 , 2 * SIZE(CO1) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BO, BI, 8), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, 8), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $4, KK +#endif + + addq $4 * SIZE, CO1 # coffset += 4 + ALIGN_4 + + +.L1_30: + testq $2, M + jz .L1_40 + + ALIGN_4 + +.L1_31: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + 
leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BO, BI, 8), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, 8), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $2, %rax // number of values in AO +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L1_36 + movq %rax, BI // Index for BO + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L1_32: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL2x1_1(xxx) + KERNEL2x1_2(xxx) + KERNEL2x1_3(xxx) + KERNEL2x1_4(xxx) + + KERNEL2x1_1(xxx) + KERNEL2x1_2(xxx) + KERNEL2x1_3(xxx) + KERNEL2x1_4(xxx) + + je .L1_36 + + KERNEL2x1_1(xxx) + KERNEL2x1_2(xxx) + KERNEL2x1_3(xxx) + KERNEL2x1_4(xxx) + + KERNEL2x1_1(xxx) + KERNEL2x1_2(xxx) + KERNEL2x1_3(xxx) + KERNEL2x1_4(xxx) + + je .L1_36 + + jmp .L1_32 + ALIGN_4 + +.L1_36: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_39 + + movq %rax, BI // Index for BO + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L1_37: + + KERNEL2x1_SUB(xxx) + addq $1, BI + addq $2, %rax + jl .L1_37 + ALIGN_4 + + +.L1_39: + + vmovddup ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddpd (CO1),%xmm0, %xmm4,%xmm4 + +#else + vmulpd %xmm0, %xmm4,%xmm4 + +#endif + + vmovups %xmm4 , (CO1) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BO, BI, 8), BO + 
salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, 8), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $2, KK +#endif + addq $2 * SIZE, CO1 # coffset += 2 + ALIGN_4 + + +.L1_40: + testq $1, M + jz .L999 + + ALIGN_4 + +.L1_41: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BO, BI, 8), BO + leaq (AO, %rax, 8), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $1, %rax // number of values in AO +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax + je .L1_46 + movq %rax, BI // Index for BO + + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L1_42: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL1x1_1(xxx) + KERNEL1x1_2(xxx) + KERNEL1x1_3(xxx) + KERNEL1x1_4(xxx) + + KERNEL1x1_1(xxx) + KERNEL1x1_2(xxx) + KERNEL1x1_3(xxx) + KERNEL1x1_4(xxx) + + je .L1_46 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL1x1_1(xxx) + KERNEL1x1_2(xxx) + KERNEL1x1_3(xxx) + KERNEL1x1_4(xxx) + + KERNEL1x1_1(xxx) + KERNEL1x1_2(xxx) + KERNEL1x1_3(xxx) + KERNEL1x1_4(xxx) + + je .L1_46 + + jmp .L1_42 + ALIGN_4 + +.L1_46: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_49 + + movq %rax, BI // Index for BO + + leaq (AO, %rax, 8), AO + leaq (BO, BI, 8), BO + negq BI + negq %rax + ALIGN_4 + +.L1_47: + + KERNEL1x1_SUB(xxx) + addq $1, BI + addq $1, %rax + jl .L1_47 + ALIGN_4 + + +.L1_49: + + vmovddup ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddsd (CO1),%xmm0, %xmm4,%xmm4 + 
+#else + vmulsd %xmm0, %xmm4,%xmm4 + +#endif + + vmovsd %xmm4 , (CO1) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BO, BI, 8), BO + leaq (AO, %rax, 8), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $1, KK +#endif + + addq $1 * SIZE, CO1 # coffset += 1 + ALIGN_4 + + +.L999: + movq SP, %rsp + movq (%rsp), %rbx + movq 8(%rsp), %rbp + movq 16(%rsp), %r12 + movq 24(%rsp), %r13 + movq 32(%rsp), %r14 + movq 40(%rsp), %r15 + +#ifdef WINDOWS_ABI + movq 48(%rsp), %rdi + movq 56(%rsp), %rsi + movups 64(%rsp), %xmm6 + movups 80(%rsp), %xmm7 + movups 96(%rsp), %xmm8 + movups 112(%rsp), %xmm9 + movups 128(%rsp), %xmm10 + movups 144(%rsp), %xmm11 + movups 160(%rsp), %xmm12 + movups 176(%rsp), %xmm13 + movups 192(%rsp), %xmm14 + movups 208(%rsp), %xmm15 +#endif + + addq $STACKSIZE, %rsp + ret + + EPILOGUE diff --git a/param.h b/param.h index d6c3a9b00..95b8a699d 100644 --- a/param.h +++ b/param.h @@ -153,7 +153,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define GEMM_DEFAULT_ALIGN 0x0fffUL #define SGEMM_DEFAULT_UNROLL_N 4 +#if defined(BULLDOZER) && defined(ARCH_X86_64) && !defined(COMPLEX) +#define DGEMM_DEFAULT_UNROLL_N 2 +#else #define DGEMM_DEFAULT_UNROLL_N 4 +#endif #define QGEMM_DEFAULT_UNROLL_N 2 #define CGEMM_DEFAULT_UNROLL_N 2 #define ZGEMM_DEFAULT_UNROLL_N 2 @@ -161,14 +165,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#ifdef ARCH_X86 #define SGEMM_DEFAULT_UNROLL_M 4 -#define DGEMM_DEFAULT_UNROLL_M 2 +#define DGEMM_DEFAULT_UNROLL_M 4 #define QGEMM_DEFAULT_UNROLL_M 2 #define CGEMM_DEFAULT_UNROLL_M 2 #define ZGEMM_DEFAULT_UNROLL_M 1 #define XGEMM_DEFAULT_UNROLL_M 1 #else #define SGEMM_DEFAULT_UNROLL_M 8 +#if defined(BULLDOZER) && !defined(COMPLEX) +#define DGEMM_DEFAULT_UNROLL_M 8 +#else #define DGEMM_DEFAULT_UNROLL_M 4 +#endif #define QGEMM_DEFAULT_UNROLL_M 2 #define CGEMM_DEFAULT_UNROLL_M 4 #define ZGEMM_DEFAULT_UNROLL_M 2 @@ -193,26 +201,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #else #define SGEMM_DEFAULT_P 448 - -#if defined(BULLDOZER) && defined(ARCH_X86_64) -#define DGEMM_DEFAULT_P 248 +#if defined(BULLDOZER) && defined(ARCH_X86_64) && !defined(COMPLEX) +#define DGEMM_DEFAULT_P 384 #else #define DGEMM_DEFAULT_P 224 #endif - #define QGEMM_DEFAULT_P 112 #define CGEMM_DEFAULT_P 224 #define ZGEMM_DEFAULT_P 112 #define XGEMM_DEFAULT_P 56 #define SGEMM_DEFAULT_Q 224 - -#if defined(BULLDOZER) && defined(ARCH_X86_64) -#define DGEMM_DEFAULT_Q 248 +#if defined(BULLDOZER) && defined(ARCH_X86_64) && !defined(COMPLEX) +#define DGEMM_DEFAULT_Q 168 #else #define DGEMM_DEFAULT_Q 224 #endif - #define QGEMM_DEFAULT_Q 224 #define CGEMM_DEFAULT_Q 224 #define ZGEMM_DEFAULT_Q 224 @@ -230,7 +234,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define SYMV_P 16 #define HAVE_EXCLUSIVE_CACHE -#define GEMM_THREAD gemm_thread_mn +#define GEMM_THREAD gemm_thread_m #endif From ba800f0883d311519990eda469538e7bfe8b6372 Mon Sep 17 00:00:00 2001 From: wernsaar Date: Sat, 8 Jun 2013 10:03:59 +0200 Subject: [PATCH 09/15] correct GEMM_THREAD in param.h --- param.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/param.h b/param.h index 95b8a699d..007aba837 100644 --- a/param.h +++ b/param.h @@ -234,7 +234,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#define SYMV_P 16 #define HAVE_EXCLUSIVE_CACHE -#define GEMM_THREAD gemm_thread_m +#define GEMM_THREAD gemm_thread_mn #endif From e4c39c7c26288437e3aa9a373e83b8c544366001 Mon Sep 17 00:00:00 2001 From: wernsaar Date: Sat, 8 Jun 2013 10:43:08 +0200 Subject: [PATCH 10/15] changed stack touching --- kernel/x86_64/dgemm_kernel_8x2_bulldozer.S | 28 +++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/kernel/x86_64/dgemm_kernel_8x2_bulldozer.S b/kernel/x86_64/dgemm_kernel_8x2_bulldozer.S index dc980cbe7..dc32172dc 100644 --- a/kernel/x86_64/dgemm_kernel_8x2_bulldozer.S +++ b/kernel/x86_64/dgemm_kernel_8x2_bulldozer.S @@ -161,6 +161,32 @@ #define BUFFER1 128(%rsp) #define BUFFER2 LB2_OFFSET+128(%rsp) +#if defined(OS_WINDOWS) +#if L_BUFFER_SIZE > 16384 +#define STACK_TOUCH \ + movl $0, 4096 * 4(%rsp);\ + movl $0, 4096 * 3(%rsp);\ + movl $0, 4096 * 2(%rsp);\ + movl $0, 4096 * 1(%rsp); +#elif L_BUFFER_SIZE > 12288 +#define STACK_TOUCH \ + movl $0, 4096 * 3(%rsp);\ + movl $0, 4096 * 2(%rsp);\ + movl $0, 4096 * 1(%rsp); +#elif L_BUFFER_SIZE > 8192 +#define STACK_TOUCH \ + movl $0, 4096 * 2(%rsp);\ + movl $0, 4096 * 1(%rsp); +#elif L_BUFFER_SIZE > 4096 +#define STACK_TOUCH \ + movl $0, 4096 * 1(%rsp); +#else +#define STACK_TOUCH +#endif +#else +#define STACK_TOUCH +#endif + #define A_PR1 384 @@ -899,7 +925,7 @@ subq $128 + L_BUFFER_SIZE, %rsp andq $-4096, %rsp # align stack - STACK_TOUCHING + STACK_TOUCH cmpq $0, OLD_M je .L999 From d65bbec99b3729ccfacac911a03a3b582cdbd482 Mon Sep 17 00:00:00 2001 From: wernsaar Date: Sun, 9 Jun 2013 15:57:42 +0200 Subject: [PATCH 11/15] added new sgemm kernel for BULLDOZER --- kernel/x86_64/KERNEL.BULLDOZER | 10 +- kernel/x86_64/sgemm_kernel_16x2_bulldozer.S | 4532 +++++++++++++++++++ param.h | 16 +- 3 files changed, 4549 insertions(+), 9 deletions(-) create mode 100644 kernel/x86_64/sgemm_kernel_16x2_bulldozer.S diff --git a/kernel/x86_64/KERNEL.BULLDOZER b/kernel/x86_64/KERNEL.BULLDOZER index 
70ae51f6d..e2fcf5256 100644 --- a/kernel/x86_64/KERNEL.BULLDOZER +++ b/kernel/x86_64/KERNEL.BULLDOZER @@ -1,11 +1,11 @@ ZGEMVNKERNEL = zgemv_n_dup.S ZGEMVTKERNEL = zgemv_t_dup.S -SGEMMKERNEL = gemm_kernel_8x4_barcelona.S -SGEMMINCOPY = ../generic/gemm_ncopy_8.c -SGEMMITCOPY = ../generic/gemm_tcopy_8.c -SGEMMONCOPY = gemm_ncopy_4_opteron.S -SGEMMOTCOPY = gemm_tcopy_4_opteron.S +SGEMMKERNEL = sgemm_kernel_16x2_bulldozer.S +SGEMMINCOPY = ../generic/gemm_ncopy_16.c +SGEMMITCOPY = ../generic/gemm_tcopy_16.c +SGEMMONCOPY = ../generic/gemm_ncopy_2.c +SGEMMOTCOPY = ../generic/gemm_tcopy_2.c SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX) SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX) SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) diff --git a/kernel/x86_64/sgemm_kernel_16x2_bulldozer.S b/kernel/x86_64/sgemm_kernel_16x2_bulldozer.S new file mode 100644 index 000000000..2e10fae71 --- /dev/null +++ b/kernel/x86_64/sgemm_kernel_16x2_bulldozer.S @@ -0,0 +1,4532 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. 
IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + + +#define ASSEMBLER +#include "common.h" + +#define OLD_M %rdi +#define OLD_N %rsi +#define M %r13 +#define J %r14 +#define OLD_K %rdx + +#define A %rcx +#define B %r8 +#define C %r9 +#define LDC %r10 + +#define I %r11 +#define AO %rdi +#define BO %rsi +#define CO1 %r15 +#define K %r12 +#define BI %rbp +#define SP %rbx + +#define BO1 %rdi +#define BO2 %r15 + +#ifndef WINDOWS_ABI + +#define STACKSIZE 96 + +#else + +#define STACKSIZE 256 + +#define OLD_A 40 + STACKSIZE(%rsp) +#define OLD_B 48 + STACKSIZE(%rsp) +#define OLD_C 56 + STACKSIZE(%rsp) +#define OLD_LDC 64 + STACKSIZE(%rsp) +#define OLD_OFFSET 72 + STACKSIZE(%rsp) + +#endif + +#define L_BUFFER_SIZE 512*8*4 +#define LB2_OFFSET 512*8*2 + +#define Ndiv6 24(%rsp) +#define Nmod6 32(%rsp) +#define N 40(%rsp) +#define ALPHA 48(%rsp) +#define OFFSET 56(%rsp) +#define KK 64(%rsp) +#define KKK 72(%rsp) +#define BUFFER1 128(%rsp) +#define BUFFER2 LB2_OFFSET+128(%rsp) + +#if defined(OS_WINDOWS) +#if L_BUFFER_SIZE > 16384 +#define STACK_TOUCH \ + movl $0, 4096 * 4(%rsp);\ + movl $0, 4096 * 3(%rsp);\ + movl 
$0, 4096 * 2(%rsp);\ + movl $0, 4096 * 1(%rsp); +#elif L_BUFFER_SIZE > 12288 +#define STACK_TOUCH \ + movl $0, 4096 * 3(%rsp);\ + movl $0, 4096 * 2(%rsp);\ + movl $0, 4096 * 1(%rsp); +#elif L_BUFFER_SIZE > 8192 +#define STACK_TOUCH \ + movl $0, 4096 * 2(%rsp);\ + movl $0, 4096 * 1(%rsp); +#elif L_BUFFER_SIZE > 4096 +#define STACK_TOUCH \ + movl $0, 4096 * 1(%rsp); +#else +#define STACK_TOUCH +#endif +#else +#define STACK_TOUCH +#endif + + + +#define A_PR1 384 +#define B_PR1 192 + +/******************************************************************************************* +* 3 lines of N +*******************************************************************************************/ + +#define KERNEL16x3_1(xx) \ + prefetcht0 A_PR1(AO,%rax,SIZE) ;\ + vbroadcastss -6 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss -5 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm3 ;\ + vfmaddps %xmm6,%xmm3,%xmm0,%xmm6 ;\ + vmovups -28 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddps %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vfmaddps %xmm9,%xmm3,%xmm0,%xmm9 ;\ + vmovups -24 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vfmaddps %xmm11,%xmm2,%xmm0,%xmm11 ;\ + vfmaddps %xmm12,%xmm3,%xmm0,%xmm12 ;\ + vmovups -20 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm13,%xmm1,%xmm0,%xmm13 ;\ + vfmaddps %xmm14,%xmm2,%xmm0,%xmm14 ;\ + vfmaddps %xmm15,%xmm3,%xmm0,%xmm15 ;\ + +#define KERNEL16x3_2(xx) \ + prefetcht0 A_PR1+64(AO,%rax,SIZE) ;\ + vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss -2 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vbroadcastss -1 * SIZE(BO, BI, SIZE), %xmm3 ;\ + vfmaddps %xmm6,%xmm3,%xmm0,%xmm6 ;\ + vmovups -12 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps 
%xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddps %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vfmaddps %xmm9,%xmm3,%xmm0,%xmm9 ;\ + vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vfmaddps %xmm11,%xmm2,%xmm0,%xmm11 ;\ + vfmaddps %xmm12,%xmm3,%xmm0,%xmm12 ;\ + vmovups -4 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm13,%xmm1,%xmm0,%xmm13 ;\ + vfmaddps %xmm14,%xmm2,%xmm0,%xmm14 ;\ + vfmaddps %xmm15,%xmm3,%xmm0,%xmm15 ;\ + +#define KERNEL16x3_3(xx) \ + prefetcht0 A_PR1+128(AO,%rax,SIZE) ;\ + vbroadcastss 0 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups 0 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss 1 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vbroadcastss 2 * SIZE(BO, BI, SIZE), %xmm3 ;\ + vfmaddps %xmm6,%xmm3,%xmm0,%xmm6 ;\ + vmovups 4 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddps %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vfmaddps %xmm9,%xmm3,%xmm0,%xmm9 ;\ + vmovups 8 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vfmaddps %xmm11,%xmm2,%xmm0,%xmm11 ;\ + vfmaddps %xmm12,%xmm3,%xmm0,%xmm12 ;\ + vmovups 12 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm13,%xmm1,%xmm0,%xmm13 ;\ + vfmaddps %xmm14,%xmm2,%xmm0,%xmm14 ;\ + vfmaddps %xmm15,%xmm3,%xmm0,%xmm15 ;\ + +#define KERNEL16x3_4(xx) \ + prefetcht0 A_PR1+192(AO,%rax,SIZE) ;\ + vbroadcastss 3 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups 16 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss 4 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vbroadcastss 5 * SIZE(BO, BI, SIZE), %xmm3 ;\ + vfmaddps %xmm6,%xmm3,%xmm0,%xmm6 ;\ + vmovups 20 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddps %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vfmaddps %xmm9,%xmm3,%xmm0,%xmm9 ;\ + vmovups 24 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vfmaddps %xmm11,%xmm2,%xmm0,%xmm11 ;\ + vfmaddps %xmm12,%xmm3,%xmm0,%xmm12 ;\ + 
vmovups 28 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm13,%xmm1,%xmm0,%xmm13 ;\ + vfmaddps %xmm14,%xmm2,%xmm0,%xmm14 ;\ + vfmaddps %xmm15,%xmm3,%xmm0,%xmm15 ;\ + addq $12, BI ;\ + addq $64, %rax ;\ + +#define KERNEL16x3_SUB(xx) \ + vbroadcastss -6 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss -5 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm3 ;\ + vfmaddps %xmm6,%xmm3,%xmm0,%xmm6 ;\ + vmovups -28 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddps %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vfmaddps %xmm9,%xmm3,%xmm0,%xmm9 ;\ + vmovups -24 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vfmaddps %xmm11,%xmm2,%xmm0,%xmm11 ;\ + vfmaddps %xmm12,%xmm3,%xmm0,%xmm12 ;\ + vmovups -20 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm13,%xmm1,%xmm0,%xmm13 ;\ + vfmaddps %xmm14,%xmm2,%xmm0,%xmm14 ;\ + vfmaddps %xmm15,%xmm3,%xmm0,%xmm15 ;\ + + +/*******************************************************************************************/ + +#define KERNEL8x3_1(xx) \ + prefetcht0 A_PR1(AO,%rax,SIZE) ;\ + vbroadcastss -6 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss -5 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm3 ;\ + vfmaddps %xmm6,%xmm3,%xmm0,%xmm6 ;\ + vmovups -28 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddps %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vfmaddps %xmm9,%xmm3,%xmm0,%xmm9 ;\ + +#define KERNEL8x3_2(xx) \ + vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -24 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss -2 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vbroadcastss -1 * SIZE(BO, BI, SIZE), %xmm3 ;\ + vfmaddps 
%xmm6,%xmm3,%xmm0,%xmm6 ;\ + vmovups -20 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddps %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vfmaddps %xmm9,%xmm3,%xmm0,%xmm9 ;\ + +#define KERNEL8x3_3(xx) \ + prefetcht0 A_PR1+64(AO,%rax,SIZE) ;\ + vbroadcastss 0 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss 1 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vbroadcastss 2 * SIZE(BO, BI, SIZE), %xmm3 ;\ + vfmaddps %xmm6,%xmm3,%xmm0,%xmm6 ;\ + vmovups -12 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddps %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vfmaddps %xmm9,%xmm3,%xmm0,%xmm9 ;\ + +#define KERNEL8x3_4(xx) \ + vbroadcastss 3 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss 4 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vbroadcastss 5 * SIZE(BO, BI, SIZE), %xmm3 ;\ + vfmaddps %xmm6,%xmm3,%xmm0,%xmm6 ;\ + vmovups -4 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddps %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vfmaddps %xmm9,%xmm3,%xmm0,%xmm9 ;\ + addq $12, BI ;\ + addq $32, %rax ;\ + +#define KERNEL8x3_SUB(xx) \ + vbroadcastss -6 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss -5 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm3 ;\ + vfmaddps %xmm6,%xmm3,%xmm0,%xmm6 ;\ + vmovups -28 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddps %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vfmaddps %xmm9,%xmm3,%xmm0,%xmm9 ;\ + + +/*******************************************************************************************/ + +#define KERNEL4x3_1(xx) \ + prefetcht0 A_PR1(AO,%rax,SIZE) ;\ + vbroadcastss -6 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -32 * SIZE(AO, %rax, 
SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss -5 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm3 ;\ + vfmaddps %xmm6,%xmm3,%xmm0,%xmm6 ;\ + +#define KERNEL4x3_2(xx) \ + vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -28 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss -2 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vbroadcastss -1 * SIZE(BO, BI, SIZE), %xmm3 ;\ + vfmaddps %xmm6,%xmm3,%xmm0,%xmm6 ;\ + +#define KERNEL4x3_3(xx) \ + vbroadcastss 0 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -24 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss 1 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vbroadcastss 2 * SIZE(BO, BI, SIZE), %xmm3 ;\ + vfmaddps %xmm6,%xmm3,%xmm0,%xmm6 ;\ + +#define KERNEL4x3_4(xx) \ + vbroadcastss 3 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -20 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss 4 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vbroadcastss 5 * SIZE(BO, BI, SIZE), %xmm3 ;\ + vfmaddps %xmm6,%xmm3,%xmm0,%xmm6 ;\ + addq $12, BI ;\ + addq $16, %rax ;\ + +#define KERNEL4x3_SUB(xx) \ + vbroadcastss -6 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss -5 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm3 ;\ + vfmaddps %xmm6,%xmm3,%xmm0,%xmm6 ;\ + +/*******************************************************************************************/ + +#define KERNEL2x3_1(xx) \ + vmovss -6 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovss -5 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddss %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovss -4 * SIZE(BO, BI, SIZE), %xmm3 ;\ + vfmaddss 
%xmm6,%xmm3,%xmm0,%xmm6 ;\ + vmovss -31 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm8,%xmm1,%xmm0,%xmm8 ;\ + vfmaddss %xmm10,%xmm2,%xmm0,%xmm10 ;\ + vfmaddss %xmm12,%xmm3,%xmm0,%xmm12 ;\ + +#define KERNEL2x3_2(xx) \ + vmovss -3 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -30 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovss -2 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddss %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovss -1 * SIZE(BO, BI, SIZE), %xmm3 ;\ + vfmaddss %xmm6,%xmm3,%xmm0,%xmm6 ;\ + vmovss -29 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm8,%xmm1,%xmm0,%xmm8 ;\ + vfmaddss %xmm10,%xmm2,%xmm0,%xmm10 ;\ + vfmaddss %xmm12,%xmm3,%xmm0,%xmm12 ;\ + +#define KERNEL2x3_3(xx) \ + vmovss 0 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -28 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovss 1 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddss %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovss 2 * SIZE(BO, BI, SIZE), %xmm3 ;\ + vfmaddss %xmm6,%xmm3,%xmm0,%xmm6 ;\ + vmovss -27 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm8,%xmm1,%xmm0,%xmm8 ;\ + vfmaddss %xmm10,%xmm2,%xmm0,%xmm10 ;\ + vfmaddss %xmm12,%xmm3,%xmm0,%xmm12 ;\ + +#define KERNEL2x3_4(xx) \ + vmovss 3 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -26 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovss 4 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddss %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovss 5 * SIZE(BO, BI, SIZE), %xmm3 ;\ + vfmaddss %xmm6,%xmm3,%xmm0,%xmm6 ;\ + vmovss -25 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm8,%xmm1,%xmm0,%xmm8 ;\ + vfmaddss %xmm10,%xmm2,%xmm0,%xmm10 ;\ + vfmaddss %xmm12,%xmm3,%xmm0,%xmm12 ;\ + addq $12, BI ;\ + addq $8, %rax ;\ + +#define KERNEL2x3_SUB(xx) \ + vmovss -6 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovss -5 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddss %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovss -4 * SIZE(BO, BI, SIZE), %xmm3 ;\ + vfmaddss %xmm6,%xmm3,%xmm0,%xmm6 ;\ + vmovss 
-31 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm8,%xmm1,%xmm0,%xmm8 ;\ + vfmaddss %xmm10,%xmm2,%xmm0,%xmm10 ;\ + vfmaddss %xmm12,%xmm3,%xmm0,%xmm12 ;\ + +/*******************************************************************************************/ + +#define KERNEL1x3_1(xx) \ + vmovss -6 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovss -5 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddss %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovss -4 * SIZE(BO, BI, SIZE), %xmm3 ;\ + vfmaddss %xmm6,%xmm3,%xmm0,%xmm6 ;\ + +#define KERNEL1x3_2(xx) \ + vmovss -3 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -31 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovss -2 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddss %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovss -1 * SIZE(BO, BI, SIZE), %xmm3 ;\ + vfmaddss %xmm6,%xmm3,%xmm0,%xmm6 ;\ + +#define KERNEL1x3_3(xx) \ + vmovss 0 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -30 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovss 1 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddss %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovss 2 * SIZE(BO, BI, SIZE), %xmm3 ;\ + vfmaddss %xmm6,%xmm3,%xmm0,%xmm6 ;\ + +#define KERNEL1x3_4(xx) \ + vmovss 3 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -29 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovss 4 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddss %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovss 5 * SIZE(BO, BI, SIZE), %xmm3 ;\ + vfmaddss %xmm6,%xmm3,%xmm0,%xmm6 ;\ + addq $12, BI ;\ + addq $4, %rax ;\ + +#define KERNEL1x3_SUB(xx) \ + vmovss -6 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovss -5 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddss %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovss -4 * SIZE(BO, BI, SIZE), %xmm3 ;\ + vfmaddss %xmm6,%xmm3,%xmm0,%xmm6 ;\ + +/*******************************************************************************************/ + 
+/******************************************************************************************* +* 2 lines of N +*******************************************************************************************/ + +#define KERNEL16x2_1(xx) \ + prefetcht0 A_PR1(AO,%rax,SIZE) ;\ + vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovups -28 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddps %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vmovups -24 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vfmaddps %xmm11,%xmm2,%xmm0,%xmm11 ;\ + vmovups -20 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm13,%xmm1,%xmm0,%xmm13 ;\ + vfmaddps %xmm14,%xmm2,%xmm0,%xmm14 ;\ + +#define KERNEL16x2_2(xx) \ + prefetcht0 A_PR1+64(AO,%rax,SIZE) ;\ + vbroadcastss -2 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss -1 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovups -12 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddps %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vfmaddps %xmm11,%xmm2,%xmm0,%xmm11 ;\ + vmovups -4 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm13,%xmm1,%xmm0,%xmm13 ;\ + vfmaddps %xmm14,%xmm2,%xmm0,%xmm14 ;\ + +#define KERNEL16x2_3(xx) \ + prefetcht0 A_PR1+128(AO,%rax,SIZE) ;\ + vbroadcastss 0 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups 0 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss 1 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovups 4 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddps %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vmovups 8 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps 
%xmm10,%xmm1,%xmm0,%xmm10 ;\ + vfmaddps %xmm11,%xmm2,%xmm0,%xmm11 ;\ + vmovups 12 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm13,%xmm1,%xmm0,%xmm13 ;\ + vfmaddps %xmm14,%xmm2,%xmm0,%xmm14 ;\ + +#define KERNEL16x2_4(xx) \ + prefetcht0 A_PR1+192(AO,%rax,SIZE) ;\ + vbroadcastss 2 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups 16 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss 3 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovups 20 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddps %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vmovups 24 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vfmaddps %xmm11,%xmm2,%xmm0,%xmm11 ;\ + vmovups 28 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm13,%xmm1,%xmm0,%xmm13 ;\ + vfmaddps %xmm14,%xmm2,%xmm0,%xmm14 ;\ + addq $8, BI ;\ + addq $64, %rax ;\ + +#define KERNEL16x2_SUB(xx) \ + vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovups -28 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddps %xmm8,%xmm2,%xmm0,%xmm8 ;\ + vmovups -24 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vfmaddps %xmm11,%xmm2,%xmm0,%xmm11 ;\ + vmovups -20 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm13,%xmm1,%xmm0,%xmm13 ;\ + vfmaddps %xmm14,%xmm2,%xmm0,%xmm14 ;\ + + +/*******************************************************************************************/ + +#define KERNEL8x2_1(xx) \ + prefetcht0 A_PR1(AO,%rax,SIZE) ;\ + vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovups -28 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + 
vfmaddps %xmm8,%xmm2,%xmm0,%xmm8 ;\ + +#define KERNEL8x2_2(xx) \ + vbroadcastss -2 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -24 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss -1 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovups -20 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddps %xmm8,%xmm2,%xmm0,%xmm8 ;\ + +#define KERNEL8x2_3(xx) \ + prefetcht0 A_PR1+64(AO,%rax,SIZE) ;\ + vbroadcastss 0 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss 1 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovups -12 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddps %xmm8,%xmm2,%xmm0,%xmm8 ;\ + +#define KERNEL8x2_4(xx) \ + vbroadcastss 2 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss 3 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovups -4 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddps %xmm8,%xmm2,%xmm0,%xmm8 ;\ + addq $8, BI ;\ + addq $32, %rax ;\ + +#define KERNEL8x2_SUB(xx) \ + vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovups -28 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vfmaddps %xmm8,%xmm2,%xmm0,%xmm8 ;\ + + +/*******************************************************************************************/ + +#define KERNEL4x2_1(xx) \ + prefetcht0 A_PR1(AO,%rax,SIZE) ;\ + vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + +#define 
KERNEL4x2_2(xx) \ + vbroadcastss -2 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -28 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss -1 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + +#define KERNEL4x2_3(xx) \ + vbroadcastss 0 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -24 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss 1 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + +#define KERNEL4x2_4(xx) \ + vbroadcastss 2 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -20 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss 3 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + addq $8, BI ;\ + addq $16, %rax ;\ + +#define KERNEL4x2_SUB(xx) \ + vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ + +/*******************************************************************************************/ + +#define KERNEL2x2_1(xx) \ + vmovss -4 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovss -3 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddss %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovss -31 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm8,%xmm1,%xmm0,%xmm8 ;\ + vfmaddss %xmm10,%xmm2,%xmm0,%xmm10 ;\ + +#define KERNEL2x2_2(xx) \ + vmovss -2 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -30 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovss -1 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddss %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovss -29 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm8,%xmm1,%xmm0,%xmm8 ;\ + vfmaddss %xmm10,%xmm2,%xmm0,%xmm10 ;\ + +#define KERNEL2x2_3(xx) \ + vmovss 0 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -28 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + 
vmovss 1 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddss %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovss -27 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm8,%xmm1,%xmm0,%xmm8 ;\ + vfmaddss %xmm10,%xmm2,%xmm0,%xmm10 ;\ + +#define KERNEL2x2_4(xx) \ + vmovss 2 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -26 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovss 3 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddss %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovss -25 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm8,%xmm1,%xmm0,%xmm8 ;\ + vfmaddss %xmm10,%xmm2,%xmm0,%xmm10 ;\ + addq $8, BI ;\ + addq $8, %rax ;\ + +#define KERNEL2x2_SUB(xx) \ + vmovss -4 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovss -3 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddss %xmm5,%xmm2,%xmm0,%xmm5 ;\ + vmovss -31 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm8,%xmm1,%xmm0,%xmm8 ;\ + vfmaddss %xmm10,%xmm2,%xmm0,%xmm10 ;\ + +/*******************************************************************************************/ + +#define KERNEL1x2_1(xx) \ + vmovss -4 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovss -3 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddss %xmm5,%xmm2,%xmm0,%xmm5 ;\ + +#define KERNEL1x2_2(xx) \ + vmovss -2 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -31 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovss -1 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddss %xmm5,%xmm2,%xmm0,%xmm5 ;\ + +#define KERNEL1x2_3(xx) \ + vmovss 0 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -30 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovss 1 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddss %xmm5,%xmm2,%xmm0,%xmm5 ;\ + +#define KERNEL1x2_4(xx) \ + vmovss 2 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -29 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovss 3 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddss %xmm5,%xmm2,%xmm0,%xmm5 
;\ + addq $8, BI ;\ + addq $4, %rax ;\ + +#define KERNEL1x2_SUB(xx) \ + vmovss -4 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovss -3 * SIZE(BO, BI, SIZE), %xmm2 ;\ + vfmaddss %xmm5,%xmm2,%xmm0,%xmm5 ;\ + +/*******************************************************************************************/ + +/******************************************************************************************* +* 1 line of N +*******************************************************************************************/ + +#define KERNEL16x1_1(xx) \ + prefetcht0 A_PR1(AO,%rax,SIZE) ;\ + vbroadcastss -2 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovups -28 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vmovups -24 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vmovups -20 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm13,%xmm1,%xmm0,%xmm13 ;\ + +#define KERNEL16x1_2(xx) \ + prefetcht0 A_PR1+64(AO,%rax,SIZE) ;\ + vbroadcastss -1 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovups -12 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vmovups -4 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm13,%xmm1,%xmm0,%xmm13 ;\ + +#define KERNEL16x1_3(xx) \ + prefetcht0 A_PR1+128(AO,%rax,SIZE) ;\ + vbroadcastss 0 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups 0 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovups 4 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vmovups 8 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vmovups 12 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm13,%xmm1,%xmm0,%xmm13 ;\ + +#define KERNEL16x1_4(xx) \ + prefetcht0 
A_PR1+192(AO,%rax,SIZE) ;\ + vbroadcastss 1 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups 16 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovups 20 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vmovups 24 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vmovups 28 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm13,%xmm1,%xmm0,%xmm13 ;\ + addq $4, BI ;\ + addq $64, %rax ;\ + +#define KERNEL16x1_SUB(xx) \ + vbroadcastss -2 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovups -28 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + vmovups -24 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm10,%xmm1,%xmm0,%xmm10 ;\ + vmovups -20 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm13,%xmm1,%xmm0,%xmm13 ;\ + + +/*******************************************************************************************/ + +#define KERNEL8x1_1(xx) \ + prefetcht0 A_PR1(AO,%rax,SIZE) ;\ + vbroadcastss -2 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovups -28 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + +#define KERNEL8x1_2(xx) \ + vbroadcastss -1 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -24 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovups -20 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + +#define KERNEL8x1_3(xx) \ + prefetcht0 A_PR1+64(AO,%rax,SIZE) ;\ + vbroadcastss 0 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovups -12 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + +#define KERNEL8x1_4(xx) \ + vbroadcastss 1 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovups -4 * SIZE(AO, %rax, SIZE), %xmm0 
;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + addq $4, BI ;\ + addq $32, %rax ;\ + +#define KERNEL8x1_SUB(xx) \ + vbroadcastss -2 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovups -28 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ + + +/*******************************************************************************************/ + +#define KERNEL4x1_1(xx) \ + prefetcht0 A_PR1(AO,%rax,SIZE) ;\ + vbroadcastss -2 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + +#define KERNEL4x1_2(xx) \ + vbroadcastss -1 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -28 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + +#define KERNEL4x1_3(xx) \ + vbroadcastss 0 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -24 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + +#define KERNEL4x1_4(xx) \ + vbroadcastss 1 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -20 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + addq $4, BI ;\ + addq $16, %rax ;\ + +#define KERNEL4x1_SUB(xx) \ + vbroadcastss -2 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovups -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ + +/*******************************************************************************************/ + +#define KERNEL2x1_1(xx) \ + vmovss -2 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovss -31 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm8,%xmm1,%xmm0,%xmm8 ;\ + +#define KERNEL2x1_2(xx) \ + vmovss -1 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -30 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovss -29 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm8,%xmm1,%xmm0,%xmm8 ;\ + +#define KERNEL2x1_3(xx) \ + vmovss 0 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -28 * SIZE(AO, %rax, SIZE), 
%xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovss -27 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm8,%xmm1,%xmm0,%xmm8 ;\ + +#define KERNEL2x1_4(xx) \ + vmovss 1 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -26 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovss -25 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm8,%xmm1,%xmm0,%xmm8 ;\ + addq $4, BI ;\ + addq $8, %rax ;\ + +#define KERNEL2x1_SUB(xx) \ + vmovss -2 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + vmovss -31 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm8,%xmm1,%xmm0,%xmm8 ;\ + +/*******************************************************************************************/ + +#define KERNEL1x1_1(xx) \ + vmovss -2 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + +#define KERNEL1x1_2(xx) \ + vmovss -1 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -31 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + +#define KERNEL1x1_3(xx) \ + vmovss 0 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -30 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + +#define KERNEL1x1_4(xx) \ + vmovss 1 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -29 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + addq $4, BI ;\ + addq $4, %rax ;\ + +#define KERNEL1x1_SUB(xx) \ + vmovss -2 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vmovss -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vfmaddss %xmm4,%xmm1,%xmm0,%xmm4 ;\ + +/*******************************************************************************************/ + + + PROLOGUE + PROFCODE + + subq $STACKSIZE, %rsp + movq %rbx, (%rsp) + movq %rbp, 8(%rsp) + movq %r12, 16(%rsp) + movq %r13, 24(%rsp) + movq %r14, 32(%rsp) + movq %r15, 40(%rsp) + + vzeroupper + +#ifdef WINDOWS_ABI + movq %rdi, 48(%rsp) + movq %rsi, 56(%rsp) + movups %xmm6, 64(%rsp) + movups %xmm7, 80(%rsp) + movups %xmm8, 96(%rsp) + 
movups %xmm9, 112(%rsp) + movups %xmm10, 128(%rsp) + movups %xmm11, 144(%rsp) + movups %xmm12, 160(%rsp) + movups %xmm13, 176(%rsp) + movups %xmm14, 192(%rsp) + movups %xmm15, 208(%rsp) + + movq ARG1, OLD_M + movq ARG2, OLD_N + movq ARG3, OLD_K + movq OLD_A, A + movq OLD_B, B + movq OLD_C, C + movq OLD_LDC, LDC +#ifdef TRMMKERNEL + movsd OLD_OFFSET, %xmm12 +#endif + vmovaps %xmm3, %xmm0 + +#else + movq STACKSIZE + 8(%rsp), LDC +#ifdef TRMMKERNEL + movsd STACKSIZE + 16(%rsp), %xmm12 +#endif + +#endif + + movq %rsp, SP # save old stack + subq $128 + L_BUFFER_SIZE, %rsp + andq $-4096, %rsp # align stack + + STACK_TOUCH + + cmpq $0, OLD_M + je .L999 + + cmpq $0, OLD_N + je .L999 + + cmpq $0, OLD_K + je .L999 + + movq OLD_M, M + movq OLD_N, N + movq OLD_K, K + + vmovsd %xmm0, ALPHA + + salq $BASE_SHIFT, LDC + + movq N, %rax + xorq %rdx, %rdx + movq $6, %rdi + divq %rdi // N / 6 + movq %rax, Ndiv6 // N / 6 + movq %rdx, Nmod6 // N % 6 + + + +#ifdef TRMMKERNEL + vmovsd %xmm12, OFFSET + vmovsd %xmm12, KK +#ifndef LEFT + negq KK +#endif +#endif + + movq Ndiv6, J + cmpq $0, J + je .L2_0 + ALIGN_4 + +.L6_01: + // copy to sub buffer + movq K, %rax + salq $1,%rax // K * 2 ; read 2 values + movq B, BO1 + leaq (B,%rax, SIZE), BO2 // next offset to BO2 + leaq BUFFER1, BO // first buffer to BO + movq K, %rax + ALIGN_4 + +.L6_02b: + + vmovss 0 * SIZE(BO1), %xmm0 + vmovss 1 * SIZE(BO1), %xmm1 + vmovss 0 * SIZE(BO2), %xmm2 + vmovss %xmm0, 0*SIZE(BO) + vmovss %xmm1, 1*SIZE(BO) + vmovss %xmm2, 2*SIZE(BO) + addq $2*SIZE,BO1 + addq $2*SIZE,BO2 + addq $3*SIZE,BO + decq %rax + jnz .L6_02b + +.L6_02c: + + movq K, %rax + salq $1,%rax // K * 2 + leaq (B,%rax, SIZE), BO1 // next offset to BO1 + leaq (BO1,%rax, SIZE), BO2 // next offset to BO2 + leaq BUFFER2, BO // second buffer to BO + movq K, %rax + ALIGN_4 + + +.L6_03b: + + vmovss 1*SIZE(BO1), %xmm0 + vmovss 0*SIZE(BO2), %xmm1 + vmovss 1*SIZE(BO2), %xmm2 + vmovss %xmm0, 0*SIZE(BO) + vmovss %xmm1, 1*SIZE(BO) + vmovss %xmm2, 2*SIZE(BO) + addq 
$2*SIZE,BO1 + addq $2*SIZE,BO2 + addq $3*SIZE,BO + decq %rax + jnz .L6_03b + + +.L6_03c: + + movq BO2, B // next offset of B + +.L6_10: + movq C, CO1 + leaq (C, LDC, 2), C + leaq (C, LDC, 1), C // c += 3 * ldc + +#if defined(TRMMKERNEL) && defined(LEFT) + movq OFFSET, %rax + movq %rax, KK +#endif + + movq A, AO // aoffset = a + addq $32 * SIZE, AO + + movq M, I + sarq $4, I // i = (m >> 4) + je .L6_20 + + ALIGN_4 + +.L6_11: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $6 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $6 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, SIZE), BO + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $16, %rax // number of values in AO +#else + addq $3, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax // K = K - ( K % 8 ) + je .L6_16 + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L6_12: + + prefetcht0 B_PR1(BO,BI, SIZE) + KERNEL16x3_1(xxx) + KERNEL16x3_2(xxx) + KERNEL16x3_3(xxx) + KERNEL16x3_4(xxx) + + KERNEL16x3_1(xxx) + prefetcht0 B_PR1+16(BO,BI, SIZE) + KERNEL16x3_2(xxx) + KERNEL16x3_3(xxx) + KERNEL16x3_4(xxx) + + je .L6_16 + + KERNEL16x3_1(xxx) + KERNEL16x3_2(xxx) + prefetcht0 B_PR1+32(BO,BI, SIZE) + KERNEL16x3_3(xxx) + KERNEL16x3_4(xxx) + + KERNEL16x3_1(xxx) + KERNEL16x3_2(xxx) + KERNEL16x3_3(xxx) + 
KERNEL16x3_4(xxx) + + je .L6_16 + + jmp .L6_12 + ALIGN_4 + +.L6_16: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L6_19 + + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L6_17: + + KERNEL16x3_SUB(xxx) + addq $3, BI + addq $16, %rax + jl .L6_17 + ALIGN_4 + + +.L6_19: + + vbroadcastss ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddps (CO1),%xmm0, %xmm4,%xmm4 + vfmaddps 4 * SIZE(CO1),%xmm0, %xmm7,%xmm7 + vfmaddps 8 * SIZE(CO1),%xmm0, %xmm10,%xmm10 + vfmaddps 12 * SIZE(CO1),%xmm0, %xmm13,%xmm13 + + vfmaddps (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddps 4 * SIZE(CO1, LDC),%xmm0, %xmm8,%xmm8 + vfmaddps 8 * SIZE(CO1, LDC),%xmm0, %xmm11,%xmm11 + vfmaddps 12 * SIZE(CO1, LDC),%xmm0, %xmm14,%xmm14 + + vfmaddps (CO1, LDC, 2),%xmm0, %xmm6,%xmm6 + vfmaddps 4 * SIZE(CO1, LDC, 2),%xmm0, %xmm9,%xmm9 + vfmaddps 8 * SIZE(CO1, LDC, 2),%xmm0, %xmm12,%xmm12 + vfmaddps 12 * SIZE(CO1, LDC, 2),%xmm0, %xmm15,%xmm15 + +#else + vmulps %xmm0, %xmm4,%xmm4 + vmulps %xmm0, %xmm7,%xmm7 + vmulps %xmm0, %xmm10,%xmm10 + vmulps %xmm0, %xmm13,%xmm13 + + vmulps %xmm0, %xmm5,%xmm5 + vmulps %xmm0, %xmm8,%xmm8 + vmulps %xmm0, %xmm11,%xmm11 + vmulps %xmm0, %xmm14,%xmm14 + + vmulps %xmm0, %xmm6,%xmm6 + vmulps %xmm0, %xmm9,%xmm9 + vmulps %xmm0, %xmm12,%xmm12 + vmulps %xmm0, %xmm15,%xmm15 + +#endif + + vmovups %xmm4 , (CO1) + vmovups %xmm7 , 4 * SIZE(CO1) + vmovups %xmm10, 8 * SIZE(CO1) + vmovups %xmm13,12 * SIZE(CO1) + + vmovups %xmm5 , (CO1, LDC) + vmovups %xmm8 , 4 * SIZE(CO1, LDC) + vmovups %xmm11, 8 * SIZE(CO1, LDC) + vmovups %xmm14,12 * SIZE(CO1, LDC) + + vmovups %xmm6 , (CO1, LDC, 2) + vmovups %xmm9 , 4 * SIZE(CO1, LDC, 2) + vmovups %xmm12, 8 * SIZE(CO1, LDC, 2) + vmovups %xmm15,12 * SIZE(CO1, LDC, 2) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + 
(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, SIZE), BO + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $16, KK +#endif + + addq $16 * SIZE, CO1 # coffset += 16 + decq I # i -- + jg .L6_11 + ALIGN_4 + +/************************************************************************** +* Rest of M +***************************************************************************/ +.L6_20: + // Test rest of M + + testq $15, M + jz .L7_10 // to next 3 lines of N + + testq $8, M + jz .L6_21pre + ALIGN_4 + +/**************************************************************************/ + +.L6_20_1: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $6 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $6 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $8, %rax // number of values in A +#else + addq $3, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L6_20_6 + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L6_20_2: + + prefetcht0 B_PR1(BO,BI, SIZE) + 
KERNEL8x3_1(xxx) + KERNEL8x3_2(xxx) + KERNEL8x3_3(xxx) + KERNEL8x3_4(xxx) + + KERNEL8x3_1(xxx) + prefetcht0 B_PR1+16(BO,BI, SIZE) + KERNEL8x3_2(xxx) + KERNEL8x3_3(xxx) + KERNEL8x3_4(xxx) + + je .L6_20_6 + + KERNEL8x3_1(xxx) + KERNEL8x3_2(xxx) + prefetcht0 B_PR1+32(BO,BI, SIZE) + KERNEL8x3_3(xxx) + KERNEL8x3_4(xxx) + + KERNEL8x3_1(xxx) + KERNEL8x3_2(xxx) + KERNEL8x3_3(xxx) + KERNEL8x3_4(xxx) + + je .L6_20_6 + + jmp .L6_20_2 + ALIGN_4 + +.L6_20_6: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L6_20_9 + + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L6_20_7: + + KERNEL8x3_SUB(xxx) + addq $3, BI + addq $8, %rax + jl .L6_20_7 + ALIGN_4 + + +.L6_20_9: + + vbroadcastss ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddps (CO1),%xmm0, %xmm4,%xmm4 + vfmaddps 4 * SIZE(CO1),%xmm0, %xmm7,%xmm7 + + vfmaddps (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddps 4 * SIZE(CO1, LDC),%xmm0, %xmm8,%xmm8 + + vfmaddps (CO1, LDC, 2),%xmm0, %xmm6,%xmm6 + vfmaddps 4 * SIZE(CO1, LDC, 2),%xmm0, %xmm9,%xmm9 + +#else + vmulps %xmm0, %xmm4,%xmm4 + vmulps %xmm0, %xmm7,%xmm7 + + vmulps %xmm0, %xmm5,%xmm5 + vmulps %xmm0, %xmm8,%xmm8 + + vmulps %xmm0, %xmm6,%xmm6 + vmulps %xmm0, %xmm9,%xmm9 + +#endif + + vmovups %xmm4 , (CO1) + vmovups %xmm7 , 4 * SIZE(CO1) + + vmovups %xmm5 , (CO1, LDC) + vmovups %xmm8 , 4 * SIZE(CO1, LDC) + + vmovups %xmm6 , (CO1, LDC, 2) + vmovups %xmm9 , 4 * SIZE(CO1, LDC, 2) + + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if 
defined(TRMMKERNEL) && defined(LEFT) + addq $8, KK +#endif + + addq $8 * SIZE, CO1 # coffset += 8 + ALIGN_4 + + + +/**************************************************************************/ + +.L6_21pre: + + testq $4, M + jz .L6_30 + ALIGN_4 + +.L6_21: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $6 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $6 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $4, %rax // number of values in A +#else + addq $3, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L6_26 + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L6_22: + + prefetcht0 B_PR1(BO,BI, SIZE) + KERNEL4x3_1(xxx) + KERNEL4x3_2(xxx) + KERNEL4x3_3(xxx) + KERNEL4x3_4(xxx) + + KERNEL4x3_1(xxx) + prefetcht0 B_PR1+16(BO,BI, SIZE) + KERNEL4x3_2(xxx) + KERNEL4x3_3(xxx) + KERNEL4x3_4(xxx) + + je .L6_26 + + KERNEL4x3_1(xxx) + KERNEL4x3_2(xxx) + prefetcht0 B_PR1+32(BO,BI, SIZE) + KERNEL4x3_3(xxx) + KERNEL4x3_4(xxx) + + KERNEL4x3_1(xxx) + KERNEL4x3_2(xxx) + KERNEL4x3_3(xxx) + KERNEL4x3_4(xxx) + + je .L6_26 + + jmp .L6_22 + ALIGN_4 + +.L6_26: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L6_29 + + movq %rax, BI // Index for BO + 
leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L6_27: + + KERNEL4x3_SUB(xxx) + addq $3, BI + addq $4, %rax + jl .L6_27 + ALIGN_4 + + +.L6_29: + + vbroadcastss ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddps (CO1),%xmm0, %xmm4,%xmm4 + vfmaddps (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddps (CO1, LDC, 2),%xmm0, %xmm6,%xmm6 + +#else + vmulps %xmm0, %xmm4,%xmm4 + vmulps %xmm0, %xmm5,%xmm5 + vmulps %xmm0, %xmm6,%xmm6 + +#endif + + vmovups %xmm4 , (CO1) + vmovups %xmm5 , (CO1, LDC) + vmovups %xmm6 , (CO1, LDC, 2) + + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $4, KK +#endif + + addq $4 * SIZE, CO1 # coffset += 4 + ALIGN_4 + + +.L6_30: + testq $2, M + jz .L6_40 + + ALIGN_4 + +.L6_31: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $6 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $6 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $2, %rax // number of values in AO +#else + addq $3, %rax // 
number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L6_36 + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L6_32: + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL2x3_1(xxx) + KERNEL2x3_2(xxx) + KERNEL2x3_3(xxx) + KERNEL2x3_4(xxx) + + KERNEL2x3_1(xxx) + prefetcht0 B_PR1+16(BO,BI,SIZE) + KERNEL2x3_2(xxx) + KERNEL2x3_3(xxx) + KERNEL2x3_4(xxx) + + je .L6_36 + + KERNEL2x3_1(xxx) + KERNEL2x3_2(xxx) + prefetcht0 B_PR1+32(BO,BI,SIZE) + KERNEL2x3_3(xxx) + KERNEL2x3_4(xxx) + + KERNEL2x3_1(xxx) + KERNEL2x3_2(xxx) + KERNEL2x3_3(xxx) + KERNEL2x3_4(xxx) + + je .L6_36 + + jmp .L6_32 + ALIGN_4 + +.L6_36: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L6_39 + + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L6_37: + + KERNEL2x3_SUB(xxx) + addq $3, BI + addq $2, %rax + jl .L6_37 + ALIGN_4 + + +.L6_39: + + vmovss ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddss (CO1),%xmm0, %xmm4,%xmm4 + vfmaddss 1 * SIZE(CO1),%xmm0, %xmm8,%xmm8 + vfmaddss (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddss 1 * SIZE(CO1, LDC),%xmm0, %xmm10,%xmm10 + vfmaddss (CO1, LDC, 2),%xmm0, %xmm6,%xmm6 + vfmaddss 1 * SIZE(CO1, LDC, 2),%xmm0, %xmm12,%xmm12 + +#else + vmulss %xmm0, %xmm4,%xmm4 + vmulss %xmm0, %xmm8,%xmm8 + vmulss %xmm0, %xmm5,%xmm5 + vmulss %xmm0, %xmm10,%xmm10 + vmulss %xmm0, %xmm6,%xmm6 + vmulss %xmm0, %xmm12,%xmm12 + +#endif + + vmovss %xmm4 , (CO1) + vmovss %xmm8 , 1 * SIZE(CO1) + vmovss %xmm5 , (CO1, LDC) + vmovss %xmm10, 1 * SIZE(CO1, LDC) + vmovss %xmm6 , (CO1, LDC, 2) + vmovss %xmm12, 1 * SIZE(CO1, LDC, 2) + +#if (defined(TRMMKERNEL) && defined(LEFT) && 
defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $2, KK +#endif + + addq $2 * SIZE, CO1 # coffset += 2 + ALIGN_4 + +.L6_40: + testq $1, M + jz .L7_10 // to next 3 lines of N + + ALIGN_4 + +.L6_41: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $6 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $6 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, SIZE), BO + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $1, %rax // number of values in AO +#else + addq $3, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax + je .L6_46 + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L6_42: + + KERNEL1x3_1(xxx) + KERNEL1x3_2(xxx) + KERNEL1x3_3(xxx) + KERNEL1x3_4(xxx) + + KERNEL1x3_1(xxx) + KERNEL1x3_2(xxx) + KERNEL1x3_3(xxx) + KERNEL1x3_4(xxx) + + je .L6_46 + + KERNEL1x3_1(xxx) + KERNEL1x3_2(xxx) + KERNEL1x3_3(xxx) + KERNEL1x3_4(xxx) + + KERNEL1x3_1(xxx) + KERNEL1x3_2(xxx) + KERNEL1x3_3(xxx) + KERNEL1x3_4(xxx) + + je .L6_46 + + jmp .L6_42 + ALIGN_4 + +.L6_46: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k 
& 1) + je .L6_49 + + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L6_47: + + KERNEL1x3_SUB(xxx) + addq $3, BI + addq $1, %rax + jl .L6_47 + ALIGN_4 + + +.L6_49: + + vmovss ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddss (CO1),%xmm0, %xmm4,%xmm4 + vfmaddss (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddss (CO1, LDC, 2),%xmm0, %xmm6,%xmm6 + +#else + vmulss %xmm0, %xmm4,%xmm4 + vmulss %xmm0, %xmm5,%xmm5 + vmulss %xmm0, %xmm6,%xmm6 + +#endif + + vmovss %xmm4 , (CO1) + vmovss %xmm5 , (CO1, LDC) + vmovss %xmm6 , (CO1, LDC, 2) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, SIZE), BO + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $1, KK +#endif + + addq $1 * SIZE, CO1 # coffset += 1 + ALIGN_4 + + + + +/***************************************************************************************************************/ + +.L7_10: + movq C, CO1 + leaq (C, LDC, 2), C + leaq (C, LDC, 1), C // c += 3 * ldc + +#if defined(TRMMKERNEL) && defined(LEFT) + movq OFFSET, %rax + movq %rax, KK +#endif + + movq A, AO // aoffset = a + addq $32 * SIZE, AO + + movq M, I + sarq $4, I // i = (m >> 4) + je .L7_20 + + ALIGN_4 + +.L7_11: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER2, BO // second buffer to BO + addq $6 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER2, BO // second buffer to BO + addq $6 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, SIZE), BO + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, 
SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $16, %rax // number of values in AO +#else + addq $3, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax // K = K - ( K % 8 ) + je .L7_16 + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L7_12: + + prefetcht0 B_PR1(BO,BI, SIZE) + KERNEL16x3_1(xxx) + KERNEL16x3_2(xxx) + KERNEL16x3_3(xxx) + KERNEL16x3_4(xxx) + + KERNEL16x3_1(xxx) + prefetcht0 B_PR1+16(BO,BI, SIZE) + KERNEL16x3_2(xxx) + KERNEL16x3_3(xxx) + KERNEL16x3_4(xxx) + + je .L7_16 + + KERNEL16x3_1(xxx) + KERNEL16x3_2(xxx) + prefetcht0 B_PR1+32(BO,BI, SIZE) + KERNEL16x3_3(xxx) + KERNEL16x3_4(xxx) + + KERNEL16x3_1(xxx) + KERNEL16x3_2(xxx) + KERNEL16x3_3(xxx) + KERNEL16x3_4(xxx) + + je .L7_16 + + jmp .L7_12 + ALIGN_4 + +.L7_16: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L7_19 + + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L7_17: + + KERNEL16x3_SUB(xxx) + addq $3, BI + addq $16, %rax + jl .L7_17 + ALIGN_4 + + +.L7_19: + + vbroadcastss ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddps (CO1),%xmm0, %xmm4,%xmm4 + vfmaddps 4 * SIZE(CO1),%xmm0, %xmm7,%xmm7 + vfmaddps 8 * SIZE(CO1),%xmm0, %xmm10,%xmm10 + vfmaddps 12 * SIZE(CO1),%xmm0, %xmm13,%xmm13 + + vfmaddps (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddps 4 * SIZE(CO1, LDC),%xmm0, %xmm8,%xmm8 + vfmaddps 8 * SIZE(CO1, LDC),%xmm0, %xmm11,%xmm11 + vfmaddps 12 * SIZE(CO1, LDC),%xmm0, 
%xmm14,%xmm14 + + vfmaddps (CO1, LDC, 2),%xmm0, %xmm6,%xmm6 + vfmaddps 4 * SIZE(CO1, LDC, 2),%xmm0, %xmm9,%xmm9 + vfmaddps 8 * SIZE(CO1, LDC, 2),%xmm0, %xmm12,%xmm12 + vfmaddps 12 * SIZE(CO1, LDC, 2),%xmm0, %xmm15,%xmm15 + +#else + vmulps %xmm0, %xmm4,%xmm4 + vmulps %xmm0, %xmm7,%xmm7 + vmulps %xmm0, %xmm10,%xmm10 + vmulps %xmm0, %xmm13,%xmm13 + + vmulps %xmm0, %xmm5,%xmm5 + vmulps %xmm0, %xmm8,%xmm8 + vmulps %xmm0, %xmm11,%xmm11 + vmulps %xmm0, %xmm14,%xmm14 + + vmulps %xmm0, %xmm6,%xmm6 + vmulps %xmm0, %xmm9,%xmm9 + vmulps %xmm0, %xmm12,%xmm12 + vmulps %xmm0, %xmm15,%xmm15 + +#endif + + vmovups %xmm4 , (CO1) + vmovups %xmm7 , 4 * SIZE(CO1) + vmovups %xmm10, 8 * SIZE(CO1) + vmovups %xmm13,12 * SIZE(CO1) + + vmovups %xmm5 , (CO1, LDC) + vmovups %xmm8 , 4 * SIZE(CO1, LDC) + vmovups %xmm11, 8 * SIZE(CO1, LDC) + vmovups %xmm14,12 * SIZE(CO1, LDC) + + vmovups %xmm6 , (CO1, LDC, 2) + vmovups %xmm9 , 4 * SIZE(CO1, LDC, 2) + vmovups %xmm12, 8 * SIZE(CO1, LDC, 2) + vmovups %xmm15,12 * SIZE(CO1, LDC, 2) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, SIZE), BO + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $16, KK +#endif + + addq $16 * SIZE, CO1 # coffset += 16 + decq I # i -- + jg .L7_11 + ALIGN_4 + +/************************************************************************** +* Rest of M +***************************************************************************/ +.L7_20: + // Test rest of M + + testq $15, M + jz .L7_60 // to next 3 lines of N + + testq $8, M + jz .L7_21pre + ALIGN_4 + +/**************************************************************************/ + +.L7_20_1: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && 
defined(TRANSA)) || \
+        (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
+	leaq	BUFFER2, BO		// second buffer to BO
+	addq	$6 * SIZE, BO
+#else
+	movq	KK, %rax
+	leaq	BUFFER2, BO		// second buffer to BO
+	addq	$6 * SIZE, BO
+	movq	%rax, BI	// Index for BO
+	leaq	(BI,BI,2), BI	// BI = BI * 3 ; number of values
+	leaq	(BO, BI, SIZE), BO
+	salq	$3, %rax	// rax = rax * 8 ; number of values
+	leaq	(AO, %rax, SIZE), AO
+#endif
+
+
+	vzeroall
+
+#ifndef	TRMMKERNEL
+	movq	K, %rax
+#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
+	movq	K, %rax
+	subq	KK, %rax
+	movq	%rax, KKK
+#else
+	movq	KK, %rax
+#ifdef	LEFT
+	addq	$8, %rax	// number of values in A
+#else
+	addq	$3, %rax	// number of values in BO
+#endif
+	movq	%rax, KKK
+#endif
+
+
+	andq	$-8, %rax
+	je	.L7_20_6
+	movq	%rax, BI	// Index for BO
+	leaq	(BI,BI,2), BI	// BI = BI * 3 ; number of values
+
+	salq	$3, %rax	// rax = rax * 8 ; number of values
+	leaq	(AO, %rax, SIZE), AO
+	leaq	(BO, BI, SIZE), BO
+	negq	BI
+	negq	%rax
+	ALIGN_4
+
+.L7_20_2:
+
+	prefetcht0	B_PR1(BO,BI, SIZE)
+	KERNEL8x3_1(xxx)
+	KERNEL8x3_2(xxx)
+	KERNEL8x3_3(xxx)
+	KERNEL8x3_4(xxx)
+
+	KERNEL8x3_1(xxx)
+	prefetcht0	B_PR1+16(BO,BI, SIZE)
+	KERNEL8x3_2(xxx)
+	KERNEL8x3_3(xxx)
+	KERNEL8x3_4(xxx)
+
+	je	.L7_20_6
+
+	KERNEL8x3_1(xxx)
+	KERNEL8x3_2(xxx)
+	prefetcht0	B_PR1+32(BO,BI, SIZE)
+	KERNEL8x3_3(xxx)
+	KERNEL8x3_4(xxx)
+
+	KERNEL8x3_1(xxx)
+	KERNEL8x3_2(xxx)
+	KERNEL8x3_3(xxx)
+	KERNEL8x3_4(xxx)
+
+	je	.L7_20_6
+
+	jmp	.L7_20_2
+	ALIGN_4
+
+.L7_20_6:
+#ifndef	TRMMKERNEL
+	movq	K, %rax
+#else
+	movq	KKK, %rax
+#endif
+
+	andq	$7, %rax		# if (k & 1)
+	je	.L7_20_9
+
+	movq	%rax, BI	// Index for BO
+	leaq	(BI,BI,2), BI	// BI = BI * 3 ; number of values
+
+	salq	$3, %rax	// rax = rax * 8 ; number of values
+	leaq	(AO, %rax, SIZE), AO
+	leaq	(BO, BI, SIZE), BO
+	negq	BI
+	negq	%rax
+	ALIGN_4
+
+.L7_20_7:
+
+	KERNEL8x3_SUB(xxx)
+	addq	$3, BI
+	addq	$8, %rax
+	jl	.L7_20_7
+	ALIGN_4
+
+
+.L7_20_9:
+
+	vbroadcastss 
ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddps (CO1),%xmm0, %xmm4,%xmm4 + vfmaddps 4 * SIZE(CO1),%xmm0, %xmm7,%xmm7 + + vfmaddps (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddps 4 * SIZE(CO1, LDC),%xmm0, %xmm8,%xmm8 + + vfmaddps (CO1, LDC, 2),%xmm0, %xmm6,%xmm6 + vfmaddps 4 * SIZE(CO1, LDC, 2),%xmm0, %xmm9,%xmm9 + +#else + vmulps %xmm0, %xmm4,%xmm4 + vmulps %xmm0, %xmm7,%xmm7 + + vmulps %xmm0, %xmm5,%xmm5 + vmulps %xmm0, %xmm8,%xmm8 + + vmulps %xmm0, %xmm6,%xmm6 + vmulps %xmm0, %xmm9,%xmm9 + +#endif + + vmovups %xmm4 , (CO1) + vmovups %xmm7 , 4 * SIZE(CO1) + + vmovups %xmm5 , (CO1, LDC) + vmovups %xmm8 , 4 * SIZE(CO1, LDC) + + vmovups %xmm6 , (CO1, LDC, 2) + vmovups %xmm9 , 4 * SIZE(CO1, LDC, 2) + + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $8, KK +#endif + + addq $8 * SIZE, CO1 # coffset += 8 + ALIGN_4 + + + +/**************************************************************************/ + +.L7_21pre: + + testq $4, M + jz .L7_30 + ALIGN_4 + +.L7_21: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER2, BO // second buffer to BO + addq $6 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER2, BO // second buffer to BO + addq $6 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, 
%rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $4, %rax // number of values in A +#else + addq $3, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L7_26 + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L7_22: + + prefetcht0 B_PR1(BO,BI, SIZE) + KERNEL4x3_1(xxx) + KERNEL4x3_2(xxx) + KERNEL4x3_3(xxx) + KERNEL4x3_4(xxx) + + KERNEL4x3_1(xxx) + prefetcht0 B_PR1+16(BO,BI, SIZE) + KERNEL4x3_2(xxx) + KERNEL4x3_3(xxx) + KERNEL4x3_4(xxx) + + je .L7_26 + + KERNEL4x3_1(xxx) + KERNEL4x3_2(xxx) + prefetcht0 B_PR1+32(BO,BI, SIZE) + KERNEL4x3_3(xxx) + KERNEL4x3_4(xxx) + + KERNEL4x3_1(xxx) + KERNEL4x3_2(xxx) + KERNEL4x3_3(xxx) + KERNEL4x3_4(xxx) + + je .L7_26 + + jmp .L7_22 + ALIGN_4 + +.L7_26: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L7_29 + + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L7_27: + + KERNEL4x3_SUB(xxx) + addq $3, BI + addq $4, %rax + jl .L7_27 + ALIGN_4 + + +.L7_29: + + vbroadcastss ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddps (CO1),%xmm0, %xmm4,%xmm4 + vfmaddps (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddps (CO1, LDC, 2),%xmm0, %xmm6 ,%xmm6 + +#else + vmulps %xmm0, %xmm4,%xmm4 + vmulps %xmm0, %xmm5,%xmm5 + vmulps %xmm0, %xmm6,%xmm6 + +#endif + + vmovups %xmm4 , (CO1) + vmovups %xmm5 , (CO1, LDC) + vmovups %xmm6 , (CO1, LDC, 2) + + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, 
BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $4, KK +#endif + + addq $4 * SIZE, CO1 # coffset += 4 + ALIGN_4 + + +.L7_30: + testq $2, M + jz .L7_40 + + ALIGN_4 + +.L7_31: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER2, BO // second buffer to BO + addq $6 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER2, BO // second buffer to BO + addq $6 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $2, %rax // number of values in AO +#else + addq $3, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L7_36 + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L7_32: + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL2x3_1(xxx) + KERNEL2x3_2(xxx) + KERNEL2x3_3(xxx) + KERNEL2x3_4(xxx) + + KERNEL2x3_1(xxx) + prefetcht0 B_PR1+16(BO,BI,SIZE) + KERNEL2x3_2(xxx) + KERNEL2x3_3(xxx) + KERNEL2x3_4(xxx) + + je .L7_36 + + KERNEL2x3_1(xxx) + KERNEL2x3_2(xxx) + prefetcht0 B_PR1+32(BO,BI,SIZE) + KERNEL2x3_3(xxx) + KERNEL2x3_4(xxx) + + KERNEL2x3_1(xxx) + KERNEL2x3_2(xxx) + KERNEL2x3_3(xxx) + KERNEL2x3_4(xxx) + + je .L7_36 + + jmp .L7_32 + ALIGN_4 + +.L7_36: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L7_39 + + movq 
%rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L7_37: + + KERNEL2x3_SUB(xxx) + addq $3, BI + addq $2, %rax + jl .L7_37 + ALIGN_4 + + +.L7_39: + + vmovss ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddss (CO1),%xmm0, %xmm4,%xmm4 + vfmaddss 1 * SIZE(CO1),%xmm0, %xmm8,%xmm8 + vfmaddss (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddss 1 * SIZE(CO1, LDC),%xmm0, %xmm10,%xmm10 + vfmaddss (CO1, LDC, 2),%xmm0, %xmm6,%xmm6 + vfmaddss 1 * SIZE(CO1, LDC, 2),%xmm0, %xmm12,%xmm12 + +#else + vmulss %xmm0, %xmm4,%xmm4 + vmulss %xmm0, %xmm8,%xmm8 + vmulss %xmm0, %xmm5,%xmm5 + vmulss %xmm0, %xmm10,%xmm10 + vmulss %xmm0, %xmm6,%xmm6 + vmulss %xmm0, %xmm12,%xmm12 + +#endif + + vmovss %xmm4 , (CO1) + vmovss %xmm8 , 1 * SIZE(CO1) + vmovss %xmm5 , (CO1, LDC) + vmovss %xmm10, 1 * SIZE(CO1, LDC) + vmovss %xmm6 , (CO1, LDC, 2) + vmovss %xmm12, 1 * SIZE(CO1, LDC, 2) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $2, KK +#endif + + addq $2 * SIZE, CO1 # coffset += 2 + ALIGN_4 + +.L7_40: + testq $1, M + jz .L7_60 // to next 3 lines of N + + ALIGN_4 + +.L7_41: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER2, BO // second buffer to BO + addq $6 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER2, BO // second buffer to BO + addq $6 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, SIZE), 
BO + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $1, %rax // number of values in AO +#else + addq $3, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax + je .L7_46 + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L7_42: + + KERNEL1x3_1(xxx) + KERNEL1x3_2(xxx) + KERNEL1x3_3(xxx) + KERNEL1x3_4(xxx) + + KERNEL1x3_1(xxx) + KERNEL1x3_2(xxx) + KERNEL1x3_3(xxx) + KERNEL1x3_4(xxx) + + je .L7_46 + + KERNEL1x3_1(xxx) + KERNEL1x3_2(xxx) + KERNEL1x3_3(xxx) + KERNEL1x3_4(xxx) + + KERNEL1x3_1(xxx) + KERNEL1x3_2(xxx) + KERNEL1x3_3(xxx) + KERNEL1x3_4(xxx) + + je .L7_46 + + jmp .L7_42 + ALIGN_4 + +.L7_46: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L7_49 + + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L7_47: + + KERNEL1x3_SUB(xxx) + addq $3, BI + addq $1, %rax + jl .L7_47 + ALIGN_4 + + +.L7_49: + + vmovss ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddss (CO1),%xmm0, %xmm4,%xmm4 + vfmaddss (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddss (CO1, LDC, 2),%xmm0, %xmm6,%xmm6 + +#else + vmulss %xmm0, %xmm4,%xmm4 + vmulss %xmm0, %xmm5,%xmm5 + vmulss %xmm0, %xmm6,%xmm6 + +#endif + + vmovss %xmm4 , (CO1) + vmovss %xmm5 , (CO1, LDC) + vmovss %xmm6 , (CO1, LDC, 2) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,2), BI // BI = BI * 3 ; number of values + leaq (BO, BI, SIZE), BO + leaq 
(AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $1, KK +#endif + + addq $1 * SIZE, CO1 # coffset += 1 + ALIGN_4 + + + +.L7_60: + + decq J // j -- + jg .L6_01 + + +.L2_0: + cmpq $0, Nmod6 // N % 6 == 0 + je .L999 + +/************************************************************************************************ +* Loop for Nmod6 / 2 > 0 +*************************************************************************************************/ + + movq Nmod6, J + sarq $1, J // j = j / 2 + je .L1_0 + ALIGN_4 + +.L2_01: + // copy to sub buffer + movq B, BO1 + leaq BUFFER1, BO // first buffer to BO + movq K, %rax + ALIGN_4 + +.L2_02b: + + vmovsd (BO1), %xmm0 + vmovsd %xmm0, (BO) + addq $2*SIZE,BO1 + addq $2*SIZE,BO + decq %rax + jnz .L2_02b + +.L2_02c: + + movq BO1, B // next offset of B + +.L2_10: + movq C, CO1 + leaq (C, LDC, 2), C // c += 2 * ldc + +#if defined(TRMMKERNEL) && defined(LEFT) + movq OFFSET, %rax + movq %rax, KK +#endif + + movq A, AO // aoffset = a + addq $32 * SIZE, AO + + movq M, I + sarq $4, I // i = (m >> 4) + je .L2_20 + + ALIGN_4 + +.L2_11: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $16, %rax // number of values in AO +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax // K = K - ( K % 8 ) + je 
.L2_16 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_12: + + prefetcht0 B_PR1(BO,BI, SIZE) + KERNEL16x2_1(xxx) + KERNEL16x2_2(xxx) + KERNEL16x2_3(xxx) + KERNEL16x2_4(xxx) + + KERNEL16x2_1(xxx) + KERNEL16x2_2(xxx) + KERNEL16x2_3(xxx) + KERNEL16x2_4(xxx) + + je .L2_16 + + prefetcht0 B_PR1(BO,BI, SIZE) + KERNEL16x2_1(xxx) + KERNEL16x2_2(xxx) + KERNEL16x2_3(xxx) + KERNEL16x2_4(xxx) + + KERNEL16x2_1(xxx) + KERNEL16x2_2(xxx) + KERNEL16x2_3(xxx) + KERNEL16x2_4(xxx) + + je .L2_16 + + jmp .L2_12 + ALIGN_4 + +.L2_16: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_19 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_17: + + KERNEL16x2_SUB(xxx) + addq $2, BI + addq $16, %rax + jl .L2_17 + ALIGN_4 + + +.L2_19: + + vbroadcastss ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddps (CO1),%xmm0, %xmm4,%xmm4 + vfmaddps 4 * SIZE(CO1),%xmm0, %xmm7,%xmm7 + vfmaddps 8 * SIZE(CO1),%xmm0, %xmm10,%xmm10 + vfmaddps 12 * SIZE(CO1),%xmm0, %xmm13,%xmm13 + + vfmaddps (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddps 4 * SIZE(CO1, LDC),%xmm0, %xmm8,%xmm8 + vfmaddps 8 * SIZE(CO1, LDC),%xmm0, %xmm11,%xmm11 + vfmaddps 12 * SIZE(CO1, LDC),%xmm0, %xmm14,%xmm14 + +#else + vmulps %xmm0, %xmm4,%xmm4 + vmulps %xmm0, %xmm7,%xmm7 + vmulps %xmm0, %xmm10,%xmm10 + vmulps %xmm0, %xmm13,%xmm13 + + vmulps %xmm0, %xmm5,%xmm5 + vmulps %xmm0, %xmm8,%xmm8 + vmulps %xmm0, %xmm11,%xmm11 + vmulps %xmm0, %xmm14,%xmm14 + +#endif + + vmovups %xmm4 , (CO1) + vmovups %xmm7 , 4 * SIZE(CO1) + vmovups %xmm10, 8 * SIZE(CO1) + vmovups %xmm13,12 * SIZE(CO1) + + vmovups %xmm5 , (CO1, LDC) + vmovups %xmm8 , 4 * 
SIZE(CO1, LDC) + vmovups %xmm11, 8 * SIZE(CO1, LDC) + vmovups %xmm14,12 * SIZE(CO1, LDC) + + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $16, KK +#endif + + addq $16 * SIZE, CO1 # coffset += 16 + decq I # i -- + jg .L2_11 + ALIGN_4 + +/************************************************************************** +* Rest of M +***************************************************************************/ +.L2_20: + // Test rest of M + + testq $15, M + jz .L2_60 // to next 3 lines of N + + testq $8, M + jz .L2_21pre + ALIGN_4 + +/**************************************************************************/ + +.L2_20_1: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $8, %rax // number of values in A +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L2_20_6 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $3, %rax // rax = rax * 8 ; 
number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_20_2: + + prefetcht0 B_PR1(BO,BI, SIZE) + KERNEL8x2_1(xxx) + KERNEL8x2_2(xxx) + KERNEL8x2_3(xxx) + KERNEL8x2_4(xxx) + + KERNEL8x2_1(xxx) + KERNEL8x2_2(xxx) + KERNEL8x2_3(xxx) + KERNEL8x2_4(xxx) + + je .L2_20_6 + + prefetcht0 B_PR1(BO,BI, SIZE) + KERNEL8x2_1(xxx) + KERNEL8x2_2(xxx) + KERNEL8x2_3(xxx) + KERNEL8x2_4(xxx) + + KERNEL8x2_1(xxx) + KERNEL8x2_2(xxx) + KERNEL8x2_3(xxx) + KERNEL8x2_4(xxx) + + je .L2_20_6 + + jmp .L2_20_2 + ALIGN_4 + +.L2_20_6: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_20_9 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_20_7: + + KERNEL8x2_SUB(xxx) + addq $2, BI + addq $8, %rax + jl .L2_20_7 + ALIGN_4 + + +.L2_20_9: + + vbroadcastss ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddps (CO1),%xmm0, %xmm4,%xmm4 + vfmaddps 4 * SIZE(CO1),%xmm0, %xmm7,%xmm7 + + vfmaddps (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddps 4 * SIZE(CO1, LDC),%xmm0, %xmm8,%xmm8 + +#else + vmulps %xmm0, %xmm4,%xmm4 + vmulps %xmm0, %xmm7,%xmm7 + + vmulps %xmm0, %xmm5,%xmm5 + vmulps %xmm0, %xmm8,%xmm8 + +#endif + + vmovups %xmm4 , (CO1) + vmovups %xmm7 , 4 * SIZE(CO1) + + vmovups %xmm5 , (CO1, LDC) + vmovups %xmm8 , 4 * SIZE(CO1, LDC) + + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $8, KK +#endif + + addq $8 * SIZE, CO1 # coffset += 8 + ALIGN_4 + + + 
+/**************************************************************************/ + +.L2_21pre: + + testq $4, M + jz .L2_30 + ALIGN_4 + +.L2_21: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $4, %rax // number of values in A +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L2_26 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 1 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_22: + + prefetcht0 B_PR1(BO,BI, SIZE) + KERNEL4x2_1(xxx) + KERNEL4x2_2(xxx) + KERNEL4x2_3(xxx) + KERNEL4x2_4(xxx) + + KERNEL4x2_1(xxx) + KERNEL4x2_2(xxx) + KERNEL4x2_3(xxx) + KERNEL4x2_4(xxx) + + je .L2_26 + + prefetcht0 B_PR1(BO,BI, SIZE) + KERNEL4x2_1(xxx) + KERNEL4x2_2(xxx) + KERNEL4x2_3(xxx) + KERNEL4x2_4(xxx) + + KERNEL4x2_1(xxx) + KERNEL4x2_2(xxx) + KERNEL4x2_3(xxx) + KERNEL4x2_4(xxx) + + je .L2_26 + + jmp .L2_22 + ALIGN_4 + +.L2_26: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_29 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, 
SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_27: + + KERNEL4x2_SUB(xxx) + addq $2, BI + addq $4, %rax + jl .L2_27 + ALIGN_4 + + +.L2_29: + + vbroadcastss ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddps (CO1),%xmm0, %xmm4,%xmm4 + vfmaddps (CO1, LDC),%xmm0, %xmm5,%xmm5 + +#else + vmulps %xmm0, %xmm4,%xmm4 + vmulps %xmm0, %xmm5,%xmm5 + +#endif + + vmovups %xmm4 , (CO1) + vmovups %xmm5 , (CO1, LDC) + + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $4, KK +#endif + + addq $4 * SIZE, CO1 # coffset += 4 + ALIGN_4 + + +.L2_30: + testq $2, M + jz .L2_40 + + ALIGN_4 + +.L2_31: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $2, %rax // number of values in AO +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L2_36 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + 
leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_32: + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL2x2_1(xxx) + KERNEL2x2_2(xxx) + KERNEL2x2_3(xxx) + KERNEL2x2_4(xxx) + + KERNEL2x2_1(xxx) + KERNEL2x2_2(xxx) + KERNEL2x2_3(xxx) + KERNEL2x2_4(xxx) + + je .L2_36 + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL2x2_1(xxx) + KERNEL2x2_2(xxx) + KERNEL2x2_3(xxx) + KERNEL2x2_4(xxx) + + KERNEL2x2_1(xxx) + KERNEL2x2_2(xxx) + KERNEL2x2_3(xxx) + KERNEL2x2_4(xxx) + + je .L2_36 + + jmp .L2_32 + ALIGN_4 + +.L2_36: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_39 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_37: + + KERNEL2x2_SUB(xxx) + addq $2, BI + addq $2, %rax + jl .L2_37 + ALIGN_4 + + +.L2_39: + + vmovss ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddss (CO1),%xmm0, %xmm4,%xmm4 + vfmaddss 1 * SIZE(CO1),%xmm0, %xmm8,%xmm8 + vfmaddss (CO1, LDC),%xmm0, %xmm5,%xmm5 + vfmaddss 1 * SIZE(CO1, LDC),%xmm0, %xmm10,%xmm10 + +#else + vmulss %xmm0, %xmm4,%xmm4 + vmulss %xmm0, %xmm8,%xmm8 + vmulss %xmm0, %xmm5,%xmm5 + vmulss %xmm0, %xmm10,%xmm10 + +#endif + + vmovss %xmm4 , (CO1) + vmovss %xmm8 , 1 * SIZE(CO1) + vmovss %xmm5 , (CO1, LDC) + vmovss %xmm10, 1 * SIZE(CO1, LDC) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $2, KK +#endif + + addq $2 * SIZE, CO1 # coffset += 2 + ALIGN_4 + +.L2_40: + testq $1, M + jz .L2_60 // to next 2 lines of N + + ALIGN_4 + +.L2_41: +#if 
!defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $1, %rax // number of values in AO +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax + je .L2_46 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_42: + + KERNEL1x2_1(xxx) + KERNEL1x2_2(xxx) + KERNEL1x2_3(xxx) + KERNEL1x2_4(xxx) + + KERNEL1x2_1(xxx) + KERNEL1x2_2(xxx) + KERNEL1x2_3(xxx) + KERNEL1x2_4(xxx) + + je .L2_46 + + KERNEL1x2_1(xxx) + KERNEL1x2_2(xxx) + KERNEL1x2_3(xxx) + KERNEL1x2_4(xxx) + + KERNEL1x2_1(xxx) + KERNEL1x2_2(xxx) + KERNEL1x2_3(xxx) + KERNEL1x2_4(xxx) + + je .L2_46 + + jmp .L2_42 + ALIGN_4 + +.L2_46: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_49 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_47: + + KERNEL1x2_SUB(xxx) + addq $2, BI + addq $1, %rax + jl .L2_47 + ALIGN_4 + + +.L2_49: + + vmovss ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddss (CO1),%xmm0, %xmm4,%xmm4 + vfmaddss (CO1, LDC),%xmm0, %xmm5,%xmm5 + +#else + vmulss %xmm0, %xmm4,%xmm4 + vmulss %xmm0, %xmm5,%xmm5 + +#endif + + vmovss %xmm4 , (CO1) + 
vmovss %xmm5 , (CO1, LDC) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $1, KK +#endif + + addq $1 * SIZE, CO1 # coffset += 1 + ALIGN_4 + + + + + +.L2_60: + + decq J // j -- + jg .L2_01 // next 2 lines of N + + + +.L1_0: + +/************************************************************************************************ +* Loop for Nmod6 % 2 > 0 +*************************************************************************************************/ + + movq Nmod6, J + andq $1, J // j % 2 + je .L999 + ALIGN_4 + +.L1_01: + // copy to sub buffer + movq B, BO1 + leaq BUFFER1, BO // first buffer to BO + movq K, %rax + ALIGN_4 + +.L1_02b: + + vmovss (BO1), %xmm0 + vmovss %xmm0, (BO) + addq $1*SIZE,BO1 + addq $1*SIZE,BO + decq %rax + jnz .L1_02b + +.L1_02c: + + movq BO1, B // next offset of B + +.L1_10: + movq C, CO1 + leaq (C, LDC, 1), C // c += 1 * ldc + +#if defined(TRMMKERNEL) && defined(LEFT) + movq OFFSET, %rax + movq %rax, KK +#endif + + movq A, AO // aoffset = a + addq $32 * SIZE, AO + + movq M, I + sarq $4, I // i = (m >> 4) + je .L1_20 + + ALIGN_4 + +.L1_11: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + 
subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $16, %rax // number of values in AO +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax // K = K - ( K % 8 ) + je .L1_16 + movq %rax, BI // Index for BO + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_12: + + prefetcht0 B_PR1(BO,BI, SIZE) + KERNEL16x1_1(xxx) + KERNEL16x1_2(xxx) + KERNEL16x1_3(xxx) + KERNEL16x1_4(xxx) + + KERNEL16x1_1(xxx) + KERNEL16x1_2(xxx) + KERNEL16x1_3(xxx) + KERNEL16x1_4(xxx) + + je .L1_16 + + KERNEL16x1_1(xxx) + KERNEL16x1_2(xxx) + KERNEL16x1_3(xxx) + KERNEL16x1_4(xxx) + + KERNEL16x1_1(xxx) + KERNEL16x1_2(xxx) + KERNEL16x1_3(xxx) + KERNEL16x1_4(xxx) + + je .L1_16 + + jmp .L1_12 + ALIGN_4 + +.L1_16: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_19 + + movq %rax, BI // Index for BO + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_17: + + KERNEL16x1_SUB(xxx) + addq $1, BI + addq $16, %rax + jl .L1_17 + ALIGN_4 + + +.L1_19: + + vbroadcastss ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddps (CO1),%xmm0, %xmm4,%xmm4 + vfmaddps 4 * SIZE(CO1),%xmm0, %xmm7,%xmm7 + vfmaddps 8 * SIZE(CO1),%xmm0, %xmm10,%xmm10 + vfmaddps 12 * SIZE(CO1),%xmm0, %xmm13,%xmm13 + +#else + vmulps %xmm0, %xmm4,%xmm4 + vmulps %xmm0, %xmm7,%xmm7 + vmulps %xmm0, %xmm10,%xmm10 + vmulps %xmm0, %xmm13,%xmm13 + +#endif + + vmovups %xmm4 , (CO1) + vmovups %xmm7 , 4 * SIZE(CO1) + vmovups %xmm10, 8 * SIZE(CO1) + vmovups %xmm13,12 * SIZE(CO1) + + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $4, %rax // rax = rax * 16 ; number of values 
+ leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $16, KK +#endif + + addq $16 * SIZE, CO1 # coffset += 16 + decq I # i -- + jg .L1_11 + ALIGN_4 + +/************************************************************************** +* Rest of M +***************************************************************************/ +.L1_20: + // Test rest of M + + testq $15, M + jz .L999 + + testq $8, M + jz .L1_21pre + ALIGN_4 + +/**************************************************************************/ + +.L1_20_1: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $8, %rax // number of values in A +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L1_20_6 + movq %rax, BI // Index for BO + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_20_2: + + prefetcht0 B_PR1(BO,BI, SIZE) + KERNEL8x1_1(xxx) + KERNEL8x1_2(xxx) + KERNEL8x1_3(xxx) + KERNEL8x1_4(xxx) + + KERNEL8x1_1(xxx) + KERNEL8x1_2(xxx) + KERNEL8x1_3(xxx) + KERNEL8x1_4(xxx) + + je .L1_20_6 + + KERNEL8x1_1(xxx) + KERNEL8x1_2(xxx) + KERNEL8x1_3(xxx) + KERNEL8x1_4(xxx) + + KERNEL8x1_1(xxx) + KERNEL8x1_2(xxx) + KERNEL8x1_3(xxx) + KERNEL8x1_4(xxx) + + je .L1_20_6 + + jmp .L1_20_2 + ALIGN_4 + +.L1_20_6: +#ifndef TRMMKERNEL + movq 
K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_20_9 + + movq %rax, BI // Index for BO + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_20_7: + + KERNEL8x1_SUB(xxx) + addq $1, BI + addq $8, %rax + jl .L1_20_7 + ALIGN_4 + + +.L1_20_9: + + vbroadcastss ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddps (CO1),%xmm0, %xmm4,%xmm4 + vfmaddps 4 * SIZE(CO1),%xmm0, %xmm7,%xmm7 + +#else + vmulps %xmm0, %xmm4,%xmm4 + vmulps %xmm0, %xmm7,%xmm7 + +#endif + + vmovups %xmm4 , (CO1) + vmovups %xmm7 , 4 * SIZE(CO1) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $8, KK +#endif + + addq $8 * SIZE, CO1 # coffset += 8 + ALIGN_4 + + + +/**************************************************************************/ + +.L1_21pre: + + testq $4, M + jz .L1_30 + ALIGN_4 + +.L1_21: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $4, %rax // number of values in A +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + 
+ + andq $-8, %rax + je .L1_26 + movq %rax, BI // Index for BO + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_22: + + prefetcht0 B_PR1(BO,BI, SIZE) + KERNEL4x1_1(xxx) + KERNEL4x1_2(xxx) + KERNEL4x1_3(xxx) + KERNEL4x1_4(xxx) + + KERNEL4x1_1(xxx) + KERNEL4x1_2(xxx) + KERNEL4x1_3(xxx) + KERNEL4x1_4(xxx) + + je .L1_26 + + KERNEL4x1_1(xxx) + KERNEL4x1_2(xxx) + KERNEL4x1_3(xxx) + KERNEL4x1_4(xxx) + + KERNEL4x1_1(xxx) + KERNEL4x1_2(xxx) + KERNEL4x1_3(xxx) + KERNEL4x1_4(xxx) + + je .L1_26 + + jmp .L1_22 + ALIGN_4 + +.L1_26: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_29 + + movq %rax, BI // Index for BO + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_27: + + KERNEL4x1_SUB(xxx) + addq $1, BI + addq $4, %rax + jl .L1_27 + ALIGN_4 + + +.L1_29: + + vbroadcastss ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddps (CO1),%xmm0, %xmm4,%xmm4 + +#else + vmulps %xmm0, %xmm4,%xmm4 + +#endif + + vmovups %xmm4 , (CO1) + + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $4, KK +#endif + + addq $4 * SIZE, CO1 # coffset += 4 + ALIGN_4 + + +.L1_30: + testq $2, M + jz .L1_40 + + ALIGN_4 + +.L1_31: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO + movq %rax, BI // Index for BO + 
leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $2, %rax // number of values in AO +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L1_36 + movq %rax, BI // Index for BO + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_32: + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL2x1_1(xxx) + KERNEL2x1_2(xxx) + KERNEL2x1_3(xxx) + KERNEL2x1_4(xxx) + + KERNEL2x1_1(xxx) + KERNEL2x1_2(xxx) + KERNEL2x1_3(xxx) + KERNEL2x1_4(xxx) + + je .L1_36 + + KERNEL2x1_1(xxx) + KERNEL2x1_2(xxx) + KERNEL2x1_3(xxx) + KERNEL2x1_4(xxx) + + KERNEL2x1_1(xxx) + KERNEL2x1_2(xxx) + KERNEL2x1_3(xxx) + KERNEL2x1_4(xxx) + + je .L1_36 + + jmp .L1_32 + ALIGN_4 + +.L1_36: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_39 + + movq %rax, BI // Index for BO + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_37: + + KERNEL2x1_SUB(xxx) + addq $1, BI + addq $2, %rax + jl .L1_37 + ALIGN_4 + + +.L1_39: + + vmovss ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddss (CO1),%xmm0, %xmm4,%xmm4 + vfmaddss 1 * SIZE(CO1),%xmm0, %xmm8,%xmm8 + +#else + vmulss %xmm0, %xmm4,%xmm4 + vmulss %xmm0, %xmm8,%xmm8 + +#endif + + vmovss %xmm4 , (CO1) + vmovss %xmm8 , 1 * SIZE(CO1) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq 
(AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $2, KK +#endif + + addq $2 * SIZE, CO1 # coffset += 2 + ALIGN_4 + +.L1_40: + testq $1, M + jz .L999 + + ALIGN_4 + +.L1_41: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $1, %rax // number of values in AO +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax + je .L1_46 + movq %rax, BI // Index for BO + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_42: + + KERNEL1x1_1(xxx) + KERNEL1x1_2(xxx) + KERNEL1x1_3(xxx) + KERNEL1x1_4(xxx) + + KERNEL1x1_1(xxx) + KERNEL1x1_2(xxx) + KERNEL1x1_3(xxx) + KERNEL1x1_4(xxx) + + je .L1_46 + + KERNEL1x1_1(xxx) + KERNEL1x1_2(xxx) + KERNEL1x1_3(xxx) + KERNEL1x1_4(xxx) + + KERNEL1x1_1(xxx) + KERNEL1x1_2(xxx) + KERNEL1x1_3(xxx) + KERNEL1x1_4(xxx) + + je .L1_46 + + jmp .L1_42 + ALIGN_4 + +.L1_46: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_49 + + movq %rax, BI // Index for BO + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_47: + + KERNEL1x1_SUB(xxx) + addq $1, BI + addq $1, %rax + jl .L1_47 + ALIGN_4 + + +.L1_49: + + vmovss ALPHA, %xmm0 + +#ifndef TRMMKERNEL + + vfmaddss (CO1),%xmm0, %xmm4,%xmm4 + +#else + vmulss %xmm0, %xmm4,%xmm4 + +#endif + + vmovss %xmm4 , (CO1) + +#if (defined(TRMMKERNEL) 
&& defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $1, KK +#endif + + addq $1 * SIZE, CO1 # coffset += 1 + ALIGN_4 + + +.L999: + movq SP, %rsp + movq (%rsp), %rbx + movq 8(%rsp), %rbp + movq 16(%rsp), %r12 + movq 24(%rsp), %r13 + movq 32(%rsp), %r14 + movq 40(%rsp), %r15 + +#ifdef WINDOWS_ABI + movq 48(%rsp), %rdi + movq 56(%rsp), %rsi + movups 64(%rsp), %xmm6 + movups 80(%rsp), %xmm7 + movups 96(%rsp), %xmm8 + movups 112(%rsp), %xmm9 + movups 128(%rsp), %xmm10 + movups 144(%rsp), %xmm11 + movups 160(%rsp), %xmm12 + movups 176(%rsp), %xmm13 + movups 192(%rsp), %xmm14 + movups 208(%rsp), %xmm15 +#endif + + addq $STACKSIZE, %rsp + ret + + EPILOGUE diff --git a/param.h b/param.h index 007aba837..0357c1323 100644 --- a/param.h +++ b/param.h @@ -152,10 +152,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define GEMM_DEFAULT_OFFSET_B 832 #define GEMM_DEFAULT_ALIGN 0x0fffUL -#define SGEMM_DEFAULT_UNROLL_N 4 #if defined(BULLDOZER) && defined(ARCH_X86_64) && !defined(COMPLEX) +#define SGEMM_DEFAULT_UNROLL_N 2 #define DGEMM_DEFAULT_UNROLL_N 2 #else +#define SGEMM_DEFAULT_UNROLL_N 4 #define DGEMM_DEFAULT_UNROLL_N 4 #endif #define QGEMM_DEFAULT_UNROLL_N 2 @@ -171,16 +172,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#define ZGEMM_DEFAULT_UNROLL_M 1 #define XGEMM_DEFAULT_UNROLL_M 1 #else -#define SGEMM_DEFAULT_UNROLL_M 8 #if defined(BULLDOZER) && !defined(COMPLEX) +#define SGEMM_DEFAULT_UNROLL_M 16 #define DGEMM_DEFAULT_UNROLL_M 8 #else +#define SGEMM_DEFAULT_UNROLL_M 8 #define DGEMM_DEFAULT_UNROLL_M 4 #endif #define QGEMM_DEFAULT_UNROLL_M 2 #define CGEMM_DEFAULT_UNROLL_M 4 #define ZGEMM_DEFAULT_UNROLL_M 2 #define XGEMM_DEFAULT_UNROLL_M 1 +#define CGEMM3M_DEFAULT_UNROLL_N 4 +#define CGEMM3M_DEFAULT_UNROLL_M 8 +#define ZGEMM3M_DEFAULT_UNROLL_N 4 +#define ZGEMM3M_DEFAULT_UNROLL_M 4 #endif #if 0 @@ -200,10 +206,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #else -#define SGEMM_DEFAULT_P 448 #if defined(BULLDOZER) && defined(ARCH_X86_64) && !defined(COMPLEX) +#define SGEMM_DEFAULT_P 768 #define DGEMM_DEFAULT_P 384 #else +#define SGEMM_DEFAULT_P 448 #define DGEMM_DEFAULT_P 224 #endif #define QGEMM_DEFAULT_P 112 @@ -211,10 +218,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#define ZGEMM_DEFAULT_P 112 #define XGEMM_DEFAULT_P 56 -#define SGEMM_DEFAULT_Q 224 #if defined(BULLDOZER) && defined(ARCH_X86_64) && !defined(COMPLEX) +#define SGEMM_DEFAULT_Q 168 #define DGEMM_DEFAULT_Q 168 #else +#define SGEMM_DEFAULT_Q 224 #define DGEMM_DEFAULT_Q 224 #endif #define QGEMM_DEFAULT_Q 224 From d854b30ae6a5a3205f954fa36f6ff00d16f18154 Mon Sep 17 00:00:00 2001 From: wernsaar Date: Sun, 9 Jun 2013 17:26:42 +0200 Subject: [PATCH 12/15] Added UNROLL values for 3M to getarch_2nd.c, Makefile.system and Makefile.L3 --- Makefile.system | 7 + getarch_2nd.c | 42 ++++- kernel/Makefile.L3 | 432 ++++++++++++++++++++++----------------------- 3 files changed, 263 insertions(+), 218 deletions(-) diff --git a/Makefile.system b/Makefile.system index eac61e961..35cec17bf 100644 --- a/Makefile.system +++ b/Makefile.system @@ -835,6 +835,13 @@ export ZGEMM_UNROLL_M export ZGEMM_UNROLL_N export XGEMM_UNROLL_M export XGEMM_UNROLL_N +export CGEMM3M_UNROLL_M +export CGEMM3M_UNROLL_N +export ZGEMM3M_UNROLL_M +export ZGEMM3M_UNROLL_N +export XGEMM3M_UNROLL_M +export XGEMM3M_UNROLL_N + ifdef USE_CUDA export CUDADIR diff --git a/getarch_2nd.c b/getarch_2nd.c index 4bdd16a99..fc800cfac 100644 --- a/getarch_2nd.c +++ b/getarch_2nd.c @@ -8,7 +8,7 @@ int main(int argc, char **argv) { - if ((argc < 1) || (*argv[1] == '0')) { + if ( (argc <= 1) || (argc >= 2) && (*argv[1] == '0')) { printf("SGEMM_UNROLL_M=%d\n", SGEMM_DEFAULT_UNROLL_M); printf("SGEMM_UNROLL_N=%d\n", SGEMM_DEFAULT_UNROLL_N); printf("DGEMM_UNROLL_M=%d\n", DGEMM_DEFAULT_UNROLL_M); @@ -22,10 +22,48 @@ int main(int argc, char **argv) { printf("ZGEMM_UNROLL_N=%d\n", ZGEMM_DEFAULT_UNROLL_N); printf("XGEMM_UNROLL_M=%d\n", XGEMM_DEFAULT_UNROLL_M); printf("XGEMM_UNROLL_N=%d\n", XGEMM_DEFAULT_UNROLL_N); + +#ifdef CGEMM3M_DEFAULT_UNROLL_M + printf("CGEMM3M_UNROLL_M=%d\n", CGEMM3M_DEFAULT_UNROLL_M); +#else + printf("CGEMM3M_UNROLL_M=%d\n", SGEMM_DEFAULT_UNROLL_M); +#endif + +#ifdef CGEMM3M_DEFAULT_UNROLL_N + 
printf("CGEMM3M_UNROLL_N=%d\n", CGEMM3M_DEFAULT_UNROLL_N); +#else + printf("CGEMM3M_UNROLL_N=%d\n", SGEMM_DEFAULT_UNROLL_N); +#endif + +#ifdef ZGEMM3M_DEFAULT_UNROLL_M + printf("ZGEMM3M_UNROLL_M=%d\n", ZGEMM3M_DEFAULT_UNROLL_M); +#else + printf("ZGEMM3M_UNROLL_M=%d\n", DGEMM_DEFAULT_UNROLL_M); +#endif + +#ifdef ZGEMM3M_DEFAULT_UNROLL_N + printf("ZGEMM3M_UNROLL_N=%d\n", ZGEMM3M_DEFAULT_UNROLL_N); +#else + printf("ZGEMM3M_UNROLL_N=%d\n", DGEMM_DEFAULT_UNROLL_N); +#endif + +#ifdef XGEMM3M_DEFAULT_UNROLL_M + printf("XGEMM3M_UNROLL_M=%d\n", ZGEMM3M_DEFAULT_UNROLL_M); +#else + printf("XGEMM3M_UNROLL_M=%d\n", QGEMM_DEFAULT_UNROLL_M); +#endif + +#ifdef XGEMM3M_DEFAULT_UNROLL_N + printf("XGEMM3M_UNROLL_N=%d\n", ZGEMM3M_DEFAULT_UNROLL_N); +#else + printf("XGEMM3M_UNROLL_N=%d\n", QGEMM_DEFAULT_UNROLL_N); +#endif + + } - if ((argc >= 1) && (*argv[1] == '1')) { + if ((argc >= 2) && (*argv[1] == '1')) { printf("#define SLOCAL_BUFFER_SIZE\t%ld\n", (SGEMM_DEFAULT_Q * SGEMM_DEFAULT_UNROLL_N * 4 * 1 * sizeof(float))); printf("#define DLOCAL_BUFFER_SIZE\t%ld\n", (DGEMM_DEFAULT_Q * DGEMM_DEFAULT_UNROLL_N * 2 * 1 * sizeof(double))); printf("#define CLOCAL_BUFFER_SIZE\t%ld\n", (CGEMM_DEFAULT_Q * CGEMM_DEFAULT_UNROLL_N * 4 * 2 * sizeof(float))); diff --git a/kernel/Makefile.L3 b/kernel/Makefile.L3 index 4f419dc80..2dcae0b9d 100644 --- a/kernel/Makefile.L3 +++ b/kernel/Makefile.L3 @@ -1206,328 +1206,328 @@ $(KDIR)xhemm_iutcopy$(TSUFFIX).$(SUFFIX) : generic/zhemm_utcopy_$(XGEMM_UNROLL_M $(KDIR)xhemm_iltcopy$(TSUFFIX).$(SUFFIX) : generic/zhemm_ltcopy_$(XGEMM_UNROLL_M).c $(CC) -c $(CFLAGS) $(NO_UNINITIALIZED_WARN) -DXDOUBLE -DCOMPLEX -UOUTER $< -DLOWER -o $@ -$(KDIR)cgemm3m_oncopyb$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(SGEMM_UNROLL_N).c +$(KDIR)cgemm3m_oncopyb$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(CGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)cgemm3m_oncopyr$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(SGEMM_UNROLL_N).c 
+$(KDIR)cgemm3m_oncopyr$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(CGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)cgemm3m_oncopyi$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(SGEMM_UNROLL_N).c +$(KDIR)cgemm3m_oncopyi$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(CGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)cgemm3m_otcopyb$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(SGEMM_UNROLL_N).c +$(KDIR)cgemm3m_otcopyb$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(CGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)cgemm3m_otcopyr$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(SGEMM_UNROLL_N).c +$(KDIR)cgemm3m_otcopyr$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(CGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)cgemm3m_otcopyi$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(SGEMM_UNROLL_N).c +$(KDIR)cgemm3m_otcopyi$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(CGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)cgemm3m_incopyb$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(SGEMM_UNROLL_M).c +$(KDIR)cgemm3m_incopyb$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(CGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) -c -UDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA $< -o $@ -$(KDIR)cgemm3m_incopyr$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(SGEMM_UNROLL_M).c +$(KDIR)cgemm3m_incopyr$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(CGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) -c -UDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)cgemm3m_incopyi$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(SGEMM_UNROLL_M).c +$(KDIR)cgemm3m_incopyi$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(CGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) -c -UDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)cgemm3m_itcopyb$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(SGEMM_UNROLL_M).c 
+$(KDIR)cgemm3m_itcopyb$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(CGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) -c -UDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA $< -o $@ -$(KDIR)cgemm3m_itcopyr$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(SGEMM_UNROLL_M).c +$(KDIR)cgemm3m_itcopyr$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(CGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) -c -UDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)cgemm3m_itcopyi$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(SGEMM_UNROLL_M).c +$(KDIR)cgemm3m_itcopyi$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(CGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) -c -UDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)zgemm3m_oncopyb$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(DGEMM_UNROLL_N).c +$(KDIR)zgemm3m_oncopyb$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)zgemm3m_oncopyr$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(DGEMM_UNROLL_N).c +$(KDIR)zgemm3m_oncopyr$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)zgemm3m_oncopyi$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(DGEMM_UNROLL_N).c +$(KDIR)zgemm3m_oncopyi$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)zgemm3m_otcopyb$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(DGEMM_UNROLL_N).c +$(KDIR)zgemm3m_otcopyb$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)zgemm3m_otcopyr$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(DGEMM_UNROLL_N).c +$(KDIR)zgemm3m_otcopyr$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)zgemm3m_otcopyi$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(DGEMM_UNROLL_N).c 
+$(KDIR)zgemm3m_otcopyi$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)zgemm3m_incopyb$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(DGEMM_UNROLL_M).c +$(KDIR)zgemm3m_incopyb$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) -c -DDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA $< -o $@ -$(KDIR)zgemm3m_incopyr$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(DGEMM_UNROLL_M).c +$(KDIR)zgemm3m_incopyr$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) -c -DDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)zgemm3m_incopyi$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(DGEMM_UNROLL_M).c +$(KDIR)zgemm3m_incopyi$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) -c -DDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)zgemm3m_itcopyb$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(DGEMM_UNROLL_M).c +$(KDIR)zgemm3m_itcopyb$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) -c -DDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA $< -o $@ -$(KDIR)zgemm3m_itcopyr$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(DGEMM_UNROLL_M).c +$(KDIR)zgemm3m_itcopyr$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) -c -DDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)zgemm3m_itcopyi$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(DGEMM_UNROLL_M).c +$(KDIR)zgemm3m_itcopyi$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) -c -DDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)xgemm3m_oncopyb$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(QGEMM_UNROLL_N).c +$(KDIR)xgemm3m_oncopyb$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(XGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)xgemm3m_oncopyr$(TSUFFIX).$(SUFFIX) : 
generic/zgemm3m_ncopy_$(QGEMM_UNROLL_N).c +$(KDIR)xgemm3m_oncopyr$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(XGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)xgemm3m_oncopyi$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(QGEMM_UNROLL_N).c +$(KDIR)xgemm3m_oncopyi$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(XGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)xgemm3m_otcopyb$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(QGEMM_UNROLL_N).c +$(KDIR)xgemm3m_otcopyb$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(XGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)xgemm3m_otcopyr$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(QGEMM_UNROLL_N).c +$(KDIR)xgemm3m_otcopyr$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(XGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)xgemm3m_otcopyi$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(QGEMM_UNROLL_N).c +$(KDIR)xgemm3m_otcopyi$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(XGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)xgemm3m_incopyb$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(QGEMM_UNROLL_M).c +$(KDIR)xgemm3m_incopyb$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(XGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) -c -DXDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA $< -o $@ -$(KDIR)xgemm3m_incopyr$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(QGEMM_UNROLL_M).c +$(KDIR)xgemm3m_incopyr$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(XGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) -c -DXDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)xgemm3m_incopyi$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(QGEMM_UNROLL_M).c +$(KDIR)xgemm3m_incopyi$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_ncopy_$(XGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) -c -DXDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ 
-$(KDIR)xgemm3m_itcopyb$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(QGEMM_UNROLL_M).c +$(KDIR)xgemm3m_itcopyb$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(XGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) -c -DXDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA $< -o $@ -$(KDIR)xgemm3m_itcopyr$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(QGEMM_UNROLL_M).c +$(KDIR)xgemm3m_itcopyr$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(XGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) -c -DXDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)xgemm3m_itcopyi$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(QGEMM_UNROLL_M).c +$(KDIR)xgemm3m_itcopyi$(TSUFFIX).$(SUFFIX) : generic/zgemm3m_tcopy_$(XGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) -c -DXDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)csymm3m_oucopyb$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(SGEMM_UNROLL_N).c +$(KDIR)csymm3m_oucopyb$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(CGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)csymm3m_olcopyb$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(SGEMM_UNROLL_N).c +$(KDIR)csymm3m_olcopyb$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(CGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)csymm3m_oucopyr$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(SGEMM_UNROLL_N).c +$(KDIR)csymm3m_oucopyr$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(CGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)csymm3m_olcopyr$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(SGEMM_UNROLL_N).c +$(KDIR)csymm3m_olcopyr$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(CGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)csymm3m_oucopyi$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(SGEMM_UNROLL_N).c +$(KDIR)csymm3m_oucopyi$(TSUFFIX).$(SUFFIX) : 
generic/zsymm3m_ucopy_$(CGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)csymm3m_olcopyi$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(SGEMM_UNROLL_N).c +$(KDIR)csymm3m_olcopyi$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(CGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)csymm3m_iucopyb$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(SGEMM_UNROLL_M).c +$(KDIR)csymm3m_iucopyb$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(CGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -UUSE_ALPHA $< -o $@ -$(KDIR)csymm3m_ilcopyb$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(SGEMM_UNROLL_M).c +$(KDIR)csymm3m_ilcopyb$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(CGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -UUSE_ALPHA $< -o $@ -$(KDIR)csymm3m_iucopyr$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(SGEMM_UNROLL_M).c +$(KDIR)csymm3m_iucopyr$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(CGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)csymm3m_ilcopyr$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(SGEMM_UNROLL_M).c +$(KDIR)csymm3m_ilcopyr$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(CGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)csymm3m_iucopyi$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(SGEMM_UNROLL_M).c +$(KDIR)csymm3m_iucopyi$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(CGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)csymm3m_ilcopyi$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(SGEMM_UNROLL_M).c +$(KDIR)csymm3m_ilcopyi$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(CGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE 
-DCOMPLEX -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)zsymm3m_oucopyb$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(DGEMM_UNROLL_N).c +$(KDIR)zsymm3m_oucopyb$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)zsymm3m_olcopyb$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(DGEMM_UNROLL_N).c +$(KDIR)zsymm3m_olcopyb$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)zsymm3m_oucopyr$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(DGEMM_UNROLL_N).c +$(KDIR)zsymm3m_oucopyr$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)zsymm3m_olcopyr$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(DGEMM_UNROLL_N).c +$(KDIR)zsymm3m_olcopyr$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)zsymm3m_oucopyi$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(DGEMM_UNROLL_N).c +$(KDIR)zsymm3m_oucopyi$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)zsymm3m_olcopyi$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(DGEMM_UNROLL_N).c +$(KDIR)zsymm3m_olcopyi$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)zsymm3m_iucopyb$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(DGEMM_UNROLL_M).c +$(KDIR)zsymm3m_iucopyb$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -UUSE_ALPHA $< -o $@ -$(KDIR)zsymm3m_ilcopyb$(TSUFFIX).$(SUFFIX) : 
generic/zsymm3m_lcopy_$(DGEMM_UNROLL_M).c +$(KDIR)zsymm3m_ilcopyb$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -UUSE_ALPHA $< -o $@ -$(KDIR)zsymm3m_iucopyr$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(DGEMM_UNROLL_M).c +$(KDIR)zsymm3m_iucopyr$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)zsymm3m_ilcopyr$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(DGEMM_UNROLL_M).c +$(KDIR)zsymm3m_ilcopyr$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)zsymm3m_iucopyi$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(DGEMM_UNROLL_M).c +$(KDIR)zsymm3m_iucopyi$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)zsymm3m_ilcopyi$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(DGEMM_UNROLL_M).c +$(KDIR)zsymm3m_ilcopyi$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)xsymm3m_oucopyb$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(QGEMM_UNROLL_N).c +$(KDIR)xsymm3m_oucopyb$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(XGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)xsymm3m_olcopyb$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(QGEMM_UNROLL_N).c +$(KDIR)xsymm3m_olcopyb$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(XGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)xsymm3m_oucopyr$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(QGEMM_UNROLL_N).c +$(KDIR)xsymm3m_oucopyr$(TSUFFIX).$(SUFFIX) : 
generic/zsymm3m_ucopy_$(XGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)xsymm3m_olcopyr$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(QGEMM_UNROLL_N).c +$(KDIR)xsymm3m_olcopyr$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(XGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)xsymm3m_oucopyi$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(QGEMM_UNROLL_N).c +$(KDIR)xsymm3m_oucopyi$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(XGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)xsymm3m_olcopyi$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(QGEMM_UNROLL_N).c +$(KDIR)xsymm3m_olcopyi$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(XGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)xsymm3m_iucopyb$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(QGEMM_UNROLL_M).c +$(KDIR)xsymm3m_iucopyb$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(XGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -UUSE_ALPHA $< -o $@ -$(KDIR)xsymm3m_ilcopyb$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(QGEMM_UNROLL_M).c +$(KDIR)xsymm3m_ilcopyb$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(XGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -UUSE_ALPHA $< -o $@ -$(KDIR)xsymm3m_iucopyr$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(QGEMM_UNROLL_M).c +$(KDIR)xsymm3m_iucopyr$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(XGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)xsymm3m_ilcopyr$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(QGEMM_UNROLL_M).c +$(KDIR)xsymm3m_ilcopyr$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(XGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c 
-DXDOUBLE -DCOMPLEX -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)xsymm3m_iucopyi$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(QGEMM_UNROLL_M).c +$(KDIR)xsymm3m_iucopyi$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_ucopy_$(XGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)xsymm3m_ilcopyi$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(QGEMM_UNROLL_M).c +$(KDIR)xsymm3m_ilcopyi$(TSUFFIX).$(SUFFIX) : generic/zsymm3m_lcopy_$(XGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)chemm3m_oucopyb$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(SGEMM_UNROLL_N).c +$(KDIR)chemm3m_oucopyb$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(CGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)chemm3m_olcopyb$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(SGEMM_UNROLL_N).c +$(KDIR)chemm3m_olcopyb$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(CGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)chemm3m_oucopyr$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(SGEMM_UNROLL_N).c +$(KDIR)chemm3m_oucopyr$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(CGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)chemm3m_olcopyr$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(SGEMM_UNROLL_N).c +$(KDIR)chemm3m_olcopyr$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(CGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)chemm3m_oucopyi$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(SGEMM_UNROLL_N).c +$(KDIR)chemm3m_oucopyi$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(CGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)chemm3m_olcopyi$(TSUFFIX).$(SUFFIX) : 
generic/zhemm3m_lcopy_$(SGEMM_UNROLL_N).c +$(KDIR)chemm3m_olcopyi$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(CGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)chemm3m_iucopyb$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(SGEMM_UNROLL_M).c +$(KDIR)chemm3m_iucopyb$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(CGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -UUSE_ALPHA $< -o $@ -$(KDIR)chemm3m_ilcopyb$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(SGEMM_UNROLL_M).c +$(KDIR)chemm3m_ilcopyb$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(CGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -UUSE_ALPHA $< -o $@ -$(KDIR)chemm3m_iucopyr$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(SGEMM_UNROLL_M).c +$(KDIR)chemm3m_iucopyr$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(CGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)chemm3m_ilcopyr$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(SGEMM_UNROLL_M).c +$(KDIR)chemm3m_ilcopyr$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(CGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)chemm3m_iucopyi$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(SGEMM_UNROLL_M).c +$(KDIR)chemm3m_iucopyi$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(CGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)chemm3m_ilcopyi$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(SGEMM_UNROLL_M).c +$(KDIR)chemm3m_ilcopyi$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(CGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)zhemm3m_oucopyb$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(DGEMM_UNROLL_N).c +$(KDIR)zhemm3m_oucopyb$(TSUFFIX).$(SUFFIX) : 
generic/zhemm3m_ucopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)zhemm3m_olcopyb$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(DGEMM_UNROLL_N).c +$(KDIR)zhemm3m_olcopyb$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)zhemm3m_oucopyr$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(DGEMM_UNROLL_N).c +$(KDIR)zhemm3m_oucopyr$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)zhemm3m_olcopyr$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(DGEMM_UNROLL_N).c +$(KDIR)zhemm3m_olcopyr$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)zhemm3m_oucopyi$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(DGEMM_UNROLL_N).c +$(KDIR)zhemm3m_oucopyi$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)zhemm3m_olcopyi$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(DGEMM_UNROLL_N).c +$(KDIR)zhemm3m_olcopyi$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)zhemm3m_iucopyb$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(DGEMM_UNROLL_M).c +$(KDIR)zhemm3m_iucopyb$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -UUSE_ALPHA $< -o $@ -$(KDIR)zhemm3m_ilcopyb$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(DGEMM_UNROLL_M).c +$(KDIR)zhemm3m_ilcopyb$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX 
-UUSE_ALPHA $< -o $@ -$(KDIR)zhemm3m_iucopyr$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(DGEMM_UNROLL_M).c +$(KDIR)zhemm3m_iucopyr$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)zhemm3m_ilcopyr$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(DGEMM_UNROLL_M).c +$(KDIR)zhemm3m_ilcopyr$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)zhemm3m_iucopyi$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(DGEMM_UNROLL_M).c +$(KDIR)zhemm3m_iucopyi$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)zhemm3m_ilcopyi$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(DGEMM_UNROLL_M).c +$(KDIR)zhemm3m_ilcopyi$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)xhemm3m_oucopyb$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(QGEMM_UNROLL_N).c +$(KDIR)xhemm3m_oucopyb$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(XGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)xhemm3m_olcopyb$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(QGEMM_UNROLL_N).c +$(KDIR)xhemm3m_olcopyb$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(XGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)xhemm3m_oucopyr$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(QGEMM_UNROLL_N).c +$(KDIR)xhemm3m_oucopyr$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(XGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)xhemm3m_olcopyr$(TSUFFIX).$(SUFFIX) : 
generic/zhemm3m_lcopy_$(QGEMM_UNROLL_N).c +$(KDIR)xhemm3m_olcopyr$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(XGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)xhemm3m_oucopyi$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(QGEMM_UNROLL_N).c +$(KDIR)xhemm3m_oucopyi$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(XGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)xhemm3m_olcopyi$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(QGEMM_UNROLL_N).c +$(KDIR)xhemm3m_olcopyi$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(XGEMM3M_UNROLL_N).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)xhemm3m_iucopyb$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(QGEMM_UNROLL_M).c +$(KDIR)xhemm3m_iucopyb$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(XGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -UUSE_ALPHA $< -o $@ -$(KDIR)xhemm3m_ilcopyb$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(QGEMM_UNROLL_M).c +$(KDIR)xhemm3m_ilcopyb$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(XGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -UUSE_ALPHA $< -o $@ -$(KDIR)xhemm3m_iucopyr$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(QGEMM_UNROLL_M).c +$(KDIR)xhemm3m_iucopyr$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(XGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)xhemm3m_ilcopyr$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(QGEMM_UNROLL_M).c +$(KDIR)xhemm3m_ilcopyr$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(XGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)xhemm3m_iucopyi$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_ucopy_$(QGEMM_UNROLL_M).c +$(KDIR)xhemm3m_iucopyi$(TSUFFIX).$(SUFFIX) : 
generic/zhemm3m_ucopy_$(XGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)xhemm3m_ilcopyi$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(QGEMM_UNROLL_M).c +$(KDIR)xhemm3m_ilcopyi$(TSUFFIX).$(SUFFIX) : generic/zhemm3m_lcopy_$(XGEMM3M_UNROLL_M).c $(CC) $(CFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ $(KDIR)strsm_iunucopy$(TSUFFIX).$(SUFFIX) : generic/trsm_uncopy_$(SGEMM_UNROLL_M).c @@ -2608,328 +2608,328 @@ $(KDIR)xhemm_iutcopy$(TSUFFIX).$(PSUFFIX) : generic/zhemm_utcopy_$(XGEMM_UNROLL_ $(KDIR)xhemm_iltcopy$(TSUFFIX).$(PSUFFIX) : generic/zhemm_ltcopy_$(XGEMM_UNROLL_M).c $(CC) -c $(PFLAGS) $(NO_UNINITIALIZED_WARN) -DXDOUBLE -DCOMPLEX -UOUTER $< -DLOWER -o $@ -$(KDIR)cgemm3m_oncopyb$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(SGEMM_UNROLL_N).c +$(KDIR)cgemm3m_oncopyb$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(CGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)cgemm3m_oncopyr$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(SGEMM_UNROLL_N).c +$(KDIR)cgemm3m_oncopyr$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(CGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)cgemm3m_oncopyi$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(SGEMM_UNROLL_N).c +$(KDIR)cgemm3m_oncopyi$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(CGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)cgemm3m_otcopyb$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(SGEMM_UNROLL_N).c +$(KDIR)cgemm3m_otcopyb$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(CGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)cgemm3m_otcopyr$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(SGEMM_UNROLL_N).c +$(KDIR)cgemm3m_otcopyr$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(CGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA 
-DREAL_ONLY $< -o $@ -$(KDIR)cgemm3m_otcopyi$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(SGEMM_UNROLL_N).c +$(KDIR)cgemm3m_otcopyi$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(CGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)cgemm3m_incopyb$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(SGEMM_UNROLL_M).c +$(KDIR)cgemm3m_incopyb$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(CGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) -c -UDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA $< -o $@ -$(KDIR)cgemm3m_incopyr$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(SGEMM_UNROLL_M).c +$(KDIR)cgemm3m_incopyr$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(CGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) -c -UDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)cgemm3m_incopyi$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(SGEMM_UNROLL_M).c +$(KDIR)cgemm3m_incopyi$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(CGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) -c -UDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)cgemm3m_itcopyb$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(SGEMM_UNROLL_M).c +$(KDIR)cgemm3m_itcopyb$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(CGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) -c -UDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA $< -o $@ -$(KDIR)cgemm3m_itcopyr$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(SGEMM_UNROLL_M).c +$(KDIR)cgemm3m_itcopyr$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(CGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) -c -UDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)cgemm3m_itcopyi$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(SGEMM_UNROLL_M).c +$(KDIR)cgemm3m_itcopyi$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(CGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) -c -UDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)zgemm3m_oncopyb$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(DGEMM_UNROLL_N).c +$(KDIR)zgemm3m_oncopyb$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(ZGEMM3M_UNROLL_N).c $(CC) 
$(PFLAGS) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)zgemm3m_oncopyr$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(DGEMM_UNROLL_N).c +$(KDIR)zgemm3m_oncopyr$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)zgemm3m_oncopyi$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(DGEMM_UNROLL_N).c +$(KDIR)zgemm3m_oncopyi$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)zgemm3m_otcopyb$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(DGEMM_UNROLL_N).c +$(KDIR)zgemm3m_otcopyb$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)zgemm3m_otcopyr$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(DGEMM_UNROLL_N).c +$(KDIR)zgemm3m_otcopyr$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)zgemm3m_otcopyi$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(DGEMM_UNROLL_N).c +$(KDIR)zgemm3m_otcopyi$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)zgemm3m_incopyb$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(DGEMM_UNROLL_M).c +$(KDIR)zgemm3m_incopyb$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) -c -DDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA $< -o $@ -$(KDIR)zgemm3m_incopyr$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(DGEMM_UNROLL_M).c +$(KDIR)zgemm3m_incopyr$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) -c -DDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)zgemm3m_incopyi$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(DGEMM_UNROLL_M).c +$(KDIR)zgemm3m_incopyi$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) 
-c -DDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)zgemm3m_itcopyb$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(DGEMM_UNROLL_M).c +$(KDIR)zgemm3m_itcopyb$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) -c -DDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA $< -o $@ -$(KDIR)zgemm3m_itcopyr$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(DGEMM_UNROLL_M).c +$(KDIR)zgemm3m_itcopyr$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) -c -DDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)zgemm3m_itcopyi$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(DGEMM_UNROLL_M).c +$(KDIR)zgemm3m_itcopyi$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) -c -DDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)xgemm3m_oncopyb$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(QGEMM_UNROLL_N).c +$(KDIR)xgemm3m_oncopyb$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(XGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)xgemm3m_oncopyr$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(QGEMM_UNROLL_N).c +$(KDIR)xgemm3m_oncopyr$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(XGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)xgemm3m_oncopyi$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(QGEMM_UNROLL_N).c +$(KDIR)xgemm3m_oncopyi$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(XGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)xgemm3m_otcopyb$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(QGEMM_UNROLL_N).c +$(KDIR)xgemm3m_otcopyb$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(XGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)xgemm3m_otcopyr$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(QGEMM_UNROLL_N).c +$(KDIR)xgemm3m_otcopyr$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(XGEMM3M_UNROLL_N).c 
$(CC) $(PFLAGS) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)xgemm3m_otcopyi$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(QGEMM_UNROLL_N).c +$(KDIR)xgemm3m_otcopyi$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(XGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)xgemm3m_incopyb$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(QGEMM_UNROLL_M).c +$(KDIR)xgemm3m_incopyb$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(XGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) -c -DXDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA $< -o $@ -$(KDIR)xgemm3m_incopyr$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(QGEMM_UNROLL_M).c +$(KDIR)xgemm3m_incopyr$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(XGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) -c -DXDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)xgemm3m_incopyi$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(QGEMM_UNROLL_M).c +$(KDIR)xgemm3m_incopyi$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_ncopy_$(XGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) -c -DXDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)xgemm3m_itcopyb$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(QGEMM_UNROLL_M).c +$(KDIR)xgemm3m_itcopyb$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(XGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) -c -DXDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA $< -o $@ -$(KDIR)xgemm3m_itcopyr$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(QGEMM_UNROLL_M).c +$(KDIR)xgemm3m_itcopyr$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(XGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) -c -DXDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)xgemm3m_itcopyi$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(QGEMM_UNROLL_M).c +$(KDIR)xgemm3m_itcopyi$(TSUFFIX).$(PSUFFIX) : generic/zgemm3m_tcopy_$(XGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) -c -DXDOUBLE -DCOMPLEX -DICOPY -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)csymm3m_oucopyb$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(SGEMM_UNROLL_N).c +$(KDIR)csymm3m_oucopyb$(TSUFFIX).$(PSUFFIX) : 
generic/zsymm3m_ucopy_$(CGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)csymm3m_olcopyb$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(SGEMM_UNROLL_N).c +$(KDIR)csymm3m_olcopyb$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(CGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)csymm3m_oucopyr$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(SGEMM_UNROLL_N).c +$(KDIR)csymm3m_oucopyr$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(CGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)csymm3m_olcopyr$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(SGEMM_UNROLL_N).c +$(KDIR)csymm3m_olcopyr$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(CGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)csymm3m_oucopyi$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(SGEMM_UNROLL_N).c +$(KDIR)csymm3m_oucopyi$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(CGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)csymm3m_olcopyi$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(SGEMM_UNROLL_N).c +$(KDIR)csymm3m_olcopyi$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(CGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)csymm3m_iucopyb$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(SGEMM_UNROLL_M).c +$(KDIR)csymm3m_iucopyb$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(CGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -UUSE_ALPHA $< -o $@ -$(KDIR)csymm3m_ilcopyb$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(SGEMM_UNROLL_M).c +$(KDIR)csymm3m_ilcopyb$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(CGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE 
-DCOMPLEX -UUSE_ALPHA $< -o $@ -$(KDIR)csymm3m_iucopyr$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(SGEMM_UNROLL_M).c +$(KDIR)csymm3m_iucopyr$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(CGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)csymm3m_ilcopyr$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(SGEMM_UNROLL_M).c +$(KDIR)csymm3m_ilcopyr$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(CGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)csymm3m_iucopyi$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(SGEMM_UNROLL_M).c +$(KDIR)csymm3m_iucopyi$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(CGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)csymm3m_ilcopyi$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(SGEMM_UNROLL_M).c +$(KDIR)csymm3m_ilcopyi$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(CGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)zsymm3m_oucopyb$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(DGEMM_UNROLL_N).c +$(KDIR)zsymm3m_oucopyb$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)zsymm3m_olcopyb$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(DGEMM_UNROLL_N).c +$(KDIR)zsymm3m_olcopyb$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)zsymm3m_oucopyr$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(DGEMM_UNROLL_N).c +$(KDIR)zsymm3m_oucopyr$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)zsymm3m_olcopyr$(TSUFFIX).$(PSUFFIX) : 
generic/zsymm3m_lcopy_$(DGEMM_UNROLL_N).c +$(KDIR)zsymm3m_olcopyr$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)zsymm3m_oucopyi$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(DGEMM_UNROLL_N).c +$(KDIR)zsymm3m_oucopyi$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)zsymm3m_olcopyi$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(DGEMM_UNROLL_N).c +$(KDIR)zsymm3m_olcopyi$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)zsymm3m_iucopyb$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(DGEMM_UNROLL_M).c +$(KDIR)zsymm3m_iucopyb$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -UUSE_ALPHA $< -o $@ -$(KDIR)zsymm3m_ilcopyb$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(DGEMM_UNROLL_M).c +$(KDIR)zsymm3m_ilcopyb$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -UUSE_ALPHA $< -o $@ -$(KDIR)zsymm3m_iucopyr$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(DGEMM_UNROLL_M).c +$(KDIR)zsymm3m_iucopyr$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)zsymm3m_ilcopyr$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(DGEMM_UNROLL_M).c +$(KDIR)zsymm3m_ilcopyr$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)zsymm3m_iucopyi$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(DGEMM_UNROLL_M).c +$(KDIR)zsymm3m_iucopyi$(TSUFFIX).$(PSUFFIX) : 
generic/zsymm3m_ucopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)zsymm3m_ilcopyi$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(DGEMM_UNROLL_M).c +$(KDIR)zsymm3m_ilcopyi$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)xsymm3m_oucopyb$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(QGEMM_UNROLL_N).c +$(KDIR)xsymm3m_oucopyb$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(XGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)xsymm3m_olcopyb$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(QGEMM_UNROLL_N).c +$(KDIR)xsymm3m_olcopyb$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(XGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)xsymm3m_oucopyr$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(QGEMM_UNROLL_N).c +$(KDIR)xsymm3m_oucopyr$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(XGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)xsymm3m_olcopyr$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(QGEMM_UNROLL_N).c +$(KDIR)xsymm3m_olcopyr$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(XGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)xsymm3m_oucopyi$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(QGEMM_UNROLL_N).c +$(KDIR)xsymm3m_oucopyi$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(XGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)xsymm3m_olcopyi$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(QGEMM_UNROLL_N).c +$(KDIR)xsymm3m_olcopyi$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(XGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) 
$(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)xsymm3m_iucopyb$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(QGEMM_UNROLL_M).c +$(KDIR)xsymm3m_iucopyb$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(XGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -UUSE_ALPHA $< -o $@ -$(KDIR)xsymm3m_ilcopyb$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(QGEMM_UNROLL_M).c +$(KDIR)xsymm3m_ilcopyb$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(XGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -UUSE_ALPHA $< -o $@ -$(KDIR)xsymm3m_iucopyr$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(QGEMM_UNROLL_M).c +$(KDIR)xsymm3m_iucopyr$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(XGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)xsymm3m_ilcopyr$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(QGEMM_UNROLL_M).c +$(KDIR)xsymm3m_ilcopyr$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(XGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)xsymm3m_iucopyi$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(QGEMM_UNROLL_M).c +$(KDIR)xsymm3m_iucopyi$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_ucopy_$(XGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)xsymm3m_ilcopyi$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(QGEMM_UNROLL_M).c +$(KDIR)xsymm3m_ilcopyi$(TSUFFIX).$(PSUFFIX) : generic/zsymm3m_lcopy_$(XGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)chemm3m_oucopyb$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(SGEMM_UNROLL_N).c +$(KDIR)chemm3m_oucopyb$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(CGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ 
-$(KDIR)chemm3m_olcopyb$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(SGEMM_UNROLL_N).c +$(KDIR)chemm3m_olcopyb$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(CGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)chemm3m_oucopyr$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(SGEMM_UNROLL_N).c +$(KDIR)chemm3m_oucopyr$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(CGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)chemm3m_olcopyr$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(SGEMM_UNROLL_N).c +$(KDIR)chemm3m_olcopyr$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(CGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)chemm3m_oucopyi$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(SGEMM_UNROLL_N).c +$(KDIR)chemm3m_oucopyi$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(CGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)chemm3m_olcopyi$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(SGEMM_UNROLL_N).c +$(KDIR)chemm3m_olcopyi$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(CGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)chemm3m_iucopyb$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(SGEMM_UNROLL_M).c +$(KDIR)chemm3m_iucopyb$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(CGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -UUSE_ALPHA $< -o $@ -$(KDIR)chemm3m_ilcopyb$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(SGEMM_UNROLL_M).c +$(KDIR)chemm3m_ilcopyb$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(CGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -UUSE_ALPHA $< -o $@ -$(KDIR)chemm3m_iucopyr$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(SGEMM_UNROLL_M).c 
+$(KDIR)chemm3m_iucopyr$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(CGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)chemm3m_ilcopyr$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(SGEMM_UNROLL_M).c +$(KDIR)chemm3m_ilcopyr$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(CGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)chemm3m_iucopyi$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(SGEMM_UNROLL_M).c +$(KDIR)chemm3m_iucopyi$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(CGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)chemm3m_ilcopyi$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(SGEMM_UNROLL_M).c +$(KDIR)chemm3m_ilcopyi$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(CGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -UDOUBLE -DCOMPLEX -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)zhemm3m_oucopyb$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(DGEMM_UNROLL_N).c +$(KDIR)zhemm3m_oucopyb$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)zhemm3m_olcopyb$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(DGEMM_UNROLL_N).c +$(KDIR)zhemm3m_olcopyb$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)zhemm3m_oucopyr$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(DGEMM_UNROLL_N).c +$(KDIR)zhemm3m_oucopyr$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)zhemm3m_olcopyr$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(DGEMM_UNROLL_N).c +$(KDIR)zhemm3m_olcopyr$(TSUFFIX).$(PSUFFIX) : 
generic/zhemm3m_lcopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)zhemm3m_oucopyi$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(DGEMM_UNROLL_N).c +$(KDIR)zhemm3m_oucopyi$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)zhemm3m_olcopyi$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(DGEMM_UNROLL_N).c +$(KDIR)zhemm3m_olcopyi$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(ZGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)zhemm3m_iucopyb$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(DGEMM_UNROLL_M).c +$(KDIR)zhemm3m_iucopyb$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -UUSE_ALPHA $< -o $@ -$(KDIR)zhemm3m_ilcopyb$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(DGEMM_UNROLL_M).c +$(KDIR)zhemm3m_ilcopyb$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -UUSE_ALPHA $< -o $@ -$(KDIR)zhemm3m_iucopyr$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(DGEMM_UNROLL_M).c +$(KDIR)zhemm3m_iucopyr$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)zhemm3m_ilcopyr$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(DGEMM_UNROLL_M).c +$(KDIR)zhemm3m_ilcopyr$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)zhemm3m_iucopyi$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(DGEMM_UNROLL_M).c +$(KDIR)zhemm3m_iucopyi$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c 
-DDOUBLE -DCOMPLEX -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)zhemm3m_ilcopyi$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(DGEMM_UNROLL_M).c +$(KDIR)zhemm3m_ilcopyi$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(ZGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DDOUBLE -DCOMPLEX -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)xhemm3m_oucopyb$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(QGEMM_UNROLL_N).c +$(KDIR)xhemm3m_oucopyb$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(XGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)xhemm3m_olcopyb$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(QGEMM_UNROLL_N).c +$(KDIR)xhemm3m_olcopyb$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(XGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA $< -o $@ -$(KDIR)xhemm3m_oucopyr$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(QGEMM_UNROLL_N).c +$(KDIR)xhemm3m_oucopyr$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(XGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)xhemm3m_olcopyr$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(QGEMM_UNROLL_N).c +$(KDIR)xhemm3m_olcopyr$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(XGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)xhemm3m_oucopyi$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(QGEMM_UNROLL_N).c +$(KDIR)xhemm3m_oucopyi$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(XGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)xhemm3m_olcopyi$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(QGEMM_UNROLL_N).c +$(KDIR)xhemm3m_olcopyi$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(XGEMM3M_UNROLL_N).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -DUSE_ALPHA -DIMAGE_ONLY $< -o $@ 
-$(KDIR)xhemm3m_iucopyb$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(QGEMM_UNROLL_M).c +$(KDIR)xhemm3m_iucopyb$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(XGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -UUSE_ALPHA $< -o $@ -$(KDIR)xhemm3m_ilcopyb$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(QGEMM_UNROLL_M).c +$(KDIR)xhemm3m_ilcopyb$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(XGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -UUSE_ALPHA $< -o $@ -$(KDIR)xhemm3m_iucopyr$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(QGEMM_UNROLL_M).c +$(KDIR)xhemm3m_iucopyr$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(XGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)xhemm3m_ilcopyr$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(QGEMM_UNROLL_M).c +$(KDIR)xhemm3m_ilcopyr$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(XGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -UUSE_ALPHA -DREAL_ONLY $< -o $@ -$(KDIR)xhemm3m_iucopyi$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(QGEMM_UNROLL_M).c +$(KDIR)xhemm3m_iucopyi$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_ucopy_$(XGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ -$(KDIR)xhemm3m_ilcopyi$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(QGEMM_UNROLL_M).c +$(KDIR)xhemm3m_ilcopyi$(TSUFFIX).$(PSUFFIX) : generic/zhemm3m_lcopy_$(XGEMM3M_UNROLL_M).c $(CC) $(PFLAGS) $(NO_UNINITIALIZED_WARN) -c -DXDOUBLE -DCOMPLEX -UUSE_ALPHA -DIMAGE_ONLY $< -o $@ $(KDIR)strsm_iunucopy$(TSUFFIX).$(PSUFFIX) : generic/trsm_uncopy_$(SGEMM_UNROLL_M).c From 8eaa04acbbedef20cf10be20a7a13cf2e00bdeac Mon Sep 17 00:00:00 2001 From: wernsaar Date: Tue, 11 Jun 2013 12:00:49 +0200 Subject: [PATCH 13/15] added zgemm_kernel_2x2_bulldozer.S --- kernel/x86_64/KERNEL.BULLDOZER | 6 +- kernel/x86_64/zgemm_kernel_2x2_bulldozer.S | 1407 
++++++++++++++++++++ 2 files changed, 1410 insertions(+), 3 deletions(-) create mode 100644 kernel/x86_64/zgemm_kernel_2x2_bulldozer.S diff --git a/kernel/x86_64/KERNEL.BULLDOZER b/kernel/x86_64/KERNEL.BULLDOZER index e2fcf5256..9463520d2 100644 --- a/kernel/x86_64/KERNEL.BULLDOZER +++ b/kernel/x86_64/KERNEL.BULLDOZER @@ -28,11 +28,11 @@ CGEMMINCOPYOBJ = cgemm_incopy$(TSUFFIX).$(SUFFIX) CGEMMITCOPYOBJ = cgemm_itcopy$(TSUFFIX).$(SUFFIX) CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX) CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX) -ZGEMMKERNEL = zgemm_kernel_2x2_barcelona.S +ZGEMMKERNEL = zgemm_kernel_2x2_bulldozer.S ZGEMMINCOPY = ZGEMMITCOPY = -ZGEMMONCOPY = zgemm_ncopy_2.S -ZGEMMOTCOPY = zgemm_tcopy_2.S +ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c +ZGEMMOTCOPY = ../generic/zgemm_tcopy_2.c ZGEMMINCOPYOBJ = ZGEMMITCOPYOBJ = ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) diff --git a/kernel/x86_64/zgemm_kernel_2x2_bulldozer.S b/kernel/x86_64/zgemm_kernel_2x2_bulldozer.S new file mode 100644 index 000000000..fa02bbe84 --- /dev/null +++ b/kernel/x86_64/zgemm_kernel_2x2_bulldozer.S @@ -0,0 +1,1407 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. 
*/ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. 
*/ +/*********************************************************************/ + + +#define ASSEMBLER +#include "common.h" + +#define OLD_M %rdi +#define OLD_N %rsi +#define M %r13 +#define J %r14 +#define OLD_K %rdx + +#define A %rcx +#define B %r8 +#define C %r9 +#define LDC %r10 + +#define I %r11 +#define AO %rdi +#define BO %rsi +#define CO1 %r15 +#define K %r12 +#define BI %rbp +#define SP %rbx + +#define BO1 %rdi +#define BO2 %r15 + +#ifndef WINDOWS_ABI + +#define STACKSIZE 96 + +#else + +#define STACKSIZE 320 + +#define OLD_ALPHA_I 40 + STACKSIZE(%rsp) +#define OLD_A 48 + STACKSIZE(%rsp) +#define OLD_B 56 + STACKSIZE(%rsp) +#define OLD_C 64 + STACKSIZE(%rsp) +#define OLD_LDC 72 + STACKSIZE(%rsp) +#define OLD_OFFSET 80 + STACKSIZE(%rsp) + +#endif + +#define L_BUFFER_SIZE 512*8*4 +#define LB2_OFFSET 512*8*2 + +#define Ndiv6 24(%rsp) +#define Nmod6 32(%rsp) +#define N 40(%rsp) +#define ALPHA_R 48(%rsp) +#define ALPHA_I 56(%rsp) +#define OFFSET 64(%rsp) +#define KK 72(%rsp) +#define KKK 80(%rsp) +#define BUFFER1 128(%rsp) +#define BUFFER2 LB2_OFFSET+128(%rsp) + +#if defined(OS_WINDOWS) +#if L_BUFFER_SIZE > 16384 +#define STACK_TOUCH \ + movl $0, 4096 * 4(%rsp);\ + movl $0, 4096 * 3(%rsp);\ + movl $0, 4096 * 2(%rsp);\ + movl $0, 4096 * 1(%rsp); +#elif L_BUFFER_SIZE > 12288 +#define STACK_TOUCH \ + movl $0, 4096 * 3(%rsp);\ + movl $0, 4096 * 2(%rsp);\ + movl $0, 4096 * 1(%rsp); +#elif L_BUFFER_SIZE > 8192 +#define STACK_TOUCH \ + movl $0, 4096 * 2(%rsp);\ + movl $0, 4096 * 1(%rsp); +#elif L_BUFFER_SIZE > 4096 +#define STACK_TOUCH \ + movl $0, 4096 * 1(%rsp); +#else +#define STACK_TOUCH +#endif +#else +#define STACK_TOUCH +#endif + + +#if defined(NN) || defined(NT) || defined(TN) || defined(TT) +#define VFMADD_R vfmaddpd +#define VFMADD_I vfmaddpd +#elif defined(RN) || defined(RT) || defined(CN) || defined(CT) +#define VFMADD_R vfnmaddpd +#define VFMADD_I vfmaddpd +#elif defined(NR) || defined(NC) || defined(TR) || defined(TC) +#define VFMADD_R vfmaddpd +#define 
VFMADD_I vfnmaddpd +#else +#define VFMADD_R vfnmaddpd +#define VFMADD_I vfnmaddpd +#endif + + +#define A_PR1 384 +#define B_PR1 192 + +#define KERNEL2x2_1(xx) \ + prefetcht0 A_PR1(AO,%rax,SIZE) ;\ + vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vmovddup -8 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovups -6 * SIZE(AO, %rax, SIZE), %xmm1 ;\ + VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\ + vmovddup -7 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\ + vmovddup -6 * SIZE(BO, BI, SIZE), %xmm6 ;\ + VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\ + VFMADD_R %xmm14,%xmm6,%xmm1,%xmm14 ;\ + vmovddup -5 * SIZE(BO, BI, SIZE), %xmm7 ;\ + VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\ + VFMADD_I %xmm15,%xmm7,%xmm1,%xmm15 ;\ + +#define KERNEL2x2_2(xx) \ + vmovups -4 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vmovddup -4 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovups -2 * SIZE(AO, %rax, SIZE), %xmm1 ;\ + VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\ + vmovddup -3 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\ + vmovddup -2 * SIZE(BO, BI, SIZE), %xmm6 ;\ + VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\ + VFMADD_R %xmm14,%xmm6,%xmm1,%xmm14 ;\ + vmovddup -1 * SIZE(BO, BI, SIZE), %xmm7 ;\ + VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\ + VFMADD_I %xmm15,%xmm7,%xmm1,%xmm15 ;\ + +#define KERNEL2x2_3(xx) \ + prefetcht0 A_PR1+64(AO,%rax,SIZE) ;\ + vmovups 0 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vmovddup 0 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovups 2 * SIZE(AO, %rax, SIZE), %xmm1 ;\ + VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\ + vmovddup 1 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\ + vmovddup 2 * SIZE(BO, BI, SIZE), %xmm6 ;\ + VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\ + VFMADD_R %xmm14,%xmm6,%xmm1,%xmm14 ;\ + vmovddup 3 * SIZE(BO, BI, SIZE), %xmm7 ;\ + VFMADD_I 
%xmm11,%xmm7,%xmm0,%xmm11 ;\ + VFMADD_I %xmm15,%xmm7,%xmm1,%xmm15 ;\ + +#define KERNEL2x2_4(xx) \ + vmovups 4 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vmovddup 4 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovups 6 * SIZE(AO, %rax, SIZE), %xmm1 ;\ + VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\ + vmovddup 5 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\ + vmovddup 6 * SIZE(BO, BI, SIZE), %xmm6 ;\ + VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\ + VFMADD_R %xmm14,%xmm6,%xmm1,%xmm14 ;\ + vmovddup 7 * SIZE(BO, BI, SIZE), %xmm7 ;\ + VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\ + VFMADD_I %xmm15,%xmm7,%xmm1,%xmm15 ;\ + addq $16, BI ;\ + addq $16, %rax ;\ + + +#define KERNEL2x2_SUB(xx) \ + vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vmovddup -8 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovups -6 * SIZE(AO, %rax, SIZE), %xmm1 ;\ + VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\ + vmovddup -7 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\ + vmovddup -6 * SIZE(BO, BI, SIZE), %xmm6 ;\ + VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\ + VFMADD_R %xmm14,%xmm6,%xmm1,%xmm14 ;\ + vmovddup -5 * SIZE(BO, BI, SIZE), %xmm7 ;\ + VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\ + VFMADD_I %xmm15,%xmm7,%xmm1,%xmm15 ;\ + addq $4, BI ;\ + addq $4, %rax ;\ + +/************************************************************************************************/ + +#define KERNEL1x2_1(xx) \ + prefetcht0 A_PR1(AO,%rax,SIZE) ;\ + vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vmovddup -8 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovddup -7 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + vmovddup -6 * SIZE(BO, BI, SIZE), %xmm6 ;\ + VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\ + vmovddup -5 * SIZE(BO, BI, SIZE), %xmm7 ;\ + VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\ + +#define KERNEL1x2_2(xx) \ + vmovups -6 * SIZE(AO, %rax, SIZE), %xmm0 ;\ 
+ vmovddup -4 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\ + vmovddup -3 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + vmovddup -2 * SIZE(BO, BI, SIZE), %xmm6 ;\ + VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\ + vmovddup -1 * SIZE(BO, BI, SIZE), %xmm7 ;\ + VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\ + +#define KERNEL1x2_3(xx) \ + vmovups -4 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vmovddup 0 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovddup 1 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + vmovddup 2 * SIZE(BO, BI, SIZE), %xmm6 ;\ + VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\ + vmovddup 3 * SIZE(BO, BI, SIZE), %xmm7 ;\ + VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\ + +#define KERNEL1x2_4(xx) \ + vmovups -2 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vmovddup 4 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovddup 5 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + vmovddup 6 * SIZE(BO, BI, SIZE), %xmm6 ;\ + VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\ + vmovddup 7 * SIZE(BO, BI, SIZE), %xmm7 ;\ + VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\ + addq $16, BI ;\ + addq $8 , %rax ;\ + + +#define KERNEL1x2_SUB(xx) \ + vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vmovddup -8 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovddup -7 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + vmovddup -6 * SIZE(BO, BI, SIZE), %xmm6 ;\ + VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\ + vmovddup -5 * SIZE(BO, BI, SIZE), %xmm7 ;\ + VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\ + addq $4, BI ;\ + addq $2, %rax ;\ + +/************************************************************************************************/ + +#define KERNEL2x1_1(xx) \ + prefetcht0 A_PR1(AO,%rax,SIZE) ;\ + vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vmovddup -4 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovups -6 * SIZE(AO, 
%rax, SIZE), %xmm1 ;\ + VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\ + vmovddup -3 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\ + +#define KERNEL2x1_2(xx) \ + vmovups -4 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vmovddup -2 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovups -2 * SIZE(AO, %rax, SIZE), %xmm1 ;\ + VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\ + vmovddup -1 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\ + +#define KERNEL2x1_3(xx) \ + prefetcht0 A_PR1+64(AO,%rax,SIZE) ;\ + vmovups 0 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vmovddup 0 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovups 2 * SIZE(AO, %rax, SIZE), %xmm1 ;\ + VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\ + vmovddup 1 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\ + +#define KERNEL2x1_4(xx) \ + vmovups 4 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vmovddup 2 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovups 6 * SIZE(AO, %rax, SIZE), %xmm1 ;\ + VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\ + vmovddup 3 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\ + addq $8, BI ;\ + addq $16, %rax ;\ + + +#define KERNEL2x1_SUB(xx) \ + vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vmovddup -4 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovups -6 * SIZE(AO, %rax, SIZE), %xmm1 ;\ + VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\ + vmovddup -3 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\ + addq $2, BI ;\ + addq $4, %rax ;\ + + +/************************************************************************************************/ + +#define KERNEL1x1_1(xx) \ + prefetcht0 A_PR1(AO,%rax,SIZE) ;\ + vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vmovddup -4 * 
SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovddup -3 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + +#define KERNEL1x1_2(xx) \ + vmovups -6 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vmovddup -2 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovddup -1 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + +#define KERNEL1x1_3(xx) \ + vmovups -4 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vmovddup 0 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovddup 1 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + +#define KERNEL1x1_4(xx) \ + vmovups -2 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vmovddup 2 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovddup 3 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + addq $8, BI ;\ + addq $8, %rax ;\ + + +#define KERNEL1x1_SUB(xx) \ + vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vmovddup -4 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovddup -3 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + addq $2, BI ;\ + addq $2, %rax ;\ + + +/************************************************************************************************/ + + + + + PROLOGUE + PROFCODE + + subq $STACKSIZE, %rsp + movq %rbx, (%rsp) + movq %rbp, 8(%rsp) + movq %r12, 16(%rsp) + movq %r13, 24(%rsp) + movq %r14, 32(%rsp) + movq %r15, 40(%rsp) + + vzeroupper + +#ifdef WINDOWS_ABI + movq %rdi, 48(%rsp) + movq %rsi, 56(%rsp) + movups %xmm6, 64(%rsp) + movups %xmm7, 80(%rsp) + movups %xmm8, 96(%rsp) + movups %xmm9, 112(%rsp) + movups %xmm10, 128(%rsp) + movups %xmm11, 144(%rsp) + movups %xmm12, 160(%rsp) + movups %xmm13, 176(%rsp) + movups %xmm14, 192(%rsp) + movups %xmm15, 208(%rsp) + + movq ARG1, OLD_M + movq ARG2, OLD_N + movq ARG3, OLD_K + movq OLD_A, A + movq OLD_B, B + movq OLD_C, C + movq OLD_LDC, LDC +#ifdef TRMMKERNEL + movsd OLD_OFFSET, %xmm12 +#endif 
+ vmovaps %xmm3, %xmm0 + +#else + movq STACKSIZE + 8(%rsp), LDC +#ifdef TRMMKERNEL + movsd STACKSIZE + 16(%rsp), %xmm12 +#endif + +#endif + + movq %rsp, SP # save old stack + subq $128 + L_BUFFER_SIZE, %rsp + andq $-4096, %rsp # align stack + + STACK_TOUCH + + cmpq $0, OLD_M + je .L999 + + cmpq $0, OLD_N + je .L999 + + cmpq $0, OLD_K + je .L999 + + movq OLD_M, M + movq OLD_N, N + movq OLD_K, K + + vmovsd %xmm0, ALPHA_R + vmovsd %xmm1, ALPHA_I + + salq $ZBASE_SHIFT, LDC + + movq N, %rax + xorq %rdx, %rdx + movq $2, %rdi + divq %rdi // N / 2 + movq %rax, Ndiv6 // N / 2 + movq %rdx, Nmod6 // N % 2 + + + +#ifdef TRMMKERNEL + vmovsd %xmm12, OFFSET + vmovsd %xmm12, KK +#ifndef LEFT + negq KK +#endif +#endif + +.L2_0: + + movq Ndiv6, J + cmpq $0, J + je .L1_0 + ALIGN_4 + + + +.L2_01: + // copy to sub buffer + movq B, BO1 + leaq BUFFER1, BO // first buffer to BO + movq K, %rax + ALIGN_4 + +.L2_02b: + + vmovups (BO1), %xmm0 + vmovups 2 * SIZE(BO1), %xmm1 + vmovups %xmm0, (BO) + vmovups %xmm1, 2 * SIZE(BO) + addq $4*SIZE,BO1 + addq $4*SIZE,BO + decq %rax + jnz .L2_02b + +.L2_02c: + + movq BO1, B // next offset of B + +.L2_10: + movq C, CO1 + leaq (C, LDC, 2), C // c += 2 * ldc + +#if defined(TRMMKERNEL) && defined(LEFT) + movq OFFSET, %rax + movq %rax, KK +#endif + + movq A, AO // aoffset = a + addq $8 * SIZE, AO + + movq M, I + sarq $1, I // i = (m >> 1) + je .L2_40 + + ALIGN_4 + +.L2_11: + +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $8 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $8 * SIZE, BO + movq %rax, BI // Index for BO + leaq (,BI,4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && 
!defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $2, %rax // number of values in AO +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax // K = K - ( K % 8 ) + je .L2_16 + movq %rax, BI // Index for BO + leaq ( ,BI,4), BI // BI = BI * 4 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_12: + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL2x2_1(xxx) + KERNEL2x2_2(xxx) + prefetcht0 B_PR1+64(BO,BI,SIZE) + KERNEL2x2_3(xxx) + KERNEL2x2_4(xxx) + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL2x2_1(xxx) + KERNEL2x2_2(xxx) + prefetcht0 B_PR1+64(BO,BI,SIZE) + KERNEL2x2_3(xxx) + KERNEL2x2_4(xxx) + + je .L2_16 + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL2x2_1(xxx) + KERNEL2x2_2(xxx) + prefetcht0 B_PR1+64(BO,BI,SIZE) + KERNEL2x2_3(xxx) + KERNEL2x2_4(xxx) + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL2x2_1(xxx) + KERNEL2x2_2(xxx) + prefetcht0 B_PR1+64(BO,BI,SIZE) + KERNEL2x2_3(xxx) + KERNEL2x2_4(xxx) + + je .L2_16 + + jmp .L2_12 + ALIGN_4 + +.L2_16: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_19 + + movq %rax, BI // Index for BO + leaq ( ,BI,4), BI // BI = BI * 4 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_17: + + KERNEL2x2_SUB(xxx) + jl .L2_17 + ALIGN_4 + + +.L2_19: + + vmovddup ALPHA_R, %xmm0 + vmovddup ALPHA_I, %xmm1 + + // swap high and low 64 bytes + vshufpd $0x01, %xmm9 , %xmm9, %xmm9 + vshufpd $0x01, %xmm11, %xmm11, %xmm11 + vshufpd $0x01, %xmm13, %xmm13, %xmm13 + vshufpd $0x01, %xmm15, %xmm15, %xmm15 + +#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \ + defined(NR) || defined(NC) || defined(TR) || defined(TC) + + vaddsubpd %xmm9, %xmm8 
, %xmm8 + vaddsubpd %xmm11,%xmm10, %xmm10 + vaddsubpd %xmm13,%xmm12, %xmm12 + vaddsubpd %xmm15,%xmm14, %xmm14 + + vshufpd $0x01, %xmm8 , %xmm8, %xmm9 + vshufpd $0x01, %xmm10, %xmm10, %xmm11 + vshufpd $0x01, %xmm12, %xmm12, %xmm13 + vshufpd $0x01, %xmm14, %xmm14, %xmm15 + +#else + vaddsubpd %xmm8, %xmm9 ,%xmm9 + vaddsubpd %xmm10, %xmm11,%xmm11 + vaddsubpd %xmm12, %xmm13,%xmm13 + vaddsubpd %xmm14, %xmm15,%xmm15 + + vmovapd %xmm9, %xmm8 + vmovapd %xmm11, %xmm10 + vmovapd %xmm13, %xmm12 + vmovapd %xmm15, %xmm14 + + // swap high and low 64 bytes + vshufpd $0x01, %xmm9 , %xmm9, %xmm9 + vshufpd $0x01, %xmm11, %xmm11, %xmm11 + vshufpd $0x01, %xmm13, %xmm13, %xmm13 + vshufpd $0x01, %xmm15, %xmm15, %xmm15 + +#endif + + // multiply with ALPHA_R + vmulpd %xmm8 , %xmm0, %xmm8 + vmulpd %xmm10, %xmm0, %xmm10 + vmulpd %xmm12, %xmm0, %xmm12 + vmulpd %xmm14, %xmm0, %xmm14 + + // multiply with ALPHA_I + vmulpd %xmm9 , %xmm1, %xmm9 + vmulpd %xmm11, %xmm1, %xmm11 + vmulpd %xmm13, %xmm1, %xmm13 + vmulpd %xmm15, %xmm1, %xmm15 + + vaddsubpd %xmm9, %xmm8 , %xmm8 + vaddsubpd %xmm11,%xmm10, %xmm10 + vaddsubpd %xmm13,%xmm12, %xmm12 + vaddsubpd %xmm15,%xmm14, %xmm14 + + + +#ifndef TRMMKERNEL + + vaddpd (CO1), %xmm8 , %xmm8 + vaddpd 2 * SIZE(CO1), %xmm12, %xmm12 + + vaddpd (CO1, LDC), %xmm10, %xmm10 + vaddpd 2 * SIZE(CO1, LDC), %xmm14, %xmm14 + +#endif + + vmovups %xmm8 , (CO1) + vmovups %xmm12 , 2 * SIZE(CO1) + + vmovups %xmm10 , (CO1, LDC) + vmovups %xmm14 , 2 * SIZE(CO1, LDC) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq ( ,BI,4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $2, KK +#endif + + addq $4 * SIZE, CO1 # coffset += 4 + decq I # i -- + jg .L2_11 + ALIGN_4 + + 
+/************************************************************************** +* Rest of M +***************************************************************************/ +.L2_40: + testq $1, M + jz .L2_60 // to next 2 lines of N + + ALIGN_4 + +.L2_41: + +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $8 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $8 * SIZE, BO + movq %rax, BI // Index for BO + leaq (,BI,4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $1, %rax // number of values in AO +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax // K = K - ( K % 8 ) + je .L2_46 + movq %rax, BI // Index for BO + leaq ( ,BI,4), BI // BI = BI * 4 ; number of values + + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_42: + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL1x2_1(xxx) + KERNEL1x2_2(xxx) + prefetcht0 B_PR1+64(BO,BI,SIZE) + KERNEL1x2_3(xxx) + KERNEL1x2_4(xxx) + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL1x2_1(xxx) + KERNEL1x2_2(xxx) + prefetcht0 B_PR1+64(BO,BI,SIZE) + KERNEL1x2_3(xxx) + KERNEL1x2_4(xxx) + + je .L2_46 + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL1x2_1(xxx) + KERNEL1x2_2(xxx) + prefetcht0 B_PR1+64(BO,BI,SIZE) + KERNEL1x2_3(xxx) + KERNEL1x2_4(xxx) + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL1x2_1(xxx) + KERNEL1x2_2(xxx) + prefetcht0 B_PR1+64(BO,BI,SIZE) + KERNEL1x2_3(xxx) + KERNEL1x2_4(xxx) + + je .L2_46 + + jmp 
.L2_42 + ALIGN_4 + +.L2_46: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_49 + + movq %rax, BI // Index for BO + leaq ( ,BI,4), BI // BI = BI * 4 ; number of values + + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_47: + + KERNEL1x2_SUB(xxx) + jl .L2_47 + ALIGN_4 + + +.L2_49: + + vmovddup ALPHA_R, %xmm0 + vmovddup ALPHA_I, %xmm1 + + // swap high and low 64 bytes + vshufpd $0x01, %xmm9 , %xmm9, %xmm9 + vshufpd $0x01, %xmm11, %xmm11, %xmm11 + +#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \ + defined(NR) || defined(NC) || defined(TR) || defined(TC) + + vaddsubpd %xmm9, %xmm8 , %xmm8 + vaddsubpd %xmm11,%xmm10, %xmm10 + + vshufpd $0x01, %xmm8 , %xmm8, %xmm9 + vshufpd $0x01, %xmm10, %xmm10, %xmm11 + +#else + vaddsubpd %xmm8, %xmm9, %xmm9 + vaddsubpd %xmm10,%xmm11, %xmm11 + + vmovapd %xmm9, %xmm8 + vmovapd %xmm11, %xmm10 + + // swap high and low 64 bytes + vshufpd $0x01, %xmm9 , %xmm9, %xmm9 + vshufpd $0x01, %xmm11, %xmm11, %xmm11 + +#endif + + // multiply with ALPHA_R + vmulpd %xmm8 , %xmm0, %xmm8 + vmulpd %xmm10, %xmm0, %xmm10 + + // multiply with ALPHA_I + vmulpd %xmm9 , %xmm1, %xmm9 + vmulpd %xmm11, %xmm1, %xmm11 + + vaddsubpd %xmm9, %xmm8 , %xmm8 + vaddsubpd %xmm11,%xmm10, %xmm10 + + + +#ifndef TRMMKERNEL + + vaddpd (CO1), %xmm8 , %xmm8 + vaddpd (CO1, LDC), %xmm10, %xmm10 + +#endif + + vmovups %xmm8 , (CO1) + vmovups %xmm10 , (CO1, LDC) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq ( ,BI,4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $1, KK +#endif + + addq $2 * SIZE, CO1 # coffset 
+= 2 + decq I # i -- + jg .L2_41 + ALIGN_4 + + + + +.L2_60: + + decq J // j -- + jg .L2_01 // next 2 lines of N + + + +.L1_0: + +/************************************************************************************************ +* Loop for Nmod6 % 2 > 0 +*************************************************************************************************/ + + movq Nmod6, J + andq $1, J // j % 2 + je .L999 + ALIGN_4 + +.L1_01: + // copy to sub buffer + movq B, BO1 + leaq BUFFER1, BO // first buffer to BO + movq K, %rax + ALIGN_4 + +.L1_02b: + + vmovups (BO1), %xmm0 + vmovups %xmm0, (BO) + addq $2*SIZE,BO1 + addq $2*SIZE,BO + decq %rax + jnz .L1_02b + +.L1_02c: + + movq BO1, B // next offset of B + +.L1_10: + movq C, CO1 + leaq (C, LDC, 1), C // c += 1 * ldc + +#if defined(TRMMKERNEL) && defined(LEFT) + movq OFFSET, %rax + movq %rax, KK +#endif + + movq A, AO // aoffset = a + addq $8 * SIZE, AO + + movq M, I + sarq $1, I // i = (m >> 1) + je .L1_40 + + ALIGN_4 + +.L1_11: + +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (,BI,2), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $2, %rax // number of values in AO +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax // K = K - ( K % 8 ) + je .L1_16 + movq %rax, BI // Index for BO + leaq ( ,BI,2), BI // BI = BI * 2 ; number of values + + salq $2, %rax // rax = rax * 4 
; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_12: + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL2x1_1(xxx) + KERNEL2x1_2(xxx) + KERNEL2x1_3(xxx) + KERNEL2x1_4(xxx) + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL2x1_1(xxx) + KERNEL2x1_2(xxx) + KERNEL2x1_3(xxx) + KERNEL2x1_4(xxx) + + je .L1_16 + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL2x1_1(xxx) + KERNEL2x1_2(xxx) + KERNEL2x1_3(xxx) + KERNEL2x1_4(xxx) + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL2x1_1(xxx) + KERNEL2x1_2(xxx) + KERNEL2x1_3(xxx) + KERNEL2x1_4(xxx) + + je .L1_16 + + jmp .L1_12 + ALIGN_4 + +.L1_16: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_19 + + movq %rax, BI // Index for BO + leaq ( ,BI,2), BI // BI = BI * 2 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_17: + + KERNEL2x1_SUB(xxx) + jl .L1_17 + ALIGN_4 + + +.L1_19: + + vmovddup ALPHA_R, %xmm0 + vmovddup ALPHA_I, %xmm1 + + // swap high and low 64 bytes + vshufpd $0x01, %xmm9 , %xmm9, %xmm9 + vshufpd $0x01, %xmm13, %xmm13, %xmm13 + +#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \ + defined(NR) || defined(NC) || defined(TR) || defined(TC) + + vaddsubpd %xmm9, %xmm8 , %xmm8 + vaddsubpd %xmm13,%xmm12 , %xmm12 + + vshufpd $0x01, %xmm8 , %xmm8, %xmm9 + vshufpd $0x01, %xmm12, %xmm12, %xmm13 + +#else + vaddsubpd %xmm8, %xmm9 , %xmm9 + vaddsubpd %xmm12,%xmm13, %xmm13 + + vmovapd %xmm9, %xmm8 + vmovapd %xmm13, %xmm12 + + // swap high and low 64 bytes + vshufpd $0x01, %xmm9 , %xmm9, %xmm9 + vshufpd $0x01, %xmm13, %xmm13, %xmm13 + +#endif + + // multiply with ALPHA_R + vmulpd %xmm8 , %xmm0, %xmm8 + vmulpd %xmm12, %xmm0, %xmm12 + + // multiply with ALPHA_I + vmulpd %xmm9 , %xmm1, %xmm9 + vmulpd %xmm13, %xmm1, %xmm13 + + vaddsubpd %xmm9, %xmm8 , %xmm8 + vaddsubpd %xmm13, %xmm12, %xmm12 + + + +#ifndef TRMMKERNEL + + 
vaddpd (CO1), %xmm8 , %xmm8 + vaddpd 2 * SIZE(CO1), %xmm12, %xmm12 + +#endif + + vmovups %xmm8 , (CO1) + vmovups %xmm12 , 2 * SIZE(CO1) + + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq ( ,BI,2), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $2, KK +#endif + + addq $4 * SIZE, CO1 # coffset += 4 + decq I # i -- + jg .L1_11 + ALIGN_4 + + +/************************************************************************** +* Rest of M +***************************************************************************/ +.L1_40: + testq $1, M + jz .L999 + + ALIGN_4 + +.L1_41: + +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (,BI,2), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $1, %rax // number of values in AO +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax // K = K - ( K % 8 ) + je .L1_46 + movq %rax, BI // Index for BO + leaq ( ,BI,2), BI // BI = BI * 2 ; number of values + + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + 
ALIGN_4 + +.L1_42: + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL1x1_1(xxx) + KERNEL1x1_2(xxx) + KERNEL1x1_3(xxx) + KERNEL1x1_4(xxx) + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL1x1_1(xxx) + KERNEL1x1_2(xxx) + KERNEL1x1_3(xxx) + KERNEL1x1_4(xxx) + + je .L1_46 + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL1x1_1(xxx) + KERNEL1x1_2(xxx) + KERNEL1x1_3(xxx) + KERNEL1x1_4(xxx) + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL1x1_1(xxx) + KERNEL1x1_2(xxx) + KERNEL1x1_3(xxx) + KERNEL1x1_4(xxx) + + je .L1_46 + + jmp .L1_42 + ALIGN_4 + +.L1_46: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_49 + + movq %rax, BI // Index for BO + leaq ( ,BI,2), BI // BI = BI * 2 ; number of values + + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_47: + + KERNEL1x1_SUB(xxx) + jl .L1_47 + ALIGN_4 + + +.L1_49: + + vmovddup ALPHA_R, %xmm0 + vmovddup ALPHA_I, %xmm1 + + // swap high and low 64 bytes + vshufpd $0x01, %xmm9 , %xmm9, %xmm9 + +#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \ + defined(NR) || defined(NC) || defined(TR) || defined(TC) + + vaddsubpd %xmm9, %xmm8, %xmm8 + + vshufpd $0x01, %xmm8 , %xmm8, %xmm9 + +#else + vaddsubpd %xmm8, %xmm9, %xmm9 + + vmovapd %xmm9, %xmm8 + + // swap high and low 64 bytes + vshufpd $0x01, %xmm9 , %xmm9, %xmm9 + +#endif + + // multiply with ALPHA_R + vmulpd %xmm8 , %xmm0, %xmm8 + + // multiply with ALPHA_I + vmulpd %xmm9 , %xmm1, %xmm9 + + vaddsubpd %xmm9 ,%xmm8, %xmm8 + + + +#ifndef TRMMKERNEL + + vaddpd (CO1), %xmm8 , %xmm8 + +#endif + + vmovups %xmm8 , (CO1) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq ( ,BI,2), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, 
SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $1, KK +#endif + + addq $2 * SIZE, CO1 # coffset += 2 + decq I # i -- + jg .L1_41 + ALIGN_4 + + + + + + +.L999: + movq SP, %rsp + movq (%rsp), %rbx + movq 8(%rsp), %rbp + movq 16(%rsp), %r12 + movq 24(%rsp), %r13 + movq 32(%rsp), %r14 + movq 40(%rsp), %r15 + +#ifdef WINDOWS_ABI + movq 48(%rsp), %rdi + movq 56(%rsp), %rsi + movups 64(%rsp), %xmm6 + movups 80(%rsp), %xmm7 + movups 96(%rsp), %xmm8 + movups 112(%rsp), %xmm9 + movups 128(%rsp), %xmm10 + movups 144(%rsp), %xmm11 + movups 160(%rsp), %xmm12 + movups 176(%rsp), %xmm13 + movups 192(%rsp), %xmm14 + movups 208(%rsp), %xmm15 +#endif + + addq $STACKSIZE, %rsp + ret + + EPILOGUE From a789b588cdae72fddda5ba48037879bbd418b2d3 Mon Sep 17 00:00:00 2001 From: wernsaar Date: Wed, 12 Jun 2013 15:55:27 +0200 Subject: [PATCH 14/15] added cgemm_kernel_4x2_bulldozer.S --- kernel/x86_64/KERNEL.BULLDOZER | 6 +- kernel/x86_64/cgemm_kernel_4x2_bulldozer.S | 1900 ++++++++++++++++++++ 2 files changed, 1903 insertions(+), 3 deletions(-) create mode 100644 kernel/x86_64/cgemm_kernel_4x2_bulldozer.S diff --git a/kernel/x86_64/KERNEL.BULLDOZER b/kernel/x86_64/KERNEL.BULLDOZER index 9463520d2..8b3d1084a 100644 --- a/kernel/x86_64/KERNEL.BULLDOZER +++ b/kernel/x86_64/KERNEL.BULLDOZER @@ -19,11 +19,11 @@ DGEMMINCOPYOBJ = dgemm_incopy$(TSUFFIX).$(SUFFIX) DGEMMITCOPYOBJ = dgemm_itcopy$(TSUFFIX).$(SUFFIX) DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) -CGEMMKERNEL = zgemm_kernel_4x2_barcelona.S +CGEMMKERNEL = cgemm_kernel_4x2_bulldozer.S CGEMMINCOPY = ../generic/zgemm_ncopy_4.c CGEMMITCOPY = ../generic/zgemm_tcopy_4.c -CGEMMONCOPY = zgemm_ncopy_2.S -CGEMMOTCOPY = zgemm_tcopy_2.S +CGEMMONCOPY = ../generic/zgemm_ncopy_2.c +CGEMMOTCOPY = ../generic/zgemm_tcopy_2.c CGEMMINCOPYOBJ = cgemm_incopy$(TSUFFIX).$(SUFFIX) CGEMMITCOPYOBJ = cgemm_itcopy$(TSUFFIX).$(SUFFIX) CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX) diff 
--git a/kernel/x86_64/cgemm_kernel_4x2_bulldozer.S b/kernel/x86_64/cgemm_kernel_4x2_bulldozer.S new file mode 100644 index 000000000..8cf8b54c7 --- /dev/null +++ b/kernel/x86_64/cgemm_kernel_4x2_bulldozer.S @@ -0,0 +1,1900 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. 
*/ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + + +#define ASSEMBLER +#include "common.h" + +#define OLD_M %rdi +#define OLD_N %rsi +#define M %r13 +#define J %r14 +#define OLD_K %rdx + +#define A %rcx +#define B %r8 +#define C %r9 +#define LDC %r10 + +#define I %r11 +#define AO %rdi +#define BO %rsi +#define CO1 %r15 +#define K %r12 +#define BI %rbp +#define SP %rbx + +#define BO1 %rdi +#define BO2 %r15 + +#ifndef WINDOWS_ABI + +#define STACKSIZE 96 + +#else + +#define STACKSIZE 320 + +#define OLD_ALPHA_I 40 + STACKSIZE(%rsp) +#define OLD_A 48 + STACKSIZE(%rsp) +#define OLD_B 56 + STACKSIZE(%rsp) +#define OLD_C 64 + STACKSIZE(%rsp) +#define OLD_LDC 72 + STACKSIZE(%rsp) +#define OLD_OFFSET 80 + STACKSIZE(%rsp) + +#endif + +#define L_BUFFER_SIZE 512*8*4 +#define LB2_OFFSET 512*8*2 + +#define Ndiv6 24(%rsp) +#define Nmod6 32(%rsp) +#define N 40(%rsp) +#define ALPHA_R 48(%rsp) +#define ALPHA_I 56(%rsp) +#define OFFSET 64(%rsp) +#define KK 72(%rsp) +#define KKK 80(%rsp) +#define BUFFER1 128(%rsp) +#define BUFFER2 LB2_OFFSET+128(%rsp) + +#if defined(OS_WINDOWS) +#if L_BUFFER_SIZE > 16384 +#define STACK_TOUCH \ + movl $0, 4096 * 4(%rsp);\ + movl $0, 4096 * 3(%rsp);\ + movl $0, 4096 * 2(%rsp);\ + movl $0, 4096 * 1(%rsp); +#elif L_BUFFER_SIZE > 12288 +#define STACK_TOUCH \ + movl $0, 4096 * 3(%rsp);\ + movl $0, 4096 * 2(%rsp);\ + movl $0, 4096 * 1(%rsp); +#elif L_BUFFER_SIZE > 8192 +#define STACK_TOUCH \ + movl $0, 4096 * 2(%rsp);\ + movl $0, 4096 * 1(%rsp); +#elif L_BUFFER_SIZE > 4096 +#define STACK_TOUCH \ + movl $0, 4096 * 1(%rsp); +#else +#define STACK_TOUCH +#endif +#else +#define STACK_TOUCH +#endif + + +#if defined(NN) || defined(NT) || defined(TN) || defined(TT) +#define 
VFMADD_R vfmaddps +#define VFMADD_I vfmaddps +#elif defined(RN) || defined(RT) || defined(CN) || defined(CT) +#define VFMADD_R vfnmaddps +#define VFMADD_I vfmaddps +#elif defined(NR) || defined(NC) || defined(TR) || defined(TC) +#define VFMADD_R vfmaddps +#define VFMADD_I vfnmaddps +#else +#define VFMADD_R vfnmaddps +#define VFMADD_I vfnmaddps +#endif + + + +#define A_PR1 384 +#define B_PR1 192 + +#define KERNEL4x2_1(xx) \ + prefetcht0 A_PR1(AO,%rax,SIZE) ;\ + vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss -8 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovups -12 * SIZE(AO, %rax, SIZE), %xmm1 ;\ + VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\ + vbroadcastss -7 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\ + vbroadcastss -6 * SIZE(BO, BI, SIZE), %xmm6 ;\ + VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\ + VFMADD_R %xmm14,%xmm6,%xmm1,%xmm14 ;\ + vbroadcastss -5 * SIZE(BO, BI, SIZE), %xmm7 ;\ + VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\ + VFMADD_I %xmm15,%xmm7,%xmm1,%xmm15 ;\ + +#define KERNEL4x2_2(xx) \ + vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovups -4 * SIZE(AO, %rax, SIZE), %xmm1 ;\ + VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\ + vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\ + vbroadcastss -2 * SIZE(BO, BI, SIZE), %xmm6 ;\ + VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\ + VFMADD_R %xmm14,%xmm6,%xmm1,%xmm14 ;\ + vbroadcastss -1 * SIZE(BO, BI, SIZE), %xmm7 ;\ + VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\ + VFMADD_I %xmm15,%xmm7,%xmm1,%xmm15 ;\ + +#define KERNEL4x2_3(xx) \ + prefetcht0 A_PR1+64(AO,%rax,SIZE) ;\ + vmovups 0 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss 0 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovups 4 * SIZE(AO, %rax, SIZE), %xmm1 ;\ + VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\ + 
vbroadcastss 1 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\ + vbroadcastss 2 * SIZE(BO, BI, SIZE), %xmm6 ;\ + VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\ + VFMADD_R %xmm14,%xmm6,%xmm1,%xmm14 ;\ + vbroadcastss 3 * SIZE(BO, BI, SIZE), %xmm7 ;\ + VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\ + VFMADD_I %xmm15,%xmm7,%xmm1,%xmm15 ;\ + +#define KERNEL4x2_4(xx) \ + vmovups 8 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss 4 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovups 12 * SIZE(AO, %rax, SIZE), %xmm1 ;\ + VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\ + vbroadcastss 5 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\ + vbroadcastss 6 * SIZE(BO, BI, SIZE), %xmm6 ;\ + VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\ + VFMADD_R %xmm14,%xmm6,%xmm1,%xmm14 ;\ + vbroadcastss 7 * SIZE(BO, BI, SIZE), %xmm7 ;\ + VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\ + VFMADD_I %xmm15,%xmm7,%xmm1,%xmm15 ;\ + addq $16, BI ;\ + addq $32, %rax ;\ + + +#define KERNEL4x2_SUB(xx) \ + vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss -8 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovups -12 * SIZE(AO, %rax, SIZE), %xmm1 ;\ + VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\ + vbroadcastss -7 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\ + vbroadcastss -6 * SIZE(BO, BI, SIZE), %xmm6 ;\ + VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\ + VFMADD_R %xmm14,%xmm6,%xmm1,%xmm14 ;\ + vbroadcastss -5 * SIZE(BO, BI, SIZE), %xmm7 ;\ + VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\ + VFMADD_I %xmm15,%xmm7,%xmm1,%xmm15 ;\ + addq $4, BI ;\ + addq $8, %rax ;\ + +/************************************************************************************************/ + +#define KERNEL2x2_1(xx) \ + prefetcht0 A_PR1(AO,%rax,SIZE) ;\ + vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss -8 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R 
%xmm8,%xmm4,%xmm0,%xmm8 ;\ + vbroadcastss -7 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + vbroadcastss -6 * SIZE(BO, BI, SIZE), %xmm6 ;\ + VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\ + vbroadcastss -5 * SIZE(BO, BI, SIZE), %xmm7 ;\ + VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\ + +#define KERNEL2x2_2(xx) \ + vmovups -12 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\ + vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + vbroadcastss -2 * SIZE(BO, BI, SIZE), %xmm6 ;\ + VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\ + vbroadcastss -1 * SIZE(BO, BI, SIZE), %xmm7 ;\ + VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\ + +#define KERNEL2x2_3(xx) \ + vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss 0 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vbroadcastss 1 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + vbroadcastss 2 * SIZE(BO, BI, SIZE), %xmm6 ;\ + VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\ + vbroadcastss 3 * SIZE(BO, BI, SIZE), %xmm7 ;\ + VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\ + +#define KERNEL2x2_4(xx) \ + vmovups -4 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss 4 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vbroadcastss 5 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + vbroadcastss 6 * SIZE(BO, BI, SIZE), %xmm6 ;\ + VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\ + vbroadcastss 7 * SIZE(BO, BI, SIZE), %xmm7 ;\ + VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\ + addq $16, BI ;\ + addq $16, %rax ;\ + + +#define KERNEL2x2_SUB(xx) \ + vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss -8 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vbroadcastss -7 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + vbroadcastss -6 * SIZE(BO, BI, SIZE), %xmm6 ;\ + VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\ + 
vbroadcastss -5 * SIZE(BO, BI, SIZE), %xmm7 ;\ + VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\ + addq $4, BI ;\ + addq $4, %rax ;\ + +/************************************************************************************************/ + +#define KERNEL1x2_1(xx) \ + vmovsd -16 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss -8 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vbroadcastss -7 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + vbroadcastss -6 * SIZE(BO, BI, SIZE), %xmm6 ;\ + VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\ + vbroadcastss -5 * SIZE(BO, BI, SIZE), %xmm7 ;\ + VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\ + +#define KERNEL1x2_2(xx) \ + vmovsd -14 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\ + vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + vbroadcastss -2 * SIZE(BO, BI, SIZE), %xmm6 ;\ + VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\ + vbroadcastss -1 * SIZE(BO, BI, SIZE), %xmm7 ;\ + VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\ + +#define KERNEL1x2_3(xx) \ + vmovsd -12 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss 0 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vbroadcastss 1 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + vbroadcastss 2 * SIZE(BO, BI, SIZE), %xmm6 ;\ + VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\ + vbroadcastss 3 * SIZE(BO, BI, SIZE), %xmm7 ;\ + VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\ + +#define KERNEL1x2_4(xx) \ + vmovsd -10 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss 4 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vbroadcastss 5 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + vbroadcastss 6 * SIZE(BO, BI, SIZE), %xmm6 ;\ + VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\ + vbroadcastss 7 * SIZE(BO, BI, SIZE), %xmm7 ;\ + VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\ + addq $16, BI ;\ + addq $8, %rax 
;\ + + +#define KERNEL1x2_SUB(xx) \ + vmovsd -16 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss -8 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vbroadcastss -7 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + vbroadcastss -6 * SIZE(BO, BI, SIZE), %xmm6 ;\ + VFMADD_R %xmm10,%xmm6,%xmm0,%xmm10 ;\ + vbroadcastss -5 * SIZE(BO, BI, SIZE), %xmm7 ;\ + VFMADD_I %xmm11,%xmm7,%xmm0,%xmm11 ;\ + addq $4, BI ;\ + addq $2, %rax ;\ + + + +/************************************************************************************************/ + +#define KERNEL4x1_1(xx) \ + prefetcht0 A_PR1(AO,%rax,SIZE) ;\ + vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovups -12 * SIZE(AO, %rax, SIZE), %xmm1 ;\ + VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\ + vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\ + +#define KERNEL4x1_2(xx) \ + vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss -2 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovups -4 * SIZE(AO, %rax, SIZE), %xmm1 ;\ + VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\ + vbroadcastss -1 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\ + +#define KERNEL4x1_3(xx) \ + prefetcht0 A_PR1+64(AO,%rax,SIZE) ;\ + vmovups 0 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss 0 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovups 4 * SIZE(AO, %rax, SIZE), %xmm1 ;\ + VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\ + vbroadcastss 1 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\ + +#define KERNEL4x1_4(xx) \ + vmovups 8 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss 2 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovups 12 * SIZE(AO, %rax, SIZE), %xmm1 ;\ + VFMADD_R 
%xmm12,%xmm4,%xmm1,%xmm12 ;\ + vbroadcastss 3 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\ + addq $8, BI ;\ + addq $32, %rax ;\ + + +#define KERNEL4x1_SUB(xx) \ + vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vmovups -12 * SIZE(AO, %rax, SIZE), %xmm1 ;\ + VFMADD_R %xmm12,%xmm4,%xmm1,%xmm12 ;\ + vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + VFMADD_I %xmm13,%xmm5,%xmm1,%xmm13 ;\ + addq $2, BI ;\ + addq $8, %rax ;\ + + +/************************************************************************************************/ + +#define KERNEL2x1_1(xx) \ + prefetcht0 A_PR1(AO,%rax,SIZE) ;\ + vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + +#define KERNEL2x1_2(xx) \ + vmovups -12 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss -2 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vbroadcastss -1 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + +#define KERNEL2x1_3(xx) \ + vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss 0 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vbroadcastss 1 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + +#define KERNEL2x1_4(xx) \ + vmovups -4 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss 2 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vbroadcastss 3 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + addq $8, BI ;\ + addq $16, %rax ;\ + + +#define KERNEL2x1_SUB(xx) \ + vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm5 ;\ + 
VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + addq $2, BI ;\ + addq $4, %rax ;\ + + +/************************************************************************************************/ + +#define KERNEL1x1_1(xx) \ + vmovsd -16 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + +#define KERNEL1x1_2(xx) \ + vmovsd -14 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss -2 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vbroadcastss -1 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + +#define KERNEL1x1_3(xx) \ + vmovsd -12 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss 0 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vbroadcastss 1 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + +#define KERNEL1x1_4(xx) \ + vmovsd -10 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss 2 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vbroadcastss 3 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + addq $8, BI ;\ + addq $8, %rax ;\ + + +#define KERNEL1x1_SUB(xx) \ + vmovsd -16 * SIZE(AO, %rax, SIZE), %xmm0 ;\ + vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm4 ;\ + VFMADD_R %xmm8,%xmm4,%xmm0,%xmm8 ;\ + vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm5 ;\ + VFMADD_I %xmm9,%xmm5,%xmm0,%xmm9 ;\ + addq $2, BI ;\ + addq $2, %rax ;\ + + +/************************************************************************************************/ + + + + + PROLOGUE + PROFCODE + + subq $STACKSIZE, %rsp + movq %rbx, (%rsp) + movq %rbp, 8(%rsp) + movq %r12, 16(%rsp) + movq %r13, 24(%rsp) + movq %r14, 32(%rsp) + movq %r15, 40(%rsp) + + vzeroupper + +#ifdef WINDOWS_ABI + movq %rdi, 48(%rsp) + movq %rsi, 56(%rsp) + movups %xmm6, 64(%rsp) + movups %xmm7, 80(%rsp) + movups %xmm8, 96(%rsp) + movups %xmm9, 112(%rsp) + movups %xmm10, 128(%rsp) + movups 
%xmm11, 144(%rsp) + movups %xmm12, 160(%rsp) + movups %xmm13, 176(%rsp) + movups %xmm14, 192(%rsp) + movups %xmm15, 208(%rsp) + + movq ARG1, OLD_M + movq ARG2, OLD_N + movq ARG3, OLD_K + movq OLD_A, A + movq OLD_B, B + movq OLD_C, C + movq OLD_LDC, LDC +#ifdef TRMMKERNEL + movsd OLD_OFFSET, %xmm12 +#endif + vmovaps %xmm3, %xmm0 + +#else + movq STACKSIZE + 8(%rsp), LDC +#ifdef TRMMKERNEL + movsd STACKSIZE + 16(%rsp), %xmm12 +#endif + +#endif + + movq %rsp, SP # save old stack + subq $128 + L_BUFFER_SIZE, %rsp + andq $-4096, %rsp # align stack + + STACK_TOUCH + + cmpq $0, OLD_M + je .L999 + + cmpq $0, OLD_N + je .L999 + + cmpq $0, OLD_K + je .L999 + + movq OLD_M, M + movq OLD_N, N + movq OLD_K, K + + vmovss %xmm0, ALPHA_R + vmovss %xmm1, ALPHA_I + + salq $ZBASE_SHIFT, LDC + + movq N, %rax + xorq %rdx, %rdx + movq $2, %rdi + divq %rdi // N / 2 + movq %rax, Ndiv6 // N / 2 + movq %rdx, Nmod6 // N % 2 + + + +#ifdef TRMMKERNEL + vmovsd %xmm12, OFFSET + vmovsd %xmm12, KK +#ifndef LEFT + negq KK +#endif +#endif + +.L2_0: + + movq Ndiv6, J + cmpq $0, J + je .L1_0 + ALIGN_4 + + + +.L2_01: + // copy to sub buffer + movq B, BO1 + leaq BUFFER1, BO // first buffer to BO + movq K, %rax + ALIGN_4 + +.L2_02b: + + vmovups (BO1), %xmm0 + vmovups %xmm0, (BO) + addq $4*SIZE,BO1 + addq $4*SIZE,BO + decq %rax + jnz .L2_02b + +.L2_02c: + + movq BO1, B // next offset of B + +.L2_10: + movq C, CO1 + leaq (C, LDC, 2), C // c += 2 * ldc + +#if defined(TRMMKERNEL) && defined(LEFT) + movq OFFSET, %rax + movq %rax, KK +#endif + + movq A, AO // aoffset = a + addq $16 * SIZE, AO + + movq M, I + sarq $2, I // i = (m >> 2) + je .L2_20 + + ALIGN_4 + +.L2_11: + +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $8 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $8 * SIZE, BO + movq %rax, BI // Index for BO + leaq 
(,BI,4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $4, %rax // number of values in AO +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax // K = K - ( K % 8 ) + je .L2_16 + movq %rax, BI // Index for BO + leaq ( ,BI,4), BI // BI = BI * 4 ; number of values + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_12: + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL4x2_1(xxx) + KERNEL4x2_2(xxx) + KERNEL4x2_3(xxx) + KERNEL4x2_4(xxx) + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL4x2_1(xxx) + KERNEL4x2_2(xxx) + KERNEL4x2_3(xxx) + KERNEL4x2_4(xxx) + + je .L2_16 + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL4x2_1(xxx) + KERNEL4x2_2(xxx) + KERNEL4x2_3(xxx) + KERNEL4x2_4(xxx) + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL4x2_1(xxx) + KERNEL4x2_2(xxx) + KERNEL4x2_3(xxx) + KERNEL4x2_4(xxx) + + je .L2_16 + + jmp .L2_12 + ALIGN_4 + +.L2_16: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_19 + + movq %rax, BI // Index for BO + leaq ( ,BI,4), BI // BI = BI * 4 ; number of values + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_17: + + KERNEL4x2_SUB(xxx) + jl .L2_17 + ALIGN_4 + + +.L2_19: + + vbroadcastss ALPHA_R, %xmm0 + vbroadcastss ALPHA_I, %xmm1 + + // swap high and low 64 bytes + vshufps $0xb1, %xmm9 , %xmm9, %xmm9 + vshufps $0xb1, %xmm11, %xmm11, %xmm11 + vshufps $0xb1, %xmm13, %xmm13, %xmm13 + vshufps $0xb1, %xmm15, %xmm15, %xmm15 + +#if defined(NN) || defined(NT) || 
defined(TN) || defined(TT) || \ + defined(NR) || defined(NC) || defined(TR) || defined(TC) + + vaddsubps %xmm9, %xmm8 , %xmm8 + vaddsubps %xmm11,%xmm10, %xmm10 + vaddsubps %xmm13,%xmm12, %xmm12 + vaddsubps %xmm15,%xmm14, %xmm14 + + vshufps $0xb1, %xmm8 , %xmm8, %xmm9 + vshufps $0xb1, %xmm10, %xmm10, %xmm11 + vshufps $0xb1, %xmm12, %xmm12, %xmm13 + vshufps $0xb1, %xmm14, %xmm14, %xmm15 + +#else + vaddsubps %xmm8, %xmm9 ,%xmm9 + vaddsubps %xmm10, %xmm11,%xmm11 + vaddsubps %xmm12, %xmm13,%xmm13 + vaddsubps %xmm14, %xmm15,%xmm15 + + vmovaps %xmm9, %xmm8 + vmovaps %xmm11, %xmm10 + vmovaps %xmm13, %xmm12 + vmovaps %xmm15, %xmm14 + + // swap high and low 64 bytes + vshufps $0xb1, %xmm9 , %xmm9, %xmm9 + vshufps $0xb1, %xmm11, %xmm11, %xmm11 + vshufps $0xb1, %xmm13, %xmm13, %xmm13 + vshufps $0xb1, %xmm15, %xmm15, %xmm15 + +#endif + + // multiply with ALPHA_R + vmulps %xmm8 , %xmm0, %xmm8 + vmulps %xmm10, %xmm0, %xmm10 + vmulps %xmm12, %xmm0, %xmm12 + vmulps %xmm14, %xmm0, %xmm14 + + // multiply with ALPHA_I + vmulps %xmm9 , %xmm1, %xmm9 + vmulps %xmm11, %xmm1, %xmm11 + vmulps %xmm13, %xmm1, %xmm13 + vmulps %xmm15, %xmm1, %xmm15 + + vaddsubps %xmm9, %xmm8 , %xmm8 + vaddsubps %xmm11,%xmm10, %xmm10 + vaddsubps %xmm13,%xmm12, %xmm12 + vaddsubps %xmm15,%xmm14, %xmm14 + + + +#ifndef TRMMKERNEL + + vaddps (CO1), %xmm8 , %xmm8 + vaddps 4 * SIZE(CO1), %xmm12, %xmm12 + + vaddps (CO1, LDC), %xmm10, %xmm10 + vaddps 4 * SIZE(CO1, LDC), %xmm14, %xmm14 + +#endif + + vmovups %xmm8 , (CO1) + vmovups %xmm12 , 4 * SIZE(CO1) + + vmovups %xmm10 , (CO1, LDC) + vmovups %xmm14 , 4 * SIZE(CO1, LDC) + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq ( ,BI,4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && 
defined(LEFT) + addq $4, KK +#endif + + addq $8 * SIZE, CO1 # coffset += 8 + decq I # i -- + jg .L2_11 + ALIGN_4 + + + +/************************************************************************** +* Rest of M +***************************************************************************/ + +.L2_20: + testq $3, M + jz .L2_60 // to next 2 lines of N + + testq $2, M + jz .L2_40 + ALIGN_4 + +.L2_21: + +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $8 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $8 * SIZE, BO + movq %rax, BI // Index for BO + leaq (,BI,4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $2, %rax // number of values in AO +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax // K = K - ( K % 8 ) + je .L2_26 + movq %rax, BI // Index for BO + leaq ( ,BI,4), BI // BI = BI * 4 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_22: + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL2x2_1(xxx) + KERNEL2x2_2(xxx) + KERNEL2x2_3(xxx) + KERNEL2x2_4(xxx) + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL2x2_1(xxx) + KERNEL2x2_2(xxx) + KERNEL2x2_3(xxx) + KERNEL2x2_4(xxx) + + je .L2_26 + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL2x2_1(xxx) + KERNEL2x2_2(xxx) + KERNEL2x2_3(xxx) + KERNEL2x2_4(xxx) + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL2x2_1(xxx) + KERNEL2x2_2(xxx) + KERNEL2x2_3(xxx) + KERNEL2x2_4(xxx) + + je .L2_26 + 
+ jmp .L2_22 + ALIGN_4 + +.L2_26: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_29 + + movq %rax, BI // Index for BO + leaq ( ,BI,4), BI // BI = BI * 4 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_27: + + KERNEL2x2_SUB(xxx) + jl .L2_27 + ALIGN_4 + + +.L2_29: + + vbroadcastss ALPHA_R, %xmm0 + vbroadcastss ALPHA_I, %xmm1 + + // swap high and low 64 bytes + vshufps $0xb1, %xmm9 , %xmm9, %xmm9 + vshufps $0xb1, %xmm11, %xmm11, %xmm11 + +#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \ + defined(NR) || defined(NC) || defined(TR) || defined(TC) + + vaddsubps %xmm9, %xmm8 , %xmm8 + vaddsubps %xmm11,%xmm10, %xmm10 + + vshufps $0xb1, %xmm8 , %xmm8, %xmm9 + vshufps $0xb1, %xmm10, %xmm10, %xmm11 + +#else + vaddsubps %xmm8, %xmm9 ,%xmm9 + vaddsubps %xmm10, %xmm11,%xmm11 + + vmovaps %xmm9, %xmm8 + vmovaps %xmm11, %xmm10 + + // swap high and low 64 bytes + vshufps $0xb1, %xmm9 , %xmm9, %xmm9 + vshufps $0xb1, %xmm11, %xmm11, %xmm11 + +#endif + + // multiply with ALPHA_R + vmulps %xmm8 , %xmm0, %xmm8 + vmulps %xmm10, %xmm0, %xmm10 + + // multiply with ALPHA_I + vmulps %xmm9 , %xmm1, %xmm9 + vmulps %xmm11, %xmm1, %xmm11 + + vaddsubps %xmm9, %xmm8 , %xmm8 + vaddsubps %xmm11,%xmm10, %xmm10 + + + +#ifndef TRMMKERNEL + + vaddps (CO1), %xmm8 , %xmm8 + + vaddps (CO1, LDC), %xmm10, %xmm10 + +#endif + + vmovups %xmm8 , (CO1) + + vmovups %xmm10 , (CO1, LDC) + + + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq ( ,BI,4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $2, KK +#endif + + addq $4 
* SIZE, CO1 # coffset += 4 + decq I # i -- + jg .L2_21 + ALIGN_4 + + + +/**************************************************************************/ +.L2_40: + testq $1, M + jz .L2_60 // to next 2 lines of N + + ALIGN_4 + +.L2_41: + +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $8 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $8 * SIZE, BO + movq %rax, BI // Index for BO + leaq (,BI,4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $1, %rax // number of values in AO +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax // K = K - ( K % 8 ) + je .L2_46 + movq %rax, BI // Index for BO + leaq ( ,BI,4), BI // BI = BI * 4 ; number of values + + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_42: + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL1x2_1(xxx) + KERNEL1x2_2(xxx) + KERNEL1x2_3(xxx) + KERNEL1x2_4(xxx) + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL1x2_1(xxx) + KERNEL1x2_2(xxx) + KERNEL1x2_3(xxx) + KERNEL1x2_4(xxx) + + je .L2_46 + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL1x2_1(xxx) + KERNEL1x2_2(xxx) + KERNEL1x2_3(xxx) + KERNEL1x2_4(xxx) + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL1x2_1(xxx) + KERNEL1x2_2(xxx) + KERNEL1x2_3(xxx) + KERNEL1x2_4(xxx) + + je .L2_46 + + jmp .L2_42 + ALIGN_4 + +.L2_46: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_49 + + movq %rax, BI // 
Index for BO + leaq ( ,BI,4), BI // BI = BI * 4 ; number of values + + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_47: + + KERNEL1x2_SUB(xxx) + jl .L2_47 + ALIGN_4 + + +.L2_49: + + vbroadcastss ALPHA_R, %xmm0 + vbroadcastss ALPHA_I, %xmm1 + + // swap high and low 64 bytes + vshufps $0xb1, %xmm9 , %xmm9, %xmm9 + vshufps $0xb1, %xmm11, %xmm11, %xmm11 + +#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \ + defined(NR) || defined(NC) || defined(TR) || defined(TC) + + vaddsubps %xmm9, %xmm8 , %xmm8 + vaddsubps %xmm11,%xmm10, %xmm10 + + vshufps $0xb1, %xmm8 , %xmm8, %xmm9 + vshufps $0xb1, %xmm10, %xmm10, %xmm11 + +#else + vaddsubps %xmm8, %xmm9 ,%xmm9 + vaddsubps %xmm10, %xmm11,%xmm11 + + vmovaps %xmm9, %xmm8 + vmovaps %xmm11, %xmm10 + + // swap high and low 64 bytes + vshufps $0xb1, %xmm9 , %xmm9, %xmm9 + vshufps $0xb1, %xmm11, %xmm11, %xmm11 + +#endif + + // multiply with ALPHA_R + vmulps %xmm8 , %xmm0, %xmm8 + vmulps %xmm10, %xmm0, %xmm10 + + // multiply with ALPHA_I + vmulps %xmm9 , %xmm1, %xmm9 + vmulps %xmm11, %xmm1, %xmm11 + + vaddsubps %xmm9, %xmm8 , %xmm8 + vaddsubps %xmm11,%xmm10, %xmm10 + + + +#ifndef TRMMKERNEL + + vmovsd (CO1), %xmm14 + vaddps %xmm14, %xmm8 , %xmm8 + + vmovsd (CO1, LDC), %xmm15 + vaddps %xmm15, %xmm10, %xmm10 + +#endif + + vmovsd %xmm8 , (CO1) + + vmovsd %xmm10 , (CO1, LDC) + + + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq ( ,BI,4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $1, KK +#endif + + addq $2 * SIZE, CO1 # coffset += 2 + decq I # i -- + jg .L2_41 + ALIGN_4 + + + + +.L2_60: + + decq J // j -- + jg .L2_01 // 
next 2 lines of N + + + +.L1_0: + +/************************************************************************************************ +* Loop for Nmod6 % 2 > 0 +*************************************************************************************************/ + + movq Nmod6, J + andq $1, J // j % 2 + je .L999 + ALIGN_4 + +.L1_01: + // copy to sub buffer + movq B, BO1 + leaq BUFFER1, BO // first buffer to BO + movq K, %rax + ALIGN_4 + +.L1_02b: + + vmovsd (BO1), %xmm0 + vmovsd %xmm0, (BO) + addq $2*SIZE,BO1 + addq $2*SIZE,BO + decq %rax + jnz .L1_02b + +.L1_02c: + + movq BO1, B // next offset of B + +.L1_10: + movq C, CO1 + leaq (C, LDC, 1), C // c += 1 * ldc + +#if defined(TRMMKERNEL) && defined(LEFT) + movq OFFSET, %rax + movq %rax, KK +#endif + + movq A, AO // aoffset = a + addq $16 * SIZE, AO + + movq M, I + sarq $2, I // i = (m >> 2) + je .L1_20 + + ALIGN_4 + +.L1_11: + +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (,BI,2), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $4, %rax // number of values in AO +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax // K = K - ( K % 8 ) + je .L1_16 + movq %rax, BI // Index for BO + leaq ( ,BI,2), BI // BI = BI * 2 ; number of values + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax 
+ ALIGN_4 + +.L1_12: + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL4x1_1(xxx) + KERNEL4x1_2(xxx) + KERNEL4x1_3(xxx) + KERNEL4x1_4(xxx) + + KERNEL4x1_1(xxx) + KERNEL4x1_2(xxx) + KERNEL4x1_3(xxx) + KERNEL4x1_4(xxx) + + je .L1_16 + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL4x1_1(xxx) + KERNEL4x1_2(xxx) + KERNEL4x1_3(xxx) + KERNEL4x1_4(xxx) + + KERNEL4x1_1(xxx) + KERNEL4x1_2(xxx) + KERNEL4x1_3(xxx) + KERNEL4x1_4(xxx) + + je .L1_16 + + jmp .L1_12 + ALIGN_4 + +.L1_16: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_19 + + movq %rax, BI // Index for BO + leaq ( ,BI,2), BI // BI = BI * 4 ; number of values + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_17: + + KERNEL4x1_SUB(xxx) + jl .L1_17 + ALIGN_4 + + +.L1_19: + + vbroadcastss ALPHA_R, %xmm0 + vbroadcastss ALPHA_I, %xmm1 + + // swap high and low 64 bytes + vshufps $0xb1, %xmm9 , %xmm9, %xmm9 + vshufps $0xb1, %xmm13, %xmm13, %xmm13 + +#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \ + defined(NR) || defined(NC) || defined(TR) || defined(TC) + + vaddsubps %xmm9, %xmm8 , %xmm8 + vaddsubps %xmm13,%xmm12, %xmm12 + + vshufps $0xb1, %xmm8 , %xmm8, %xmm9 + vshufps $0xb1, %xmm12, %xmm12, %xmm13 + +#else + vaddsubps %xmm8, %xmm9 ,%xmm9 + vaddsubps %xmm12, %xmm13,%xmm13 + + vmovaps %xmm9, %xmm8 + vmovaps %xmm13, %xmm12 + + // swap high and low 64 bytes + vshufps $0xb1, %xmm9 , %xmm9, %xmm9 + vshufps $0xb1, %xmm13, %xmm13, %xmm13 + +#endif + + // multiply with ALPHA_R + vmulps %xmm8 , %xmm0, %xmm8 + vmulps %xmm12, %xmm0, %xmm12 + + // multiply with ALPHA_I + vmulps %xmm9 , %xmm1, %xmm9 + vmulps %xmm13, %xmm1, %xmm13 + + vaddsubps %xmm9, %xmm8 , %xmm8 + vaddsubps %xmm13,%xmm12, %xmm12 + + + +#ifndef TRMMKERNEL + + vaddps (CO1), %xmm8 , %xmm8 + vaddps 4 * SIZE(CO1), %xmm12, %xmm12 + +#endif + + vmovups %xmm8 , (CO1) + vmovups %xmm12 , 4 * SIZE(CO1) + + +#if 
(defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq ( ,BI,2), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $4, KK +#endif + + addq $8 * SIZE, CO1 # coffset += 8 + decq I # i -- + jg .L1_11 + ALIGN_4 + + + +/************************************************************************** +* Rest of M +***************************************************************************/ + +.L1_20: + testq $3, M + jz .L999 + + testq $2, M + jz .L1_40 + ALIGN_4 + +.L1_21: + +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (,BI,2), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $2, %rax // number of values in AO +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax // K = K - ( K % 8 ) + je .L1_26 + movq %rax, BI // Index for BO + leaq ( ,BI,2), BI // BI = BI * 2 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_22: + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL2x1_1(xxx) + KERNEL2x1_2(xxx) + KERNEL2x1_3(xxx) + 
KERNEL2x1_4(xxx) + + KERNEL2x1_1(xxx) + KERNEL2x1_2(xxx) + KERNEL2x1_3(xxx) + KERNEL2x1_4(xxx) + + je .L1_26 + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL2x1_1(xxx) + KERNEL2x1_2(xxx) + KERNEL2x1_3(xxx) + KERNEL2x1_4(xxx) + + KERNEL2x1_1(xxx) + KERNEL2x1_2(xxx) + KERNEL2x1_3(xxx) + KERNEL2x1_4(xxx) + + je .L1_26 + + jmp .L1_22 + ALIGN_4 + +.L1_26: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_29 + + movq %rax, BI // Index for BO + leaq ( ,BI,2), BI // BI = BI * 2; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_27: + + KERNEL2x1_SUB(xxx) + jl .L1_27 + ALIGN_4 + + +.L1_29: + + vbroadcastss ALPHA_R, %xmm0 + vbroadcastss ALPHA_I, %xmm1 + + // swap high and low 64 bytes + vshufps $0xb1, %xmm9 , %xmm9, %xmm9 + +#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \ + defined(NR) || defined(NC) || defined(TR) || defined(TC) + + vaddsubps %xmm9, %xmm8 , %xmm8 + + vshufps $0xb1, %xmm8 , %xmm8, %xmm9 + +#else + vaddsubps %xmm8, %xmm9 ,%xmm9 + + vmovaps %xmm9, %xmm8 + + // swap high and low 64 bytes + vshufps $0xb1, %xmm9 , %xmm9, %xmm9 + +#endif + + // multiply with ALPHA_R + vmulps %xmm8 , %xmm0, %xmm8 + + // multiply with ALPHA_I + vmulps %xmm9 , %xmm1, %xmm9 + + vaddsubps %xmm9, %xmm8 , %xmm8 + + + +#ifndef TRMMKERNEL + + vaddps (CO1), %xmm8 , %xmm8 + +#endif + + vmovups %xmm8 , (CO1) + + + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq ( ,BI,2), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $2, KK +#endif + + addq $4 * SIZE, CO1 # coffset += 4 + decq I # i -- + jg .L1_21 + ALIGN_4 + 
+ + +/**************************************************************************/ +.L1_40: + testq $1, M + jz .L999 // to next 2 lines of N + + ALIGN_4 + +.L1_41: + +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (,BI,2), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $1, %rax // number of values in AO +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax // K = K - ( K % 8 ) + je .L1_46 + movq %rax, BI // Index for BO + leaq ( ,BI,2), BI // BI = BI * 2 ; number of values + + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_42: + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL1x1_1(xxx) + KERNEL1x1_2(xxx) + KERNEL1x1_3(xxx) + KERNEL1x1_4(xxx) + + KERNEL1x1_1(xxx) + KERNEL1x1_2(xxx) + KERNEL1x1_3(xxx) + KERNEL1x1_4(xxx) + + je .L1_46 + + prefetcht0 B_PR1(BO,BI,SIZE) + KERNEL1x1_1(xxx) + KERNEL1x1_2(xxx) + KERNEL1x1_3(xxx) + KERNEL1x1_4(xxx) + + KERNEL1x1_1(xxx) + KERNEL1x1_2(xxx) + KERNEL1x1_3(xxx) + KERNEL1x1_4(xxx) + + je .L1_46 + + jmp .L1_42 + ALIGN_4 + +.L1_46: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_49 + + movq %rax, BI // Index for BO + leaq ( ,BI,2), BI // BI = BI * 2 ; number of values + + salq $1, %rax // rax = rax * 2 ; number of values + leaq 
(AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_47: + + KERNEL1x1_SUB(xxx) + jl .L1_47 + ALIGN_4 + + +.L1_49: + + vbroadcastss ALPHA_R, %xmm0 + vbroadcastss ALPHA_I, %xmm1 + + // swap high and low 64 bytes + vshufps $0xb1, %xmm9 , %xmm9, %xmm9 + +#if defined(NN) || defined(NT) || defined(TN) || defined(TT) || \ + defined(NR) || defined(NC) || defined(TR) || defined(TC) + + vaddsubps %xmm9, %xmm8 , %xmm8 + + vshufps $0xb1, %xmm8 , %xmm8, %xmm9 + +#else + vaddsubps %xmm8, %xmm9 ,%xmm9 + + vmovaps %xmm9, %xmm8 + + // swap high and low 64 bytes + vshufps $0xb1, %xmm9 , %xmm9, %xmm9 + +#endif + + // multiply with ALPHA_R + vmulps %xmm8 , %xmm0, %xmm8 + + // multiply with ALPHA_I + vmulps %xmm9 , %xmm1, %xmm9 + + vaddsubps %xmm9, %xmm8 , %xmm8 + + + +#ifndef TRMMKERNEL + + vmovsd (CO1), %xmm14 + vaddps %xmm14, %xmm8 , %xmm8 + +#endif + + vmovsd %xmm8 , (CO1) + + + + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq ( ,BI,2), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $1, KK +#endif + + addq $2 * SIZE, CO1 # coffset += 2 + decq I # i -- + jg .L1_41 + ALIGN_4 + + + + + + + +.L999: + movq SP, %rsp + movq (%rsp), %rbx + movq 8(%rsp), %rbp + movq 16(%rsp), %r12 + movq 24(%rsp), %r13 + movq 32(%rsp), %r14 + movq 40(%rsp), %r15 + +#ifdef WINDOWS_ABI + movq 48(%rsp), %rdi + movq 56(%rsp), %rsi + movups 64(%rsp), %xmm6 + movups 80(%rsp), %xmm7 + movups 96(%rsp), %xmm8 + movups 112(%rsp), %xmm9 + movups 128(%rsp), %xmm10 + movups 144(%rsp), %xmm11 + movups 160(%rsp), %xmm12 + movups 176(%rsp), %xmm13 + movups 192(%rsp), %xmm14 + movups 208(%rsp), %xmm15 +#endif + + addq $STACKSIZE, %rsp + ret + + EPILOGUE From 
0ded1fcc1c10147ff94169428d1167a833b0c9cd Mon Sep 17 00:00:00 2001 From: wernsaar Date: Thu, 13 Jun 2013 11:35:15 +0200 Subject: [PATCH 15/15] performance optimizations in sgemm_kernel_16x2_bulldozer.S --- kernel/x86_64/sgemm_kernel_16x2_bulldozer.S | 165 +++++++++++++++++--- 1 file changed, 145 insertions(+), 20 deletions(-) diff --git a/kernel/x86_64/sgemm_kernel_16x2_bulldozer.S b/kernel/x86_64/sgemm_kernel_16x2_bulldozer.S index 2e10fae71..f02a1dfa5 100644 --- a/kernel/x86_64/sgemm_kernel_16x2_bulldozer.S +++ b/kernel/x86_64/sgemm_kernel_16x2_bulldozer.S @@ -127,17 +127,18 @@ *******************************************************************************************/ #define KERNEL16x3_1(xx) \ - prefetcht0 A_PR1(AO,%rax,SIZE) ;\ vbroadcastss -6 * SIZE(BO, BI, SIZE), %xmm1 ;\ vmovups -32 * SIZE(AO, %rax, SIZE), %xmm0 ;\ vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ vbroadcastss -5 * SIZE(BO, BI, SIZE), %xmm2 ;\ vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm3 ;\ + nop ;\ vfmaddps %xmm6,%xmm3,%xmm0,%xmm6 ;\ vmovups -28 * SIZE(AO, %rax, SIZE), %xmm0 ;\ vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ vfmaddps %xmm8,%xmm2,%xmm0,%xmm8 ;\ + prefetcht0 A_PR1(AO,%rax,SIZE) ;\ vfmaddps %xmm9,%xmm3,%xmm0,%xmm9 ;\ vmovups -24 * SIZE(AO, %rax, SIZE), %xmm0 ;\ vfmaddps %xmm10,%xmm1,%xmm0,%xmm10 ;\ @@ -146,20 +147,21 @@ vmovups -20 * SIZE(AO, %rax, SIZE), %xmm0 ;\ vfmaddps %xmm13,%xmm1,%xmm0,%xmm13 ;\ vfmaddps %xmm14,%xmm2,%xmm0,%xmm14 ;\ + vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vbroadcastss -2 * SIZE(BO, BI, SIZE), %xmm2 ;\ vfmaddps %xmm15,%xmm3,%xmm0,%xmm15 ;\ #define KERNEL16x3_2(xx) \ - prefetcht0 A_PR1+64(AO,%rax,SIZE) ;\ - vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm1 ;\ vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0 ;\ vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ - vbroadcastss -2 * SIZE(BO, BI, SIZE), %xmm2 ;\ vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ vbroadcastss -1 * SIZE(BO, BI, SIZE), %xmm3 ;\ + nop ;\ vfmaddps %xmm6,%xmm3,%xmm0,%xmm6 ;\ vmovups -12 * SIZE(AO, %rax, 
SIZE), %xmm0 ;\ vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ vfmaddps %xmm8,%xmm2,%xmm0,%xmm8 ;\ + prefetcht0 A_PR1+64(AO,%rax,SIZE) ;\ vfmaddps %xmm9,%xmm3,%xmm0,%xmm9 ;\ vmovups -8 * SIZE(AO, %rax, SIZE), %xmm0 ;\ vfmaddps %xmm10,%xmm1,%xmm0,%xmm10 ;\ @@ -168,20 +170,21 @@ vmovups -4 * SIZE(AO, %rax, SIZE), %xmm0 ;\ vfmaddps %xmm13,%xmm1,%xmm0,%xmm13 ;\ vfmaddps %xmm14,%xmm2,%xmm0,%xmm14 ;\ + vbroadcastss 0 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vbroadcastss 1 * SIZE(BO, BI, SIZE), %xmm2 ;\ vfmaddps %xmm15,%xmm3,%xmm0,%xmm15 ;\ #define KERNEL16x3_3(xx) \ - prefetcht0 A_PR1+128(AO,%rax,SIZE) ;\ - vbroadcastss 0 * SIZE(BO, BI, SIZE), %xmm1 ;\ vmovups 0 * SIZE(AO, %rax, SIZE), %xmm0 ;\ vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ - vbroadcastss 1 * SIZE(BO, BI, SIZE), %xmm2 ;\ vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ vbroadcastss 2 * SIZE(BO, BI, SIZE), %xmm3 ;\ + nop ;\ vfmaddps %xmm6,%xmm3,%xmm0,%xmm6 ;\ vmovups 4 * SIZE(AO, %rax, SIZE), %xmm0 ;\ vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ vfmaddps %xmm8,%xmm2,%xmm0,%xmm8 ;\ + prefetcht0 A_PR1+128(AO,%rax,SIZE) ;\ vfmaddps %xmm9,%xmm3,%xmm0,%xmm9 ;\ vmovups 8 * SIZE(AO, %rax, SIZE), %xmm0 ;\ vfmaddps %xmm10,%xmm1,%xmm0,%xmm10 ;\ @@ -190,31 +193,32 @@ vmovups 12 * SIZE(AO, %rax, SIZE), %xmm0 ;\ vfmaddps %xmm13,%xmm1,%xmm0,%xmm13 ;\ vfmaddps %xmm14,%xmm2,%xmm0,%xmm14 ;\ + vbroadcastss 3 * SIZE(BO, BI, SIZE), %xmm1 ;\ + vbroadcastss 4 * SIZE(BO, BI, SIZE), %xmm2 ;\ vfmaddps %xmm15,%xmm3,%xmm0,%xmm15 ;\ #define KERNEL16x3_4(xx) \ - prefetcht0 A_PR1+192(AO,%rax,SIZE) ;\ - vbroadcastss 3 * SIZE(BO, BI, SIZE), %xmm1 ;\ vmovups 16 * SIZE(AO, %rax, SIZE), %xmm0 ;\ vfmaddps %xmm4,%xmm1,%xmm0,%xmm4 ;\ - vbroadcastss 4 * SIZE(BO, BI, SIZE), %xmm2 ;\ vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ vbroadcastss 5 * SIZE(BO, BI, SIZE), %xmm3 ;\ + nop ;\ vfmaddps %xmm6,%xmm3,%xmm0,%xmm6 ;\ vmovups 20 * SIZE(AO, %rax, SIZE), %xmm0 ;\ vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ vfmaddps %xmm8,%xmm2,%xmm0,%xmm8 ;\ + prefetcht0 A_PR1+192(AO,%rax,SIZE) ;\ vfmaddps %xmm9,%xmm3,%xmm0,%xmm9 ;\ 
vmovups 24 * SIZE(AO, %rax, SIZE), %xmm0 ;\ vfmaddps %xmm10,%xmm1,%xmm0,%xmm10 ;\ vfmaddps %xmm11,%xmm2,%xmm0,%xmm11 ;\ + addq $12, BI ;\ vfmaddps %xmm12,%xmm3,%xmm0,%xmm12 ;\ vmovups 28 * SIZE(AO, %rax, SIZE), %xmm0 ;\ vfmaddps %xmm13,%xmm1,%xmm0,%xmm13 ;\ vfmaddps %xmm14,%xmm2,%xmm0,%xmm14 ;\ - vfmaddps %xmm15,%xmm3,%xmm0,%xmm15 ;\ - addq $12, BI ;\ addq $64, %rax ;\ + vfmaddps %xmm15,%xmm3,%xmm0,%xmm15 ;\ #define KERNEL16x3_SUB(xx) \ vbroadcastss -6 * SIZE(BO, BI, SIZE), %xmm1 ;\ @@ -223,6 +227,7 @@ vbroadcastss -5 * SIZE(BO, BI, SIZE), %xmm2 ;\ vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm3 ;\ + nop ;\ vfmaddps %xmm6,%xmm3,%xmm0,%xmm6 ;\ vmovups -28 * SIZE(AO, %rax, SIZE), %xmm0 ;\ vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ @@ -248,6 +253,7 @@ vbroadcastss -5 * SIZE(BO, BI, SIZE), %xmm2 ;\ vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm3 ;\ + nop ;\ vfmaddps %xmm6,%xmm3,%xmm0,%xmm6 ;\ vmovups -28 * SIZE(AO, %rax, SIZE), %xmm0 ;\ vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ @@ -261,6 +267,7 @@ vbroadcastss -2 * SIZE(BO, BI, SIZE), %xmm2 ;\ vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ vbroadcastss -1 * SIZE(BO, BI, SIZE), %xmm3 ;\ + nop ;\ vfmaddps %xmm6,%xmm3,%xmm0,%xmm6 ;\ vmovups -20 * SIZE(AO, %rax, SIZE), %xmm0 ;\ vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ @@ -275,6 +282,7 @@ vbroadcastss 1 * SIZE(BO, BI, SIZE), %xmm2 ;\ vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ vbroadcastss 2 * SIZE(BO, BI, SIZE), %xmm3 ;\ + nop ;\ vfmaddps %xmm6,%xmm3,%xmm0,%xmm6 ;\ vmovups -12 * SIZE(AO, %rax, SIZE), %xmm0 ;\ vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ @@ -288,6 +296,7 @@ vbroadcastss 4 * SIZE(BO, BI, SIZE), %xmm2 ;\ vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ vbroadcastss 5 * SIZE(BO, BI, SIZE), %xmm3 ;\ + nop ;\ vfmaddps %xmm6,%xmm3,%xmm0,%xmm6 ;\ vmovups -4 * SIZE(AO, %rax, SIZE), %xmm0 ;\ vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ @@ -303,6 +312,7 @@ vbroadcastss -5 * SIZE(BO, BI, SIZE), %xmm2 ;\ vfmaddps %xmm5,%xmm2,%xmm0,%xmm5 ;\ vbroadcastss -4 * SIZE(BO, BI, 
SIZE), %xmm3 ;\ + nop ;\ vfmaddps %xmm6,%xmm3,%xmm0,%xmm6 ;\ vmovups -28 * SIZE(AO, %rax, SIZE), %xmm0 ;\ vfmaddps %xmm7,%xmm1,%xmm0,%xmm7 ;\ @@ -1072,15 +1082,74 @@ leaq (B,%rax, SIZE), BO2 // next offset to BO2 leaq BUFFER1, BO // first buffer to BO movq K, %rax + sarq $3 , %rax // K / 8 + jz .L6_01a_2 ALIGN_4 +.L6_01a_1: + + prefetcht0 512(BO1) + prefetcht0 512(BO2) + prefetchw 512(BO) + + vmovsd 0 * SIZE(BO1), %xmm0 + vmovsd 2 * SIZE(BO1), %xmm2 + vmovsd 4 * SIZE(BO1), %xmm4 + vmovsd 6 * SIZE(BO1), %xmm6 + vmovss 0 * SIZE(BO2), %xmm1 + vmovss 2 * SIZE(BO2), %xmm3 + vmovss 4 * SIZE(BO2), %xmm5 + vmovss 6 * SIZE(BO2), %xmm7 + vmovsd %xmm0, 0*SIZE(BO) + vmovss %xmm1, 2*SIZE(BO) + vmovsd %xmm2, 3*SIZE(BO) + vmovss %xmm3, 5*SIZE(BO) + vmovsd %xmm4, 6*SIZE(BO) + vmovss %xmm5, 8*SIZE(BO) + vmovsd %xmm6, 9*SIZE(BO) + vmovss %xmm7,11*SIZE(BO) + addq $8*SIZE,BO1 + addq $8*SIZE,BO2 + addq $12*SIZE,BO + + vmovsd 0 * SIZE(BO1), %xmm0 + vmovsd 2 * SIZE(BO1), %xmm2 + vmovsd 4 * SIZE(BO1), %xmm4 + vmovsd 6 * SIZE(BO1), %xmm6 + vmovss 0 * SIZE(BO2), %xmm1 + vmovss 2 * SIZE(BO2), %xmm3 + vmovss 4 * SIZE(BO2), %xmm5 + vmovss 6 * SIZE(BO2), %xmm7 + vmovsd %xmm0, 0*SIZE(BO) + vmovss %xmm1, 2*SIZE(BO) + vmovsd %xmm2, 3*SIZE(BO) + vmovss %xmm3, 5*SIZE(BO) + vmovsd %xmm4, 6*SIZE(BO) + vmovss %xmm5, 8*SIZE(BO) + vmovsd %xmm6, 9*SIZE(BO) + vmovss %xmm7,11*SIZE(BO) + addq $8*SIZE,BO1 + addq $8*SIZE,BO2 + addq $12*SIZE,BO + + decq %rax + jnz .L6_01a_1 + + + +.L6_01a_2: + + movq K, %rax + andq $7, %rax // K % 8 + jz .L6_02c + ALIGN_4 + + .L6_02b: - vmovss 0 * SIZE(BO1), %xmm0 - vmovss 1 * SIZE(BO1), %xmm1 + vmovsd 0 * SIZE(BO1), %xmm0 vmovss 0 * SIZE(BO2), %xmm2 - vmovss %xmm0, 0*SIZE(BO) - vmovss %xmm1, 1*SIZE(BO) + vmovsd %xmm0, 0*SIZE(BO) vmovss %xmm2, 2*SIZE(BO) addq $2*SIZE,BO1 addq $2*SIZE,BO2 @@ -1096,17 +1165,73 @@ leaq (BO1,%rax, SIZE), BO2 // next offset to BO2 leaq BUFFER2, BO // second buffer to BO movq K, %rax + sarq $3 , %rax // K / 8 + jz .L6_02c_2 ALIGN_4 +.L6_02c_1: + + 
prefetcht0 512(BO2) + prefetchw 512(BO) + + vmovsd 0 * SIZE(BO2), %xmm0 + vmovsd 2 * SIZE(BO2), %xmm2 + vmovsd 4 * SIZE(BO2), %xmm4 + vmovsd 6 * SIZE(BO2), %xmm6 + vmovss 1 * SIZE(BO1), %xmm1 + vmovss 3 * SIZE(BO1), %xmm3 + vmovss 5 * SIZE(BO1), %xmm5 + vmovss 7 * SIZE(BO1), %xmm7 + vmovss %xmm1, 0*SIZE(BO) + vmovsd %xmm0, 1*SIZE(BO) + vmovss %xmm3, 3*SIZE(BO) + vmovsd %xmm2, 4*SIZE(BO) + vmovss %xmm5, 6*SIZE(BO) + vmovsd %xmm4, 7*SIZE(BO) + vmovss %xmm7, 9*SIZE(BO) + vmovsd %xmm6,10*SIZE(BO) + addq $8*SIZE,BO1 + addq $8*SIZE,BO2 + addq $12*SIZE,BO + + + vmovsd 0 * SIZE(BO2), %xmm0 + vmovsd 2 * SIZE(BO2), %xmm2 + vmovsd 4 * SIZE(BO2), %xmm4 + vmovsd 6 * SIZE(BO2), %xmm6 + vmovss 1 * SIZE(BO1), %xmm1 + vmovss 3 * SIZE(BO1), %xmm3 + vmovss 5 * SIZE(BO1), %xmm5 + vmovss 7 * SIZE(BO1), %xmm7 + vmovss %xmm1, 0*SIZE(BO) + vmovsd %xmm0, 1*SIZE(BO) + vmovss %xmm3, 3*SIZE(BO) + vmovsd %xmm2, 4*SIZE(BO) + vmovss %xmm5, 6*SIZE(BO) + vmovsd %xmm4, 7*SIZE(BO) + vmovss %xmm7, 9*SIZE(BO) + vmovsd %xmm6,10*SIZE(BO) + addq $8*SIZE,BO1 + addq $8*SIZE,BO2 + addq $12*SIZE,BO + + decq %rax + jnz .L6_02c_1 + + +.L6_02c_2: + + movq K, %rax + andq $7, %rax // K % 8 + jz .L6_03c + ALIGN_4 .L6_03b: vmovss 1*SIZE(BO1), %xmm0 - vmovss 0*SIZE(BO2), %xmm1 - vmovss 1*SIZE(BO2), %xmm2 + vmovsd 0*SIZE(BO2), %xmm1 vmovss %xmm0, 0*SIZE(BO) - vmovss %xmm1, 1*SIZE(BO) - vmovss %xmm2, 2*SIZE(BO) + vmovsd %xmm1, 1*SIZE(BO) addq $2*SIZE,BO1 addq $2*SIZE,BO2 addq $3*SIZE,BO