Merge pull request #195 from wernsaar/develop

Develop dgemm for bulldozer

Zhang Xianyi 2013-03-05 05:35:42 -08:00
commit 5900b1462e
1 changed file with 125 additions and 106 deletions

@@ -88,151 +88,142 @@
#define movupd movups #define movupd movups
#define KERNEL1(xx) \ #define KERNEL1(xx) \
vfmaddpd %xmm8,%xmm1,%xmm0,%xmm8 ;\ vmovups -16 * SIZE(AO, %rax, 4),%xmm0 ;\
vfmaddpd %xmm8,%xmm0,%xmm1,%xmm8 ;\
vmovaps %xmm2,%xmm0 ;\ vmovaps %xmm2,%xmm0 ;\
vmovddup -16 * SIZE(BO, %rax, 4), %xmm1 ;\
vmovddup -15 * SIZE(BO, %rax, 4), %xmm3 ;\
vfmaddpd %xmm9,%xmm0,%xmm3,%xmm9 ;\
vmovups -14 * SIZE(AO, %rax, 4),%xmm2 ;\ vmovups -14 * SIZE(AO, %rax, 4),%xmm2 ;\
vfmaddpd %xmm9,%xmm3,%xmm0,%xmm9 ;\
vfmaddpd %xmm12,%xmm2,%xmm1,%xmm12 ;\ vfmaddpd %xmm12,%xmm2,%xmm1,%xmm12 ;\
vmovddup -14 * SIZE(BO, %rax, 4), %xmm1 ;\
vfmaddpd %xmm13,%xmm2,%xmm3,%xmm13 ;\ vfmaddpd %xmm13,%xmm2,%xmm3,%xmm13 ;\
vmovddup -14 * SIZE(BO, %rax, 4), %xmm1 ;\
vmovddup -13 * SIZE(BO, %rax, 4), %xmm3 ;\ vmovddup -13 * SIZE(BO, %rax, 4), %xmm3 ;\
vfmaddpd %xmm10,%xmm1,%xmm0,%xmm10 ;\ vfmaddpd %xmm10,%xmm0,%xmm1,%xmm10 ;\
vfmaddpd %xmm11,%xmm3,%xmm0,%xmm11 ;\ vfmaddpd %xmm11,%xmm0,%xmm3,%xmm11 ;\
vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\
vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\ vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\
vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\
vmovups -12 * SIZE(AO, %rax, 4), %xmm0 ;\ vmovups -12 * SIZE(AO, %rax, 4), %xmm0 ;\
vmovups -10 * SIZE(AO, %rax, 4),%xmm2 ;\
vmovddup -12 * SIZE(BO, %rax, 4), %xmm1 ;\ vmovddup -12 * SIZE(BO, %rax, 4), %xmm1 ;\
vmovddup -11 * SIZE(BO, %rax, 4), %xmm3 ;\ vmovddup -11 * SIZE(BO, %rax, 4), %xmm3 ;\
vmovaps %xmm0, %xmm2
#define KERNEL2(xx) \ #define KERNEL2(xx) \
vfmaddpd %xmm8,%xmm1, %xmm0,%xmm8 ;\ vmovups -8 * SIZE(AO, %rax, 4),%xmm4 ;\
vmovaps %xmm2, %xmm0 ;\ vfmaddpd %xmm8, %xmm0, %xmm1,%xmm8 ;\
vmovups -10 * SIZE(AO, %rax, 4),%xmm2 ;\ vfmaddpd %xmm9, %xmm0, %xmm3,%xmm9 ;\
/*A*/ vmovups (AO, %rax, 4), %xmm6 ;\
vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\
vfmaddpd %xmm9,%xmm3, %xmm0,%xmm9 ;\
vmovddup -10 * SIZE(BO, %rax, 4), %xmm1 ;\
vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\
vmovddup -10 * SIZE(BO, %rax, 4), %xmm1 ;\
vmovddup -9 * SIZE(BO, %rax, 4), %xmm3 ;\ vmovddup -9 * SIZE(BO, %rax, 4), %xmm3 ;\
vfmaddpd %xmm10,%xmm1, %xmm0,%xmm10 ;\ vfmaddpd %xmm10,%xmm0, %xmm1,%xmm10 ;\
vfmaddpd %xmm11,%xmm0, %xmm3,%xmm11 ;\
vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\ vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\
/**/ vmovddup (BO, %rax, 4), %xmm1 ;\
vfmaddpd %xmm11,%xmm3, %xmm0,%xmm11 ;\
vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\ vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\
vmovddup -8 * SIZE(BO, %rax, 4), %xmm5 ;\
vmovddup -7 * SIZE(BO, %rax, 4), %xmm3 ;\ vmovddup -7 * SIZE(BO, %rax, 4), %xmm3 ;\
vmovaps %xmm4, %xmm2
#define KERNEL3(xx) \ #define KERNEL3(xx) \
vfmaddpd %xmm8,%xmm5, %xmm4, %xmm8 ;\
vmovaps %xmm2, %xmm4 ;\
vmovups -6 * SIZE(AO, %rax, 4),%xmm2 ;\ vmovups -6 * SIZE(AO, %rax, 4),%xmm2 ;\
vfmaddpd %xmm8, %xmm4, %xmm5, %xmm8 ;\
vfmaddpd %xmm9, %xmm4, %xmm3,%xmm9 ;\
vfmaddpd %xmm12,%xmm2, %xmm5,%xmm12 ;\ vfmaddpd %xmm12,%xmm2, %xmm5,%xmm12 ;\
vfmaddpd %xmm9,%xmm3, %xmm4,%xmm9 ;\
vmovddup -6 * SIZE(BO, %rax, 4), %xmm5 ;\
vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\
vmovddup -6 * SIZE(BO, %rax, 4), %xmm5 ;\
vmovddup -5 * SIZE(BO, %rax, 4), %xmm3 ;\ vmovddup -5 * SIZE(BO, %rax, 4), %xmm3 ;\
vfmaddpd %xmm10,%xmm5, %xmm4,%xmm10 ;\ vfmaddpd %xmm10,%xmm4, %xmm5,%xmm10 ;\
vfmaddpd %xmm11,%xmm4, %xmm3, %xmm11 ;\
vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\
vfmaddpd %xmm11,%xmm3, %xmm4, %xmm11 ;\
vmovups -4 * SIZE(AO, %rax, 4), %xmm4 ;\
vmovddup -4 * SIZE(BO, %rax, 4), %xmm5 ;\
vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\ vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\
vmovups -4 * SIZE(AO, %rax, 4), %xmm4 ;\
vmovups -2 * SIZE(AO, %rax, 4),%xmm2 ;\
vmovddup -4 * SIZE(BO, %rax, 4), %xmm5 ;\
vmovddup -3 * SIZE(BO, %rax, 4), %xmm3 ;\ vmovddup -3 * SIZE(BO, %rax, 4), %xmm3 ;\
vmovaps %xmm4, %xmm2
#define KERNEL4(xx) \ #define KERNEL4(xx) \
vfmaddpd %xmm8,%xmm5, %xmm4,%xmm8 ;\ vfmaddpd %xmm8,%xmm4, %xmm5,%xmm8 ;\
vmovaps %xmm2, %xmm4 ;\ vfmaddpd %xmm9,%xmm4, %xmm3,%xmm9 ;\
vmovups -2 * SIZE(AO, %rax, 4),%xmm2 ;\
vfmaddpd %xmm12,%xmm2, %xmm5 ,%xmm12;\ vfmaddpd %xmm12,%xmm2, %xmm5 ,%xmm12;\
/*A*/ vmovups 8 * SIZE(AO, %rax, 4), %xmm7 ;\
vmovddup -2 * SIZE(BO, %rax, 4), %xmm5 ;\
vfmaddpd %xmm9,%xmm3, %xmm4,%xmm9 ;\
vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\
vmovddup -2 * SIZE(BO, %rax, 4), %xmm5 ;\
vmovddup -1 * SIZE(BO, %rax, 4), %xmm3 ;\ vmovddup -1 * SIZE(BO, %rax, 4), %xmm3 ;\
vfmaddpd %xmm10,%xmm5, %xmm4,%xmm10 ;\ vfmaddpd %xmm10,%xmm4, %xmm5,%xmm10 ;\
vfmaddpd %xmm11,%xmm4, %xmm3,%xmm11 ;\
vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\
/**/ vmovddup 8 * SIZE(BO, %rax, 4), %xmm5 ;\
vfmaddpd %xmm11,%xmm3, %xmm4,%xmm11 ;\
vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\ vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\
/*A*/ vmovups (AO, %rax, 4), %xmm6 ;\
vmovups 2 * SIZE(AO, %rax, 4),%xmm2 ;\
vmovddup 1 * SIZE(BO, %rax, 4), %xmm3 ;\ vmovddup 1 * SIZE(BO, %rax, 4), %xmm3 ;\
vmovaps %xmm6, %xmm2 /**/ vmovddup (BO, %rax, 4), %xmm1 ;\
#define KERNEL5(xx) \ #define KERNEL5(xx) \
vfmaddpd %xmm8,%xmm1, %xmm6,%xmm8 ;\ vfmaddpd %xmm8,%xmm6, %xmm1,%xmm8 ;\
vmovaps %xmm2, %xmm6 ;\ vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\
vmovups 2 * SIZE(AO, %rax, 4),%xmm2 ;\
vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\
vmovddup 2 * SIZE(BO, %rax, 4), %xmm1 ;\
vfmaddpd %xmm9,%xmm3, %xmm6,%xmm9 ;\
vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\
vmovddup 2 * SIZE(BO, %rax, 4), %xmm1 ;\
vmovddup 3 * SIZE(BO, %rax, 4), %xmm3 ;\ vmovddup 3 * SIZE(BO, %rax, 4), %xmm3 ;\
vfmaddpd %xmm10,%xmm1, %xmm6,%xmm10 ;\ vfmaddpd %xmm10,%xmm6, %xmm1,%xmm10 ;\
vfmaddpd %xmm11,%xmm6, %xmm3,%xmm11 ;\
vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\ vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\
vfmaddpd %xmm11,%xmm3, %xmm6,%xmm11 ;\
vmovups 4 * SIZE(AO, %rax, 4), %xmm6 ;\
vmovddup 4 * SIZE(BO, %rax, 4), %xmm1 ;\
vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\ vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\
vmovups 4 * SIZE(AO, %rax, 4), %xmm6 ;\
vmovups 6 * SIZE(AO, %rax, 4),%xmm2 ;\
vmovddup 4 * SIZE(BO, %rax, 4), %xmm1 ;\
vmovddup 5 * SIZE(BO, %rax, 4), %xmm3 ;\ vmovddup 5 * SIZE(BO, %rax, 4), %xmm3 ;\
vmovaps %xmm6, %xmm2
#define KERNEL6(xx) \ #define KERNEL6(xx) \
vfmaddpd %xmm8,%xmm1, %xmm6,%xmm8 ;\ vfmaddpd %xmm8,%xmm6, %xmm1,%xmm8 ;\
vmovaps %xmm2, %xmm6 ;\ vfmaddpd %xmm9,%xmm6, %xmm3,%xmm9 ;\
vmovups 6 * SIZE(AO, %rax, 4),%xmm2 ;\
vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\
/*A*/ vmovups 16 * SIZE(AO, %rax, 4), %xmm0 ;\
vmovddup 6 * SIZE(BO, %rax, 4), %xmm1 ;\
vfmaddpd %xmm9,%xmm3, %xmm6,%xmm9 ;\
vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\
vmovddup 6 * SIZE(BO, %rax, 4), %xmm1 ;\
vmovddup 7 * SIZE(BO, %rax, 4), %xmm3 ;\ vmovddup 7 * SIZE(BO, %rax, 4), %xmm3 ;\
vfmaddpd %xmm10,%xmm1, %xmm6,%xmm10 ;\ vfmaddpd %xmm10,%xmm6, %xmm1,%xmm10 ;\
vfmaddpd %xmm11,%xmm6, %xmm3,%xmm11 ;\
vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\ vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\
/**/ vmovddup 16 * SIZE(BO, %rax, 4), %xmm1 ;\
vfmaddpd %xmm11,%xmm3, %xmm6,%xmm11 ;\
vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\ vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\
/*A*/ vmovups 8 * SIZE(AO, %rax, 4), %xmm7 ;\
vmovups 10 * SIZE(AO, %rax, 4),%xmm2 ;\
/**/ vmovddup 8 * SIZE(BO, %rax, 4), %xmm5 ;\
vmovddup 9 * SIZE(BO, %rax, 4), %xmm3 ;\ vmovddup 9 * SIZE(BO, %rax, 4), %xmm3 ;\
vmovaps %xmm7, %xmm2
#define KERNEL7(xx) \ #define KERNEL7(xx) \
vfmaddpd %xmm8,%xmm5, %xmm7,%xmm8 ;\ vfmaddpd %xmm8,%xmm7, %xmm5,%xmm8 ;\
vmovaps %xmm2, %xmm7 ;\ vfmaddpd %xmm9,%xmm7, %xmm3,%xmm9 ;\
vmovups 10 * SIZE(AO, %rax, 4),%xmm2 ;\
vfmaddpd %xmm12,%xmm2, %xmm5,%xmm12 ;\ vfmaddpd %xmm12,%xmm2, %xmm5,%xmm12 ;\
vmovddup 10 * SIZE(BO, %rax, 4), %xmm5 ;\
vfmaddpd %xmm9,%xmm3, %xmm7,%xmm9 ;\
vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\
vmovddup 10 * SIZE(BO, %rax, 4), %xmm5 ;\
vmovddup 11 * SIZE(BO, %rax, 4), %xmm3 ;\ vmovddup 11 * SIZE(BO, %rax, 4), %xmm3 ;\
vfmaddpd %xmm10,%xmm5, %xmm7,%xmm10 ;\ vfmaddpd %xmm10,%xmm7, %xmm5,%xmm10 ;\
vfmaddpd %xmm11,%xmm7, %xmm3,%xmm11 ;\
vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\
vfmaddpd %xmm11,%xmm3, %xmm7,%xmm11 ;\
vmovups 12 * SIZE(AO, %rax, 4), %xmm7 ;\
vmovddup 12 * SIZE(BO, %rax, 4), %xmm5 ;\
vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\ vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\
vmovups 12 * SIZE(AO, %rax, 4), %xmm7 ;\
vmovups 14 * SIZE(AO, %rax, 4),%xmm2 ;\
vmovddup 12 * SIZE(BO, %rax, 4), %xmm5 ;\
vmovddup 13 * SIZE(BO, %rax, 4), %xmm3 ;\ vmovddup 13 * SIZE(BO, %rax, 4), %xmm3 ;\
vmovaps %xmm7, %xmm2
#define KERNEL8(xx) \ #define KERNEL8(xx) \
vfmaddpd %xmm8,%xmm5, %xmm7,%xmm8 ;\ vfmaddpd %xmm8,%xmm7, %xmm5,%xmm8 ;\
vmovaps %xmm2, %xmm7 ;\ vfmaddpd %xmm9,%xmm7, %xmm3,%xmm9 ;\
vmovups 14 * SIZE(AO, %rax, 4),%xmm2 ;\
/*A*/ vmovups 24 * SIZE(AO, %rax, 4), %xmm4 ;\
vfmaddpd %xmm12,%xmm2, %xmm5,%xmm12 ;\ vfmaddpd %xmm12,%xmm2, %xmm5,%xmm12 ;\
vfmaddpd %xmm9,%xmm3, %xmm7,%xmm9 ;\
vmovddup 14 * SIZE(BO, %rax, 4), %xmm5 ;\
vfmaddpd %xmm13, %xmm2, %xmm3,%xmm13 ;\ vfmaddpd %xmm13, %xmm2, %xmm3,%xmm13 ;\
vmovddup 14 * SIZE(BO, %rax, 4), %xmm5 ;\
vmovddup 15 * SIZE(BO, %rax, 4), %xmm3 ;\ vmovddup 15 * SIZE(BO, %rax, 4), %xmm3 ;\
vfmaddpd %xmm10,%xmm5, %xmm7,%xmm10 ;\ vfmaddpd %xmm10,%xmm7, %xmm5,%xmm10 ;\
vfmaddpd %xmm11,%xmm7, %xmm3,%xmm11 ;\
vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\
/**/ vmovddup 24 * SIZE(BO, %rax, 4), %xmm5 ;\
vfmaddpd %xmm11,%xmm3, %xmm7,%xmm11 ;\
vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\ vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\
/*A*/ vmovups 16 * SIZE(AO, %rax, 4), %xmm0 ;\
vmovddup 16 * SIZE(BO, %rax, 4), %xmm1 ;\
vmovddup 17 * SIZE(BO, %rax, 4), %xmm3 ;\ vmovddup 17 * SIZE(BO, %rax, 4), %xmm3 ;\
vmovaps %xmm0, %xmm2 ;\ vmovaps %xmm0, %xmm2 ;\
addq $8 * SIZE, %rax ;\ addq $8 * SIZE, %rax ;\
#define KERNEL_SUB1(xx) \ #define KERNEL_SUB1(xx) \
vmovddup -15 * SIZE(BO), %xmm3 ;\
vmovups -16 * SIZE(AO),%xmm0 ;\
vfmaddpd %xmm8, %xmm1, %xmm0,%xmm8 ;\ vfmaddpd %xmm8, %xmm1, %xmm0,%xmm8 ;\
vmovapd %xmm2, %xmm0 ;\ vmovapd %xmm2, %xmm0 ;\
vmovups -14 * SIZE(AO),%xmm2 ;\ vmovups -14 * SIZE(AO),%xmm2 ;\
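The hunk above rewrites the KERNEL1 to KERNEL8 macros of the Bulldozer 4x4 DGEMM micro-kernel: the A and B loads are hoisted ahead of the dependent FMA4 instructions, the vfmaddpd operands are reordered so the A value precedes the broadcast B value, and the trailing vmovaps register shuffles are dropped. For orientation, %xmm8 through %xmm15 hold a 4x4 block of C (two doubles per register), %xmm0/%xmm2 (and %xmm4, %xmm6, %xmm7 in later steps) each carry two doubles of A, and %xmm1/%xmm3/%xmm5 carry B values broadcast with vmovddup. A minimal C sketch of the rank-1 update that each KERNEL step performs is given below; the function and variable names are illustrative, not part of the kernel.

    /* Illustrative C sketch (not the kernel itself): one rank-1 update of the
       4x4 block of C that the KERNEL macros keep in %xmm8-%xmm15.  a holds
       four doubles of A and b four doubles of B for the current k. */
    static void rank1_update_4x4(double c[4][4], const double *a, const double *b)
    {
        for (int j = 0; j < 4; j++)        /* b[j] is what vmovddup broadcasts */
            for (int i = 0; i < 4; i++)    /* a[i] comes from the vmovups loads */
                c[i][j] += a[i] * b[j];    /* one lane of a vfmaddpd */
    }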
@@ -255,17 +246,17 @@
vfmaddpd %xmm8,%xmm1, %xmm0,%xmm8 ;\ vfmaddpd %xmm8,%xmm1, %xmm0,%xmm8 ;\
vmovaps %xmm2, %xmm0 ;\ vmovaps %xmm2, %xmm0 ;\
vmovups -10 * SIZE(AO),%xmm2 ;\ vmovups -10 * SIZE(AO),%xmm2 ;\
vmovups -8 * SIZE(AO),%xmm4 ;\
vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\ vfmaddpd %xmm12,%xmm2, %xmm1,%xmm12 ;\
vfmaddpd %xmm9,%xmm3, %xmm0,%xmm9 ;\ vfmaddpd %xmm9,%xmm3, %xmm0,%xmm9 ;\
vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\
vmovddup -10 * SIZE(BO), %xmm1 ;\ vmovddup -10 * SIZE(BO), %xmm1 ;\
vmovddup -9 * SIZE(BO), %xmm3 ;\ vmovddup -9 * SIZE(BO), %xmm3 ;\
vmovddup -8 * SIZE(BO), %xmm5 ;\
vfmaddpd %xmm10,%xmm1, %xmm0,%xmm10 ;\ vfmaddpd %xmm10,%xmm1, %xmm0,%xmm10 ;\
vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\ vfmaddpd %xmm14,%xmm2, %xmm1,%xmm14 ;\
vfmaddpd %xmm11,%xmm3, %xmm0,%xmm11 ;\ vfmaddpd %xmm11,%xmm3, %xmm0,%xmm11 ;\
vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\ vfmaddpd %xmm15,%xmm2, %xmm3,%xmm15 ;\
vmovups (AO), %xmm0 ;\
vmovddup (BO), %xmm1 ;\
vmovddup -7 * SIZE(BO), %xmm3 ;\ vmovddup -7 * SIZE(BO), %xmm3 ;\
vmovaps %xmm4, %xmm2 vmovaps %xmm4, %xmm2
@@ -291,11 +282,13 @@
vfmaddpd %xmm8,%xmm5, %xmm4,%xmm8 ;\ vfmaddpd %xmm8,%xmm5, %xmm4,%xmm8 ;\
vmovaps %xmm2, %xmm4 ;\ vmovaps %xmm2, %xmm4 ;\
vmovups -2 * SIZE(AO),%xmm2 ;\ vmovups -2 * SIZE(AO),%xmm2 ;\
vmovups (AO), %xmm0 ;\
vfmaddpd %xmm12,%xmm2, %xmm5,%xmm12 ;\ vfmaddpd %xmm12,%xmm2, %xmm5,%xmm12 ;\
vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\ vfmaddpd %xmm13,%xmm2, %xmm3,%xmm13 ;\
vmovddup -2 * SIZE(BO), %xmm5 ;\ vmovddup -2 * SIZE(BO), %xmm5 ;\
vfmaddpd %xmm9,%xmm3, %xmm4,%xmm9 ;\ vfmaddpd %xmm9,%xmm3, %xmm4,%xmm9 ;\
vmovddup -1 * SIZE(BO), %xmm3 ;\ vmovddup -1 * SIZE(BO), %xmm3 ;\
vmovddup (BO), %xmm1 ;\
vfmaddpd %xmm10,%xmm5, %xmm4,%xmm10 ;\ vfmaddpd %xmm10,%xmm5, %xmm4,%xmm10 ;\
vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\ vfmaddpd %xmm14,%xmm2, %xmm5,%xmm14 ;\
vfmaddpd %xmm11,%xmm3, %xmm4,%xmm11 ;\ vfmaddpd %xmm11,%xmm3, %xmm4,%xmm11 ;\
@@ -407,16 +400,26 @@
leaq (B, %rax, 4), BO leaq (B, %rax, 4), BO
#endif #endif
vzeroall vxorpd %xmm8, %xmm8,%xmm8
prefetcht0 256(CO1) vxorpd %xmm9, %xmm9,%xmm9
prefetcht0 320(CO1) vxorpd %xmm10, %xmm10,%xmm10
prefetcht0 256(CO2) vxorpd %xmm11, %xmm11,%xmm11
prefetcht0 320(CO2) vxorpd %xmm12, %xmm12,%xmm12
vxorpd %xmm13, %xmm13,%xmm13
vxorpd %xmm14, %xmm14,%xmm14
vxorpd %xmm15, %xmm15,%xmm15
prefetcht0 (CO1)
prefetcht0 8*SIZE(CO1)
prefetcht0 (CO1,LDC)
prefetcht0 8*SIZE(CO1,LDC)
prefetcht0 (CO2)
prefetcht0 8*SIZE(CO2)
prefetcht0 (CO2,LDC)
prefetcht0 8*SIZE(CO2,LDC)
vmovups -16 * SIZE(AO), %xmm0 vmovups -16 * SIZE(AO), %xmm0
vmovddup -16 * SIZE(BO), %xmm1 vmovddup -16 * SIZE(BO), %xmm1
vmovddup -15 * SIZE(BO), %xmm3 vmovddup -15 * SIZE(BO), %xmm3
vmovups -8 * SIZE(AO), %xmm4
vmovddup -8 * SIZE(BO), %xmm5
vmovaps %xmm0, %xmm2 vmovaps %xmm0, %xmm2
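In this setup hunk the new version clears the eight accumulators individually with vxorpd instead of using vzeroall, and replaces the far-ahead prefetches of CO1/CO2 with prefetches of two cache lines at the start of each of the four destination columns of the C tile. A hedged intrinsics sketch of the same idea; co1, co2 and ldc (column stride in doubles) are stand-ins for the CO1, CO2 and LDC registers:

    #include <emmintrin.h>   /* _mm_setzero_pd, __m128d */
    #include <xmmintrin.h>   /* _mm_prefetch, _MM_HINT_T0 */

    /* Illustrative equivalent of the new setup: zero the eight accumulators
       one by one and touch two cache lines per destination column of C. */
    static void setup_tile(__m128d acc[8], const double *co1, const double *co2, long ldc)
    {
        for (int i = 0; i < 8; i++)
            acc[i] = _mm_setzero_pd();                   /* vxorpd %xmmN,%xmmN,%xmmN */

        _mm_prefetch((const char *)(co1),            _MM_HINT_T0);
        _mm_prefetch((const char *)(co1 + 8),        _MM_HINT_T0);
        _mm_prefetch((const char *)(co1 + ldc),      _MM_HINT_T0);
        _mm_prefetch((const char *)(co1 + ldc + 8),  _MM_HINT_T0);
        _mm_prefetch((const char *)(co2),            _MM_HINT_T0);
        _mm_prefetch((const char *)(co2 + 8),        _MM_HINT_T0);
        _mm_prefetch((const char *)(co2 + ldc),      _MM_HINT_T0);
        _mm_prefetch((const char *)(co2 + ldc + 8),  _MM_HINT_T0);
    }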
@@ -448,8 +451,10 @@
.align 16 .align 16
.L12: .L12:
prefetcht0 (AO,%rax,4) prefetcht0 24*SIZE(AO,%rax,4)
prefetcht0 (BO,%rax,4) prefetcht0 32*SIZE(AO,%rax,4)
prefetcht0 24*SIZE(BO,%rax,4)
prefetcht0 32*SIZE(BO,%rax,4)
KERNEL1(16 * 0) KERNEL1(16 * 0)
KERNEL2(16 * 0) KERNEL2(16 * 0)
KERNEL3(16 * 0) KERNEL3(16 * 0)
@@ -460,6 +465,10 @@
KERNEL8(16 * 0) KERNEL8(16 * 0)
NOBRANCH NOBRANCH
je .L15 je .L15
prefetcht0 24*SIZE(AO,%rax,4)
prefetcht0 32*SIZE(AO,%rax,4)
prefetcht0 24*SIZE(BO,%rax,4)
prefetcht0 32*SIZE(BO,%rax,4)
KERNEL1(16 * 0) KERNEL1(16 * 0)
KERNEL2(16 * 0) KERNEL2(16 * 0)
KERNEL3(16 * 0) KERNEL3(16 * 0)
@@ -470,6 +479,10 @@
KERNEL8(16 * 0) KERNEL8(16 * 0)
NOBRANCH NOBRANCH
je .L15 je .L15
prefetcht0 24*SIZE(AO,%rax,4)
prefetcht0 32*SIZE(AO,%rax,4)
prefetcht0 24*SIZE(BO,%rax,4)
prefetcht0 32*SIZE(BO,%rax,4)
KERNEL1(16 * 0) KERNEL1(16 * 0)
KERNEL2(16 * 0) KERNEL2(16 * 0)
KERNEL3(16 * 0) KERNEL3(16 * 0)
@@ -480,6 +493,10 @@
KERNEL8(16 * 0) KERNEL8(16 * 0)
NOBRANCH NOBRANCH
je .L15 je .L15
prefetcht0 24*SIZE(AO,%rax,4)
prefetcht0 32*SIZE(AO,%rax,4)
prefetcht0 24*SIZE(BO,%rax,4)
prefetcht0 32*SIZE(BO,%rax,4)
KERNEL1(16 * 0) KERNEL1(16 * 0)
KERNEL2(16 * 0) KERNEL2(16 * 0)
KERNEL3(16 * 0) KERNEL3(16 * 0)
@@ -490,6 +507,10 @@
KERNEL8(16 * 0) KERNEL8(16 * 0)
NOBRANCH NOBRANCH
je .L15 je .L15
prefetcht0 24*SIZE(AO,%rax,4)
prefetcht0 32*SIZE(AO,%rax,4)
prefetcht0 24*SIZE(BO,%rax,4)
prefetcht0 32*SIZE(BO,%rax,4)
KERNEL1(16 * 0) KERNEL1(16 * 0)
KERNEL2(16 * 0) KERNEL2(16 * 0)
KERNEL3(16 * 0) KERNEL3(16 * 0)
@@ -500,6 +521,10 @@
KERNEL8(16 * 0) KERNEL8(16 * 0)
NOBRANCH NOBRANCH
je .L15 je .L15
prefetcht0 24*SIZE(AO,%rax,4)
prefetcht0 32*SIZE(AO,%rax,4)
prefetcht0 24*SIZE(BO,%rax,4)
prefetcht0 32*SIZE(BO,%rax,4)
KERNEL1(16 * 0) KERNEL1(16 * 0)
KERNEL2(16 * 0) KERNEL2(16 * 0)
KERNEL3(16 * 0) KERNEL3(16 * 0)
@@ -510,6 +535,10 @@
KERNEL8(16 * 0) KERNEL8(16 * 0)
NOBRANCH NOBRANCH
je .L15 je .L15
prefetcht0 24*SIZE(AO,%rax,4)
prefetcht0 32*SIZE(AO,%rax,4)
prefetcht0 24*SIZE(BO,%rax,4)
prefetcht0 32*SIZE(BO,%rax,4)
KERNEL1(16 * 0) KERNEL1(16 * 0)
KERNEL2(16 * 0) KERNEL2(16 * 0)
KERNEL3(16 * 0) KERNEL3(16 * 0)
@@ -520,6 +549,10 @@
KERNEL8(16 * 0) KERNEL8(16 * 0)
NOBRANCH NOBRANCH
je .L15 je .L15
prefetcht0 24*SIZE(AO,%rax,4)
prefetcht0 32*SIZE(AO,%rax,4)
prefetcht0 24*SIZE(BO,%rax,4)
prefetcht0 32*SIZE(BO,%rax,4)
KERNEL1(16 * 0) KERNEL1(16 * 0)
KERNEL2(16 * 0) KERNEL2(16 * 0)
KERNEL3(16 * 0) KERNEL3(16 * 0)
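The repeated hunks above add the same four-line prefetch preamble before each unrolled group of eight KERNEL macros, pulling the packed A and B panels 24 and 32 doubles ahead of the current position rather than prefetching at the current address. A rough C outline of the retuned loop, with illustrative names only (ao, bo and kc stand in for AO, BO and the k count; the exit tests between groups are omitted):

    #include <xmmintrin.h>   /* _mm_prefetch, _MM_HINT_T0 */

    static void inner_loop_outline(const double *ao, const double *bo, long kc)
    {
        for (long k = 0; k + 8 <= kc; k += 8) {
            const double *a = ao + 4 * k;   /* 4 doubles of A consumed per k */
            const double *b = bo + 4 * k;   /* 4 doubles of B consumed per k */

            _mm_prefetch((const char *)(a + 24), _MM_HINT_T0);
            _mm_prefetch((const char *)(a + 32), _MM_HINT_T0);
            _mm_prefetch((const char *)(b + 24), _MM_HINT_T0);
            _mm_prefetch((const char *)(b + 32), _MM_HINT_T0);

            /* ... eight rank-1 updates of the 4x4 accumulator block ... */
        }
    }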
@@ -619,16 +652,12 @@
#endif #endif
.align 2
vmovups %xmm8, (CO1) vmovups %xmm8, (CO1)
vmovups %xmm12, 2 * SIZE(CO1) vmovups %xmm12, 2 * SIZE(CO1)
.align 2
vmovups %xmm9, (CO1, LDC) vmovups %xmm9, (CO1, LDC)
vmovups %xmm13, 2 * SIZE(CO1, LDC) vmovups %xmm13, 2 * SIZE(CO1, LDC)
.align 2
vmovups %xmm10, (CO2) vmovups %xmm10, (CO2)
vmovups %xmm14, 2 * SIZE(CO2) vmovups %xmm14, 2 * SIZE(CO2)
.align 2
vmovups %xmm11, (CO2, LDC) vmovups %xmm11, (CO2, LDC)
vmovups %xmm15, 2 * SIZE(CO2, LDC) vmovups %xmm15, 2 * SIZE(CO2, LDC)
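This hunk only drops the .align 2 directives interleaved with the stores; the stores themselves still write the finished 4x4 tile to the four destination columns. A hedged intrinsics sketch of that store pattern, ignoring the alpha/beta handling that precedes it in the file (acc[], co1, co2 and ldc in doubles are illustrative stand-ins):

    #include <emmintrin.h>   /* _mm_storeu_pd, __m128d */

    static void store_tile(double *co1, double *co2, long ldc, const __m128d acc[8])
    {
        _mm_storeu_pd(co1,           acc[0]);  /* %xmm8  */
        _mm_storeu_pd(co1 + 2,       acc[4]);  /* %xmm12 */
        _mm_storeu_pd(co1 + ldc,     acc[1]);  /* %xmm9  */
        _mm_storeu_pd(co1 + ldc + 2, acc[5]);  /* %xmm13 */
        _mm_storeu_pd(co2,           acc[2]);  /* %xmm10 */
        _mm_storeu_pd(co2 + 2,       acc[6]);  /* %xmm14 */
        _mm_storeu_pd(co2 + ldc,     acc[3]);  /* %xmm11 */
        _mm_storeu_pd(co2 + ldc + 2, acc[7]);  /* %xmm15 */
    }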
@@ -1019,17 +1048,7 @@
vxorps %xmm13, %xmm13,%xmm13 vxorps %xmm13, %xmm13,%xmm13
vmovups -16 * SIZE(AO), %xmm0 vmovups -16 * SIZE(AO), %xmm0
vmovups -8 * SIZE(AO), %xmm4 vmovups -8 * SIZE(AO), %xmm4
// prefetcht0 256(CO1)
// prefetcht0 320(CO1)
// prefetcht0 256(CO2)
// prefetcht0 320(CO2)
// prefetchnta 24 * SIZE(CO1)
// prefetchnta 32 * SIZE(CO1)
// prefetchw 3 * SIZE(CO1)
vmovups %xmm0, %xmm2 vmovups %xmm0, %xmm2
// prefetchw 3 * SIZE(CO2)
// prefetchnta -16 * SIZE(BB)
// prefetch -16 * SIZE(BB)
subq $-8 * SIZE, BB subq $-8 * SIZE, BB
#ifndef TRMMKERNEL #ifndef TRMMKERNEL