From efb9038f7273cddc1ef30fce6ed4df7967a2fb03 Mon Sep 17 00:00:00 2001
From: Martin Kroeker
Date: Sat, 16 Feb 2019 18:46:17 +0100
Subject: [PATCH] Fix inline assembly constraints

---
 kernel/x86_64/sgemv_n_microk_bulldozer-4.c | 188 ++++++++++----------
 1 file changed, 94 insertions(+), 94 deletions(-)

diff --git a/kernel/x86_64/sgemv_n_microk_bulldozer-4.c b/kernel/x86_64/sgemv_n_microk_bulldozer-4.c
index 31001c7f3..bbf06c84b 100644
--- a/kernel/x86_64/sgemv_n_microk_bulldozer-4.c
+++ b/kernel/x86_64/sgemv_n_microk_bulldozer-4.c
@@ -37,14 +37,14 @@ static void sgemv_kernel_4x8( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, BLASLO
 
 	__asm__  __volatile__
 	(
-	"vbroadcastss    (%2), %%xmm12   \n\t"  // x0
-	"vbroadcastss   4(%2), %%xmm13   \n\t"  // x1
-	"vbroadcastss   8(%2), %%xmm14   \n\t"  // x2
-	"vbroadcastss  12(%2), %%xmm15   \n\t"  // x3
-	"vbroadcastss  16(%2), %%xmm0    \n\t"  // x4
-	"vbroadcastss  20(%2), %%xmm1    \n\t"  // x5
-	"vbroadcastss  24(%2), %%xmm2    \n\t"  // x6
-	"vbroadcastss  28(%2), %%xmm3    \n\t"  // x7
+	"vbroadcastss    (%3), %%xmm12   \n\t"  // x0
+	"vbroadcastss   4(%3), %%xmm13   \n\t"  // x1
+	"vbroadcastss   8(%3), %%xmm14   \n\t"  // x2
+	"vbroadcastss  12(%3), %%xmm15   \n\t"  // x3
+	"vbroadcastss  16(%3), %%xmm0    \n\t"  // x4
+	"vbroadcastss  20(%3), %%xmm1    \n\t"  // x5
+	"vbroadcastss  24(%3), %%xmm2    \n\t"  // x6
+	"vbroadcastss  28(%3), %%xmm3    \n\t"  // x7
 
 	"vbroadcastss    (%9), %%xmm8    \n\t"  // alpha
 
@@ -54,22 +54,22 @@ static void sgemv_kernel_4x8( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, BLASLO
 	"vxorps    %%xmm4, %%xmm4 , %%xmm4  \n\t"
 	"vxorps    %%xmm5, %%xmm5 , %%xmm5  \n\t"
 
-	"vfmaddps  %%xmm4, (%4,%0,4), %%xmm12, %%xmm4  \n\t"
-	"vfmaddps  %%xmm5, (%5,%0,4), %%xmm13, %%xmm5  \n\t"
-	"vfmaddps  %%xmm4, (%6,%0,4), %%xmm14, %%xmm4  \n\t"
-	"vfmaddps  %%xmm5, (%7,%0,4), %%xmm15, %%xmm5  \n\t"
+	"vfmaddps  %%xmm4, (%5,%0,4), %%xmm12, %%xmm4  \n\t"
+	"vfmaddps  %%xmm5, (%6,%0,4), %%xmm13, %%xmm5  \n\t"
+	"vfmaddps  %%xmm4, (%7,%0,4), %%xmm14, %%xmm4  \n\t"
+	"vfmaddps  %%xmm5, (%8,%0,4), %%xmm15, %%xmm5  \n\t"
 	"addq      $4 , %0  \n\t"
 
-	"vfmaddps  %%xmm4, (%4,%8,4), %%xmm0 , %%xmm4  \n\t"
-	"vfmaddps  %%xmm5, (%5,%8,4), %%xmm1 , %%xmm5  \n\t"
-	"vfmaddps  %%xmm4, (%6,%8,4), %%xmm2 , %%xmm4  \n\t"
-	"vfmaddps  %%xmm5, (%7,%8,4), %%xmm3 , %%xmm5  \n\t"
-	"addq      $4 , %8  \n\t"
+	"vfmaddps  %%xmm4, (%5,%2,4), %%xmm0 , %%xmm4  \n\t"
+	"vfmaddps  %%xmm5, (%6,%2,4), %%xmm1 , %%xmm5  \n\t"
+	"vfmaddps  %%xmm4, (%7,%2,4), %%xmm2 , %%xmm4  \n\t"
+	"vfmaddps  %%xmm5, (%8,%2,4), %%xmm3 , %%xmm5  \n\t"
+	"addq      $4 , %2  \n\t"
 
 	"vaddps    %%xmm5 , %%xmm4, %%xmm4  \n\t"
-	"vfmaddps  -16(%3,%0,4) , %%xmm4, %%xmm8,%%xmm6  \n\t"
+	"vfmaddps  -16(%4,%0,4) , %%xmm4, %%xmm8,%%xmm6  \n\t"
 	"subq      $4 , %1  \n\t"
-	"vmovups   %%xmm6, -16(%3,%0,4)  \n\t"  // 4 * y
+	"vmovups   %%xmm6, -16(%4,%0,4)  \n\t"  // 4 * y
 
 	"2:  \n\t"
 
@@ -79,31 +79,31 @@ static void sgemv_kernel_4x8( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, BLASLO
 	"vxorps    %%xmm4, %%xmm4 , %%xmm4  \n\t"
 	"vxorps    %%xmm5, %%xmm5 , %%xmm5  \n\t"
 
-	"vfmaddps  %%xmm4,   (%4,%0,4), %%xmm12, %%xmm4  \n\t"
-	"vfmaddps  %%xmm5, 16(%4,%0,4), %%xmm12, %%xmm5  \n\t"
-	"vfmaddps  %%xmm4,   (%5,%0,4), %%xmm13, %%xmm4  \n\t"
-	"vfmaddps  %%xmm5, 16(%5,%0,4), %%xmm13, %%xmm5  \n\t"
-	"vfmaddps  %%xmm4,   (%6,%0,4), %%xmm14, %%xmm4  \n\t"
-	"vfmaddps  %%xmm5, 16(%6,%0,4), %%xmm14, %%xmm5  \n\t"
-	"vfmaddps  %%xmm4,   (%7,%0,4), %%xmm15, %%xmm4  \n\t"
-	"vfmaddps  %%xmm5, 16(%7,%0,4), %%xmm15, %%xmm5  \n\t"
+	"vfmaddps  %%xmm4,   (%5,%0,4), %%xmm12, %%xmm4  \n\t"
+	"vfmaddps  %%xmm5, 16(%5,%0,4), %%xmm12, %%xmm5  \n\t"
+	"vfmaddps  %%xmm4,   (%6,%0,4), %%xmm13, %%xmm4  \n\t"
+	"vfmaddps  %%xmm5, 16(%6,%0,4), %%xmm13, %%xmm5  \n\t"
+	"vfmaddps  %%xmm4,   (%7,%0,4), %%xmm14, %%xmm4  \n\t"
+	"vfmaddps  %%xmm5, 16(%7,%0,4), %%xmm14, %%xmm5  \n\t"
+	"vfmaddps  %%xmm4,   (%8,%0,4), %%xmm15, %%xmm4  \n\t"
+	"vfmaddps  %%xmm5, 16(%8,%0,4), %%xmm15, %%xmm5  \n\t"
 
-	"vfmaddps  %%xmm4,   (%4,%8,4), %%xmm0 , %%xmm4  \n\t"
-	"vfmaddps  %%xmm5, 16(%4,%8,4), %%xmm0 , %%xmm5  \n\t"
-	"vfmaddps  %%xmm4,   (%5,%8,4), %%xmm1 , %%xmm4  \n\t"
-	"vfmaddps  %%xmm5, 16(%5,%8,4), %%xmm1 , %%xmm5  \n\t"
-	"vfmaddps  %%xmm4,   (%6,%8,4), %%xmm2 , %%xmm4  \n\t"
-	"vfmaddps  %%xmm5, 16(%6,%8,4), %%xmm2 , %%xmm5  \n\t"
-	"vfmaddps  %%xmm4,   (%7,%8,4), %%xmm3 , %%xmm4  \n\t"
-	"vfmaddps  %%xmm5, 16(%7,%8,4), %%xmm3 , %%xmm5  \n\t"
+	"vfmaddps  %%xmm4,   (%5,%2,4), %%xmm0 , %%xmm4  \n\t"
+	"vfmaddps  %%xmm5, 16(%5,%2,4), %%xmm0 , %%xmm5  \n\t"
+	"vfmaddps  %%xmm4,   (%6,%2,4), %%xmm1 , %%xmm4  \n\t"
+	"vfmaddps  %%xmm5, 16(%6,%2,4), %%xmm1 , %%xmm5  \n\t"
+	"vfmaddps  %%xmm4,   (%7,%2,4), %%xmm2 , %%xmm4  \n\t"
+	"vfmaddps  %%xmm5, 16(%7,%2,4), %%xmm2 , %%xmm5  \n\t"
+	"vfmaddps  %%xmm4,   (%8,%2,4), %%xmm3 , %%xmm4  \n\t"
+	"vfmaddps  %%xmm5, 16(%8,%2,4), %%xmm3 , %%xmm5  \n\t"
 
-	"vfmaddps    (%3,%0,4) , %%xmm4,%%xmm8,%%xmm4  \n\t"
-	"vfmaddps  16(%3,%0,4) , %%xmm5,%%xmm8,%%xmm5  \n\t"
-	"vmovups   %%xmm4,   (%3,%0,4)  \n\t"  // 4 * y
-	"vmovups   %%xmm5, 16(%3,%0,4)  \n\t"  // 4 * y
+	"vfmaddps    (%4,%0,4) , %%xmm4,%%xmm8,%%xmm4  \n\t"
+	"vfmaddps  16(%4,%0,4) , %%xmm5,%%xmm8,%%xmm5  \n\t"
+	"vmovups   %%xmm4,   (%4,%0,4)  \n\t"  // 4 * y
+	"vmovups   %%xmm5, 16(%4,%0,4)  \n\t"  // 4 * y
 
 	"addq      $8 , %0  \n\t"
-	"addq      $8 , %8  \n\t"
+	"addq      $8 , %2  \n\t"
 	"subq      $8 , %1  \n\t"
 
@@ -120,62 +120,62 @@ static void sgemv_kernel_4x8( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, BLASLO
 	"vxorps    %%xmm6, %%xmm6 , %%xmm6  \n\t"
 	"vxorps    %%xmm7, %%xmm7 , %%xmm7  \n\t"
 
-	"prefetcht0  192(%4,%0,4)  \n\t"
-	"vfmaddps  %%xmm4,   (%4,%0,4), %%xmm12, %%xmm4  \n\t"
-	"vfmaddps  %%xmm5, 16(%4,%0,4), %%xmm12, %%xmm5  \n\t"
 	"prefetcht0  192(%5,%0,4)  \n\t"
-	"vfmaddps  %%xmm4,   (%5,%0,4), %%xmm13, %%xmm4  \n\t"
-	"vfmaddps  %%xmm5, 16(%5,%0,4), %%xmm13, %%xmm5  \n\t"
+	"vfmaddps  %%xmm4,   (%5,%0,4), %%xmm12, %%xmm4  \n\t"
+	"vfmaddps  %%xmm5, 16(%5,%0,4), %%xmm12, %%xmm5  \n\t"
 	"prefetcht0  192(%6,%0,4)  \n\t"
-	"vfmaddps  %%xmm4,   (%6,%0,4), %%xmm14, %%xmm4  \n\t"
-	"vfmaddps  %%xmm5, 16(%6,%0,4), %%xmm14, %%xmm5  \n\t"
+	"vfmaddps  %%xmm4,   (%6,%0,4), %%xmm13, %%xmm4  \n\t"
+	"vfmaddps  %%xmm5, 16(%6,%0,4), %%xmm13, %%xmm5  \n\t"
 	"prefetcht0  192(%7,%0,4)  \n\t"
-	"vfmaddps  %%xmm4,   (%7,%0,4), %%xmm15, %%xmm4  \n\t"
+	"vfmaddps  %%xmm4,   (%7,%0,4), %%xmm14, %%xmm4  \n\t"
+	"vfmaddps  %%xmm5, 16(%7,%0,4), %%xmm14, %%xmm5  \n\t"
+	"prefetcht0  192(%8,%0,4)  \n\t"
+	"vfmaddps  %%xmm4,   (%8,%0,4), %%xmm15, %%xmm4  \n\t"
 	".align 2  \n\t"
-	"vfmaddps  %%xmm5, 16(%7,%0,4), %%xmm15, %%xmm5  \n\t"
+	"vfmaddps  %%xmm5, 16(%8,%0,4), %%xmm15, %%xmm5  \n\t"
 
-	"vfmaddps  %%xmm6, 32(%4,%0,4), %%xmm12, %%xmm6  \n\t"
-	"vfmaddps  %%xmm7, 48(%4,%0,4), %%xmm12, %%xmm7  \n\t"
-	"vfmaddps  %%xmm6, 32(%5,%0,4), %%xmm13, %%xmm6  \n\t"
-	"vfmaddps  %%xmm7, 48(%5,%0,4), %%xmm13, %%xmm7  \n\t"
-	"vfmaddps  %%xmm6, 32(%6,%0,4), %%xmm14, %%xmm6  \n\t"
-	"vfmaddps  %%xmm7, 48(%6,%0,4), %%xmm14, %%xmm7  \n\t"
-	"vfmaddps  %%xmm6, 32(%7,%0,4), %%xmm15, %%xmm6  \n\t"
-	"vfmaddps  %%xmm7, 48(%7,%0,4), %%xmm15, %%xmm7  \n\t"
+	"vfmaddps  %%xmm6, 32(%5,%0,4), %%xmm12, %%xmm6  \n\t"
+	"vfmaddps  %%xmm7, 48(%5,%0,4), %%xmm12, %%xmm7  \n\t"
+	"vfmaddps  %%xmm6, 32(%6,%0,4), %%xmm13, %%xmm6  \n\t"
+	"vfmaddps  %%xmm7, 48(%6,%0,4), %%xmm13, %%xmm7  \n\t"
+	"vfmaddps  %%xmm6, 32(%7,%0,4), %%xmm14, %%xmm6  \n\t"
+	"vfmaddps  %%xmm7, 48(%7,%0,4), %%xmm14, %%xmm7  \n\t"
+	"vfmaddps  %%xmm6, 32(%8,%0,4), %%xmm15, %%xmm6  \n\t"
+	"vfmaddps  %%xmm7, 48(%8,%0,4), %%xmm15, %%xmm7  \n\t"
 
-	"prefetcht0  192(%4,%8,4)  \n\t"
-	"vfmaddps  %%xmm4,   (%4,%8,4), %%xmm0 , %%xmm4  \n\t"
-	"vfmaddps  %%xmm5, 16(%4,%8,4), %%xmm0 , %%xmm5  \n\t"
-	"prefetcht0  192(%5,%8,4)  \n\t"
-	"vfmaddps  %%xmm4,   (%5,%8,4), %%xmm1 , %%xmm4  \n\t"
-	"vfmaddps  %%xmm5, 16(%5,%8,4), %%xmm1 , %%xmm5  \n\t"
-	"prefetcht0  192(%6,%8,4)  \n\t"
-	"vfmaddps  %%xmm4,   (%6,%8,4), %%xmm2 , %%xmm4  \n\t"
-	"vfmaddps  %%xmm5, 16(%6,%8,4), %%xmm2 , %%xmm5  \n\t"
-	"prefetcht0  192(%7,%8,4)  \n\t"
-	"vfmaddps  %%xmm4,   (%7,%8,4), %%xmm3 , %%xmm4  \n\t"
-	"vfmaddps  %%xmm5, 16(%7,%8,4), %%xmm3 , %%xmm5  \n\t"
+	"prefetcht0  192(%5,%2,4)  \n\t"
+	"vfmaddps  %%xmm4,   (%5,%2,4), %%xmm0 , %%xmm4  \n\t"
+	"vfmaddps  %%xmm5, 16(%5,%2,4), %%xmm0 , %%xmm5  \n\t"
+	"prefetcht0  192(%6,%2,4)  \n\t"
+	"vfmaddps  %%xmm4,   (%6,%2,4), %%xmm1 , %%xmm4  \n\t"
+	"vfmaddps  %%xmm5, 16(%6,%2,4), %%xmm1 , %%xmm5  \n\t"
+	"prefetcht0  192(%7,%2,4)  \n\t"
+	"vfmaddps  %%xmm4,   (%7,%2,4), %%xmm2 , %%xmm4  \n\t"
+	"vfmaddps  %%xmm5, 16(%7,%2,4), %%xmm2 , %%xmm5  \n\t"
+	"prefetcht0  192(%8,%2,4)  \n\t"
+	"vfmaddps  %%xmm4,   (%8,%2,4), %%xmm3 , %%xmm4  \n\t"
+	"vfmaddps  %%xmm5, 16(%8,%2,4), %%xmm3 , %%xmm5  \n\t"
 
-	"vfmaddps  %%xmm6, 32(%4,%8,4), %%xmm0 , %%xmm6  \n\t"
-	"vfmaddps  %%xmm7, 48(%4,%8,4), %%xmm0 , %%xmm7  \n\t"
-	"vfmaddps  %%xmm6, 32(%5,%8,4), %%xmm1 , %%xmm6  \n\t"
-	"vfmaddps  %%xmm7, 48(%5,%8,4), %%xmm1 , %%xmm7  \n\t"
-	"vfmaddps  %%xmm6, 32(%6,%8,4), %%xmm2 , %%xmm6  \n\t"
-	"vfmaddps  %%xmm7, 48(%6,%8,4), %%xmm2 , %%xmm7  \n\t"
-	"vfmaddps  %%xmm6, 32(%7,%8,4), %%xmm3 , %%xmm6  \n\t"
-	"vfmaddps  %%xmm7, 48(%7,%8,4), %%xmm3 , %%xmm7  \n\t"
+	"vfmaddps  %%xmm6, 32(%5,%2,4), %%xmm0 , %%xmm6  \n\t"
+	"vfmaddps  %%xmm7, 48(%5,%2,4), %%xmm0 , %%xmm7  \n\t"
+	"vfmaddps  %%xmm6, 32(%6,%2,4), %%xmm1 , %%xmm6  \n\t"
+	"vfmaddps  %%xmm7, 48(%6,%2,4), %%xmm1 , %%xmm7  \n\t"
+	"vfmaddps  %%xmm6, 32(%7,%2,4), %%xmm2 , %%xmm6  \n\t"
+	"vfmaddps  %%xmm7, 48(%7,%2,4), %%xmm2 , %%xmm7  \n\t"
+	"vfmaddps  %%xmm6, 32(%8,%2,4), %%xmm3 , %%xmm6  \n\t"
+	"vfmaddps  %%xmm7, 48(%8,%2,4), %%xmm3 , %%xmm7  \n\t"
 
-	"vfmaddps    (%3,%0,4) , %%xmm4,%%xmm8,%%xmm4  \n\t"
-	"vfmaddps  16(%3,%0,4) , %%xmm5,%%xmm8,%%xmm5  \n\t"
-	"vfmaddps  32(%3,%0,4) , %%xmm6,%%xmm8,%%xmm6  \n\t"
-	"vfmaddps  48(%3,%0,4) , %%xmm7,%%xmm8,%%xmm7  \n\t"
+	"vfmaddps    (%4,%0,4) , %%xmm4,%%xmm8,%%xmm4  \n\t"
+	"vfmaddps  16(%4,%0,4) , %%xmm5,%%xmm8,%%xmm5  \n\t"
+	"vfmaddps  32(%4,%0,4) , %%xmm6,%%xmm8,%%xmm6  \n\t"
+	"vfmaddps  48(%4,%0,4) , %%xmm7,%%xmm8,%%xmm7  \n\t"
 
 	"addq      $16, %0  \n\t"
-	"vmovups   %%xmm4,-64(%3,%0,4)  \n\t"  // 4 * y
-	"vmovups   %%xmm5,-48(%3,%0,4)  \n\t"  // 4 * y
-	"addq      $16, %8  \n\t"
-	"vmovups   %%xmm6,-32(%3,%0,4)  \n\t"  // 4 * y
-	"vmovups   %%xmm7,-16(%3,%0,4)  \n\t"  // 4 * y
+	"vmovups   %%xmm4,-64(%4,%0,4)  \n\t"  // 4 * y
+	"vmovups   %%xmm5,-48(%4,%0,4)  \n\t"  // 4 * y
+	"addq      $16, %2  \n\t"
+	"vmovups   %%xmm6,-32(%4,%0,4)  \n\t"  // 4 * y
+	"vmovups   %%xmm7,-16(%4,%0,4)  \n\t"  // 4 * y
 
 	"subq      $16, %1  \n\t"
 	"jnz       1b  \n\t"
@@ -184,15 +184,15 @@ static void sgemv_kernel_4x8( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, BLASLO
 	:
 	  "+r" (i),     // 0
-	  "+r" (n)      // 1
+	  "+r" (n),     // 1
+	  "+r" (lda4)   // 2
 	:
-	  "r" (x),      // 2
-	  "r" (y),      // 3
-	  "r" (ap[0]),  // 4
-	  "r" (ap[1]),  // 5
-	  "r" (ap[2]),  // 6
-	  "r" (ap[3]),  // 7
-	  "r" (lda4),   // 8
+	  "r" (x),      // 3
+	  "r" (y),      // 4
+	  "r" (ap[0]),  // 5
+	  "r" (ap[1]),  // 6
+	  "r" (ap[2]),  // 7
+	  "r" (ap[3]),  // 8
 	  "r" (alpha)   // 9
 	: "cc",
 	  "%xmm0", "%xmm1",