diff --git a/kernel/x86_64/KERNEL.BULLDOZER b/kernel/x86_64/KERNEL.BULLDOZER
index 9f124c97f..c8ccae1ea 100644
--- a/kernel/x86_64/KERNEL.BULLDOZER
+++ b/kernel/x86_64/KERNEL.BULLDOZER
@@ -69,24 +69,24 @@ ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX)
 CGEMM3MKERNEL = zgemm3m_kernel_8x4_barcelona.S
 ZGEMM3MKERNEL = zgemm3m_kernel_4x4_barcelona.S
 
-STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
-STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
-STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
-STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
+STRSMKERNEL_LN = strsm_kernel_LN_bulldozer.c
+STRSMKERNEL_LT = strsm_kernel_LT_bulldozer.c
+STRSMKERNEL_RN = strsm_kernel_RN_bulldozer.c
+STRSMKERNEL_RT = strsm_kernel_RT_bulldozer.c
 
-DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
+DTRSMKERNEL_LN = dtrsm_kernel_LN_bulldozer.c
 DTRSMKERNEL_LT = dtrsm_kernel_LT_8x2_bulldozer.S
 DTRSMKERNEL_RN = dtrsm_kernel_RN_8x2_bulldozer.S
-DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
+DTRSMKERNEL_RT = dtrsm_kernel_RT_bulldozer.c
 
-CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
-CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
-CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
-CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
+CTRSMKERNEL_LN = ctrsm_kernel_LN_bulldozer.c
+CTRSMKERNEL_LT = ctrsm_kernel_LT_bulldozer.c
+CTRSMKERNEL_RN = ctrsm_kernel_RN_bulldozer.c
+CTRSMKERNEL_RT = ctrsm_kernel_RT_bulldozer.c
 
-ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
-ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
-ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
-ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
+ZTRSMKERNEL_LN = ztrsm_kernel_LN_bulldozer.c
+ZTRSMKERNEL_LT = ztrsm_kernel_LT_bulldozer.c
+ZTRSMKERNEL_RN = ztrsm_kernel_RN_bulldozer.c
+ZTRSMKERNEL_RT = ztrsm_kernel_RT_bulldozer.c
diff --git a/kernel/x86_64/KERNEL.HASWELL b/kernel/x86_64/KERNEL.HASWELL
index a4686debb..f2e1374d3 100644
--- a/kernel/x86_64/KERNEL.HASWELL
+++ b/kernel/x86_64/KERNEL.HASWELL
@@ -80,7 +80,7 @@ STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
 DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
 DTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
-DTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
+DTRSMKERNEL_RN = dtrsm_kernel_RN_haswell.c
 DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
 
 CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
diff --git a/kernel/x86_64/KERNEL.PILEDRIVER b/kernel/x86_64/KERNEL.PILEDRIVER
index 5d3c7a2af..6c726a6e9 100644
--- a/kernel/x86_64/KERNEL.PILEDRIVER
+++ b/kernel/x86_64/KERNEL.PILEDRIVER
@@ -66,25 +66,23 @@ ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX)
 CGEMM3MKERNEL = zgemm3m_kernel_8x4_barcelona.S
 ZGEMM3MKERNEL = zgemm3m_kernel_4x4_barcelona.S
 
-STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
-STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
-STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
-STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
+STRSMKERNEL_LN = strsm_kernel_LN_bulldozer.c
+STRSMKERNEL_LT = strsm_kernel_LT_bulldozer.c
+STRSMKERNEL_RN = strsm_kernel_RN_bulldozer.c
+STRSMKERNEL_RT = strsm_kernel_RT_bulldozer.c
-
-DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
+DTRSMKERNEL_LN = dtrsm_kernel_LN_bulldozer.c
 DTRSMKERNEL_LT = dtrsm_kernel_LT_8x2_bulldozer.S
 DTRSMKERNEL_RN = dtrsm_kernel_RN_8x2_bulldozer.S
-DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
+DTRSMKERNEL_RT = dtrsm_kernel_RT_bulldozer.c
-CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
-CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
-CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
-CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
-
-ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
-ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
-ZTRSMKERNEL_RN =
../generic/trsm_kernel_RN.c -ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c +CTRSMKERNEL_LN = ctrsm_kernel_LN_bulldozer.c +CTRSMKERNEL_LT = ctrsm_kernel_LT_bulldozer.c +CTRSMKERNEL_RN = ctrsm_kernel_RN_bulldozer.c +CTRSMKERNEL_RT = ctrsm_kernel_RT_bulldozer.c +ZTRSMKERNEL_LN = ztrsm_kernel_LN_bulldozer.c +ZTRSMKERNEL_LT = ztrsm_kernel_LT_bulldozer.c +ZTRSMKERNEL_RN = ztrsm_kernel_RN_bulldozer.c +ZTRSMKERNEL_RT = ztrsm_kernel_RT_bulldozer.c diff --git a/kernel/x86_64/KERNEL.STEAMROLLER b/kernel/x86_64/KERNEL.STEAMROLLER index 51e6d616a..5291cc624 100644 --- a/kernel/x86_64/KERNEL.STEAMROLLER +++ b/kernel/x86_64/KERNEL.STEAMROLLER @@ -72,25 +72,23 @@ ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) CGEMM3MKERNEL = zgemm3m_kernel_8x4_barcelona.S ZGEMM3MKERNEL = zgemm3m_kernel_4x4_barcelona.S -STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c -STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c -STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c -STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c +STRSMKERNEL_LN = strsm_kernel_LN_bulldozer.c +STRSMKERNEL_LT = strsm_kernel_LT_bulldozer.c +STRSMKERNEL_RN = strsm_kernel_RN_bulldozer.c +STRSMKERNEL_RT = strsm_kernel_RT_bulldozer.c - -DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +DTRSMKERNEL_LN = dtrsm_kernel_LN_bulldozer.c DTRSMKERNEL_LT = dtrsm_kernel_LT_8x2_bulldozer.S DTRSMKERNEL_RN = dtrsm_kernel_RN_8x2_bulldozer.S -DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c +DTRSMKERNEL_RT = dtrsm_kernel_RT_bulldozer.c -CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c -CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c -CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c -CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c - -ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c -ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c -ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c -ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c +CTRSMKERNEL_LN = ctrsm_kernel_LN_bulldozer.c +CTRSMKERNEL_LT = ctrsm_kernel_LT_bulldozer.c +CTRSMKERNEL_RN = ctrsm_kernel_RN_bulldozer.c +CTRSMKERNEL_RT = ctrsm_kernel_RT_bulldozer.c +ZTRSMKERNEL_LN = ztrsm_kernel_LN_bulldozer.c +ZTRSMKERNEL_LT = ztrsm_kernel_LT_bulldozer.c +ZTRSMKERNEL_RN = ztrsm_kernel_RN_bulldozer.c +ZTRSMKERNEL_RT = ztrsm_kernel_RT_bulldozer.c diff --git a/kernel/x86_64/ctrsm_kernel_LN_bulldozer.c b/kernel/x86_64/ctrsm_kernel_LN_bulldozer.c new file mode 100644 index 000000000..dc93842d6 --- /dev/null +++ b/kernel/x86_64/ctrsm_kernel_LN_bulldozer.c @@ -0,0 +1,472 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. 
IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#include "common.h" + +static FLOAT dm1 = -1.; + +#ifdef CONJ +#define GEMM_KERNEL GEMM_KERNEL_L +#else +#define GEMM_KERNEL GEMM_KERNEL_N +#endif + +#if GEMM_DEFAULT_UNROLL_M == 1 +#define GEMM_UNROLL_M_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 2 +#define GEMM_UNROLL_M_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 4 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 6 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 8 +#define GEMM_UNROLL_M_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 16 +#define GEMM_UNROLL_M_SHIFT 4 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 1 +#define GEMM_UNROLL_N_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 2 +#define GEMM_UNROLL_N_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 4 +#define GEMM_UNROLL_N_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 8 +#define GEMM_UNROLL_N_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 16 +#define GEMM_UNROLL_N_SHIFT 4 +#endif + + + +#ifndef CONJ + +static void ctrsm_LN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) __attribute__ ((noinline)); + +static void ctrsm_LN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) +{ + + FLOAT *c1 = c + ldc*2 ; + BLASLONG n1 = n * 4; + BLASLONG i=0; + + __asm__ __volatile__ + ( + " vzeroupper \n\t" + " prefetcht0 (%4) \n\t" + " prefetcht0 (%5) \n\t" + " vxorps %%xmm8 , %%xmm8 , %%xmm8 \n\t" + " vxorps %%xmm9 , %%xmm9 , %%xmm9 \n\t" + " vxorps %%xmm10, %%xmm10, %%xmm10 \n\t" + " vxorps %%xmm11, %%xmm11, %%xmm11 \n\t" + " vxorps %%xmm12, %%xmm12, %%xmm12 \n\t" + " vxorps %%xmm13, %%xmm13, %%xmm13 \n\t" + " vxorps %%xmm14, %%xmm14, %%xmm14 \n\t" + " vxorps %%xmm15, %%xmm15, %%xmm15 \n\t" + + " cmpq $0, %0 \n\t" + " je 3f \n\t" + + " .align 16 \n\t" + "1: \n\t" + + " vbroadcastss (%3,%1,4), %%xmm0 \n\t" // b0 real, b0 real + " vbroadcastss 4(%3,%1,4), %%xmm1 \n\t" // b0 imag, b0 imag + " vbroadcastss 8(%3,%1,4), %%xmm2 \n\t" // b1 real, b1 real + " vbroadcastss 12(%3,%1,4), %%xmm3 \n\t" // b1 imag, b1 imag + + " vmovups (%2,%1,8), %%xmm4 \n\t" // a0 real , a0 imag + " vmovups 16(%2,%1,8), %%xmm5 \n\t" // a1 real , a1 imag + + " vfnmaddps %%xmm8 , %%xmm0 , %%xmm4 , %%xmm8 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddps %%xmm9 , %%xmm1 , %%xmm4 , %%xmm9 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddps %%xmm10, %%xmm0 , %%xmm5 , %%xmm10 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddps %%xmm11, %%xmm1 , %%xmm5 , %%xmm11 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddps %%xmm12, %%xmm2 , %%xmm4 , %%xmm12 \n\t" // a_real * 
b_real , a_imag * b_real + " vfnmaddps %%xmm13, %%xmm3 , %%xmm4 , %%xmm13 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddps %%xmm14, %%xmm2 , %%xmm5 , %%xmm14 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddps %%xmm15, %%xmm3 , %%xmm5 , %%xmm15 \n\t" // a_real * b_imag , a_imag * b_imag + + " addq $4, %1 \n\t" + " cmpq %1, %0 \n\t" + + " jnz 1b \n\t" + + + "2: \n\t" + + " vshufps $0xb1 , %%xmm9 , %%xmm9, %%xmm9 \n\t" + " vshufps $0xb1 , %%xmm11 , %%xmm11 , %%xmm11 \n\t" + " vshufps $0xb1 , %%xmm13 , %%xmm13 , %%xmm13 \n\t" + " vshufps $0xb1 , %%xmm15 , %%xmm15 , %%xmm15 \n\t" + + " vaddsubps %%xmm8 , %%xmm9 , %%xmm8 \n\t" + " vaddsubps %%xmm10, %%xmm11, %%xmm10 \n\t" + " vaddsubps %%xmm12, %%xmm13, %%xmm12 \n\t" + " vaddsubps %%xmm14, %%xmm15, %%xmm14 \n\t" + + " vxorps %%xmm7 , %%xmm7 , %%xmm7 \n\t" + + " vaddsubps %%xmm8 , %%xmm7 , %%xmm8 \n\t" + " vaddsubps %%xmm10, %%xmm7 , %%xmm10 \n\t" + " vaddsubps %%xmm12, %%xmm7 , %%xmm12 \n\t" + " vaddsubps %%xmm14, %%xmm7 , %%xmm14 \n\t" + + " vmovups (%4) , %%xmm0 \n\t" + " vmovups 16(%4) , %%xmm1 \n\t" + + " vmovups (%5) , %%xmm4 \n\t" + " vmovups 16(%5) , %%xmm5 \n\t" + + " vaddps %%xmm0 , %%xmm8 , %%xmm8 \n\t" + " vaddps %%xmm1 , %%xmm10, %%xmm10 \n\t" + " vaddps %%xmm4 , %%xmm12, %%xmm12 \n\t" + " vaddps %%xmm5 , %%xmm14, %%xmm14 \n\t" + + " vmovups %%xmm8 , (%4) \n\t" + " vmovups %%xmm10 ,16(%4) \n\t" + + " vmovups %%xmm12 , (%5) \n\t" + " vmovups %%xmm14 ,16(%5) \n\t" + + "3: \n\t" + + " vzeroupper \n\t" + + : + : + "r" (n1), // 0 + "a" (i), // 1 + "r" (a), // 2 + "r" (b), // 3 + "r" (c), // 4 + "r" (c1), // 5 + "r" (as), // 6 + "r" (bs) // 7 + : "cc", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + +#endif + + + +#ifndef COMPLEX + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa, bb; + + int i, j, k; + + a += (m - 1) * m; + b += (m - 1) * n; + + for (i = m - 1; i >= 0; i--) { + + aa = *(a + i); + + for (j = 0; j < n; j ++) { + bb = *(c + i + j * ldc); + bb *= aa; + *b = bb; + *(c + i + j * ldc) = bb; + b ++; + + for (k = 0; k < i; k ++){ + *(c + k + j * ldc) -= bb * *(a + k); + } + + } + a -= m; + b -= 2 * n; + } + +} + +#else + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa1, aa2; + FLOAT bb1, bb2; + FLOAT cc1, cc2; + + int i, j, k; + + ldc *= 2; + a += (m - 1) * m * 2; + b += (m - 1) * n * 2; + + for (i = m - 1; i >= 0; i--) { + + aa1 = *(a + i * 2 + 0); + aa2 = *(a + i * 2 + 1); + + for (j = 0; j < n; j ++) { + bb1 = *(c + i * 2 + 0 + j * ldc); + bb2 = *(c + i * 2 + 1 + j * ldc); + +#ifndef CONJ + cc1 = aa1 * bb1 - aa2 * bb2; + cc2 = aa1 * bb2 + aa2 * bb1; +#else + cc1 = aa1 * bb1 + aa2 * bb2; + cc2 = aa1 * bb2 - aa2 * bb1; +#endif + + + *(b + 0) = cc1; + *(b + 1) = cc2; + *(c + i * 2 + 0 + j * ldc) = cc1; + *(c + i * 2 + 1 + j * ldc) = cc2; + b += 2; + + for (k = 0; k < i; k ++){ +#ifndef CONJ + *(c + k * 2 + 0 + j * ldc) -= cc1 * *(a + k * 2 + 0) - cc2 * *(a + k * 2 + 1); + *(c + k * 2 + 1 + j * ldc) -= cc1 * *(a + k * 2 + 1) + cc2 * *(a + k * 2 + 0); +#else + *(c + k * 2 + 0 + j * ldc) -= cc1 * *(a + k * 2 + 0) + cc2 * *(a + k * 2 + 1); + *(c + k * 2 + 1 + j * ldc) -= - cc1 * *(a + k * 2 + 1) + cc2 * *(a + k * 2 + 0); +#endif + } + + } + a -= m * 2; + b -= 4 * n; + } + +} + +#endif + + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, +#ifdef COMPLEX + 
FLOAT dummy2, +#endif + FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){ + + BLASLONG i, j; + FLOAT *aa, *cc; + BLASLONG kk; + +#if 0 + fprintf(stderr, "TRSM KERNEL LN : m = %3ld n = %3ld k = %3ld offset = %3ld\n", + m, n, k, offset); +#endif + + j = (n >> GEMM_UNROLL_N_SHIFT); + + while (j > 0) { + + kk = m + offset; + + if (m & (GEMM_UNROLL_M - 1)) { + for (i = 1; i < GEMM_UNROLL_M; i *= 2){ + if (m & i) { + aa = a + ((m & ~(i - 1)) - i) * k * COMPSIZE; + cc = c + ((m & ~(i - 1)) - i) * COMPSIZE; + + if (k - kk > 0) { + GEMM_KERNEL(i, GEMM_UNROLL_N, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + i * kk * COMPSIZE, + b + GEMM_UNROLL_N * kk * COMPSIZE, + cc, + ldc); + } + + solve(i, GEMM_UNROLL_N, + aa + (kk - i) * i * COMPSIZE, + b + (kk - i) * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + kk -= i; + } + } + } + + i = (m >> GEMM_UNROLL_M_SHIFT); + if (i > 0) { + aa = a + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * k * COMPSIZE; + cc = c + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * COMPSIZE; + + do { + +#ifdef CONJ + if (k - kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, GEMM_UNROLL_N, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + GEMM_UNROLL_M * kk * COMPSIZE, + b + GEMM_UNROLL_N * kk * COMPSIZE, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, GEMM_UNROLL_N, + aa + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); +#else + + ctrsm_LN_solve_opt(k-kk, aa + GEMM_UNROLL_M * kk * COMPSIZE, b + GEMM_UNROLL_N * kk * COMPSIZE, cc, ldc, + aa + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_M * COMPSIZE, b + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_N * COMPSIZE); + + solve(GEMM_UNROLL_M, GEMM_UNROLL_N, + aa + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); +#endif + + aa -= GEMM_UNROLL_M * k * COMPSIZE; + cc -= GEMM_UNROLL_M * COMPSIZE; + kk -= GEMM_UNROLL_M; + i --; + } while (i > 0); + } + + b += GEMM_UNROLL_N * k * COMPSIZE; + c += GEMM_UNROLL_N * ldc * COMPSIZE; + j --; + } + + if (n & (GEMM_UNROLL_N - 1)) { + + j = (GEMM_UNROLL_N >> 1); + while (j > 0) { + if (n & j) { + + kk = m + offset; + + if (m & (GEMM_UNROLL_M - 1)) { + for (i = 1; i < GEMM_UNROLL_M; i *= 2){ + if (m & i) { + aa = a + ((m & ~(i - 1)) - i) * k * COMPSIZE; + cc = c + ((m & ~(i - 1)) - i) * COMPSIZE; + + if (k - kk > 0) { + GEMM_KERNEL(i, j, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + i * kk * COMPSIZE, + b + j * kk * COMPSIZE, + cc, ldc); + } + + solve(i, j, + aa + (kk - i) * i * COMPSIZE, + b + (kk - i) * j * COMPSIZE, + cc, ldc); + + kk -= i; + } + } + } + + i = (m >> GEMM_UNROLL_M_SHIFT); + if (i > 0) { + aa = a + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * k * COMPSIZE; + cc = c + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * COMPSIZE; + + do { + if (k - kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, j, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + GEMM_UNROLL_M * kk * COMPSIZE, + b + j * kk * COMPSIZE, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, j, + aa + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - GEMM_UNROLL_M) * j * COMPSIZE, + cc, ldc); + + aa -= GEMM_UNROLL_M * k * COMPSIZE; + cc -= GEMM_UNROLL_M * COMPSIZE; + kk -= GEMM_UNROLL_M; + i --; + } while (i > 0); + } + + b += j * k * COMPSIZE; + c += j * ldc * COMPSIZE; + } + j >>= 1; + } + } + + return 0; +} diff --git a/kernel/x86_64/ctrsm_kernel_LT_bulldozer.c b/kernel/x86_64/ctrsm_kernel_LT_bulldozer.c new file mode 100644 index 000000000..f56c24684 --- /dev/null +++ b/kernel/x86_64/ctrsm_kernel_LT_bulldozer.c 
@@ -0,0 +1,455 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. 
*/ +/*********************************************************************/ + +#include "common.h" + +static FLOAT dm1 = -1.; + +#ifdef CONJ +#define GEMM_KERNEL GEMM_KERNEL_L +#else +#define GEMM_KERNEL GEMM_KERNEL_N +#endif + +#if GEMM_DEFAULT_UNROLL_M == 1 +#define GEMM_UNROLL_M_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 2 +#define GEMM_UNROLL_M_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 4 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 6 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 8 +#define GEMM_UNROLL_M_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 16 +#define GEMM_UNROLL_M_SHIFT 4 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 1 +#define GEMM_UNROLL_N_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 2 +#define GEMM_UNROLL_N_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 4 +#define GEMM_UNROLL_N_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 8 +#define GEMM_UNROLL_N_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 16 +#define GEMM_UNROLL_N_SHIFT 4 +#endif + + + +#ifndef CONJ + +static void ctrsm_LT_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) __attribute__ ((noinline)); + +static void ctrsm_LT_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) +{ + + FLOAT *c1 = c + ldc*2 ; + BLASLONG n1 = n * 4; + BLASLONG i=0; + + __asm__ __volatile__ + ( + " vzeroupper \n\t" + " prefetcht0 (%4) \n\t" + " prefetcht0 (%5) \n\t" + " vxorps %%xmm8 , %%xmm8 , %%xmm8 \n\t" + " vxorps %%xmm9 , %%xmm9 , %%xmm9 \n\t" + " vxorps %%xmm10, %%xmm10, %%xmm10 \n\t" + " vxorps %%xmm11, %%xmm11, %%xmm11 \n\t" + " vxorps %%xmm12, %%xmm12, %%xmm12 \n\t" + " vxorps %%xmm13, %%xmm13, %%xmm13 \n\t" + " vxorps %%xmm14, %%xmm14, %%xmm14 \n\t" + " vxorps %%xmm15, %%xmm15, %%xmm15 \n\t" + + " cmpq $0, %0 \n\t" + " je 3f \n\t" + + " .align 16 \n\t" + "1: \n\t" + + " vbroadcastss (%3,%1,4), %%xmm0 \n\t" // b0 real, b0 real + " vbroadcastss 4(%3,%1,4), %%xmm1 \n\t" // b0 imag, b0 imag + " vbroadcastss 8(%3,%1,4), %%xmm2 \n\t" // b1 real, b1 real + " vbroadcastss 12(%3,%1,4), %%xmm3 \n\t" // b1 imag, b1 imag + + " vmovups (%2,%1,8), %%xmm4 \n\t" // a0 real , a0 imag + " vmovups 16(%2,%1,8), %%xmm5 \n\t" // a1 real , a1 imag + + " vfnmaddps %%xmm8 , %%xmm0 , %%xmm4 , %%xmm8 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddps %%xmm9 , %%xmm1 , %%xmm4 , %%xmm9 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddps %%xmm10, %%xmm0 , %%xmm5 , %%xmm10 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddps %%xmm11, %%xmm1 , %%xmm5 , %%xmm11 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddps %%xmm12, %%xmm2 , %%xmm4 , %%xmm12 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddps %%xmm13, %%xmm3 , %%xmm4 , %%xmm13 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddps %%xmm14, %%xmm2 , %%xmm5 , %%xmm14 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddps %%xmm15, %%xmm3 , %%xmm5 , %%xmm15 \n\t" // a_real * b_imag , a_imag * b_imag + + " addq $4, %1 \n\t" + " cmpq %1, %0 \n\t" + + " jnz 1b \n\t" + + + "2: \n\t" + + " vshufps $0xb1 , %%xmm9 , %%xmm9, %%xmm9 \n\t" + " vshufps $0xb1 , %%xmm11 , %%xmm11 , %%xmm11 \n\t" + " vshufps $0xb1 , %%xmm13 , %%xmm13 , %%xmm13 \n\t" + " vshufps $0xb1 , %%xmm15 , %%xmm15 , %%xmm15 \n\t" + + " vaddsubps %%xmm8 , %%xmm9 , %%xmm8 \n\t" + " vaddsubps %%xmm10, %%xmm11, %%xmm10 \n\t" + " vaddsubps %%xmm12, %%xmm13, %%xmm12 \n\t" + " vaddsubps %%xmm14, %%xmm15, %%xmm14 \n\t" + + " vxorps %%xmm7 , %%xmm7 , %%xmm7 \n\t" + + " vaddsubps %%xmm8 , 
%%xmm7 , %%xmm8 \n\t" + " vaddsubps %%xmm10, %%xmm7 , %%xmm10 \n\t" + " vaddsubps %%xmm12, %%xmm7 , %%xmm12 \n\t" + " vaddsubps %%xmm14, %%xmm7 , %%xmm14 \n\t" + + " vmovups (%4) , %%xmm0 \n\t" + " vmovups 16(%4) , %%xmm1 \n\t" + + " vmovups (%5) , %%xmm4 \n\t" + " vmovups 16(%5) , %%xmm5 \n\t" + + " vaddps %%xmm0 , %%xmm8 , %%xmm8 \n\t" + " vaddps %%xmm1 , %%xmm10, %%xmm10 \n\t" + " vaddps %%xmm4 , %%xmm12, %%xmm12 \n\t" + " vaddps %%xmm5 , %%xmm14, %%xmm14 \n\t" + + " vmovups %%xmm8 , (%4) \n\t" + " vmovups %%xmm10 ,16(%4) \n\t" + + " vmovups %%xmm12 , (%5) \n\t" + " vmovups %%xmm14 ,16(%5) \n\t" + + "3: \n\t" + + " vzeroupper \n\t" + + : + : + "r" (n1), // 0 + "a" (i), // 1 + "r" (a), // 2 + "r" (b), // 3 + "r" (c), // 4 + "r" (c1), // 5 + "r" (as), // 6 + "r" (bs) // 7 + : "cc", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + +#endif + + + +#ifndef COMPLEX + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa, bb; + + int i, j, k; + + for (i = 0; i < m; i++) { + + aa = *(a + i); + + for (j = 0; j < n; j ++) { + bb = *(c + i + j * ldc); + bb *= aa; + *b = bb; + *(c + i + j * ldc) = bb; + b ++; + + for (k = i + 1; k < m; k ++){ + *(c + k + j * ldc) -= bb * *(a + k); + } + + } + a += m; + } +} + +#else + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa1, aa2; + FLOAT bb1, bb2; + FLOAT cc1, cc2; + + int i, j, k; + + ldc *= 2; + + for (i = 0; i < m; i++) { + + aa1 = *(a + i * 2 + 0); + aa2 = *(a + i * 2 + 1); + + for (j = 0; j < n; j ++) { + bb1 = *(c + i * 2 + 0 + j * ldc); + bb2 = *(c + i * 2 + 1 + j * ldc); + +#ifndef CONJ + cc1 = aa1 * bb1 - aa2 * bb2; + cc2 = aa1 * bb2 + aa2 * bb1; +#else + cc1 = aa1 * bb1 + aa2 * bb2; + cc2 = aa1 * bb2 - aa2 * bb1; +#endif + + *(b + 0) = cc1; + *(b + 1) = cc2; + *(c + i * 2 + 0 + j * ldc) = cc1; + *(c + i * 2 + 1 + j * ldc) = cc2; + b += 2; + + for (k = i + 1; k < m; k ++){ +#ifndef CONJ + *(c + k * 2 + 0 + j * ldc) -= cc1 * *(a + k * 2 + 0) - cc2 * *(a + k * 2 + 1); + *(c + k * 2 + 1 + j * ldc) -= cc1 * *(a + k * 2 + 1) + cc2 * *(a + k * 2 + 0); +#else + *(c + k * 2 + 0 + j * ldc) -= cc1 * *(a + k * 2 + 0) + cc2 * *(a + k * 2 + 1); + *(c + k * 2 + 1 + j * ldc) -= -cc1 * *(a + k * 2 + 1) + cc2 * *(a + k * 2 + 0); +#endif + } + + } + a += m * 2; + } +} + +#endif + + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, +#ifdef COMPLEX + FLOAT dummy2, +#endif + FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){ + + FLOAT *aa, *cc; + BLASLONG kk; + BLASLONG i, j, jj; + +#if 0 + fprintf(stderr, "TRSM KERNEL LT : m = %3ld n = %3ld k = %3ld offset = %3ld\n", + m, n, k, offset); +#endif + + jj = 0; + + j = (n >> GEMM_UNROLL_N_SHIFT); + + while (j > 0) { + + kk = offset; + aa = a; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + + while (i > 0) { + +#ifdef CONJ + + if (kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, GEMM_UNROLL_N, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, b, cc, ldc); + } + + solve(GEMM_UNROLL_M, GEMM_UNROLL_N, + aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); +#else + + ctrsm_LT_solve_opt(kk, aa, b, cc, ldc, aa + kk * GEMM_UNROLL_M * COMPSIZE, b + kk * GEMM_UNROLL_N * COMPSIZE); + + solve(GEMM_UNROLL_M, GEMM_UNROLL_N, + aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); +#endif + + aa += GEMM_UNROLL_M * k * 
COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + kk += GEMM_UNROLL_M; + i --; + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + while (i > 0) { + if (m & i) { + if (kk > 0) { + GEMM_KERNEL(i, GEMM_UNROLL_N, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, b, cc, ldc); + } + solve(i, GEMM_UNROLL_N, + aa + kk * i * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + kk += i; + } + i >>= 1; + } + } + + b += GEMM_UNROLL_N * k * COMPSIZE; + c += GEMM_UNROLL_N * ldc * COMPSIZE; + j --; + jj += GEMM_UNROLL_M; + } + + if (n & (GEMM_UNROLL_N - 1)) { + + j = (GEMM_UNROLL_N >> 1); + while (j > 0) { + if (n & j) { + + kk = offset; + aa = a; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + + while (i > 0) { + if (kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, j, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, + b, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, j, + aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * j * COMPSIZE, cc, ldc); + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + kk += GEMM_UNROLL_M; + i --; + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + while (i > 0) { + if (m & i) { + if (kk > 0) { + GEMM_KERNEL(i, j, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, + b, + cc, + ldc); + } + + solve(i, j, + aa + kk * i * COMPSIZE, + b + kk * j * COMPSIZE, cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + kk += i; + } + i >>= 1; + } + } + + b += j * k * COMPSIZE; + c += j * ldc * COMPSIZE; + } + j >>= 1; + } + } + + return 0; +} diff --git a/kernel/x86_64/ctrsm_kernel_RN_bulldozer.c b/kernel/x86_64/ctrsm_kernel_RN_bulldozer.c new file mode 100644 index 000000000..700867b24 --- /dev/null +++ b/kernel/x86_64/ctrsm_kernel_RN_bulldozer.c @@ -0,0 +1,454 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. 
*/ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#include "common.h" + +static FLOAT dm1 = -1.; + +#ifdef CONJ +#define GEMM_KERNEL GEMM_KERNEL_R +#else +#define GEMM_KERNEL GEMM_KERNEL_N +#endif + +#if GEMM_DEFAULT_UNROLL_M == 1 +#define GEMM_UNROLL_M_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 2 +#define GEMM_UNROLL_M_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 4 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 6 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 8 +#define GEMM_UNROLL_M_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 16 +#define GEMM_UNROLL_M_SHIFT 4 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 1 +#define GEMM_UNROLL_N_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 2 +#define GEMM_UNROLL_N_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 4 +#define GEMM_UNROLL_N_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 8 +#define GEMM_UNROLL_N_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 16 +#define GEMM_UNROLL_N_SHIFT 4 +#endif + + + +#ifndef CONJ + +static void ctrsm_RN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) __attribute__ ((noinline)); + +static void ctrsm_RN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) +{ + + FLOAT *c1 = c + ldc*2 ; + BLASLONG n1 = n * 4; + BLASLONG i=0; + + __asm__ __volatile__ + ( + " vzeroupper \n\t" + " prefetcht0 (%4) \n\t" + " prefetcht0 (%5) \n\t" + " vxorps %%xmm8 , %%xmm8 , %%xmm8 \n\t" + " vxorps %%xmm9 , %%xmm9 , %%xmm9 \n\t" + " vxorps %%xmm10, %%xmm10, %%xmm10 \n\t" + " vxorps %%xmm11, %%xmm11, %%xmm11 \n\t" + " vxorps %%xmm12, %%xmm12, %%xmm12 \n\t" + " vxorps %%xmm13, %%xmm13, %%xmm13 \n\t" + " vxorps %%xmm14, %%xmm14, %%xmm14 \n\t" + " vxorps %%xmm15, %%xmm15, %%xmm15 \n\t" + + " cmpq $0, %0 \n\t" + " je 3f \n\t" + + " .align 16 \n\t" + "1: \n\t" + + " vbroadcastss (%3,%1,4), %%xmm0 \n\t" // b0 real, b0 real + " vbroadcastss 4(%3,%1,4), %%xmm1 \n\t" // b0 imag, b0 imag + " vbroadcastss 8(%3,%1,4), %%xmm2 \n\t" // b1 real, b1 real + " vbroadcastss 12(%3,%1,4), %%xmm3 \n\t" // b1 imag, b1 imag + + " vmovups (%2,%1,8), %%xmm4 \n\t" // a0 real , a0 imag + " vmovups 16(%2,%1,8), %%xmm5 \n\t" // a1 real , a1 imag + + " vfnmaddps %%xmm8 , %%xmm0 , %%xmm4 , %%xmm8 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddps %%xmm9 , %%xmm1 , %%xmm4 , %%xmm9 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddps %%xmm10, %%xmm0 , %%xmm5 , %%xmm10 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddps %%xmm11, %%xmm1 , %%xmm5 , %%xmm11 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddps %%xmm12, %%xmm2 , %%xmm4 , %%xmm12 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddps %%xmm13, %%xmm3 , %%xmm4 , %%xmm13 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddps %%xmm14, %%xmm2 , %%xmm5 , %%xmm14 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddps %%xmm15, %%xmm3 , %%xmm5 , %%xmm15 \n\t" // a_real * b_imag , a_imag * b_imag + + " addq $4, %1 \n\t" + " cmpq %1, %0 \n\t" + + " jnz 1b \n\t" + + + "2: \n\t" + + " vshufps $0xb1 , %%xmm9 , %%xmm9, %%xmm9 \n\t" + " vshufps $0xb1 , %%xmm11 , %%xmm11 , %%xmm11 \n\t" + " vshufps $0xb1 , %%xmm13 , %%xmm13 , %%xmm13 \n\t" + " vshufps $0xb1 , %%xmm15 , %%xmm15 , %%xmm15 
\n\t" + + " vaddsubps %%xmm8 , %%xmm9 , %%xmm8 \n\t" + " vaddsubps %%xmm10, %%xmm11, %%xmm10 \n\t" + " vaddsubps %%xmm12, %%xmm13, %%xmm12 \n\t" + " vaddsubps %%xmm14, %%xmm15, %%xmm14 \n\t" + + " vxorps %%xmm7 , %%xmm7 , %%xmm7 \n\t" + + " vaddsubps %%xmm8 , %%xmm7 , %%xmm8 \n\t" + " vaddsubps %%xmm10, %%xmm7 , %%xmm10 \n\t" + " vaddsubps %%xmm12, %%xmm7 , %%xmm12 \n\t" + " vaddsubps %%xmm14, %%xmm7 , %%xmm14 \n\t" + + " vmovups (%4) , %%xmm0 \n\t" + " vmovups 16(%4) , %%xmm1 \n\t" + + " vmovups (%5) , %%xmm4 \n\t" + " vmovups 16(%5) , %%xmm5 \n\t" + + " vaddps %%xmm0 , %%xmm8 , %%xmm8 \n\t" + " vaddps %%xmm1 , %%xmm10, %%xmm10 \n\t" + " vaddps %%xmm4 , %%xmm12, %%xmm12 \n\t" + " vaddps %%xmm5 , %%xmm14, %%xmm14 \n\t" + + " vmovups %%xmm8 , (%4) \n\t" + " vmovups %%xmm10 ,16(%4) \n\t" + + " vmovups %%xmm12 , (%5) \n\t" + " vmovups %%xmm14 ,16(%5) \n\t" + + "3: \n\t" + + " vzeroupper \n\t" + + : + : + "r" (n1), // 0 + "a" (i), // 1 + "r" (a), // 2 + "r" (b), // 3 + "r" (c), // 4 + "r" (c1), // 5 + "r" (as), // 6 + "r" (bs) // 7 + : "cc", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + +#endif + + + +#ifndef COMPLEX + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa, bb; + + int i, j, k; + + for (i = 0; i < n; i++) { + + bb = *(b + i); + + for (j = 0; j < m; j ++) { + aa = *(c + j + i * ldc); + aa *= bb; + *a = aa; + *(c + j + i * ldc) = aa; + a ++; + + for (k = i + 1; k < n; k ++){ + *(c + j + k * ldc) -= aa * *(b + k); + } + + } + b += n; + } +} + +#else + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa1, aa2; + FLOAT bb1, bb2; + FLOAT cc1, cc2; + + int i, j, k; + + ldc *= 2; + + for (i = 0; i < n; i++) { + + bb1 = *(b + i * 2 + 0); + bb2 = *(b + i * 2 + 1); + + for (j = 0; j < m; j ++) { + aa1 = *(c + j * 2 + 0 + i * ldc); + aa2 = *(c + j * 2 + 1 + i * ldc); + +#ifndef CONJ + cc1 = aa1 * bb1 - aa2 * bb2; + cc2 = aa1 * bb2 + aa2 * bb1; +#else + cc1 = aa1 * bb1 + aa2 * bb2; + cc2 = -aa1 * bb2 + aa2 * bb1; +#endif + + *(a + 0) = cc1; + *(a + 1) = cc2; + *(c + j * 2 + 0 + i * ldc) = cc1; + *(c + j * 2 + 1 + i * ldc) = cc2; + a += 2; + + for (k = i + 1; k < n; k ++){ +#ifndef CONJ + *(c + j * 2 + 0 + k * ldc) -= cc1 * *(b + k * 2 + 0) - cc2 * *(b + k * 2 + 1); + *(c + j * 2 + 1 + k * ldc) -= cc1 * *(b + k * 2 + 1) + cc2 * *(b + k * 2 + 0); +#else + *(c + j * 2 + 0 + k * ldc) -= cc1 * *(b + k * 2 + 0) + cc2 * *(b + k * 2 + 1); + *(c + j * 2 + 1 + k * ldc) -= - cc1 * *(b + k * 2 + 1) + cc2 * *(b + k * 2 + 0); +#endif + } + + } + b += n * 2; + } +} + +#endif + + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, +#ifdef COMPLEX + FLOAT dummy2, +#endif + FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){ + + FLOAT *aa, *cc; + BLASLONG kk; + BLASLONG i, j, jj; + +#if 0 + fprintf(stderr, "TRSM RN KERNEL m = %3ld n = %3ld k = %3ld offset = %3ld\n", + m, n, k, offset); +#endif + + jj = 0; + j = (n >> GEMM_UNROLL_N_SHIFT); + kk = -offset; + + while (j > 0) { + + aa = a; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + + if (i > 0) { + do { + +#ifndef CONJ + + ctrsm_RN_solve_opt(kk, aa, b, cc, ldc, aa + kk * GEMM_UNROLL_M * COMPSIZE, b + kk * GEMM_UNROLL_N * COMPSIZE); + + solve(GEMM_UNROLL_M, GEMM_UNROLL_N, + aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); +#else + + if (kk > 0) { + 
GEMM_KERNEL(GEMM_UNROLL_M, GEMM_UNROLL_N, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, b, cc, ldc); + } + + solve(GEMM_UNROLL_M, GEMM_UNROLL_N, + aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); +#endif + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + i --; + } while (i > 0); + } + + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + while (i > 0) { + if (m & i) { + if (kk > 0) { + GEMM_KERNEL(i, GEMM_UNROLL_N, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, b, cc, ldc); + } + solve(i, GEMM_UNROLL_N, + aa + kk * i * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + } + i >>= 1; + } + } + + kk += GEMM_UNROLL_N; + b += GEMM_UNROLL_N * k * COMPSIZE; + c += GEMM_UNROLL_N * ldc * COMPSIZE; + j --; + jj += GEMM_UNROLL_M; + } + + if (n & (GEMM_UNROLL_N - 1)) { + + j = (GEMM_UNROLL_N >> 1); + while (j > 0) { + if (n & j) { + + aa = a; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + + while (i > 0) { + if (kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, j, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, + b, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, j, + aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * j * COMPSIZE, cc, ldc); + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + i --; + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + while (i > 0) { + if (m & i) { + if (kk > 0) { + GEMM_KERNEL(i, j, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, + b, + cc, + ldc); + } + + solve(i, j, + aa + kk * i * COMPSIZE, + b + kk * j * COMPSIZE, cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + } + i >>= 1; + } + } + + b += j * k * COMPSIZE; + c += j * ldc * COMPSIZE; + kk += j; + } + j >>= 1; + } + } + + return 0; +} diff --git a/kernel/x86_64/ctrsm_kernel_RT_bulldozer.c b/kernel/x86_64/ctrsm_kernel_RT_bulldozer.c new file mode 100644 index 000000000..b00b9701f --- /dev/null +++ b/kernel/x86_64/ctrsm_kernel_RT_bulldozer.c @@ -0,0 +1,482 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. 
IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#include "common.h" + +static FLOAT dm1 = -1.; + +#ifdef CONJ +#define GEMM_KERNEL GEMM_KERNEL_R +#else +#define GEMM_KERNEL GEMM_KERNEL_N +#endif + +#if GEMM_DEFAULT_UNROLL_M == 1 +#define GEMM_UNROLL_M_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 2 +#define GEMM_UNROLL_M_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 4 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 6 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + + +#if GEMM_DEFAULT_UNROLL_M == 8 +#define GEMM_UNROLL_M_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 16 +#define GEMM_UNROLL_M_SHIFT 4 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 1 +#define GEMM_UNROLL_N_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 2 +#define GEMM_UNROLL_N_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 4 +#define GEMM_UNROLL_N_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 8 +#define GEMM_UNROLL_N_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 16 +#define GEMM_UNROLL_N_SHIFT 4 +#endif + + + +#ifndef CONJ + +static void ctrsm_RT_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) __attribute__ ((noinline)); + +static void ctrsm_RT_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) +{ + + FLOAT *c1 = c + ldc*2 ; + BLASLONG n1 = n * 4; + BLASLONG i=0; + + __asm__ __volatile__ + ( + " vzeroupper \n\t" + " prefetcht0 (%4) \n\t" + " prefetcht0 (%5) \n\t" + " vxorps %%xmm8 , %%xmm8 , %%xmm8 \n\t" + " vxorps %%xmm9 , %%xmm9 , %%xmm9 \n\t" + " vxorps %%xmm10, %%xmm10, %%xmm10 \n\t" + " vxorps %%xmm11, %%xmm11, %%xmm11 \n\t" + " vxorps %%xmm12, %%xmm12, %%xmm12 \n\t" + " vxorps %%xmm13, %%xmm13, %%xmm13 \n\t" + " vxorps %%xmm14, %%xmm14, %%xmm14 \n\t" + " vxorps %%xmm15, %%xmm15, %%xmm15 \n\t" + + " cmpq $0, %0 \n\t" + " je 3f \n\t" + + " .align 16 \n\t" + "1: \n\t" + + " vbroadcastss (%3,%1,4), %%xmm0 \n\t" // b0 real, b0 real + " vbroadcastss 4(%3,%1,4), %%xmm1 \n\t" // b0 imag, b0 imag + " vbroadcastss 8(%3,%1,4), %%xmm2 \n\t" // b1 real, b1 real + " vbroadcastss 12(%3,%1,4), %%xmm3 \n\t" // b1 imag, b1 imag + + " vmovups (%2,%1,8), %%xmm4 \n\t" // a0 real , a0 imag + " vmovups 16(%2,%1,8), %%xmm5 \n\t" // a1 real , a1 imag + + " vfnmaddps %%xmm8 , %%xmm0 , %%xmm4 , %%xmm8 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddps %%xmm9 , %%xmm1 , %%xmm4 , %%xmm9 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddps %%xmm10, %%xmm0 , %%xmm5 , %%xmm10 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddps %%xmm11, %%xmm1 , %%xmm5 , %%xmm11 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddps %%xmm12, %%xmm2 , %%xmm4 , %%xmm12 \n\t" // a_real * 
b_real , a_imag * b_real + " vfnmaddps %%xmm13, %%xmm3 , %%xmm4 , %%xmm13 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddps %%xmm14, %%xmm2 , %%xmm5 , %%xmm14 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddps %%xmm15, %%xmm3 , %%xmm5 , %%xmm15 \n\t" // a_real * b_imag , a_imag * b_imag + + " addq $4, %1 \n\t" + " cmpq %1, %0 \n\t" + + " jnz 1b \n\t" + + + "2: \n\t" + + " vshufps $0xb1 , %%xmm9 , %%xmm9, %%xmm9 \n\t" + " vshufps $0xb1 , %%xmm11 , %%xmm11 , %%xmm11 \n\t" + " vshufps $0xb1 , %%xmm13 , %%xmm13 , %%xmm13 \n\t" + " vshufps $0xb1 , %%xmm15 , %%xmm15 , %%xmm15 \n\t" + + " vaddsubps %%xmm8 , %%xmm9 , %%xmm8 \n\t" + " vaddsubps %%xmm10, %%xmm11, %%xmm10 \n\t" + " vaddsubps %%xmm12, %%xmm13, %%xmm12 \n\t" + " vaddsubps %%xmm14, %%xmm15, %%xmm14 \n\t" + + " vxorps %%xmm7 , %%xmm7 , %%xmm7 \n\t" + + " vaddsubps %%xmm8 , %%xmm7 , %%xmm8 \n\t" + " vaddsubps %%xmm10, %%xmm7 , %%xmm10 \n\t" + " vaddsubps %%xmm12, %%xmm7 , %%xmm12 \n\t" + " vaddsubps %%xmm14, %%xmm7 , %%xmm14 \n\t" + + " vmovups (%4) , %%xmm0 \n\t" + " vmovups 16(%4) , %%xmm1 \n\t" + + " vmovups (%5) , %%xmm4 \n\t" + " vmovups 16(%5) , %%xmm5 \n\t" + + " vaddps %%xmm0 , %%xmm8 , %%xmm8 \n\t" + " vaddps %%xmm1 , %%xmm10, %%xmm10 \n\t" + " vaddps %%xmm4 , %%xmm12, %%xmm12 \n\t" + " vaddps %%xmm5 , %%xmm14, %%xmm14 \n\t" + + " vmovups %%xmm8 , (%4) \n\t" + " vmovups %%xmm10 ,16(%4) \n\t" + + " vmovups %%xmm12 , (%5) \n\t" + " vmovups %%xmm14 ,16(%5) \n\t" + + "3: \n\t" + + " vzeroupper \n\t" + + : + : + "r" (n1), // 0 + "a" (i), // 1 + "r" (a), // 2 + "r" (b), // 3 + "r" (c), // 4 + "r" (c1), // 5 + "r" (as), // 6 + "r" (bs) // 7 + : "cc", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + +#endif + + + +#ifndef COMPLEX + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa, bb; + + int i, j, k; + + a += (n - 1) * m; + b += (n - 1) * n; + + for (i = n - 1; i >= 0; i--) { + + bb = *(b + i); + + for (j = 0; j < m; j ++) { + aa = *(c + j + i * ldc); + aa *= bb; + *a = aa; + *(c + j + i * ldc) = aa; + a ++; + + for (k = 0; k < i; k ++){ + *(c + j + k * ldc) -= aa * *(b + k); + } + + } + b -= n; + a -= 2 * m; + } + +} + +#else + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa1, aa2; + FLOAT bb1, bb2; + FLOAT cc1, cc2; + + int i, j, k; + + ldc *= 2; + + a += (n - 1) * m * 2; + b += (n - 1) * n * 2; + + for (i = n - 1; i >= 0; i--) { + + bb1 = *(b + i * 2 + 0); + bb2 = *(b + i * 2 + 1); + + for (j = 0; j < m; j ++) { + + aa1 = *(c + j * 2 + 0 + i * ldc); + aa2 = *(c + j * 2 + 1 + i * ldc); + +#ifndef CONJ + cc1 = aa1 * bb1 - aa2 * bb2; + cc2 = aa1 * bb2 + aa2 * bb1; +#else + cc1 = aa1 * bb1 + aa2 * bb2; + cc2 = - aa1 * bb2 + aa2 * bb1; +#endif + + *(a + 0) = cc1; + *(a + 1) = cc2; + + *(c + j * 2 + 0 + i * ldc) = cc1; + *(c + j * 2 + 1 + i * ldc) = cc2; + a += 2; + + for (k = 0; k < i; k ++){ +#ifndef CONJ + *(c + j * 2 + 0 + k * ldc) -= cc1 * *(b + k * 2 + 0) - cc2 * *(b + k * 2 + 1); + *(c + j * 2 + 1 + k * ldc) -= cc1 * *(b + k * 2 + 1) + cc2 * *(b + k * 2 + 0); +#else + *(c + j * 2 + 0 + k * ldc) -= cc1 * *(b + k * 2 + 0) + cc2 * *(b + k * 2 + 1); + *(c + j * 2 + 1 + k * ldc) -= -cc1 * *(b + k * 2 + 1) + cc2 * *(b + k * 2 + 0); +#endif + } + + } + b -= n * 2; + a -= 4 * m; + } + +} + +#endif + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, +#ifdef COMPLEX + 
FLOAT dummy2, +#endif + FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){ + + BLASLONG i, j; + FLOAT *aa, *cc; + BLASLONG kk; + +#if 0 + fprintf(stderr, "TRSM RT KERNEL m = %3ld n = %3ld k = %3ld offset = %3ld\n", + m, n, k, offset); +#endif + + kk = n - offset; + c += n * ldc * COMPSIZE; + b += n * k * COMPSIZE; + + if (n & (GEMM_UNROLL_N - 1)) { + + j = 1; + while (j < GEMM_UNROLL_N) { + if (n & j) { + + aa = a; + b -= j * k * COMPSIZE; + c -= j * ldc* COMPSIZE; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + if (i > 0) { + + do { + if (k - kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, j, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + GEMM_UNROLL_M * kk * COMPSIZE, + b + j * kk * COMPSIZE, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, j, + aa + (kk - j) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - j) * j * COMPSIZE, + cc, ldc); + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + i --; + } while (i > 0); + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + do { + if (m & i) { + + if (k - kk > 0) { + GEMM_KERNEL(i, j, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + i * kk * COMPSIZE, + b + j * kk * COMPSIZE, + cc, ldc); + } + + solve(i, j, + aa + (kk - j) * i * COMPSIZE, + b + (kk - j) * j * COMPSIZE, + cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + + } + i >>= 1; + } while (i > 0); + } + kk -= j; + } + j <<= 1; + } + } + + j = (n >> GEMM_UNROLL_N_SHIFT); + + if (j > 0) { + + do { + aa = a; + b -= GEMM_UNROLL_N * k * COMPSIZE; + c -= GEMM_UNROLL_N * ldc * COMPSIZE; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + if (i > 0) { + do { + +#ifndef CONJ + + ctrsm_RT_solve_opt(k-kk, aa + GEMM_UNROLL_M * kk * COMPSIZE, b + GEMM_UNROLL_N * kk * COMPSIZE, cc, ldc, + aa + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_M * COMPSIZE, b + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_N * COMPSIZE); + + solve(GEMM_UNROLL_M, GEMM_UNROLL_N, + aa + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + +#else + if (k - kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, GEMM_UNROLL_N, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + GEMM_UNROLL_M * kk * COMPSIZE, + b + GEMM_UNROLL_N * kk * COMPSIZE, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, GEMM_UNROLL_N, + aa + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + +#endif + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + i --; + } while (i > 0); + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + do { + if (m & i) { + if (k - kk > 0) { + GEMM_KERNEL(i, GEMM_UNROLL_N, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + i * kk * COMPSIZE, + b + GEMM_UNROLL_N * kk * COMPSIZE, + cc, + ldc); + } + + solve(i, GEMM_UNROLL_N, + aa + (kk - GEMM_UNROLL_N) * i * COMPSIZE, + b + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + } + i >>= 1; + } while (i > 0); + } + + kk -= GEMM_UNROLL_N; + j --; + } while (j > 0); + } + + return 0; +} + + diff --git a/kernel/x86_64/dtrsm_kernel_LN_bulldozer.c b/kernel/x86_64/dtrsm_kernel_LN_bulldozer.c new file mode 100644 index 000000000..efd8a4972 --- /dev/null +++ b/kernel/x86_64/dtrsm_kernel_LN_bulldozer.c @@ -0,0 +1,697 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. 
*/ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#include "common.h" + +static FLOAT dm1 = -1.; + +#ifdef CONJ +#define GEMM_KERNEL GEMM_KERNEL_L +#else +#define GEMM_KERNEL GEMM_KERNEL_N +#endif + +#if GEMM_DEFAULT_UNROLL_M == 1 +#define GEMM_UNROLL_M_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 2 +#define GEMM_UNROLL_M_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 4 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 6 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 8 +#define GEMM_UNROLL_M_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 16 +#define GEMM_UNROLL_M_SHIFT 4 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 1 +#define GEMM_UNROLL_N_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 2 +#define GEMM_UNROLL_N_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 4 +#define GEMM_UNROLL_N_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 8 +#define GEMM_UNROLL_N_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 16 +#define GEMM_UNROLL_N_SHIFT 4 +#endif + +static void dtrsm_LN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) __attribute__ ((noinline)); + +static void dtrsm_LN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) +{ + + FLOAT *c1 = c + ldc ; + BLASLONG n1 = n * 8; + BLASLONG i=0; + + bs += 14; + + __asm__ __volatile__ + ( + " vzeroupper \n\t" + " prefetcht0 (%4) \n\t" + " prefetcht0 (%5) \n\t" + " vxorpd %%xmm8 , %%xmm8 , %%xmm8 \n\t" + " vxorpd %%xmm9 , %%xmm9 , %%xmm9 \n\t" + " vxorpd %%xmm10, %%xmm10, %%xmm10 \n\t" + " vxorpd %%xmm11, %%xmm11, %%xmm11 \n\t" + " vxorpd %%xmm12, %%xmm12, %%xmm12 \n\t" + " vxorpd %%xmm13, %%xmm13, %%xmm13 \n\t" + " vxorpd %%xmm14, %%xmm14, %%xmm14 \n\t" + " vxorpd %%xmm15, %%xmm15, %%xmm15 \n\t" + + " cmpq $0, %0 \n\t" + " je 2f \n\t" + + 
" .align 16 \n\t" + "1: \n\t" + + " prefetcht0 384(%2,%1,8) \n\t" + " prefetcht0 384(%3,%1,8) \n\t" + " vmovddup (%3,%1,2), %%xmm0 \n\t" // read b + " vmovups (%2,%1,8), %%xmm4 \n\t" + " vmovddup 8(%3,%1,2), %%xmm1 \n\t" + " vmovups 16(%2,%1,8), %%xmm5 \n\t" + " vmovups 32(%2,%1,8), %%xmm6 \n\t" + " vmovups 48(%2,%1,8), %%xmm7 \n\t" + + " vfmaddpd %%xmm8 , %%xmm0 , %%xmm4 , %%xmm8 \n\t" + " vfmaddpd %%xmm12, %%xmm1 , %%xmm4 , %%xmm12 \n\t" + " vfmaddpd %%xmm9 , %%xmm0 , %%xmm5 , %%xmm9 \n\t" + " vfmaddpd %%xmm13, %%xmm1 , %%xmm5 , %%xmm13 \n\t" + " vfmaddpd %%xmm10, %%xmm0 , %%xmm6 , %%xmm10 \n\t" + " vfmaddpd %%xmm14, %%xmm1 , %%xmm6 , %%xmm14 \n\t" + " addq $8, %1 \n\t" + " vfmaddpd %%xmm11, %%xmm0 , %%xmm7 , %%xmm11 \n\t" + " vfmaddpd %%xmm15, %%xmm1 , %%xmm7 , %%xmm15 \n\t" + " cmpq %1, %0 \n\t" + + " jz 2f \n\t" + + " prefetcht0 384(%2,%1,8) \n\t" + " vmovddup (%3,%1,2), %%xmm0 \n\t" // read b + " vmovups (%2,%1,8), %%xmm4 \n\t" + " vmovddup 8(%3,%1,2), %%xmm1 \n\t" + " vmovups 16(%2,%1,8), %%xmm5 \n\t" + " vmovups 32(%2,%1,8), %%xmm6 \n\t" + " vmovups 48(%2,%1,8), %%xmm7 \n\t" + + " vfmaddpd %%xmm8 , %%xmm0 , %%xmm4 , %%xmm8 \n\t" + " vfmaddpd %%xmm12, %%xmm1 , %%xmm4 , %%xmm12 \n\t" + " vfmaddpd %%xmm9 , %%xmm0 , %%xmm5 , %%xmm9 \n\t" + " vfmaddpd %%xmm13, %%xmm1 , %%xmm5 , %%xmm13 \n\t" + " vfmaddpd %%xmm10, %%xmm0 , %%xmm6 , %%xmm10 \n\t" + " vfmaddpd %%xmm14, %%xmm1 , %%xmm6 , %%xmm14 \n\t" + " addq $8, %1 \n\t" + " vfmaddpd %%xmm11, %%xmm0 , %%xmm7 , %%xmm11 \n\t" + " vfmaddpd %%xmm15, %%xmm1 , %%xmm7 , %%xmm15 \n\t" + " cmpq %1, %0 \n\t" + + " jz 2f \n\t" + + " prefetcht0 384(%2,%1,8) \n\t" + " vmovddup (%3,%1,2), %%xmm0 \n\t" // read b + " vmovups (%2,%1,8), %%xmm4 \n\t" + " vmovddup 8(%3,%1,2), %%xmm1 \n\t" + " vmovups 16(%2,%1,8), %%xmm5 \n\t" + " vmovups 32(%2,%1,8), %%xmm6 \n\t" + " vmovups 48(%2,%1,8), %%xmm7 \n\t" + + " vfmaddpd %%xmm8 , %%xmm0 , %%xmm4 , %%xmm8 \n\t" + " vfmaddpd %%xmm12, %%xmm1 , %%xmm4 , %%xmm12 \n\t" + " vfmaddpd %%xmm9 , %%xmm0 , %%xmm5 , %%xmm9 \n\t" + " vfmaddpd %%xmm13, %%xmm1 , %%xmm5 , %%xmm13 \n\t" + " vfmaddpd %%xmm10, %%xmm0 , %%xmm6 , %%xmm10 \n\t" + " vfmaddpd %%xmm14, %%xmm1 , %%xmm6 , %%xmm14 \n\t" + " addq $8, %1 \n\t" + " vfmaddpd %%xmm11, %%xmm0 , %%xmm7 , %%xmm11 \n\t" + " vfmaddpd %%xmm15, %%xmm1 , %%xmm7 , %%xmm15 \n\t" + " cmpq %1, %0 \n\t" + + " jz 2f \n\t" + + " prefetcht0 384(%2,%1,8) \n\t" + " vmovddup (%3,%1,2), %%xmm0 \n\t" // read b + " vmovddup 8(%3,%1,2), %%xmm1 \n\t" + " vmovups (%2,%1,8), %%xmm4 \n\t" + " vmovups 16(%2,%1,8), %%xmm5 \n\t" + " vmovups 32(%2,%1,8), %%xmm6 \n\t" + " vmovups 48(%2,%1,8), %%xmm7 \n\t" + + " vfmaddpd %%xmm8 , %%xmm0 , %%xmm4 , %%xmm8 \n\t" + " vfmaddpd %%xmm12, %%xmm1 , %%xmm4 , %%xmm12 \n\t" + " vfmaddpd %%xmm9 , %%xmm0 , %%xmm5 , %%xmm9 \n\t" + " vfmaddpd %%xmm13, %%xmm1 , %%xmm5 , %%xmm13 \n\t" + " vfmaddpd %%xmm10, %%xmm0 , %%xmm6 , %%xmm10 \n\t" + " vfmaddpd %%xmm14, %%xmm1 , %%xmm6 , %%xmm14 \n\t" + " addq $8, %1 \n\t" + " vfmaddpd %%xmm11, %%xmm0 , %%xmm7 , %%xmm11 \n\t" + " vfmaddpd %%xmm15, %%xmm1 , %%xmm7 , %%xmm15 \n\t" + " cmpq %1, %0 \n\t" + + " jnz 1b \n\t" + + "2: \n\t" + + + " vmovups (%4) , %%xmm0 \n\t" + " vmovups 16(%4) , %%xmm1 \n\t" + " vmovups 32(%4) , %%xmm2 \n\t" + " vmovups 48(%4) , %%xmm3 \n\t" + + " vmovups (%5) , %%xmm4 \n\t" + " vmovups 16(%5) , %%xmm5 \n\t" + " vmovups 32(%5) , %%xmm6 \n\t" + " vmovups 48(%5) , %%xmm7 \n\t" + + " vsubpd %%xmm8 , %%xmm0 , %%xmm8 \n\t" + " vsubpd %%xmm9 , %%xmm1 , %%xmm9 \n\t" + " vsubpd %%xmm10, %%xmm2 , %%xmm10 \n\t" + " vsubpd 
%%xmm11, %%xmm3 , %%xmm11 \n\t" + + " vsubpd %%xmm12, %%xmm4 , %%xmm12 \n\t" + " vsubpd %%xmm13, %%xmm5 , %%xmm13 \n\t" + " vsubpd %%xmm14, %%xmm6 , %%xmm14 \n\t" + " vsubpd %%xmm15, %%xmm7 , %%xmm15 \n\t" + + "3: \n\t" + + " movq $56, %1 \n\t" // i = 7 + " xorq %0, %0 \n\t" // pointer for bs + + " vmovddup 56(%6, %1, 8) , %%xmm0 \n\t" // read aa[i] + " vunpckhpd %%xmm11 , %%xmm11, %%xmm1 \n\t" // extract bb0 + " vunpckhpd %%xmm15 , %%xmm15, %%xmm2 \n\t" // extract bb1 + " vmulpd %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulpd %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb0 * aa + " vmovsd %%xmm1 , 56(%4) \n\t" // c[i] = bb0 * aa + " vmovsd %%xmm2 , 56(%5) \n\t" // c[i] = bb1 * aa + " vmovsd %%xmm1 , (%7 , %0, 8) \n\t" // b[0] = bb0 * aa + " vmovsd %%xmm2 , 8(%7 , %0, 8) \n\t" // b[1] = bb1 * aa + + " vmovups 0(%6, %1, 8) , %%xmm4 \n\t" // read a[k] + " vmovups 16(%6, %1, 8) , %%xmm5 \n\t" // read a[k] + " vmovups 32(%6, %1, 8) , %%xmm6 \n\t" // read a[k] + " vmovsd 48(%6, %1, 8) , %%xmm7 \n\t" // read a[k] + " vfnmaddpd %%xmm8 , %%xmm1 , %%xmm4 , %%xmm8 \n\t" + " vfnmaddpd %%xmm12 , %%xmm2 , %%xmm4 , %%xmm12 \n\t" + " vfnmaddpd %%xmm9 , %%xmm1 , %%xmm5 , %%xmm9 \n\t" + " vfnmaddpd %%xmm13 , %%xmm2 , %%xmm5 , %%xmm13 \n\t" + " vfnmaddpd %%xmm10 , %%xmm1 , %%xmm6 , %%xmm10 \n\t" + " vfnmaddpd %%xmm14 , %%xmm2 , %%xmm6 , %%xmm14 \n\t" + " vfnmaddsd %%xmm11 , %%xmm1 , %%xmm7 , %%xmm11 \n\t" + " vfnmaddsd %%xmm15 , %%xmm2 , %%xmm7 , %%xmm15 \n\t" + + " subq $8, %1 \n\t" // i = 6 + " subq $2, %0 \n\t" // b-= n + + " vmovddup 48(%6, %1, 8) , %%xmm0 \n\t" // read aa[i] + " vunpcklpd %%xmm11 , %%xmm11, %%xmm1 \n\t" // extract bb0 + " vunpcklpd %%xmm15 , %%xmm15, %%xmm2 \n\t" // extract bb1 + " vmulpd %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulpd %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovsd %%xmm1 , 48(%4) \n\t" // c[i] = bb0 * aa + " vmovsd %%xmm2 , 48(%5) \n\t" // c[i] = bb1 * aa + " vmovsd %%xmm1 , (%7 , %0, 8) \n\t" // b[0] = bb0 * aa + " vmovsd %%xmm2 , 8(%7 , %0, 8) \n\t" // b[1] = bb1 * aa + + " vmovups 0(%6, %1, 8) , %%xmm4 \n\t" // read a[k] + " vmovups 16(%6, %1, 8) , %%xmm5 \n\t" // read a[k] + " vmovups 32(%6, %1, 8) , %%xmm6 \n\t" // read a[k] + " vfnmaddpd %%xmm8 , %%xmm1 , %%xmm4 , %%xmm8 \n\t" + " vfnmaddpd %%xmm12 , %%xmm2 , %%xmm4 , %%xmm12 \n\t" + " vfnmaddpd %%xmm9 , %%xmm1 , %%xmm5 , %%xmm9 \n\t" + " vfnmaddpd %%xmm13 , %%xmm2 , %%xmm5 , %%xmm13 \n\t" + " vfnmaddpd %%xmm10 , %%xmm1 , %%xmm6 , %%xmm10 \n\t" + " vfnmaddpd %%xmm14 , %%xmm2 , %%xmm6 , %%xmm14 \n\t" + + " subq $8, %1 \n\t" // i = 5 + " subq $2, %0 \n\t" // b-= n + + " vmovddup 40(%6, %1, 8) , %%xmm0 \n\t" // read aa[i] + " vunpckhpd %%xmm10 , %%xmm10, %%xmm1 \n\t" // extract bb0 + " vunpckhpd %%xmm14 , %%xmm14, %%xmm2 \n\t" // extract bb1 + " vmulpd %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulpd %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb0 * aa + " vmovsd %%xmm1 , 40(%4) \n\t" // c[i] = bb0 * aa + " vmovsd %%xmm2 , 40(%5) \n\t" // c[i] = bb1 * aa + " vmovsd %%xmm1 , (%7 , %0, 8) \n\t" // b[0] = bb0 * aa + " vmovsd %%xmm2 , 8(%7 , %0, 8) \n\t" // b[1] = bb1 * aa + + " vmovups 0(%6, %1, 8) , %%xmm4 \n\t" // read a[k] + " vmovups 16(%6, %1, 8) , %%xmm5 \n\t" // read a[k] + " vmovsd 32(%6, %1, 8) , %%xmm6 \n\t" // read a[k] + " vfnmaddpd %%xmm8 , %%xmm1 , %%xmm4 , %%xmm8 \n\t" + " vfnmaddpd %%xmm12 , %%xmm2 , %%xmm4 , %%xmm12 \n\t" + " vfnmaddpd %%xmm9 , %%xmm1 , %%xmm5 , %%xmm9 \n\t" + " vfnmaddpd %%xmm13 , %%xmm2 , %%xmm5 , %%xmm13 \n\t" + " vfnmaddsd %%xmm10 , %%xmm1 , %%xmm6 , %%xmm10 \n\t" + " 
vfnmaddsd %%xmm14 , %%xmm2 , %%xmm6 , %%xmm14 \n\t" + + " subq $8, %1 \n\t" // i = 4 + " subq $2, %0 \n\t" // b-= n + + " vmovddup 32(%6, %1, 8) , %%xmm0 \n\t" // read aa[i] + " vunpcklpd %%xmm10 , %%xmm10, %%xmm1 \n\t" // extract bb0 + " vunpcklpd %%xmm14 , %%xmm14, %%xmm2 \n\t" // extract bb1 + " vmulpd %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulpd %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb0 * aa + " vmovsd %%xmm1 , 32(%4) \n\t" // c[i] = bb0 * aa + " vmovsd %%xmm2 , 32(%5) \n\t" // c[i] = bb1 * aa + " vmovsd %%xmm1 , (%7 , %0, 8) \n\t" // b[0] = bb0 * aa + " vmovsd %%xmm2 , 8(%7 , %0, 8) \n\t" // b[1] = bb1 * aa + + + " vmovups 0(%6, %1, 8) , %%xmm4 \n\t" // read a[k] + " vmovups 16(%6, %1, 8) , %%xmm5 \n\t" // read a[k] + " vfnmaddpd %%xmm8 , %%xmm1 , %%xmm4 , %%xmm8 \n\t" + " vfnmaddpd %%xmm12 , %%xmm2 , %%xmm4 , %%xmm12 \n\t" + " vfnmaddpd %%xmm9 , %%xmm1 , %%xmm5 , %%xmm9 \n\t" + " vfnmaddpd %%xmm13 , %%xmm2 , %%xmm5 , %%xmm13 \n\t" + + " subq $8, %1 \n\t" // i = 3 + " subq $2, %0 \n\t" // b-= n + + " vmovddup 24(%6, %1, 8) , %%xmm0 \n\t" // read aa[i] + " vunpckhpd %%xmm9 , %%xmm9 , %%xmm1 \n\t" // extract bb0 + " vunpckhpd %%xmm13 , %%xmm13, %%xmm2 \n\t" // extract bb1 + " vmulpd %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulpd %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb0 * aa + " vmovsd %%xmm1 , 24(%4) \n\t" // c[i] = bb0 * aa + " vmovsd %%xmm2 , 24(%5) \n\t" // c[i] = bb1 * aa + " vmovsd %%xmm1 , (%7 , %0, 8) \n\t" // b[0] = bb0 * aa + " vmovsd %%xmm2 , 8(%7 , %0, 8) \n\t" // b[1] = bb1 * aa + + " vmovups 0(%6, %1, 8) , %%xmm4 \n\t" // read a[k] + " vmovsd 16(%6, %1, 8) , %%xmm5 \n\t" // read a[k] + " vfnmaddpd %%xmm8 , %%xmm1 , %%xmm4 , %%xmm8 \n\t" + " vfnmaddpd %%xmm12 , %%xmm2 , %%xmm4 , %%xmm12 \n\t" + " vfnmaddsd %%xmm9 , %%xmm1 , %%xmm5 , %%xmm9 \n\t" + " vfnmaddsd %%xmm13 , %%xmm2 , %%xmm5 , %%xmm13 \n\t" + + " subq $8, %1 \n\t" // i = 2 + " subq $2, %0 \n\t" // b-= n + + " vmovddup 16(%6, %1, 8) , %%xmm0 \n\t" // read aa[i] + " vunpcklpd %%xmm9 , %%xmm9 , %%xmm1 \n\t" // extract bb0 + " vunpcklpd %%xmm13 , %%xmm13, %%xmm2 \n\t" // extract bb1 + " vmulpd %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulpd %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb0 * aa + " vmovsd %%xmm1 , 16(%4) \n\t" // c[i] = bb0 * aa + " vmovsd %%xmm2 , 16(%5) \n\t" // c[i] = bb1 * aa + " vmovsd %%xmm1 , (%7 , %0, 8) \n\t" // b[0] = bb0 * aa + " vmovsd %%xmm2 , 8(%7 , %0, 8) \n\t" // b[1] = bb1 * aa + + " vmovups 0(%6, %1, 8) , %%xmm4 \n\t" // read a[k] + " vfnmaddpd %%xmm8 , %%xmm1 , %%xmm4 , %%xmm8 \n\t" + " vfnmaddpd %%xmm12 , %%xmm2 , %%xmm4 , %%xmm12 \n\t" + + " subq $8, %1 \n\t" // i = 1 + " subq $2, %0 \n\t" // b-= n + + " vmovddup 8(%6, %1, 8) , %%xmm0 \n\t" // read aa[i] + " vunpckhpd %%xmm8 , %%xmm8 , %%xmm1 \n\t" // extract bb0 + " vunpckhpd %%xmm12 , %%xmm12, %%xmm2 \n\t" // extract bb1 + " vmulpd %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulpd %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb0 * aa + " vmovsd %%xmm1 , 8(%4) \n\t" // c[i] = bb0 * aa + " vmovsd %%xmm2 , 8(%5) \n\t" // c[i] = bb1 * aa + " vmovsd %%xmm1 , (%7 , %0, 8) \n\t" // b[0] = bb0 * aa + " vmovsd %%xmm2 , 8(%7 , %0, 8) \n\t" // b[1] = bb1 * aa + + " vmovsd 0(%6, %1, 8) , %%xmm4 \n\t" // read a[k] + " vfnmaddsd %%xmm8 , %%xmm1 , %%xmm4 , %%xmm8 \n\t" + " vfnmaddsd %%xmm12 , %%xmm2 , %%xmm4 , %%xmm12 \n\t" + + " subq $8, %1 \n\t" // i = 0 + " subq $2, %0 \n\t" // b-= n + + " vmovddup 0(%6, %1, 8) , %%xmm0 \n\t" // read aa[i] + " vunpcklpd %%xmm8 , %%xmm8 , %%xmm1 \n\t" // extract bb0 + " vunpcklpd %%xmm12 , %%xmm12, %%xmm2 \n\t" 
// extract bb1 + " vmulpd %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulpd %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb0 * aa + " vmovsd %%xmm1 , 0(%4) \n\t" // c[i] = bb0 * aa + " vmovsd %%xmm2 , 0(%5) \n\t" // c[i] = bb1 * aa + " vmovsd %%xmm1 , (%7 , %0, 8) \n\t" // b[0] = bb0 * aa + " vmovsd %%xmm2 , 8(%7 , %0, 8) \n\t" // b[1] = bb1 * aa + + " vzeroupper \n\t" + + : + : + "r" (n1), // 0 + "a" (i), // 1 + "r" (a), // 2 + "r" (b), // 3 + "r" (c), // 4 + "r" (c1), // 5 + "r" (as), // 6 + "r" (bs) // 7 + : "cc", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + +#ifndef COMPLEX +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa, bb; + FLOAT *cj; + + + BLASLONG i, j, k; + + a += (m - 1) * m; + b += (m - 1) * n; + + for (i = m - 1; i >= 0; i--) { + + aa = *(a + i); + + for (j = 0; j < n; j ++) { + cj = c + j * ldc; + bb = *(cj + i); + bb *= aa; + *b = bb; + *(cj + i) = bb; + b ++; + + BLASLONG i1 = i & -4 ; + FLOAT t0,t1,t2,t3; + + k=0; + + + if ( i & 4 ) + { + t0 = cj[k]; + t1 = cj[k+1]; + t2 = cj[k+2]; + t3 = cj[k+3]; + + t0 -= bb * a[k+0]; + t1 -= bb * a[k+1]; + t2 -= bb * a[k+2]; + t3 -= bb * a[k+3]; + + cj[k+0] = t0; + cj[k+1] = t1; + cj[k+2] = t2; + cj[k+3] = t3; + + k+=4; + } + + if ( i & 2 ) + { + t0 = a[k]; + t1 = a[k+1]; + + t0 *= bb; + t1 *= bb; + + cj[k+0] -= t0; + cj[k+1] -= t1; + + k+=2; + } + + if ( i & 1 ) + { + t0 = bb * a[k]; + cj[k+0] -= t0; + + } + + + } + a -= m; + b -= 2 * n; + } + +} + +#else + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa1, aa2; + FLOAT bb1, bb2; + FLOAT cc1, cc2; + + int i, j, k; + + ldc *= 2; + a += (m - 1) * m * 2; + b += (m - 1) * n * 2; + + for (i = m - 1; i >= 0; i--) { + + aa1 = *(a + i * 2 + 0); + aa2 = *(a + i * 2 + 1); + + for (j = 0; j < n; j ++) { + bb1 = *(c + i * 2 + 0 + j * ldc); + bb2 = *(c + i * 2 + 1 + j * ldc); + +#ifndef CONJ + cc1 = aa1 * bb1 - aa2 * bb2; + cc2 = aa1 * bb2 + aa2 * bb1; +#else + cc1 = aa1 * bb1 + aa2 * bb2; + cc2 = aa1 * bb2 - aa2 * bb1; +#endif + + + *(b + 0) = cc1; + *(b + 1) = cc2; + *(c + i * 2 + 0 + j * ldc) = cc1; + *(c + i * 2 + 1 + j * ldc) = cc2; + b += 2; + + for (k = 0; k < i; k ++){ +#ifndef CONJ + *(c + k * 2 + 0 + j * ldc) -= cc1 * *(a + k * 2 + 0) - cc2 * *(a + k * 2 + 1); + *(c + k * 2 + 1 + j * ldc) -= cc1 * *(a + k * 2 + 1) + cc2 * *(a + k * 2 + 0); +#else + *(c + k * 2 + 0 + j * ldc) -= cc1 * *(a + k * 2 + 0) + cc2 * *(a + k * 2 + 1); + *(c + k * 2 + 1 + j * ldc) -= - cc1 * *(a + k * 2 + 1) + cc2 * *(a + k * 2 + 0); +#endif + } + + } + a -= m * 2; + b -= 4 * n; + } + +} + +#endif + + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, +#ifdef COMPLEX + FLOAT dummy2, +#endif + FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){ + + BLASLONG i, j; + FLOAT *aa, *cc; + BLASLONG kk; + +#if 0 + fprintf(stderr, "TRSM KERNEL LN : m = %3ld n = %3ld k = %3ld offset = %3ld\n", + m, n, k, offset); +#endif + + j = (n >> GEMM_UNROLL_N_SHIFT); + + while (j > 0) { + + kk = m + offset; + + if (m & (GEMM_UNROLL_M - 1)) { + for (i = 1; i < GEMM_UNROLL_M; i *= 2){ + if (m & i) { + aa = a + ((m & ~(i - 1)) - i) * k * COMPSIZE; + cc = c + ((m & ~(i - 1)) - i) * COMPSIZE; + + if (k - kk > 0) { + + GEMM_KERNEL(i, GEMM_UNROLL_N, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + i * kk * COMPSIZE, + b + GEMM_UNROLL_N * kk * COMPSIZE, + cc, + ldc); + + } 
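/*
 * LN blocking strategy (this branch handles row counts that are not a
 * multiple of GEMM_UNROLL_M): row blocks are processed from the bottom up.
 * For each block, the contribution of the already-solved rows (the trailing
 * k - kk columns of the packed panels) is first subtracted via GEMM_KERNEL,
 * and the triangular diagonal block is then solved in place by solve().
 * The packed copy of A supplies the reciprocal of each diagonal entry, which
 * is why solve() multiplies by aa rather than dividing.  For full
 * GEMM_UNROLL_M x GEMM_UNROLL_N tiles both steps are fused into the
 * dtrsm_LN_solve_opt() assembly above, which keeps the whole tile in
 * registers across the update and the backward substitution.  Per column of
 * C, the substitution amounts to (illustrative sketch, not the packed
 * indexing used here):
 *
 *     for (i = blk - 1; i >= 0; i--) {
 *         bb = C[i] * inv_diag[i];       // reciprocal diagonal -> multiply
 *         B[i] = C[i] = bb;              // write back to C and to packed B
 *         for (k = 0; k < i; k++)
 *             C[k] -= bb * A_col_i[k];   // eliminate from remaining rows
 *     }
 */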
+ + solve(i, GEMM_UNROLL_N, + aa + (kk - i) * i * COMPSIZE, + b + (kk - i) * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + kk -= i; + } + } + } + + i = (m >> GEMM_UNROLL_M_SHIFT); + if (i > 0) { + aa = a + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * k * COMPSIZE; + cc = c + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * COMPSIZE; + + do { + dtrsm_LN_solve_opt(k-kk, aa + GEMM_UNROLL_M * kk * COMPSIZE, b + GEMM_UNROLL_N * kk * COMPSIZE, cc, ldc + , aa + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_M * COMPSIZE,b + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_N * COMPSIZE); + + + aa -= GEMM_UNROLL_M * k * COMPSIZE; + cc -= GEMM_UNROLL_M * COMPSIZE; + kk -= GEMM_UNROLL_M; + i --; + } while (i > 0); + } + + b += GEMM_UNROLL_N * k * COMPSIZE; + c += GEMM_UNROLL_N * ldc * COMPSIZE; + j --; + } + + if (n & (GEMM_UNROLL_N - 1)) { + + j = (GEMM_UNROLL_N >> 1); + while (j > 0) { + if (n & j) { + + kk = m + offset; + + if (m & (GEMM_UNROLL_M - 1)) { + for (i = 1; i < GEMM_UNROLL_M; i *= 2){ + if (m & i) { + aa = a + ((m & ~(i - 1)) - i) * k * COMPSIZE; + cc = c + ((m & ~(i - 1)) - i) * COMPSIZE; + + if (k - kk > 0) { + GEMM_KERNEL(i, j, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + i * kk * COMPSIZE, + b + j * kk * COMPSIZE, + cc, ldc); + } + + solve(i, j, + aa + (kk - i) * i * COMPSIZE, + b + (kk - i) * j * COMPSIZE, + cc, ldc); + + kk -= i; + } + } + } + + i = (m >> GEMM_UNROLL_M_SHIFT); + if (i > 0) { + aa = a + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * k * COMPSIZE; + cc = c + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * COMPSIZE; + + do { + if (k - kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, j, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + GEMM_UNROLL_M * kk * COMPSIZE, + b + j * kk * COMPSIZE, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, j, + aa + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - GEMM_UNROLL_M) * j * COMPSIZE, + cc, ldc); + + aa -= GEMM_UNROLL_M * k * COMPSIZE; + cc -= GEMM_UNROLL_M * COMPSIZE; + kk -= GEMM_UNROLL_M; + i --; + } while (i > 0); + } + + b += j * k * COMPSIZE; + c += j * ldc * COMPSIZE; + } + j >>= 1; + } + } + + return 0; +} diff --git a/kernel/x86_64/dtrsm_kernel_RN_haswell.c b/kernel/x86_64/dtrsm_kernel_RN_haswell.c new file mode 100644 index 000000000..da90e40c7 --- /dev/null +++ b/kernel/x86_64/dtrsm_kernel_RN_haswell.c @@ -0,0 +1,677 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. 
IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#include "common.h" + +static FLOAT dm1 = -1.; + +#ifdef CONJ +#define GEMM_KERNEL GEMM_KERNEL_R +#else +#define GEMM_KERNEL GEMM_KERNEL_N +#endif + +#if GEMM_DEFAULT_UNROLL_M == 1 +#define GEMM_UNROLL_M_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 2 +#define GEMM_UNROLL_M_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 4 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 6 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 8 +#define GEMM_UNROLL_M_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 16 +#define GEMM_UNROLL_M_SHIFT 4 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 1 +#define GEMM_UNROLL_N_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 2 +#define GEMM_UNROLL_N_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 4 +#define GEMM_UNROLL_N_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 8 +#define GEMM_UNROLL_N_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 16 +#define GEMM_UNROLL_N_SHIFT 4 +#endif + + + +static void dtrsm_RN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) __attribute__ ((noinline)); + +static void dtrsm_RN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) +{ + + FLOAT *c3 = c + ldc + ldc*2 ; + FLOAT *c6 = c + ldc*4 + ldc*2 ; + ldc = ldc *8; + BLASLONG n1 = n * 8; + BLASLONG i=0; + + __asm__ __volatile__ + ( + " vzeroupper \n\t" + + " vxorpd %%ymm8 , %%ymm8 , %%ymm8 \n\t" + " vxorpd %%ymm9 , %%ymm9 , %%ymm9 \n\t" + " vxorpd %%ymm10, %%ymm10, %%ymm10 \n\t" + " vxorpd %%ymm11, %%ymm11, %%ymm11 \n\t" + " vxorpd %%ymm12, %%ymm12, %%ymm12 \n\t" + " vxorpd %%ymm13, %%ymm13, %%ymm13 \n\t" + " vxorpd %%ymm14, %%ymm14, %%ymm14 \n\t" + " vxorpd %%ymm15, %%ymm15, %%ymm15 \n\t" + + " cmpq $0, %0 \n\t" + " je 4f \n\t" + + " vmovups (%2,%1,4), %%ymm0 \n\t" // read a + " vmovups (%3,%1,8), %%ymm1 \n\t" // read b0 + " vmovups 32(%3,%1,8), %%ymm2 \n\t" // read b1 + + + " addq $8, %1 \n\t" + " cmpq %1, %0 \n\t" + " je 21f \n\t" + + " .align 16 \n\t" + "1: \n\t" + + " vmovups (%2,%1,4), %%ymm4 \n\t" // read a + " vpermpd $0xb1 , %%ymm0 , %%ymm3 \n\t" + + " vfmadd231pd %%ymm0 , %%ymm1 , %%ymm8 \n\t" + " vfmadd231pd %%ymm0 , %%ymm2 , %%ymm12 \n\t" + + " vmovups (%3,%1,8), %%ymm5 \n\t" // read b0 + " vfmadd231pd %%ymm3 , %%ymm1 , %%ymm9 \n\t" + " vfmadd231pd %%ymm3 , %%ymm2 , %%ymm13 \n\t" + + " vpermpd $0x1b , %%ymm3 , %%ymm0 \n\t" + " vmovups 32(%3,%1,8), %%ymm6 \n\t" // read b1 + " vpermpd $0xb1 , %%ymm0 , %%ymm3 \n\t" + " vfmadd231pd %%ymm0 , %%ymm1 , %%ymm10 \n\t" + " vfmadd231pd %%ymm0 , %%ymm2 , %%ymm14 \n\t" + + " addq $8, %1 \n\t" + " vfmadd231pd %%ymm3 , 
%%ymm1 , %%ymm11 \n\t" + " vfmadd231pd %%ymm3 , %%ymm2 , %%ymm15 \n\t" + + " cmpq %1, %0 \n\t" + + " jz 22f \n\t" + + " vmovups (%2,%1,4), %%ymm0 \n\t" // read a + + " vfmadd231pd %%ymm4 , %%ymm5 , %%ymm8 \n\t" + " vfmadd231pd %%ymm4 , %%ymm6 , %%ymm12 \n\t" + + " vpermpd $0xb1 , %%ymm4 , %%ymm4 \n\t" + " vmovups (%3,%1,8), %%ymm1 \n\t" // read b0 + " vfmadd231pd %%ymm4 , %%ymm5 , %%ymm9 \n\t" + " vfmadd231pd %%ymm4 , %%ymm6 , %%ymm13 \n\t" + + " vpermpd $0x1b , %%ymm4 , %%ymm4 \n\t" + " vmovups 32(%3,%1,8), %%ymm2 \n\t" // read b1 + " vfmadd231pd %%ymm4 , %%ymm5 , %%ymm10 \n\t" + " vfmadd231pd %%ymm4 , %%ymm6 , %%ymm14 \n\t" + + " vpermpd $0xb1 , %%ymm4 , %%ymm4 \n\t" + " addq $8, %1 \n\t" + " vfmadd231pd %%ymm4 , %%ymm5 , %%ymm11 \n\t" + " vfmadd231pd %%ymm4 , %%ymm6 , %%ymm15 \n\t" + + " cmpq %1, %0 \n\t" + + " jnz 1b \n\t" + + + "21: \n\t" + + " vfmadd231pd %%ymm0 , %%ymm1 , %%ymm8 \n\t" + " vfmadd231pd %%ymm0 , %%ymm2 , %%ymm12 \n\t" + + " vpermpd $0xb1 , %%ymm0 , %%ymm0 \n\t" + " vfmadd231pd %%ymm0 , %%ymm1 , %%ymm9 \n\t" + " vfmadd231pd %%ymm0 , %%ymm2 , %%ymm13 \n\t" + + " vpermpd $0x1b , %%ymm0 , %%ymm0 \n\t" + " vfmadd231pd %%ymm0 , %%ymm1 , %%ymm10 \n\t" + " vfmadd231pd %%ymm0 , %%ymm2 , %%ymm14 \n\t" + + " vpermpd $0xb1 , %%ymm0 , %%ymm0 \n\t" + " vfmadd231pd %%ymm0 , %%ymm1 , %%ymm11 \n\t" + " vfmadd231pd %%ymm0 , %%ymm2 , %%ymm15 \n\t" + + " jmp 3f \n\t" + + "22: \n\t" + + " vfmadd231pd %%ymm4 , %%ymm5 , %%ymm8 \n\t" + " vfmadd231pd %%ymm4 , %%ymm6 , %%ymm12 \n\t" + + " vpermpd $0xb1 , %%ymm4 , %%ymm4 \n\t" + " vfmadd231pd %%ymm4 , %%ymm5 , %%ymm9 \n\t" + " vfmadd231pd %%ymm4 , %%ymm6 , %%ymm13 \n\t" + + " vpermpd $0x1b , %%ymm4 , %%ymm4 \n\t" + " vfmadd231pd %%ymm4 , %%ymm5 , %%ymm10 \n\t" + " vfmadd231pd %%ymm4 , %%ymm6 , %%ymm14 \n\t" + + " vpermpd $0xb1 , %%ymm4 , %%ymm4 \n\t" + " vfmadd231pd %%ymm4 , %%ymm5 , %%ymm11 \n\t" + " vfmadd231pd %%ymm4 , %%ymm6 , %%ymm15 \n\t" + + "3: \n\t" + + " vpermpd $0xb1 , %%ymm9 , %%ymm9 \n\t" + " vpermpd $0xb1 , %%ymm11, %%ymm11 \n\t" + + " vblendpd $0x0a , %%ymm9 , %%ymm8 , %%ymm0 \n\t" + " vblendpd $0x05 , %%ymm9 , %%ymm8 , %%ymm1 \n\t" + " vblendpd $0x0a , %%ymm11, %%ymm10, %%ymm2 \n\t" + " vblendpd $0x05 , %%ymm11, %%ymm10, %%ymm3 \n\t" + + " vpermpd $0x1b , %%ymm2 , %%ymm2 \n\t" + " vpermpd $0x1b , %%ymm3 , %%ymm3 \n\t" + " vpermpd $0xb1 , %%ymm2 , %%ymm2 \n\t" + " vpermpd $0xb1 , %%ymm3 , %%ymm3 \n\t" + + " vblendpd $0x03 , %%ymm0 , %%ymm2 , %%ymm8 \n\t" + " vblendpd $0x03 , %%ymm1 , %%ymm3 , %%ymm9 \n\t" + " vblendpd $0x03 , %%ymm2 , %%ymm0 , %%ymm10 \n\t" + " vblendpd $0x03 , %%ymm3 , %%ymm1 , %%ymm11 \n\t" + + " vpermpd $0xb1 , %%ymm13, %%ymm13 \n\t" + " vpermpd $0xb1 , %%ymm15, %%ymm15 \n\t" + + " vblendpd $0x0a , %%ymm13, %%ymm12, %%ymm0 \n\t" + " vblendpd $0x05 , %%ymm13, %%ymm12, %%ymm1 \n\t" + " vblendpd $0x0a , %%ymm15, %%ymm14, %%ymm2 \n\t" + " vblendpd $0x05 , %%ymm15, %%ymm14, %%ymm3 \n\t" + + " vpermpd $0x1b , %%ymm2 , %%ymm2 \n\t" + " vpermpd $0x1b , %%ymm3 , %%ymm3 \n\t" + " vpermpd $0xb1 , %%ymm2 , %%ymm2 \n\t" + " vpermpd $0xb1 , %%ymm3 , %%ymm3 \n\t" + + " vblendpd $0x03 , %%ymm0 , %%ymm2 , %%ymm12 \n\t" + " vblendpd $0x03 , %%ymm1 , %%ymm3 , %%ymm13 \n\t" + " vblendpd $0x03 , %%ymm2 , %%ymm0 , %%ymm14 \n\t" + " vblendpd $0x03 , %%ymm3 , %%ymm1 , %%ymm15 \n\t" + + + "4: \n\t" + + " vmovups (%4) , %%ymm0 \n\t" // read c0 + " vmovups (%4,%7,1) , %%ymm1 \n\t" // read c1 + " vmovups (%4,%7,2) , %%ymm2 \n\t" // read c2 + " vmovups (%5) , %%ymm3 \n\t" // read c3 + + " vmovups (%5,%7,1) , %%ymm4 \n\t" // read c4 + " 
vmovups (%5,%7,2) , %%ymm5 \n\t" // read c5 + " vmovups (%6) , %%ymm6 \n\t" // read c6 + " vmovups (%6,%7,1) , %%ymm7 \n\t" // read c7 + + " vsubpd %%ymm8 , %%ymm0 , %%ymm8 \n\t" + " vmovups (%9), %%ymm0 \n\t" + " vsubpd %%ymm9 , %%ymm1 , %%ymm9 \n\t" + " vpermpd $0x55 , %%ymm0 , %%ymm1 \n\t" + " vsubpd %%ymm10, %%ymm2 , %%ymm10 \n\t" + " vpermpd $0xaa , %%ymm0 , %%ymm2 \n\t" + " vsubpd %%ymm11, %%ymm3 , %%ymm11 \n\t" + " vpermpd $0xff , %%ymm0 , %%ymm3 \n\t" + " vpermpd $0x00 , %%ymm0 , %%ymm0 \n\t" + + " vsubpd %%ymm12, %%ymm4 , %%ymm12 \n\t" + " vmovups 32(%9), %%ymm4 \n\t" + " vsubpd %%ymm13, %%ymm5 , %%ymm13 \n\t" + " vpermpd $0x55 , %%ymm4 , %%ymm5 \n\t" + " vsubpd %%ymm14, %%ymm6 , %%ymm14 \n\t" + " vpermpd $0xaa , %%ymm4 , %%ymm6 \n\t" + " vsubpd %%ymm15, %%ymm7 , %%ymm15 \n\t" + " vpermpd $0xff , %%ymm4 , %%ymm7 \n\t" + " vpermpd $0x00 , %%ymm4 , %%ymm4 \n\t" + + + "5: \n\t" // i = 0 + + " addq $64, %9 \n\t" // b=b+8 + + " vmulpd %%ymm8 , %%ymm0, %%ymm8 \n\t" // a *bb + " vmovups (%9), %%ymm0 \n\t" + " vmovups %%ymm8 , (%8) \n\t" // write a + " vmovups %%ymm8 , (%4) \n\t" // write c + + " vfnmadd231pd %%ymm8 , %%ymm1 , %%ymm9 \n\t" + " vmovups 32(%9), %%ymm1 \n\t" + " vfnmadd231pd %%ymm8 , %%ymm2 , %%ymm10 \n\t" + " vpermpd $0xaa , %%ymm0 , %%ymm2 \n\t" + " vfnmadd231pd %%ymm8 , %%ymm3 , %%ymm11 \n\t" + " vpermpd $0xff , %%ymm0 , %%ymm3 \n\t" + " vfnmadd231pd %%ymm8 , %%ymm4 , %%ymm12 \n\t" + " vpermpd $0x55 , %%ymm0 , %%ymm0 \n\t" + " vfnmadd231pd %%ymm8 , %%ymm5 , %%ymm13 \n\t" + " vpermpd $0x55 , %%ymm1 , %%ymm5 \n\t" + " vfnmadd231pd %%ymm8 , %%ymm6 , %%ymm14 \n\t" + " vpermpd $0xaa , %%ymm1 , %%ymm6 \n\t" + " vfnmadd231pd %%ymm8 , %%ymm7 , %%ymm15 \n\t" + " vpermpd $0xff , %%ymm1 , %%ymm7 \n\t" + " vpermpd $0x00 , %%ymm1 , %%ymm4 \n\t" + + " addq $64, %9 \n\t" // b=b+8 + " addq $32, %8 \n\t" // a=a+8 + + + + " vmulpd %%ymm9 , %%ymm0, %%ymm9 \n\t" // a *bb + " vmovups (%9), %%ymm0 \n\t" + " vmovups 32(%9), %%ymm1 \n\t" + " vmovups %%ymm9 , (%8) \n\t" // write a + " vmovups %%ymm9 , (%4,%7,1) \n\t" // write c + + " vfnmadd231pd %%ymm9 , %%ymm2 , %%ymm10 \n\t" + " vfnmadd231pd %%ymm9 , %%ymm3 , %%ymm11 \n\t" + " vpermpd $0xff , %%ymm0 , %%ymm3 \n\t" + " vfnmadd231pd %%ymm9 , %%ymm4 , %%ymm12 \n\t" + " vpermpd $0xaa , %%ymm0 , %%ymm0 \n\t" + " vfnmadd231pd %%ymm9 , %%ymm5 , %%ymm13 \n\t" + " vpermpd $0x55 , %%ymm1 , %%ymm5 \n\t" + " vfnmadd231pd %%ymm9 , %%ymm6 , %%ymm14 \n\t" + " vpermpd $0xaa , %%ymm1 , %%ymm6 \n\t" + " vfnmadd231pd %%ymm9 , %%ymm7 , %%ymm15 \n\t" + " vpermpd $0xff , %%ymm1 , %%ymm7 \n\t" + " vpermpd $0x00 , %%ymm1 , %%ymm4 \n\t" + + " addq $64, %9 \n\t" // b=b+8 + " addq $32, %8 \n\t" // a=a+8 + + " vmulpd %%ymm10, %%ymm0, %%ymm10 \n\t" // a *bb + " vmovups (%9), %%ymm0 \n\t" + " vmovups 32(%9), %%ymm1 \n\t" + " vmovups %%ymm10, (%8) \n\t" // write a + " vmovups %%ymm10, (%4,%7,2) \n\t" // write c + + " vfnmadd231pd %%ymm10, %%ymm3 , %%ymm11 \n\t" + " vpermpd $0xff , %%ymm0 , %%ymm0 \n\t" + " vfnmadd231pd %%ymm10, %%ymm4 , %%ymm12 \n\t" + " vfnmadd231pd %%ymm10, %%ymm5 , %%ymm13 \n\t" + " vpermpd $0x55 , %%ymm1 , %%ymm5 \n\t" + " vfnmadd231pd %%ymm10, %%ymm6 , %%ymm14 \n\t" + " vpermpd $0xaa , %%ymm1 , %%ymm6 \n\t" + " vfnmadd231pd %%ymm10, %%ymm7 , %%ymm15 \n\t" + " vpermpd $0xff , %%ymm1 , %%ymm7 \n\t" + " vpermpd $0x00 , %%ymm1 , %%ymm4 \n\t" + + + " addq $64, %9 \n\t" // b=b+8 + " addq $32, %8 \n\t" // a=a+8 + + + + " vmulpd %%ymm11, %%ymm0, %%ymm11 \n\t" // a *bb + " vmovups 32(%9), %%ymm1 \n\t" + " vmovups %%ymm11, (%8) \n\t" // write a + " vmovups 
%%ymm11, (%5) \n\t" // write c + + " vfnmadd231pd %%ymm11, %%ymm4 , %%ymm12 \n\t" + " vfnmadd231pd %%ymm11, %%ymm5 , %%ymm13 \n\t" + " vpermpd $0x55 , %%ymm1 , %%ymm5 \n\t" + " vfnmadd231pd %%ymm11, %%ymm6 , %%ymm14 \n\t" + " vpermpd $0xaa , %%ymm1 , %%ymm6 \n\t" + " vfnmadd231pd %%ymm11, %%ymm7 , %%ymm15 \n\t" + " vpermpd $0xff , %%ymm1 , %%ymm7 \n\t" + " vpermpd $0x00 , %%ymm1 , %%ymm0 \n\t" + + + " addq $64, %9 \n\t" // b=b+8 + " addq $32, %8 \n\t" // a=a+8 + + + " vmulpd %%ymm12, %%ymm0, %%ymm12 \n\t" // a *bb + " vmovups 32(%9), %%ymm1 \n\t" + " vmovups %%ymm12, (%8) \n\t" // write a + " vmovups %%ymm12, (%5,%7,1) \n\t" // write c + + " vfnmadd231pd %%ymm12, %%ymm5 , %%ymm13 \n\t" + " vfnmadd231pd %%ymm12, %%ymm6 , %%ymm14 \n\t" + " vpermpd $0xaa , %%ymm1 , %%ymm6 \n\t" + " vfnmadd231pd %%ymm12, %%ymm7 , %%ymm15 \n\t" + " vpermpd $0xff , %%ymm1 , %%ymm7 \n\t" + " vpermpd $0x55 , %%ymm1 , %%ymm0 \n\t" + + " addq $64, %9 \n\t" // b=b+8 + " addq $32, %8 \n\t" // a=a+8 + + " vmulpd %%ymm13, %%ymm0, %%ymm13 \n\t" // a *bb + " vmovups 32(%9), %%ymm1 \n\t" + " vmovups %%ymm13, (%8) \n\t" // write a + " vmovups %%ymm13, (%5,%7,2) \n\t" // write c + + " vfnmadd231pd %%ymm13, %%ymm6 , %%ymm14 \n\t" + " vfnmadd231pd %%ymm13, %%ymm7 , %%ymm15 \n\t" + " vpermpd $0xff , %%ymm1 , %%ymm7 \n\t" + " vpermpd $0xaa , %%ymm1 , %%ymm0 \n\t" + + + " addq $64, %9 \n\t" // b=b+8 + " addq $32, %8 \n\t" // a=a+8 + + + " vmulpd %%ymm14, %%ymm0, %%ymm14 \n\t" // a *bb + " vmovups 32(%9), %%ymm1 \n\t" + " vmovups %%ymm14, (%8) \n\t" // write a + " vmovups %%ymm14, (%6) \n\t" // write c + + " vfnmadd231pd %%ymm14, %%ymm7 , %%ymm15 \n\t" + + " vpermpd $0xff , %%ymm1 , %%ymm0 \n\t" + + " addq $32, %8 \n\t" // a=a+8 + + " vmulpd %%ymm15, %%ymm0, %%ymm15 \n\t" // a *bb + " vmovups %%ymm15, (%8) \n\t" // write a + " vmovups %%ymm15, (%6,%7,1) \n\t" // write c + + " vzeroupper \n\t" + + : + : + "r" (n1), // 0 + "a" (i), // 1 + "r" (a), // 2 + "r" (b), // 3 + "r" (c), // 4 + "r" (c3), // 5 + "r" (c6), // 6 + "r" (ldc), // 7 + "r" (as), // 8 + "r" (bs) // 9 + : "cc", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + + +#ifndef COMPLEX + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa, bb; + + int i, j, k; + + for (i = 0; i < n; i++) { + + bb = *(b + i); + + for (j = 0; j < m; j ++) { + aa = *(c + j + i * ldc); + aa *= bb; + *a = aa; + *(c + j + i * ldc) = aa; + a ++; + + for (k = i + 1; k < n; k ++){ + *(c + j + k * ldc) -= aa * *(b + k); + } + + } + b += n; + } +} + +#else + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa1, aa2; + FLOAT bb1, bb2; + FLOAT cc1, cc2; + + int i, j, k; + + ldc *= 2; + + for (i = 0; i < n; i++) { + + bb1 = *(b + i * 2 + 0); + bb2 = *(b + i * 2 + 1); + + for (j = 0; j < m; j ++) { + aa1 = *(c + j * 2 + 0 + i * ldc); + aa2 = *(c + j * 2 + 1 + i * ldc); + +#ifndef CONJ + cc1 = aa1 * bb1 - aa2 * bb2; + cc2 = aa1 * bb2 + aa2 * bb1; +#else + cc1 = aa1 * bb1 + aa2 * bb2; + cc2 = -aa1 * bb2 + aa2 * bb1; +#endif + + *(a + 0) = cc1; + *(a + 1) = cc2; + *(c + j * 2 + 0 + i * ldc) = cc1; + *(c + j * 2 + 1 + i * ldc) = cc2; + a += 2; + + for (k = i + 1; k < n; k ++){ +#ifndef CONJ + *(c + j * 2 + 0 + k * ldc) -= cc1 * *(b + k * 2 + 0) - cc2 * *(b + k * 2 + 1); + *(c + j * 2 + 1 + k * ldc) -= cc1 * *(b + k * 2 + 1) + cc2 * *(b + k * 2 + 0); +#else + *(c + j 
* 2 + 0 + k * ldc) -= cc1 * *(b + k * 2 + 0) + cc2 * *(b + k * 2 + 1); + *(c + j * 2 + 1 + k * ldc) -= - cc1 * *(b + k * 2 + 1) + cc2 * *(b + k * 2 + 0); +#endif + } + + } + b += n * 2; + } +} + +#endif + + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, +#ifdef COMPLEX + FLOAT dummy2, +#endif + FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){ + + FLOAT *aa, *cc; + BLASLONG kk; + BLASLONG i, j, jj; + +#if 0 + fprintf(stderr, "TRSM RN KERNEL m = %3ld n = %3ld k = %3ld offset = %3ld\n", + m, n, k, offset); +#endif + + jj = 0; + j = (n >> GEMM_UNROLL_N_SHIFT); + kk = -offset; + + while (j > 0) { + + aa = a; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + + if (i > 0) { + do { + + dtrsm_RN_solve_opt(kk, aa, b, cc, ldc, aa + kk * GEMM_UNROLL_M * COMPSIZE, b + kk * GEMM_UNROLL_N * COMPSIZE); +/* + solve(GEMM_UNROLL_M, GEMM_UNROLL_N, + aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); +*/ + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + i --; + } while (i > 0); + } + + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + while (i > 0) { + if (m & i) { + if (kk > 0) { + GEMM_KERNEL(i, GEMM_UNROLL_N, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, b, cc, ldc); + } + solve(i, GEMM_UNROLL_N, + aa + kk * i * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + } + i >>= 1; + } + } + + kk += GEMM_UNROLL_N; + b += GEMM_UNROLL_N * k * COMPSIZE; + c += GEMM_UNROLL_N * ldc * COMPSIZE; + j --; + jj += GEMM_UNROLL_M; + } + + if (n & (GEMM_UNROLL_N - 1)) { + + j = (GEMM_UNROLL_N >> 1); + while (j > 0) { + if (n & j) { + + aa = a; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + + while (i > 0) { + if (kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, j, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, + b, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, j, + aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * j * COMPSIZE, cc, ldc); + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + i --; + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + while (i > 0) { + if (m & i) { + if (kk > 0) { + GEMM_KERNEL(i, j, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, + b, + cc, + ldc); + } + + solve(i, j, + aa + kk * i * COMPSIZE, + b + kk * j * COMPSIZE, cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + } + i >>= 1; + } + } + + b += j * k * COMPSIZE; + c += j * ldc * COMPSIZE; + kk += j; + } + j >>= 1; + } + } + + return 0; +} diff --git a/kernel/x86_64/dtrsm_kernel_RT_bulldozer.c b/kernel/x86_64/dtrsm_kernel_RT_bulldozer.c new file mode 100644 index 000000000..54df5b359 --- /dev/null +++ b/kernel/x86_64/dtrsm_kernel_RT_bulldozer.c @@ -0,0 +1,546 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. 
*/ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#include "common.h" + +static FLOAT dm1 = -1.; + +#ifdef CONJ +#define GEMM_KERNEL GEMM_KERNEL_R +#else +#define GEMM_KERNEL GEMM_KERNEL_N +#endif + +#if GEMM_DEFAULT_UNROLL_M == 1 +#define GEMM_UNROLL_M_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 2 +#define GEMM_UNROLL_M_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 4 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 6 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + + +#if GEMM_DEFAULT_UNROLL_M == 8 +#define GEMM_UNROLL_M_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 16 +#define GEMM_UNROLL_M_SHIFT 4 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 1 +#define GEMM_UNROLL_N_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 2 +#define GEMM_UNROLL_N_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 4 +#define GEMM_UNROLL_N_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 8 +#define GEMM_UNROLL_N_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 16 +#define GEMM_UNROLL_N_SHIFT 4 +#endif + + + +static void dtrsm_RT_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) __attribute__ ((noinline)); + +static void dtrsm_RT_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) +{ + + FLOAT *c1 = c + ldc ; + BLASLONG n1 = n * 8; + BLASLONG i=0; + + as += (2 - 1) * 8; + bs += (2 - 1) * 2; + + __asm__ __volatile__ + ( + " vzeroupper \n\t" + " prefetcht0 (%4) \n\t" + " prefetcht0 (%5) \n\t" + " vxorpd %%xmm8 , %%xmm8 , %%xmm8 \n\t" + " vxorpd %%xmm9 , %%xmm9 , %%xmm9 \n\t" + " vxorpd %%xmm10, %%xmm10, %%xmm10 \n\t" + " vxorpd %%xmm11, %%xmm11, %%xmm11 \n\t" + " vxorpd %%xmm12, %%xmm12, %%xmm12 \n\t" + " vxorpd %%xmm13, %%xmm13, %%xmm13 \n\t" + " vxorpd %%xmm14, %%xmm14, %%xmm14 \n\t" + " vxorpd %%xmm15, %%xmm15, %%xmm15 \n\t" + + " cmpq $0, %0 \n\t" + " je 2f \n\t" + + " .align 16 \n\t" + "1: \n\t" + + " prefetcht0 384(%2,%1,8) \n\t" + " prefetcht0 384(%3,%1,8) \n\t" + " vmovddup (%3,%1,2), %%xmm0 \n\t" // read b + " vmovups (%2,%1,8), %%xmm4 \n\t" + " vmovddup 8(%3,%1,2), %%xmm1 \n\t" + " vmovups 16(%2,%1,8), %%xmm5 \n\t" + " vmovups 32(%2,%1,8), %%xmm6 \n\t" + " vmovups 48(%2,%1,8), %%xmm7 \n\t" + + " vfmaddpd %%xmm8 , %%xmm0 , %%xmm4 , %%xmm8 \n\t" + " vfmaddpd %%xmm12, %%xmm1 , %%xmm4 , %%xmm12 \n\t" + " vfmaddpd %%xmm9 , %%xmm0 , %%xmm5 , %%xmm9 \n\t" + " vfmaddpd %%xmm13, %%xmm1 , 
%%xmm5 , %%xmm13 \n\t" + " vfmaddpd %%xmm10, %%xmm0 , %%xmm6 , %%xmm10 \n\t" + " vfmaddpd %%xmm14, %%xmm1 , %%xmm6 , %%xmm14 \n\t" + " addq $8, %1 \n\t" + " vfmaddpd %%xmm11, %%xmm0 , %%xmm7 , %%xmm11 \n\t" + " vfmaddpd %%xmm15, %%xmm1 , %%xmm7 , %%xmm15 \n\t" + " cmpq %1, %0 \n\t" + + " jz 2f \n\t" + + " prefetcht0 384(%2,%1,8) \n\t" + " vmovddup (%3,%1,2), %%xmm0 \n\t" // read b + " vmovups (%2,%1,8), %%xmm4 \n\t" + " vmovddup 8(%3,%1,2), %%xmm1 \n\t" + " vmovups 16(%2,%1,8), %%xmm5 \n\t" + " vmovups 32(%2,%1,8), %%xmm6 \n\t" + " vmovups 48(%2,%1,8), %%xmm7 \n\t" + + " vfmaddpd %%xmm8 , %%xmm0 , %%xmm4 , %%xmm8 \n\t" + " vfmaddpd %%xmm12, %%xmm1 , %%xmm4 , %%xmm12 \n\t" + " vfmaddpd %%xmm9 , %%xmm0 , %%xmm5 , %%xmm9 \n\t" + " vfmaddpd %%xmm13, %%xmm1 , %%xmm5 , %%xmm13 \n\t" + " vfmaddpd %%xmm10, %%xmm0 , %%xmm6 , %%xmm10 \n\t" + " vfmaddpd %%xmm14, %%xmm1 , %%xmm6 , %%xmm14 \n\t" + " addq $8, %1 \n\t" + " vfmaddpd %%xmm11, %%xmm0 , %%xmm7 , %%xmm11 \n\t" + " vfmaddpd %%xmm15, %%xmm1 , %%xmm7 , %%xmm15 \n\t" + " cmpq %1, %0 \n\t" + + " jz 2f \n\t" + + " prefetcht0 384(%2,%1,8) \n\t" + " vmovddup (%3,%1,2), %%xmm0 \n\t" // read b + " vmovups (%2,%1,8), %%xmm4 \n\t" + " vmovddup 8(%3,%1,2), %%xmm1 \n\t" + " vmovups 16(%2,%1,8), %%xmm5 \n\t" + " vmovups 32(%2,%1,8), %%xmm6 \n\t" + " vmovups 48(%2,%1,8), %%xmm7 \n\t" + + " vfmaddpd %%xmm8 , %%xmm0 , %%xmm4 , %%xmm8 \n\t" + " vfmaddpd %%xmm12, %%xmm1 , %%xmm4 , %%xmm12 \n\t" + " vfmaddpd %%xmm9 , %%xmm0 , %%xmm5 , %%xmm9 \n\t" + " vfmaddpd %%xmm13, %%xmm1 , %%xmm5 , %%xmm13 \n\t" + " vfmaddpd %%xmm10, %%xmm0 , %%xmm6 , %%xmm10 \n\t" + " vfmaddpd %%xmm14, %%xmm1 , %%xmm6 , %%xmm14 \n\t" + " addq $8, %1 \n\t" + " vfmaddpd %%xmm11, %%xmm0 , %%xmm7 , %%xmm11 \n\t" + " vfmaddpd %%xmm15, %%xmm1 , %%xmm7 , %%xmm15 \n\t" + " cmpq %1, %0 \n\t" + + " jz 2f \n\t" + + " prefetcht0 384(%2,%1,8) \n\t" + " vmovddup (%3,%1,2), %%xmm0 \n\t" // read b + " vmovddup 8(%3,%1,2), %%xmm1 \n\t" + " vmovups (%2,%1,8), %%xmm4 \n\t" + " vmovups 16(%2,%1,8), %%xmm5 \n\t" + " vmovups 32(%2,%1,8), %%xmm6 \n\t" + " vmovups 48(%2,%1,8), %%xmm7 \n\t" + + " vfmaddpd %%xmm8 , %%xmm0 , %%xmm4 , %%xmm8 \n\t" + " vfmaddpd %%xmm12, %%xmm1 , %%xmm4 , %%xmm12 \n\t" + " vfmaddpd %%xmm9 , %%xmm0 , %%xmm5 , %%xmm9 \n\t" + " vfmaddpd %%xmm13, %%xmm1 , %%xmm5 , %%xmm13 \n\t" + " vfmaddpd %%xmm10, %%xmm0 , %%xmm6 , %%xmm10 \n\t" + " vfmaddpd %%xmm14, %%xmm1 , %%xmm6 , %%xmm14 \n\t" + " addq $8, %1 \n\t" + " vfmaddpd %%xmm11, %%xmm0 , %%xmm7 , %%xmm11 \n\t" + " vfmaddpd %%xmm15, %%xmm1 , %%xmm7 , %%xmm15 \n\t" + " cmpq %1, %0 \n\t" + + " jnz 1b \n\t" + + "2: \n\t" + + + " vmovups (%4) , %%xmm0 \n\t" + " vmovups 16(%4) , %%xmm1 \n\t" + " vmovups 32(%4) , %%xmm2 \n\t" + " vmovups 48(%4) , %%xmm3 \n\t" + + " vmovups (%5) , %%xmm4 \n\t" + " vmovups 16(%5) , %%xmm5 \n\t" + " vmovups 32(%5) , %%xmm6 \n\t" + " vmovups 48(%5) , %%xmm7 \n\t" + + " vsubpd %%xmm8 , %%xmm0 , %%xmm8 \n\t" + " vsubpd %%xmm9 , %%xmm1 , %%xmm9 \n\t" + " vsubpd %%xmm10, %%xmm2 , %%xmm10 \n\t" + " vsubpd %%xmm11, %%xmm3 , %%xmm11 \n\t" + + " vsubpd %%xmm12, %%xmm4 , %%xmm12 \n\t" + " vsubpd %%xmm13, %%xmm5 , %%xmm13 \n\t" + " vsubpd %%xmm14, %%xmm6 , %%xmm14 \n\t" + " vsubpd %%xmm15, %%xmm7 , %%xmm15 \n\t" + + "3: \n\t" // i = 1 + + " vmovddup (%7), %%xmm1 \n\t" // read b + " vmovddup 8(%7), %%xmm0 \n\t" // read bb + + " vmulpd %%xmm12 , %%xmm0 , %%xmm12 \n\t" // aa * bb + " vmulpd %%xmm13 , %%xmm0 , %%xmm13 \n\t" // aa * bb + " vmulpd %%xmm14 , %%xmm0 , %%xmm14 \n\t" // aa * bb + " vmulpd %%xmm15 , %%xmm0 , %%xmm15 \n\t" 
// aa * bb + + " vmovups %%xmm12 , (%6) \n\t" // write a + " vmovups %%xmm13 , 16(%6) \n\t" // write a + " vmovups %%xmm14 , 32(%6) \n\t" // write a + " vmovups %%xmm15 , 48(%6) \n\t" // write a + + " vmovups %%xmm12 , (%5) \n\t" // write c1 + " vmovups %%xmm13 , 16(%5) \n\t" + " vmovups %%xmm14 , 32(%5) \n\t" + " vmovups %%xmm15 , 48(%5) \n\t" + + " vfnmaddpd %%xmm8 , %%xmm12 , %%xmm1 , %%xmm8 \n\t" // c = c - aa * b + " vfnmaddpd %%xmm9 , %%xmm13 , %%xmm1 , %%xmm9 \n\t" + " vfnmaddpd %%xmm10 , %%xmm14 , %%xmm1 , %%xmm10 \n\t" + " vfnmaddpd %%xmm11 , %%xmm15 , %%xmm1 , %%xmm11 \n\t" + + " \n\t" // i = 0 + " subq $16 , %7 \n\t" // b = b - 2 + " subq $64 , %6 \n\t" // a = a - 8 + + " vmovddup (%7), %%xmm0 \n\t" // read bb + + " vmulpd %%xmm8 , %%xmm0 , %%xmm8 \n\t" // aa * bb + " vmulpd %%xmm9 , %%xmm0 , %%xmm9 \n\t" + " vmulpd %%xmm10 , %%xmm0 , %%xmm10 \n\t" + " vmulpd %%xmm11 , %%xmm0 , %%xmm11 \n\t" + + " vmovups %%xmm8 , (%6) \n\t" // write a + " vmovups %%xmm9 , 16(%6) \n\t" + " vmovups %%xmm10 , 32(%6) \n\t" + " vmovups %%xmm11 , 48(%6) \n\t" + + " vmovups %%xmm8 , (%4) \n\t" // write c0 + " vmovups %%xmm9 , 16(%4) \n\t" + " vmovups %%xmm10 , 32(%4) \n\t" + " vmovups %%xmm11 , 48(%4) \n\t" + + " vzeroupper \n\t" + + : + : + "r" (n1), // 0 + "a" (i), // 1 + "r" (a), // 2 + "r" (b), // 3 + "r" (c), // 4 + "r" (c1), // 5 + "r" (as), // 6 + "r" (bs) // 7 + : "cc", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + + + +#ifndef COMPLEX + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa, bb; + + int i, j, k; + + a += (n - 1) * m; + b += (n - 1) * n; + + for (i = n - 1; i >= 0; i--) { + + bb = *(b + i); + + for (j = 0; j < m; j ++) { + aa = *(c + j + i * ldc); + aa *= bb; + *a = aa; + *(c + j + i * ldc) = aa; + a ++; + + for (k = 0; k < i; k ++){ + *(c + j + k * ldc) -= aa * *(b + k); + } + + } + b -= n; + a -= 2 * m; + } + +} + +#else + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa1, aa2; + FLOAT bb1, bb2; + FLOAT cc1, cc2; + + int i, j, k; + + ldc *= 2; + + a += (n - 1) * m * 2; + b += (n - 1) * n * 2; + + for (i = n - 1; i >= 0; i--) { + + bb1 = *(b + i * 2 + 0); + bb2 = *(b + i * 2 + 1); + + for (j = 0; j < m; j ++) { + + aa1 = *(c + j * 2 + 0 + i * ldc); + aa2 = *(c + j * 2 + 1 + i * ldc); + +#ifndef CONJ + cc1 = aa1 * bb1 - aa2 * bb2; + cc2 = aa1 * bb2 + aa2 * bb1; +#else + cc1 = aa1 * bb1 + aa2 * bb2; + cc2 = - aa1 * bb2 + aa2 * bb1; +#endif + + *(a + 0) = cc1; + *(a + 1) = cc2; + + *(c + j * 2 + 0 + i * ldc) = cc1; + *(c + j * 2 + 1 + i * ldc) = cc2; + a += 2; + + for (k = 0; k < i; k ++){ +#ifndef CONJ + *(c + j * 2 + 0 + k * ldc) -= cc1 * *(b + k * 2 + 0) - cc2 * *(b + k * 2 + 1); + *(c + j * 2 + 1 + k * ldc) -= cc1 * *(b + k * 2 + 1) + cc2 * *(b + k * 2 + 0); +#else + *(c + j * 2 + 0 + k * ldc) -= cc1 * *(b + k * 2 + 0) + cc2 * *(b + k * 2 + 1); + *(c + j * 2 + 1 + k * ldc) -= -cc1 * *(b + k * 2 + 1) + cc2 * *(b + k * 2 + 0); +#endif + } + + } + b -= n * 2; + a -= 4 * m; + } + +} + +#endif + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, +#ifdef COMPLEX + FLOAT dummy2, +#endif + FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){ + + BLASLONG i, j; + FLOAT *aa, *cc; + BLASLONG kk; + +#if 0 + fprintf(stderr, "TRSM RT KERNEL m = %3ld n = %3ld k = %3ld offset = %3ld\n", + m, n, k, offset); +#endif + + kk = n 
- offset; + c += n * ldc * COMPSIZE; + b += n * k * COMPSIZE; + + if (n & (GEMM_UNROLL_N - 1)) { + + j = 1; + while (j < GEMM_UNROLL_N) { + if (n & j) { + + aa = a; + b -= j * k * COMPSIZE; + c -= j * ldc* COMPSIZE; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + if (i > 0) { + + do { + if (k - kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, j, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + GEMM_UNROLL_M * kk * COMPSIZE, + b + j * kk * COMPSIZE, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, j, + aa + (kk - j) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - j) * j * COMPSIZE, + cc, ldc); + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + i --; + } while (i > 0); + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + do { + if (m & i) { + + if (k - kk > 0) { + GEMM_KERNEL(i, j, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + i * kk * COMPSIZE, + b + j * kk * COMPSIZE, + cc, ldc); + } + + solve(i, j, + aa + (kk - j) * i * COMPSIZE, + b + (kk - j) * j * COMPSIZE, + cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + + } + i >>= 1; + } while (i > 0); + } + kk -= j; + } + j <<= 1; + } + } + + j = (n >> GEMM_UNROLL_N_SHIFT); + + if (j > 0) { + + do { + aa = a; + b -= GEMM_UNROLL_N * k * COMPSIZE; + c -= GEMM_UNROLL_N * ldc * COMPSIZE; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + if (i > 0) { + do { + + dtrsm_RT_solve_opt(k - kk, aa + GEMM_UNROLL_M * kk * COMPSIZE, b + GEMM_UNROLL_N * kk * COMPSIZE, cc, ldc, + aa + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_M * COMPSIZE , b + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_N * COMPSIZE ); + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + i --; + } while (i > 0); + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + do { + if (m & i) { + if (k - kk > 0) { + GEMM_KERNEL(i, GEMM_UNROLL_N, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + i * kk * COMPSIZE, + b + GEMM_UNROLL_N * kk * COMPSIZE, + cc, + ldc); + } + + solve(i, GEMM_UNROLL_N, + aa + (kk - GEMM_UNROLL_N) * i * COMPSIZE, + b + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + } + i >>= 1; + } while (i > 0); + } + + kk -= GEMM_UNROLL_N; + j --; + } while (j > 0); + } + + return 0; +} + + diff --git a/kernel/x86_64/strsm_kernel_LN_bulldozer.c b/kernel/x86_64/strsm_kernel_LN_bulldozer.c new file mode 100644 index 000000000..1b8991c6c --- /dev/null +++ b/kernel/x86_64/strsm_kernel_LN_bulldozer.c @@ -0,0 +1,756 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. 
IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#include "common.h" + +static FLOAT dm1 = -1.; + +#ifdef CONJ +#define GEMM_KERNEL GEMM_KERNEL_L +#else +#define GEMM_KERNEL GEMM_KERNEL_N +#endif + +#if GEMM_DEFAULT_UNROLL_M == 1 +#define GEMM_UNROLL_M_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 2 +#define GEMM_UNROLL_M_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 4 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 6 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 8 +#define GEMM_UNROLL_M_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 16 +#define GEMM_UNROLL_M_SHIFT 4 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 1 +#define GEMM_UNROLL_N_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 2 +#define GEMM_UNROLL_N_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 4 +#define GEMM_UNROLL_N_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 8 +#define GEMM_UNROLL_N_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 16 +#define GEMM_UNROLL_N_SHIFT 4 +#endif + +#ifndef COMPLEX + + + +static void strsm_LN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) __attribute__ ((noinline)); + +static void strsm_LN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) +{ + + FLOAT *c1 = c + ldc ; + BLASLONG n1 = n * 8; + BLASLONG i=0; + + as += (16 - 1) * 16; + bs += (16 - 1) * 2; + + __asm__ __volatile__ + ( + " vzeroupper \n\t" + " prefetcht0 (%4) \n\t" + " prefetcht0 (%5) \n\t" + " vxorps %%xmm8 , %%xmm8 , %%xmm8 \n\t" + " vxorps %%xmm9 , %%xmm9 , %%xmm9 \n\t" + " vxorps %%xmm10, %%xmm10, %%xmm10 \n\t" + " vxorps %%xmm11, %%xmm11, %%xmm11 \n\t" + " vxorps %%xmm12, %%xmm12, %%xmm12 \n\t" + " vxorps %%xmm13, %%xmm13, %%xmm13 \n\t" + " vxorps %%xmm14, %%xmm14, %%xmm14 \n\t" + " vxorps %%xmm15, %%xmm15, %%xmm15 \n\t" + + " cmpq $0, %0 \n\t" + " je 2f \n\t" + + " .align 16 \n\t" + "1: \n\t" + + " vbroadcastss (%3,%1,1), %%xmm0 \n\t" // read b + " vmovups (%2,%1,8), %%xmm4 \n\t" + " vbroadcastss 4(%3,%1,1), %%xmm1 \n\t" + " vmovups 16(%2,%1,8), %%xmm5 \n\t" + " vmovups 32(%2,%1,8), %%xmm6 \n\t" + " vmovups 48(%2,%1,8), %%xmm7 \n\t" + + " vfmaddps %%xmm8 , %%xmm0 , %%xmm4 , %%xmm8 \n\t" + " vfmaddps %%xmm12, %%xmm1 , %%xmm4 , %%xmm12 \n\t" + " vfmaddps %%xmm9 , %%xmm0 , %%xmm5 , %%xmm9 \n\t" + " vfmaddps %%xmm13, %%xmm1 , %%xmm5 , %%xmm13 \n\t" + " vfmaddps %%xmm10, %%xmm0 , %%xmm6 , %%xmm10 \n\t" + " vfmaddps %%xmm14, %%xmm1 , %%xmm6 , %%xmm14 \n\t" + " addq $8, %1 \n\t" + " vfmaddps %%xmm11, %%xmm0 , %%xmm7 , %%xmm11 \n\t" + " vfmaddps %%xmm15, %%xmm1 , %%xmm7 , %%xmm15 \n\t" + " cmpq %1, %0 \n\t" + + " jnz 1b \n\t" + + "2: \n\t" + + + " 
vmovups (%4) , %%xmm0 \n\t" + " vmovups 16(%4) , %%xmm1 \n\t" + " vmovups 32(%4) , %%xmm2 \n\t" + " vmovups 48(%4) , %%xmm3 \n\t" + + " vmovups (%5) , %%xmm4 \n\t" + " vmovups 16(%5) , %%xmm5 \n\t" + " vmovups 32(%5) , %%xmm6 \n\t" + " vmovups 48(%5) , %%xmm7 \n\t" + + " vsubps %%xmm8 , %%xmm0 , %%xmm8 \n\t" + " vsubps %%xmm9 , %%xmm1 , %%xmm9 \n\t" + " vsubps %%xmm10, %%xmm2 , %%xmm10 \n\t" + " vsubps %%xmm11, %%xmm3 , %%xmm11 \n\t" + + " vsubps %%xmm12, %%xmm4 , %%xmm12 \n\t" + " vsubps %%xmm13, %%xmm5 , %%xmm13 \n\t" + " vsubps %%xmm14, %%xmm6 , %%xmm14 \n\t" + " vsubps %%xmm15, %%xmm7 , %%xmm15 \n\t" + + "3: \n\t" + + " vbroadcastss 60(%6) , %%xmm0 \n\t" // i=15, read aa[i] + " vshufps $0xff , %%xmm11 , %%xmm11 , %%xmm1 \n\t" // extract bb0 + " vshufps $0xff , %%xmm15 , %%xmm15 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 60(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 60(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 0(%6) , %%xmm4 \n\t" // read a[k] + " vmovups 16(%6) , %%xmm5 \n\t" // read a[k] + " vmovups 32(%6) , %%xmm6 \n\t" // read a[k] + " vmovups 48(%6) , %%xmm7 \n\t" // read a[k] + " vfnmaddps %%xmm8 , %%xmm1 , %%xmm4 , %%xmm8 \n\t" + " vfnmaddps %%xmm12 , %%xmm2 , %%xmm4 , %%xmm12 \n\t" + " vfnmaddps %%xmm9 , %%xmm1 , %%xmm5 , %%xmm9 \n\t" + " vfnmaddps %%xmm13 , %%xmm2 , %%xmm5 , %%xmm13 \n\t" + " vfnmaddps %%xmm10 , %%xmm1 , %%xmm6 , %%xmm10 \n\t" + " vfnmaddps %%xmm14 , %%xmm2 , %%xmm6 , %%xmm14 \n\t" + " vfnmaddps %%xmm11 , %%xmm1 , %%xmm7 , %%xmm11 \n\t" + " vfnmaddps %%xmm15 , %%xmm2 , %%xmm7 , %%xmm15 \n\t" + + " subq $64 , %6 \n\t" // a -= m + " subq $8 , %7 \n\t" // b -= n + + " vbroadcastss 56(%6) , %%xmm0 \n\t" // i=14, read aa[i] + " vshufps $0xaa , %%xmm11 , %%xmm11 , %%xmm1 \n\t" // extract bb0 + " vshufps $0xaa , %%xmm15 , %%xmm15 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 56(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 56(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 0(%6) , %%xmm4 \n\t" // read a[k] + " vmovups 16(%6) , %%xmm5 \n\t" // read a[k] + " vmovups 32(%6) , %%xmm6 \n\t" // read a[k] + " vmovups 48(%6) , %%xmm7 \n\t" // read a[k] + " vfnmaddps %%xmm8 , %%xmm1 , %%xmm4 , %%xmm8 \n\t" + " vfnmaddps %%xmm12 , %%xmm2 , %%xmm4 , %%xmm12 \n\t" + " vfnmaddps %%xmm9 , %%xmm1 , %%xmm5 , %%xmm9 \n\t" + " vfnmaddps %%xmm13 , %%xmm2 , %%xmm5 , %%xmm13 \n\t" + " vfnmaddps %%xmm10 , %%xmm1 , %%xmm6 , %%xmm10 \n\t" + " vfnmaddps %%xmm14 , %%xmm2 , %%xmm6 , %%xmm14 \n\t" + " vfnmaddps %%xmm11 , %%xmm1 , %%xmm7 , %%xmm11 \n\t" + " vfnmaddps %%xmm15 , %%xmm2 , %%xmm7 , %%xmm15 \n\t" + + " subq $64 , %6 \n\t" // a -= m + " subq $8 , %7 \n\t" // b -= n + + " vbroadcastss 52(%6) , %%xmm0 \n\t" // i=13, read aa[i] + " vshufps $0x55 , %%xmm11 , %%xmm11 , %%xmm1 \n\t" // extract bb0 + " vshufps $0x55 , %%xmm15 , %%xmm15 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 52(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 52(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 
* aa + + " vmovups 0(%6) , %%xmm4 \n\t" // read a[k] + " vmovups 16(%6) , %%xmm5 \n\t" // read a[k] + " vmovups 32(%6) , %%xmm6 \n\t" // read a[k] + " vmovups 48(%6) , %%xmm7 \n\t" // read a[k] + " vfnmaddps %%xmm8 , %%xmm1 , %%xmm4 , %%xmm8 \n\t" + " vfnmaddps %%xmm12 , %%xmm2 , %%xmm4 , %%xmm12 \n\t" + " vfnmaddps %%xmm9 , %%xmm1 , %%xmm5 , %%xmm9 \n\t" + " vfnmaddps %%xmm13 , %%xmm2 , %%xmm5 , %%xmm13 \n\t" + " vfnmaddps %%xmm10 , %%xmm1 , %%xmm6 , %%xmm10 \n\t" + " vfnmaddps %%xmm14 , %%xmm2 , %%xmm6 , %%xmm14 \n\t" + " vfnmaddps %%xmm11 , %%xmm1 , %%xmm7 , %%xmm11 \n\t" + " vfnmaddps %%xmm15 , %%xmm2 , %%xmm7 , %%xmm15 \n\t" + + " subq $64 , %6 \n\t" // a -= m + " subq $8 , %7 \n\t" // b -= n + + " vbroadcastss 48(%6) , %%xmm0 \n\t" // i=12, read aa[i] + " vshufps $0x00 , %%xmm11 , %%xmm11 , %%xmm1 \n\t" // extract bb0 + " vshufps $0x00 , %%xmm15 , %%xmm15 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 48(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 48(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 0(%6) , %%xmm4 \n\t" // read a[k] + " vmovups 16(%6) , %%xmm5 \n\t" // read a[k] + " vmovups 32(%6) , %%xmm6 \n\t" // read a[k] + " vfnmaddps %%xmm8 , %%xmm1 , %%xmm4 , %%xmm8 \n\t" + " vfnmaddps %%xmm12 , %%xmm2 , %%xmm4 , %%xmm12 \n\t" + " vfnmaddps %%xmm9 , %%xmm1 , %%xmm5 , %%xmm9 \n\t" + " vfnmaddps %%xmm13 , %%xmm2 , %%xmm5 , %%xmm13 \n\t" + " vfnmaddps %%xmm10 , %%xmm1 , %%xmm6 , %%xmm10 \n\t" + " vfnmaddps %%xmm14 , %%xmm2 , %%xmm6 , %%xmm14 \n\t" + + " subq $64 , %6 \n\t" // a -= m + " subq $8 , %7 \n\t" // b -= n + + " vbroadcastss 44(%6) , %%xmm0 \n\t" // i=11, read aa[i] + " vshufps $0xff , %%xmm10 , %%xmm10 , %%xmm1 \n\t" // extract bb0 + " vshufps $0xff , %%xmm14 , %%xmm14 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 44(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 44(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 0(%6) , %%xmm4 \n\t" // read a[k] + " vmovups 16(%6) , %%xmm5 \n\t" // read a[k] + " vmovups 32(%6) , %%xmm6 \n\t" // read a[k] + " vfnmaddps %%xmm8 , %%xmm1 , %%xmm4 , %%xmm8 \n\t" + " vfnmaddps %%xmm12 , %%xmm2 , %%xmm4 , %%xmm12 \n\t" + " vfnmaddps %%xmm9 , %%xmm1 , %%xmm5 , %%xmm9 \n\t" + " vfnmaddps %%xmm13 , %%xmm2 , %%xmm5 , %%xmm13 \n\t" + " vfnmaddps %%xmm10 , %%xmm1 , %%xmm6 , %%xmm10 \n\t" + " vfnmaddps %%xmm14 , %%xmm2 , %%xmm6 , %%xmm14 \n\t" + + " subq $64 , %6 \n\t" // a -= m + " subq $8 , %7 \n\t" // b -= n + + " vbroadcastss 40(%6) , %%xmm0 \n\t" // i=10, read aa[i] + " vshufps $0xaa , %%xmm10 , %%xmm10 , %%xmm1 \n\t" // extract bb0 + " vshufps $0xaa , %%xmm14 , %%xmm14 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 40(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 40(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 0(%6) , %%xmm4 \n\t" // read a[k] + " vmovups 16(%6) , %%xmm5 \n\t" // read a[k] + " vmovups 32(%6) , %%xmm6 \n\t" // read a[k] + " vfnmaddps %%xmm8 , %%xmm1 , %%xmm4 , %%xmm8 \n\t" + " vfnmaddps %%xmm12 , %%xmm2 , 
%%xmm4 , %%xmm12 \n\t" + " vfnmaddps %%xmm9 , %%xmm1 , %%xmm5 , %%xmm9 \n\t" + " vfnmaddps %%xmm13 , %%xmm2 , %%xmm5 , %%xmm13 \n\t" + " vfnmaddps %%xmm10 , %%xmm1 , %%xmm6 , %%xmm10 \n\t" + " vfnmaddps %%xmm14 , %%xmm2 , %%xmm6 , %%xmm14 \n\t" + + " subq $64 , %6 \n\t" // a -= m + " subq $8 , %7 \n\t" // b -= n + + " vbroadcastss 36(%6) , %%xmm0 \n\t" // i=9 , read aa[i] + " vshufps $0x55 , %%xmm10 , %%xmm10 , %%xmm1 \n\t" // extract bb0 + " vshufps $0x55 , %%xmm14 , %%xmm14 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 36(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 36(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 0(%6) , %%xmm4 \n\t" // read a[k] + " vmovups 16(%6) , %%xmm5 \n\t" // read a[k] + " vmovups 32(%6) , %%xmm6 \n\t" // read a[k] + " vfnmaddps %%xmm8 , %%xmm1 , %%xmm4 , %%xmm8 \n\t" + " vfnmaddps %%xmm12 , %%xmm2 , %%xmm4 , %%xmm12 \n\t" + " vfnmaddps %%xmm9 , %%xmm1 , %%xmm5 , %%xmm9 \n\t" + " vfnmaddps %%xmm13 , %%xmm2 , %%xmm5 , %%xmm13 \n\t" + " vfnmaddps %%xmm10 , %%xmm1 , %%xmm6 , %%xmm10 \n\t" + " vfnmaddps %%xmm14 , %%xmm2 , %%xmm6 , %%xmm14 \n\t" + + " subq $64 , %6 \n\t" // a -= m + " subq $8 , %7 \n\t" // b -= n + + " vbroadcastss 32(%6) , %%xmm0 \n\t" // i=8 , read aa[i] + " vshufps $0x00 , %%xmm10 , %%xmm10 , %%xmm1 \n\t" // extract bb0 + " vshufps $0x00 , %%xmm14 , %%xmm14 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 32(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 32(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 0(%6) , %%xmm4 \n\t" // read a[k] + " vmovups 16(%6) , %%xmm5 \n\t" // read a[k] + " vfnmaddps %%xmm8 , %%xmm1 , %%xmm4 , %%xmm8 \n\t" + " vfnmaddps %%xmm12 , %%xmm2 , %%xmm4 , %%xmm12 \n\t" + " vfnmaddps %%xmm9 , %%xmm1 , %%xmm5 , %%xmm9 \n\t" + " vfnmaddps %%xmm13 , %%xmm2 , %%xmm5 , %%xmm13 \n\t" + + " subq $64 , %6 \n\t" // a -= m + " subq $8 , %7 \n\t" // b -= n + + " vbroadcastss 28(%6) , %%xmm0 \n\t" // i=7 , read aa[i] + " vshufps $0xff , %%xmm9 , %%xmm9 , %%xmm1 \n\t" // extract bb0 + " vshufps $0xff , %%xmm13 , %%xmm13 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 28(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 28(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 0(%6) , %%xmm4 \n\t" // read a[k] + " vmovups 16(%6) , %%xmm5 \n\t" // read a[k] + " vfnmaddps %%xmm8 , %%xmm1 , %%xmm4 , %%xmm8 \n\t" + " vfnmaddps %%xmm12 , %%xmm2 , %%xmm4 , %%xmm12 \n\t" + " vfnmaddps %%xmm9 , %%xmm1 , %%xmm5 , %%xmm9 \n\t" + " vfnmaddps %%xmm13 , %%xmm2 , %%xmm5 , %%xmm13 \n\t" + + " subq $64 , %6 \n\t" // a -= m + " subq $8 , %7 \n\t" // b -= n + + " vbroadcastss 24(%6) , %%xmm0 \n\t" // i=6 , read aa[i] + " vshufps $0xaa , %%xmm9 , %%xmm9 , %%xmm1 \n\t" // extract bb0 + " vshufps $0xaa , %%xmm13 , %%xmm13 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 24(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 24(%5) \n\t" // c[i] = bb1 
* aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 0(%6) , %%xmm4 \n\t" // read a[k] + " vmovups 16(%6) , %%xmm5 \n\t" // read a[k] + " vfnmaddps %%xmm8 , %%xmm1 , %%xmm4 , %%xmm8 \n\t" + " vfnmaddps %%xmm12 , %%xmm2 , %%xmm4 , %%xmm12 \n\t" + " vfnmaddps %%xmm9 , %%xmm1 , %%xmm5 , %%xmm9 \n\t" + " vfnmaddps %%xmm13 , %%xmm2 , %%xmm5 , %%xmm13 \n\t" + + " subq $64 , %6 \n\t" // a -= m + " subq $8 , %7 \n\t" // b -= n + + " vbroadcastss 20(%6) , %%xmm0 \n\t" // i=5 , read aa[i] + " vshufps $0x55 , %%xmm9 , %%xmm9 , %%xmm1 \n\t" // extract bb0 + " vshufps $0x55 , %%xmm13 , %%xmm13 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 20(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 20(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 0(%6) , %%xmm4 \n\t" // read a[k] + " vmovups 16(%6) , %%xmm5 \n\t" // read a[k] + " vfnmaddps %%xmm8 , %%xmm1 , %%xmm4 , %%xmm8 \n\t" + " vfnmaddps %%xmm12 , %%xmm2 , %%xmm4 , %%xmm12 \n\t" + " vfnmaddps %%xmm9 , %%xmm1 , %%xmm5 , %%xmm9 \n\t" + " vfnmaddps %%xmm13 , %%xmm2 , %%xmm5 , %%xmm13 \n\t" + + " subq $64 , %6 \n\t" // a -= m + " subq $8 , %7 \n\t" // b -= n + + " vbroadcastss 16(%6) , %%xmm0 \n\t" // i=4 , read aa[i] + " vshufps $0x00 , %%xmm9 , %%xmm9 , %%xmm1 \n\t" // extract bb0 + " vshufps $0x00 , %%xmm13 , %%xmm13 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 16(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 16(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 0(%6) , %%xmm4 \n\t" // read a[k] + " vfnmaddps %%xmm8 , %%xmm1 , %%xmm4 , %%xmm8 \n\t" + " vfnmaddps %%xmm12 , %%xmm2 , %%xmm4 , %%xmm12 \n\t" + + " subq $64 , %6 \n\t" // a -= m + " subq $8 , %7 \n\t" // b -= n + + " vbroadcastss 12(%6) , %%xmm0 \n\t" // i=3 , read aa[i] + " vshufps $0xff , %%xmm8 , %%xmm8 , %%xmm1 \n\t" // extract bb0 + " vshufps $0xff , %%xmm12 , %%xmm12 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 12(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 12(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 0(%6) , %%xmm4 \n\t" // read a[k] + " vfnmaddps %%xmm8 , %%xmm1 , %%xmm4 , %%xmm8 \n\t" + " vfnmaddps %%xmm12 , %%xmm2 , %%xmm4 , %%xmm12 \n\t" + + " subq $64 , %6 \n\t" // a -= m + " subq $8 , %7 \n\t" // b -= n + + " vbroadcastss 8(%6) , %%xmm0 \n\t" // i=2 , read aa[i] + " vshufps $0xaa , %%xmm8 , %%xmm8 , %%xmm1 \n\t" // extract bb0 + " vshufps $0xaa , %%xmm12 , %%xmm12 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 8(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 8(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 0(%6) , %%xmm4 \n\t" // read a[k] + " vfnmaddps %%xmm8 , %%xmm1 , %%xmm4 , %%xmm8 \n\t" + " vfnmaddps %%xmm12 , %%xmm2 , %%xmm4 , %%xmm12 \n\t" + + " subq $64 , %6 \n\t" // a -= m + " 
subq $8 , %7 \n\t" // b -= n + + " vbroadcastss 4(%6) , %%xmm0 \n\t" // i=1 , read aa[i] + " vshufps $0x55 , %%xmm8 , %%xmm8 , %%xmm1 \n\t" // extract bb0 + " vshufps $0x55 , %%xmm12 , %%xmm12 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 4(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 4(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 0(%6) , %%xmm4 \n\t" // read a[k] + " vfnmaddps %%xmm8 , %%xmm1 , %%xmm4 , %%xmm8 \n\t" + " vfnmaddps %%xmm12 , %%xmm2 , %%xmm4 , %%xmm12 \n\t" + + " subq $64 , %6 \n\t" // a -= m + " subq $8 , %7 \n\t" // b -= n + + " vbroadcastss 0(%6) , %%xmm0 \n\t" // i=0 , read aa[i] + " vshufps $0x00 , %%xmm8 , %%xmm8 , %%xmm1 \n\t" // extract bb0 + " vshufps $0x00 , %%xmm12 , %%xmm12 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 0(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 0(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vzeroupper \n\t" + + : + : + "r" (n1), // 0 + "a" (i), // 1 + "r" (a), // 2 + "r" (b), // 3 + "r" (c), // 4 + "r" (c1), // 5 + "r" (as), // 6 + "r" (bs) // 7 + : "cc", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + + + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa, bb; + + int i, j, k; + + a += (m - 1) * m; + b += (m - 1) * n; + + for (i = m - 1; i >= 0; i--) { + + aa = *(a + i); + + for (j = 0; j < n; j ++) { + bb = *(c + i + j * ldc); + bb *= aa; + *b = bb; + *(c + i + j * ldc) = bb; + b ++; + + for (k = 0; k < i; k ++){ + *(c + k + j * ldc) -= bb * *(a + k); + } + + } + a -= m; + b -= 2 * n; + } + +} + +#else + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa1, aa2; + FLOAT bb1, bb2; + FLOAT cc1, cc2; + + int i, j, k; + + ldc *= 2; + a += (m - 1) * m * 2; + b += (m - 1) * n * 2; + + for (i = m - 1; i >= 0; i--) { + + aa1 = *(a + i * 2 + 0); + aa2 = *(a + i * 2 + 1); + + for (j = 0; j < n; j ++) { + bb1 = *(c + i * 2 + 0 + j * ldc); + bb2 = *(c + i * 2 + 1 + j * ldc); + +#ifndef CONJ + cc1 = aa1 * bb1 - aa2 * bb2; + cc2 = aa1 * bb2 + aa2 * bb1; +#else + cc1 = aa1 * bb1 + aa2 * bb2; + cc2 = aa1 * bb2 - aa2 * bb1; +#endif + + + *(b + 0) = cc1; + *(b + 1) = cc2; + *(c + i * 2 + 0 + j * ldc) = cc1; + *(c + i * 2 + 1 + j * ldc) = cc2; + b += 2; + + for (k = 0; k < i; k ++){ +#ifndef CONJ + *(c + k * 2 + 0 + j * ldc) -= cc1 * *(a + k * 2 + 0) - cc2 * *(a + k * 2 + 1); + *(c + k * 2 + 1 + j * ldc) -= cc1 * *(a + k * 2 + 1) + cc2 * *(a + k * 2 + 0); +#else + *(c + k * 2 + 0 + j * ldc) -= cc1 * *(a + k * 2 + 0) + cc2 * *(a + k * 2 + 1); + *(c + k * 2 + 1 + j * ldc) -= - cc1 * *(a + k * 2 + 1) + cc2 * *(a + k * 2 + 0); +#endif + } + + } + a -= m * 2; + b -= 4 * n; + } + +} + +#endif + + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, +#ifdef COMPLEX + FLOAT dummy2, +#endif + FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){ + + BLASLONG i, j; + FLOAT *aa, *cc; + BLASLONG kk; + +#if 0 + fprintf(stderr, "TRSM KERNEL LN : m = %3ld n = %3ld k = %3ld offset = %3ld\n", + m, n, 
k, offset); +#endif + + j = (n >> GEMM_UNROLL_N_SHIFT); + + while (j > 0) { + + kk = m + offset; + + if (m & (GEMM_UNROLL_M - 1)) { + for (i = 1; i < GEMM_UNROLL_M; i *= 2){ + if (m & i) { + aa = a + ((m & ~(i - 1)) - i) * k * COMPSIZE; + cc = c + ((m & ~(i - 1)) - i) * COMPSIZE; + + if (k - kk > 0) { + GEMM_KERNEL(i, GEMM_UNROLL_N, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + i * kk * COMPSIZE, + b + GEMM_UNROLL_N * kk * COMPSIZE, + cc, + ldc); + } + + solve(i, GEMM_UNROLL_N, + aa + (kk - i) * i * COMPSIZE, + b + (kk - i) * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + kk -= i; + } + } + } + + i = (m >> GEMM_UNROLL_M_SHIFT); + if (i > 0) { + aa = a + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * k * COMPSIZE; + cc = c + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * COMPSIZE; + + do { + strsm_LN_solve_opt(k-kk, aa + GEMM_UNROLL_M * kk * COMPSIZE, b + GEMM_UNROLL_N * kk * COMPSIZE, cc, ldc, + aa + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_M * COMPSIZE,b + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_N * COMPSIZE); + + aa -= GEMM_UNROLL_M * k * COMPSIZE; + cc -= GEMM_UNROLL_M * COMPSIZE; + kk -= GEMM_UNROLL_M; + i --; + } while (i > 0); + } + + b += GEMM_UNROLL_N * k * COMPSIZE; + c += GEMM_UNROLL_N * ldc * COMPSIZE; + j --; + } + + if (n & (GEMM_UNROLL_N - 1)) { + + j = (GEMM_UNROLL_N >> 1); + while (j > 0) { + if (n & j) { + + kk = m + offset; + + if (m & (GEMM_UNROLL_M - 1)) { + for (i = 1; i < GEMM_UNROLL_M; i *= 2){ + if (m & i) { + aa = a + ((m & ~(i - 1)) - i) * k * COMPSIZE; + cc = c + ((m & ~(i - 1)) - i) * COMPSIZE; + + if (k - kk > 0) { + GEMM_KERNEL(i, j, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + i * kk * COMPSIZE, + b + j * kk * COMPSIZE, + cc, ldc); + } + + solve(i, j, + aa + (kk - i) * i * COMPSIZE, + b + (kk - i) * j * COMPSIZE, + cc, ldc); + + kk -= i; + } + } + } + + i = (m >> GEMM_UNROLL_M_SHIFT); + if (i > 0) { + aa = a + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * k * COMPSIZE; + cc = c + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * COMPSIZE; + + do { + if (k - kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, j, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + GEMM_UNROLL_M * kk * COMPSIZE, + b + j * kk * COMPSIZE, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, j, + aa + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - GEMM_UNROLL_M) * j * COMPSIZE, + cc, ldc); + + aa -= GEMM_UNROLL_M * k * COMPSIZE; + cc -= GEMM_UNROLL_M * COMPSIZE; + kk -= GEMM_UNROLL_M; + i --; + } while (i > 0); + } + + b += j * k * COMPSIZE; + c += j * ldc * COMPSIZE; + } + j >>= 1; + } + } + + return 0; +} diff --git a/kernel/x86_64/strsm_kernel_LT_bulldozer.c b/kernel/x86_64/strsm_kernel_LT_bulldozer.c new file mode 100644 index 000000000..0623dddb0 --- /dev/null +++ b/kernel/x86_64/strsm_kernel_LT_bulldozer.c @@ -0,0 +1,739 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. 
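For reference, the LN driver in the hunk above pairs a GEMM update with a small triangular solve for every diagonal block, working from the last block row upward: GEMM_KERNEL (called with dm1 = -1) removes the contribution of the rows that are already solved, and solve()/strsm_LN_solve_opt then finish the block against the diagonal. A rough plain-C model of one such step is sketched below. The layouts are simplified dense arrays rather than OpenBLAS's packed panels, the names are hypothetical, and the diagonal of the triangular block is assumed to be stored pre-inverted by the packing stage, which is why the solve multiplies instead of divides.

#include <stddef.h>

/* Illustrative sketch only, not part of the kernels above. */
static void ln_block_step(size_t mb, size_t nb, size_t kres,
                          const float *a_panel,  /* mb x kres, row-major        */
                          const float *x_solved, /* kres x nb, row-major        */
                          const float *a_diag,   /* mb slices of mb floats;
                                                    slice i holds column i,
                                                    a_diag[i + i*mb] = 1/A(i,i) */
                          float *c, size_t ldc)  /* mb x nb block of C/B        */
{
    /* GEMM update: c -= a_panel * x_solved (the GEMM_KERNEL call with dm1). */
    for (size_t i = 0; i < mb; i++)
        for (size_t j = 0; j < nb; j++) {
            float s = 0.0f;
            for (size_t l = 0; l < kres; l++)
                s += a_panel[i * kres + l] * x_solved[l * nb + j];
            c[i + j * ldc] -= s;
        }

    /* Triangular solve on the diagonal block, last row first - the same walk
       that solve() above performs on the packed data.                        */
    for (size_t i = mb; i-- > 0; ) {
        float aa = a_diag[i + i * mb];            /* pre-inverted diagonal     */
        for (size_t j = 0; j < nb; j++) {
            float bb = c[i + j * ldc] * aa;
            c[i + j * ldc] = bb;
            for (size_t kk = 0; kk < i; kk++)     /* eliminate into rows above */
                c[kk + j * ldc] -= bb * a_diag[kk + i * mb];
        }
    }
}

In the real kernel the solved values are also written back into the packed b panel so later GEMM updates can reuse them; that write is omitted from this sketch.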
*/ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#include "common.h" + +static FLOAT dm1 = -1.; + +#ifdef CONJ +#define GEMM_KERNEL GEMM_KERNEL_L +#else +#define GEMM_KERNEL GEMM_KERNEL_N +#endif + +#if GEMM_DEFAULT_UNROLL_M == 1 +#define GEMM_UNROLL_M_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 2 +#define GEMM_UNROLL_M_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 4 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 6 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 8 +#define GEMM_UNROLL_M_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 16 +#define GEMM_UNROLL_M_SHIFT 4 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 1 +#define GEMM_UNROLL_N_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 2 +#define GEMM_UNROLL_N_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 4 +#define GEMM_UNROLL_N_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 8 +#define GEMM_UNROLL_N_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 16 +#define GEMM_UNROLL_N_SHIFT 4 +#endif + + + +static void strsm_LT_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) __attribute__ ((noinline)); + +static void strsm_LT_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) +{ + + FLOAT *c1 = c + ldc ; + BLASLONG n1 = n * 8; + BLASLONG i=0; + + __asm__ __volatile__ + ( + " vzeroupper \n\t" + " prefetcht0 (%4) \n\t" + " prefetcht0 (%5) \n\t" + " vxorps %%xmm8 , %%xmm8 , %%xmm8 \n\t" + " vxorps %%xmm9 , %%xmm9 , %%xmm9 \n\t" + " vxorps %%xmm10, %%xmm10, %%xmm10 \n\t" + " vxorps %%xmm11, %%xmm11, %%xmm11 \n\t" + " vxorps %%xmm12, %%xmm12, %%xmm12 \n\t" + " vxorps %%xmm13, %%xmm13, %%xmm13 \n\t" + " vxorps %%xmm14, %%xmm14, %%xmm14 \n\t" + " vxorps %%xmm15, %%xmm15, %%xmm15 \n\t" + + " cmpq $0, %0 \n\t" + " je 2f \n\t" + + " .align 16 \n\t" + "1: \n\t" + + " vbroadcastss (%3,%1,1), %%xmm0 \n\t" // read b + " vmovups (%2,%1,8), %%xmm4 \n\t" + " vbroadcastss 4(%3,%1,1), %%xmm1 \n\t" + " vmovups 16(%2,%1,8), %%xmm5 \n\t" + " vmovups 32(%2,%1,8), %%xmm6 \n\t" + " vmovups 48(%2,%1,8), %%xmm7 \n\t" + + " vfmaddps %%xmm8 , %%xmm0 , %%xmm4 , %%xmm8 \n\t" + " vfmaddps %%xmm12, %%xmm1 , %%xmm4 , %%xmm12 \n\t" + " vfmaddps %%xmm9 , %%xmm0 , %%xmm5 , %%xmm9 \n\t" + " vfmaddps %%xmm13, %%xmm1 , %%xmm5 , %%xmm13 \n\t" + " vfmaddps %%xmm10, %%xmm0 , %%xmm6 , %%xmm10 \n\t" + " vfmaddps %%xmm14, %%xmm1 , 
%%xmm6 , %%xmm14 \n\t" + " addq $8, %1 \n\t" + " vfmaddps %%xmm11, %%xmm0 , %%xmm7 , %%xmm11 \n\t" + " vfmaddps %%xmm15, %%xmm1 , %%xmm7 , %%xmm15 \n\t" + " cmpq %1, %0 \n\t" + + " jnz 1b \n\t" + + "2: \n\t" + + + " vmovups (%4) , %%xmm0 \n\t" + " vmovups 16(%4) , %%xmm1 \n\t" + " vmovups 32(%4) , %%xmm2 \n\t" + " vmovups 48(%4) , %%xmm3 \n\t" + + " vmovups (%5) , %%xmm4 \n\t" + " vmovups 16(%5) , %%xmm5 \n\t" + " vmovups 32(%5) , %%xmm6 \n\t" + " vmovups 48(%5) , %%xmm7 \n\t" + + " vsubps %%xmm8 , %%xmm0 , %%xmm8 \n\t" + " vsubps %%xmm9 , %%xmm1 , %%xmm9 \n\t" + " vsubps %%xmm10, %%xmm2 , %%xmm10 \n\t" + " vsubps %%xmm11, %%xmm3 , %%xmm11 \n\t" + + " vsubps %%xmm12, %%xmm4 , %%xmm12 \n\t" + " vsubps %%xmm13, %%xmm5 , %%xmm13 \n\t" + " vsubps %%xmm14, %%xmm6 , %%xmm14 \n\t" + " vsubps %%xmm15, %%xmm7 , %%xmm15 \n\t" + + "3: \n\t" + + " vbroadcastss 0(%6) , %%xmm0 \n\t" // i=0, read aa[i] + " vshufps $0x00 , %%xmm8 , %%xmm8 , %%xmm1 \n\t" // extract bb0 + " vshufps $0x00 , %%xmm12 , %%xmm12 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 0(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 0(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 0(%6) , %%xmm4 \n\t" // read a[k] + " vmovups 16(%6) , %%xmm5 \n\t" // read a[k] + " vmovups 32(%6) , %%xmm6 \n\t" // read a[k] + " vmovups 48(%6) , %%xmm7 \n\t" // read a[k] + " vfnmaddps %%xmm8 , %%xmm1 , %%xmm4 , %%xmm8 \n\t" + " vfnmaddps %%xmm12 , %%xmm2 , %%xmm4 , %%xmm12 \n\t" + " vfnmaddps %%xmm9 , %%xmm1 , %%xmm5 , %%xmm9 \n\t" + " vfnmaddps %%xmm13 , %%xmm2 , %%xmm5 , %%xmm13 \n\t" + " vfnmaddps %%xmm10 , %%xmm1 , %%xmm6 , %%xmm10 \n\t" + " vfnmaddps %%xmm14 , %%xmm2 , %%xmm6 , %%xmm14 \n\t" + " vfnmaddps %%xmm11 , %%xmm1 , %%xmm7 , %%xmm11 \n\t" + " vfnmaddps %%xmm15 , %%xmm2 , %%xmm7 , %%xmm15 \n\t" + + " addq $64 , %6 \n\t" // a -= m + " addq $8 , %7 \n\t" // b -= n + + " vbroadcastss 4(%6) , %%xmm0 \n\t" // i=1, read aa[i] + " vshufps $0x55 , %%xmm8 , %%xmm8 , %%xmm1 \n\t" // extract bb0 + " vshufps $0x55 , %%xmm12 , %%xmm12 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 4(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 4(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 0(%6) , %%xmm4 \n\t" // read a[k] + " vmovups 16(%6) , %%xmm5 \n\t" // read a[k] + " vmovups 32(%6) , %%xmm6 \n\t" // read a[k] + " vmovups 48(%6) , %%xmm7 \n\t" // read a[k] + " vfnmaddps %%xmm8 , %%xmm1 , %%xmm4 , %%xmm8 \n\t" + " vfnmaddps %%xmm12 , %%xmm2 , %%xmm4 , %%xmm12 \n\t" + " vfnmaddps %%xmm9 , %%xmm1 , %%xmm5 , %%xmm9 \n\t" + " vfnmaddps %%xmm13 , %%xmm2 , %%xmm5 , %%xmm13 \n\t" + " vfnmaddps %%xmm10 , %%xmm1 , %%xmm6 , %%xmm10 \n\t" + " vfnmaddps %%xmm14 , %%xmm2 , %%xmm6 , %%xmm14 \n\t" + " vfnmaddps %%xmm11 , %%xmm1 , %%xmm7 , %%xmm11 \n\t" + " vfnmaddps %%xmm15 , %%xmm2 , %%xmm7 , %%xmm15 \n\t" + + " addq $64 , %6 \n\t" // a -= m + " addq $8 , %7 \n\t" // b -= n + + " vbroadcastss 8(%6) , %%xmm0 \n\t" // i=2, read aa[i] + " vshufps $0xaa , %%xmm8 , %%xmm8 , %%xmm1 \n\t" // extract bb0 + " vshufps $0xaa , %%xmm12 , %%xmm12 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa 
+ " vmovss %%xmm1 , 8(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 8(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 0(%6) , %%xmm4 \n\t" // read a[k] + " vmovups 16(%6) , %%xmm5 \n\t" // read a[k] + " vmovups 32(%6) , %%xmm6 \n\t" // read a[k] + " vmovups 48(%6) , %%xmm7 \n\t" // read a[k] + " vfnmaddps %%xmm8 , %%xmm1 , %%xmm4 , %%xmm8 \n\t" + " vfnmaddps %%xmm12 , %%xmm2 , %%xmm4 , %%xmm12 \n\t" + " vfnmaddps %%xmm9 , %%xmm1 , %%xmm5 , %%xmm9 \n\t" + " vfnmaddps %%xmm13 , %%xmm2 , %%xmm5 , %%xmm13 \n\t" + " vfnmaddps %%xmm10 , %%xmm1 , %%xmm6 , %%xmm10 \n\t" + " vfnmaddps %%xmm14 , %%xmm2 , %%xmm6 , %%xmm14 \n\t" + " vfnmaddps %%xmm11 , %%xmm1 , %%xmm7 , %%xmm11 \n\t" + " vfnmaddps %%xmm15 , %%xmm2 , %%xmm7 , %%xmm15 \n\t" + + " addq $64 , %6 \n\t" // a -= m + " addq $8 , %7 \n\t" // b -= n + + " vbroadcastss 12(%6) , %%xmm0 \n\t" // i=3, read aa[i] + " vshufps $0xff , %%xmm8 , %%xmm8 , %%xmm1 \n\t" // extract bb0 + " vshufps $0xff , %%xmm12 , %%xmm12 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 12(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 12(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 16(%6) , %%xmm5 \n\t" // read a[k] + " vmovups 32(%6) , %%xmm6 \n\t" // read a[k] + " vmovups 48(%6) , %%xmm7 \n\t" // read a[k] + " vfnmaddps %%xmm9 , %%xmm1 , %%xmm5 , %%xmm9 \n\t" + " vfnmaddps %%xmm13 , %%xmm2 , %%xmm5 , %%xmm13 \n\t" + " vfnmaddps %%xmm10 , %%xmm1 , %%xmm6 , %%xmm10 \n\t" + " vfnmaddps %%xmm14 , %%xmm2 , %%xmm6 , %%xmm14 \n\t" + " vfnmaddps %%xmm11 , %%xmm1 , %%xmm7 , %%xmm11 \n\t" + " vfnmaddps %%xmm15 , %%xmm2 , %%xmm7 , %%xmm15 \n\t" + + " addq $64 , %6 \n\t" // a -= m + " addq $8 , %7 \n\t" // b -= n + + " vbroadcastss 16(%6) , %%xmm0 \n\t" // i=4, read aa[i] + " vshufps $0x00 , %%xmm9 , %%xmm9 , %%xmm1 \n\t" // extract bb0 + " vshufps $0x00 , %%xmm13 , %%xmm13 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 16(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 16(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 16(%6) , %%xmm5 \n\t" // read a[k] + " vmovups 32(%6) , %%xmm6 \n\t" // read a[k] + " vmovups 48(%6) , %%xmm7 \n\t" // read a[k] + " vfnmaddps %%xmm9 , %%xmm1 , %%xmm5 , %%xmm9 \n\t" + " vfnmaddps %%xmm13 , %%xmm2 , %%xmm5 , %%xmm13 \n\t" + " vfnmaddps %%xmm10 , %%xmm1 , %%xmm6 , %%xmm10 \n\t" + " vfnmaddps %%xmm14 , %%xmm2 , %%xmm6 , %%xmm14 \n\t" + " vfnmaddps %%xmm11 , %%xmm1 , %%xmm7 , %%xmm11 \n\t" + " vfnmaddps %%xmm15 , %%xmm2 , %%xmm7 , %%xmm15 \n\t" + + " addq $64 , %6 \n\t" // a -= m + " addq $8 , %7 \n\t" // b -= n + + " vbroadcastss 20(%6) , %%xmm0 \n\t" // i=5, read aa[i] + " vshufps $0x55 , %%xmm9 , %%xmm9 , %%xmm1 \n\t" // extract bb0 + " vshufps $0x55 , %%xmm13 , %%xmm13 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 20(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 20(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 16(%6) , %%xmm5 \n\t" 
// read a[k] + " vmovups 32(%6) , %%xmm6 \n\t" // read a[k] + " vmovups 48(%6) , %%xmm7 \n\t" // read a[k] + " vfnmaddps %%xmm9 , %%xmm1 , %%xmm5 , %%xmm9 \n\t" + " vfnmaddps %%xmm13 , %%xmm2 , %%xmm5 , %%xmm13 \n\t" + " vfnmaddps %%xmm10 , %%xmm1 , %%xmm6 , %%xmm10 \n\t" + " vfnmaddps %%xmm14 , %%xmm2 , %%xmm6 , %%xmm14 \n\t" + " vfnmaddps %%xmm11 , %%xmm1 , %%xmm7 , %%xmm11 \n\t" + " vfnmaddps %%xmm15 , %%xmm2 , %%xmm7 , %%xmm15 \n\t" + + " addq $64 , %6 \n\t" // a -= m + " addq $8 , %7 \n\t" // b -= n + + " vbroadcastss 24(%6) , %%xmm0 \n\t" // i=6, read aa[i] + " vshufps $0xaa , %%xmm9 , %%xmm9 , %%xmm1 \n\t" // extract bb0 + " vshufps $0xaa , %%xmm13 , %%xmm13 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 24(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 24(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 16(%6) , %%xmm5 \n\t" // read a[k] + " vmovups 32(%6) , %%xmm6 \n\t" // read a[k] + " vmovups 48(%6) , %%xmm7 \n\t" // read a[k] + " vfnmaddps %%xmm9 , %%xmm1 , %%xmm5 , %%xmm9 \n\t" + " vfnmaddps %%xmm13 , %%xmm2 , %%xmm5 , %%xmm13 \n\t" + " vfnmaddps %%xmm10 , %%xmm1 , %%xmm6 , %%xmm10 \n\t" + " vfnmaddps %%xmm14 , %%xmm2 , %%xmm6 , %%xmm14 \n\t" + " vfnmaddps %%xmm11 , %%xmm1 , %%xmm7 , %%xmm11 \n\t" + " vfnmaddps %%xmm15 , %%xmm2 , %%xmm7 , %%xmm15 \n\t" + + " addq $64 , %6 \n\t" // a -= m + " addq $8 , %7 \n\t" // b -= n + + " vbroadcastss 28(%6) , %%xmm0 \n\t" // i=7, read aa[i] + " vshufps $0xff , %%xmm9 , %%xmm9 , %%xmm1 \n\t" // extract bb0 + " vshufps $0xff , %%xmm13 , %%xmm13 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 28(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 28(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 32(%6) , %%xmm6 \n\t" // read a[k] + " vmovups 48(%6) , %%xmm7 \n\t" // read a[k] + " vfnmaddps %%xmm10 , %%xmm1 , %%xmm6 , %%xmm10 \n\t" + " vfnmaddps %%xmm14 , %%xmm2 , %%xmm6 , %%xmm14 \n\t" + " vfnmaddps %%xmm11 , %%xmm1 , %%xmm7 , %%xmm11 \n\t" + " vfnmaddps %%xmm15 , %%xmm2 , %%xmm7 , %%xmm15 \n\t" + + " addq $64 , %6 \n\t" // a -= m + " addq $8 , %7 \n\t" // b -= n + + " vbroadcastss 32(%6) , %%xmm0 \n\t" // i=8, read aa[i] + " vshufps $0x00 , %%xmm10 , %%xmm10 , %%xmm1 \n\t" // extract bb0 + " vshufps $0x00 , %%xmm14 , %%xmm14 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 32(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 32(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 32(%6) , %%xmm6 \n\t" // read a[k] + " vmovups 48(%6) , %%xmm7 \n\t" // read a[k] + " vfnmaddps %%xmm10 , %%xmm1 , %%xmm6 , %%xmm10 \n\t" + " vfnmaddps %%xmm14 , %%xmm2 , %%xmm6 , %%xmm14 \n\t" + " vfnmaddps %%xmm11 , %%xmm1 , %%xmm7 , %%xmm11 \n\t" + " vfnmaddps %%xmm15 , %%xmm2 , %%xmm7 , %%xmm15 \n\t" + + " addq $64 , %6 \n\t" // a -= m + " addq $8 , %7 \n\t" // b -= n + + " vbroadcastss 36(%6) , %%xmm0 \n\t" // i=9, read aa[i] + " vshufps $0x55 , %%xmm10 , %%xmm10 , %%xmm1 \n\t" // extract bb0 + " vshufps $0x55 , %%xmm14 , %%xmm14 , %%xmm2 \n\t" // extract bb1 + 
" vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 36(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 36(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 32(%6) , %%xmm6 \n\t" // read a[k] + " vmovups 48(%6) , %%xmm7 \n\t" // read a[k] + " vfnmaddps %%xmm10 , %%xmm1 , %%xmm6 , %%xmm10 \n\t" + " vfnmaddps %%xmm14 , %%xmm2 , %%xmm6 , %%xmm14 \n\t" + " vfnmaddps %%xmm11 , %%xmm1 , %%xmm7 , %%xmm11 \n\t" + " vfnmaddps %%xmm15 , %%xmm2 , %%xmm7 , %%xmm15 \n\t" + + " addq $64 , %6 \n\t" // a -= m + " addq $8 , %7 \n\t" // b -= n + + " vbroadcastss 40(%6) , %%xmm0 \n\t" // i=10, read aa[i] + " vshufps $0xaa , %%xmm10 , %%xmm10 , %%xmm1 \n\t" // extract bb0 + " vshufps $0xaa , %%xmm14 , %%xmm14 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 40(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 40(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 32(%6) , %%xmm6 \n\t" // read a[k] + " vmovups 48(%6) , %%xmm7 \n\t" // read a[k] + " vfnmaddps %%xmm10 , %%xmm1 , %%xmm6 , %%xmm10 \n\t" + " vfnmaddps %%xmm14 , %%xmm2 , %%xmm6 , %%xmm14 \n\t" + " vfnmaddps %%xmm11 , %%xmm1 , %%xmm7 , %%xmm11 \n\t" + " vfnmaddps %%xmm15 , %%xmm2 , %%xmm7 , %%xmm15 \n\t" + + " addq $64 , %6 \n\t" // a -= m + " addq $8 , %7 \n\t" // b -= n + + " vbroadcastss 44(%6) , %%xmm0 \n\t" // i=11, read aa[i] + " vshufps $0xff , %%xmm10 , %%xmm10 , %%xmm1 \n\t" // extract bb0 + " vshufps $0xff , %%xmm14 , %%xmm14 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 44(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 44(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 48(%6) , %%xmm7 \n\t" // read a[k] + " vfnmaddps %%xmm11 , %%xmm1 , %%xmm7 , %%xmm11 \n\t" + " vfnmaddps %%xmm15 , %%xmm2 , %%xmm7 , %%xmm15 \n\t" + + " addq $64 , %6 \n\t" // a -= m + " addq $8 , %7 \n\t" // b -= n + + " vbroadcastss 48(%6) , %%xmm0 \n\t" // i=12, read aa[i] + " vshufps $0x00 , %%xmm11 , %%xmm11 , %%xmm1 \n\t" // extract bb0 + " vshufps $0x00 , %%xmm15 , %%xmm15 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 48(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 48(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 48(%6) , %%xmm7 \n\t" // read a[k] + " vfnmaddps %%xmm11 , %%xmm1 , %%xmm7 , %%xmm11 \n\t" + " vfnmaddps %%xmm15 , %%xmm2 , %%xmm7 , %%xmm15 \n\t" + + " addq $64 , %6 \n\t" // a -= m + " addq $8 , %7 \n\t" // b -= n + + " vbroadcastss 52(%6) , %%xmm0 \n\t" // i=13, read aa[i] + " vshufps $0x55 , %%xmm11 , %%xmm11 , %%xmm1 \n\t" // extract bb0 + " vshufps $0x55 , %%xmm15 , %%xmm15 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 52(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 52(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss 
%%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 48(%6) , %%xmm7 \n\t" // read a[k] + " vfnmaddps %%xmm11 , %%xmm1 , %%xmm7 , %%xmm11 \n\t" + " vfnmaddps %%xmm15 , %%xmm2 , %%xmm7 , %%xmm15 \n\t" + + " addq $64 , %6 \n\t" // a -= m + " addq $8 , %7 \n\t" // b -= n + + " vbroadcastss 56(%6) , %%xmm0 \n\t" // i=14, read aa[i] + " vshufps $0xaa , %%xmm11 , %%xmm11 , %%xmm1 \n\t" // extract bb0 + " vshufps $0xaa , %%xmm15 , %%xmm15 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 56(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 56(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vmovups 48(%6) , %%xmm7 \n\t" // read a[k] + " vfnmaddps %%xmm11 , %%xmm1 , %%xmm7 , %%xmm11 \n\t" + " vfnmaddps %%xmm15 , %%xmm2 , %%xmm7 , %%xmm15 \n\t" + + " addq $64 , %6 \n\t" // a -= m + " addq $8 , %7 \n\t" // b -= n + + " vbroadcastss 60(%6) , %%xmm0 \n\t" // i=15, read aa[i] + " vshufps $0xff , %%xmm11 , %%xmm11 , %%xmm1 \n\t" // extract bb0 + " vshufps $0xff , %%xmm15 , %%xmm15 , %%xmm2 \n\t" // extract bb1 + " vmulps %%xmm0 , %%xmm1 , %%xmm1 \n\t" // bb0 * aa + " vmulps %%xmm0 , %%xmm2 , %%xmm2 \n\t" // bb1 * aa + " vmovss %%xmm1 , 60(%4) \n\t" // c[i] = bb0 * aa + " vmovss %%xmm2 , 60(%5) \n\t" // c[i] = bb1 * aa + " vmovss %%xmm1 , (%7) \n\t" // b[0] = bb0 * aa + " vmovss %%xmm2 , 4(%7) \n\t" // b[1] = bb1 * aa + + " vzeroupper \n\t" + + : + : + "r" (n1), // 0 + "a" (i), // 1 + "r" (a), // 2 + "r" (b), // 3 + "r" (c), // 4 + "r" (c1), // 5 + "r" (as), // 6 + "r" (bs) // 7 + : "cc", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + + +#ifndef COMPLEX + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa, bb; + + int i, j, k; + + for (i = 0; i < m; i++) { + + aa = *(a + i); + + for (j = 0; j < n; j ++) { + bb = *(c + i + j * ldc); + bb *= aa; + *b = bb; + *(c + i + j * ldc) = bb; + b ++; + + for (k = i + 1; k < m; k ++){ + *(c + k + j * ldc) -= bb * *(a + k); + } + + } + a += m; + } +} + +#else + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa1, aa2; + FLOAT bb1, bb2; + FLOAT cc1, cc2; + + int i, j, k; + + ldc *= 2; + + for (i = 0; i < m; i++) { + + aa1 = *(a + i * 2 + 0); + aa2 = *(a + i * 2 + 1); + + for (j = 0; j < n; j ++) { + bb1 = *(c + i * 2 + 0 + j * ldc); + bb2 = *(c + i * 2 + 1 + j * ldc); + +#ifndef CONJ + cc1 = aa1 * bb1 - aa2 * bb2; + cc2 = aa1 * bb2 + aa2 * bb1; +#else + cc1 = aa1 * bb1 + aa2 * bb2; + cc2 = aa1 * bb2 - aa2 * bb1; +#endif + + *(b + 0) = cc1; + *(b + 1) = cc2; + *(c + i * 2 + 0 + j * ldc) = cc1; + *(c + i * 2 + 1 + j * ldc) = cc2; + b += 2; + + for (k = i + 1; k < m; k ++){ +#ifndef CONJ + *(c + k * 2 + 0 + j * ldc) -= cc1 * *(a + k * 2 + 0) - cc2 * *(a + k * 2 + 1); + *(c + k * 2 + 1 + j * ldc) -= cc1 * *(a + k * 2 + 1) + cc2 * *(a + k * 2 + 0); +#else + *(c + k * 2 + 0 + j * ldc) -= cc1 * *(a + k * 2 + 0) + cc2 * *(a + k * 2 + 1); + *(c + k * 2 + 1 + j * ldc) -= -cc1 * *(a + k * 2 + 1) + cc2 * *(a + k * 2 + 0); +#endif + } + + } + a += m * 2; + } +} + +#endif + + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, +#ifdef COMPLEX + FLOAT dummy2, +#endif + FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){ + + 
FLOAT *aa, *cc; + BLASLONG kk; + BLASLONG i, j, jj; + +#if 0 + fprintf(stderr, "TRSM KERNEL LT : m = %3ld n = %3ld k = %3ld offset = %3ld\n", + m, n, k, offset); +#endif + + jj = 0; + + j = (n >> GEMM_UNROLL_N_SHIFT); + + while (j > 0) { + + kk = offset; + aa = a; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + + while (i > 0) { + + strsm_LT_solve_opt(kk , aa , b , cc, ldc, aa + kk * GEMM_UNROLL_M * COMPSIZE, b + kk * GEMM_UNROLL_N * COMPSIZE); + + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + kk += GEMM_UNROLL_M; + i --; + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + while (i > 0) { + if (m & i) { + if (kk > 0) { + GEMM_KERNEL(i, GEMM_UNROLL_N, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, b, cc, ldc); + } + solve(i, GEMM_UNROLL_N, + aa + kk * i * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + kk += i; + } + i >>= 1; + } + } + + b += GEMM_UNROLL_N * k * COMPSIZE; + c += GEMM_UNROLL_N * ldc * COMPSIZE; + j --; + jj += GEMM_UNROLL_M; + } + + if (n & (GEMM_UNROLL_N - 1)) { + + j = (GEMM_UNROLL_N >> 1); + while (j > 0) { + if (n & j) { + + kk = offset; + aa = a; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + + while (i > 0) { + if (kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, j, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, + b, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, j, + aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * j * COMPSIZE, cc, ldc); + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + kk += GEMM_UNROLL_M; + i --; + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + while (i > 0) { + if (m & i) { + if (kk > 0) { + GEMM_KERNEL(i, j, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, + b, + cc, + ldc); + } + + solve(i, j, + aa + kk * i * COMPSIZE, + b + kk * j * COMPSIZE, cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + kk += i; + } + i >>= 1; + } + } + + b += j * k * COMPSIZE; + c += j * ldc * COMPSIZE; + } + j >>= 1; + } + } + + return 0; +} diff --git a/kernel/x86_64/strsm_kernel_RN_bulldozer.c b/kernel/x86_64/strsm_kernel_RN_bulldozer.c new file mode 100644 index 000000000..4cc557d55 --- /dev/null +++ b/kernel/x86_64/strsm_kernel_RN_bulldozer.c @@ -0,0 +1,454 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. 
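The strsm_LT_solve_opt routine above keeps a 16x2 tile in xmm8-xmm15: the FMA loop accumulates the k-loop products, the vsubps block subtracts them from the values already in C, and the fully unrolled tail then performs a forward substitution one row at a time, scaling by the pre-inverted diagonal and writing the result to both C and the packed B panel. A scalar model of that unrolled tail, with hypothetical names and the same packed-A walk (16 floats per row, diagonal at position i), might look like this:

/* Illustrative sketch only; mirrors the unrolled tail of strsm_LT_solve_opt. */
static void lt_tile_solve_ref(const float *as,      /* packed A, 16 floats per row */
                              float *bs,            /* packed B, 2 floats per row  */
                              float *c0, float *c1, /* the two columns of C        */
                              float x0[16], float x1[16]) /* the C - A*B tile      */
{
    for (int i = 0; i < 16; i++) {
        float aa  = as[i];          /* 1/A(i,i): vbroadcastss 4*i(%6)             */
        float bb0 = x0[i] * aa;     /* column 0: vshufps lane extract + vmulps    */
        float bb1 = x1[i] * aa;     /* column 1                                   */
        c0[i] = bb0;  c1[i] = bb1;  /* vmovss to 4*i(%4) and 4*i(%5)              */
        bs[0] = bb0;  bs[1] = bb1;  /* vmovss to (%7) and 4(%7)                   */
        for (int k = i + 1; k < 16; k++) {  /* vfnmaddps: remove row i's share    */
            x0[k] -= bb0 * as[k];
            x1[k] -= bb1 * as[k];
        }
        as += 16;                   /* addq $64, %6 - next packed row of A        */
        bs += 2;                    /* addq $8,  %7 - next row pair of B          */
    }
}

The vector code simply applies vfnmaddps to all 16 lanes and never reads the already-solved ones again, which lets the whole tile stay resident in registers for the full substitution.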
IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#include "common.h" + +static FLOAT dm1 = -1.; + +#ifdef CONJ +#define GEMM_KERNEL GEMM_KERNEL_R +#else +#define GEMM_KERNEL GEMM_KERNEL_N +#endif + +#if GEMM_DEFAULT_UNROLL_M == 1 +#define GEMM_UNROLL_M_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 2 +#define GEMM_UNROLL_M_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 4 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 6 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 8 +#define GEMM_UNROLL_M_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 16 +#define GEMM_UNROLL_M_SHIFT 4 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 1 +#define GEMM_UNROLL_N_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 2 +#define GEMM_UNROLL_N_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 4 +#define GEMM_UNROLL_N_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 8 +#define GEMM_UNROLL_N_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 16 +#define GEMM_UNROLL_N_SHIFT 4 +#endif + + + +static void strsm_RN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) __attribute__ ((noinline)); + +static void strsm_RN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) +{ + + FLOAT *c1 = c + ldc ; + BLASLONG n1 = n * 8; + BLASLONG i=0; + + __asm__ __volatile__ + ( + " vzeroupper \n\t" + " prefetcht0 (%4) \n\t" + " prefetcht0 (%5) \n\t" + " vxorps %%xmm8 , %%xmm8 , %%xmm8 \n\t" + " vxorps %%xmm9 , %%xmm9 , %%xmm9 \n\t" + " vxorps %%xmm10, %%xmm10, %%xmm10 \n\t" + " vxorps %%xmm11, %%xmm11, %%xmm11 \n\t" + " vxorps %%xmm12, %%xmm12, %%xmm12 \n\t" + " vxorps %%xmm13, %%xmm13, %%xmm13 \n\t" + " vxorps %%xmm14, %%xmm14, %%xmm14 \n\t" + " vxorps %%xmm15, %%xmm15, %%xmm15 \n\t" + + " cmpq $0, %0 \n\t" + " je 2f \n\t" + + " .align 16 \n\t" + "1: \n\t" + + " vbroadcastss (%3,%1,1), %%xmm0 \n\t" // read b + " vmovups (%2,%1,8), %%xmm4 \n\t" + " vbroadcastss 4(%3,%1,1), %%xmm1 \n\t" + " vmovups 16(%2,%1,8), %%xmm5 \n\t" + " vmovups 32(%2,%1,8), %%xmm6 \n\t" + " vmovups 48(%2,%1,8), %%xmm7 \n\t" + + " vfmaddps %%xmm8 , %%xmm0 , %%xmm4 , %%xmm8 \n\t" + " vfmaddps %%xmm12, %%xmm1 , %%xmm4 , %%xmm12 \n\t" + " vfmaddps %%xmm9 , %%xmm0 , %%xmm5 , %%xmm9 \n\t" + " vfmaddps %%xmm13, %%xmm1 , %%xmm5 , %%xmm13 \n\t" + " vfmaddps %%xmm10, %%xmm0 , %%xmm6 , %%xmm10 \n\t" + " vfmaddps %%xmm14, %%xmm1 , %%xmm6 , %%xmm14 \n\t" + " addq $8, %1 \n\t" + " vfmaddps %%xmm11, %%xmm0 , %%xmm7 , %%xmm11 \n\t" + " vfmaddps %%xmm15, %%xmm1 , %%xmm7 , %%xmm15 \n\t" + " cmpq %1, %0 \n\t" + + " jnz 1b \n\t" + + "2: \n\t" + + + " vmovups (%4) , %%xmm0 \n\t" + " vmovups 16(%4) , %%xmm1 \n\t" + " 
vmovups 32(%4) , %%xmm2 \n\t" + " vmovups 48(%4) , %%xmm3 \n\t" + + " vmovups (%5) , %%xmm4 \n\t" + " vmovups 16(%5) , %%xmm5 \n\t" + " vmovups 32(%5) , %%xmm6 \n\t" + " vmovups 48(%5) , %%xmm7 \n\t" + + " vsubps %%xmm8 , %%xmm0 , %%xmm8 \n\t" + " vsubps %%xmm9 , %%xmm1 , %%xmm9 \n\t" + " vsubps %%xmm10, %%xmm2 , %%xmm10 \n\t" + " vsubps %%xmm11, %%xmm3 , %%xmm11 \n\t" + + " vsubps %%xmm12, %%xmm4 , %%xmm12 \n\t" + " vsubps %%xmm13, %%xmm5 , %%xmm13 \n\t" + " vsubps %%xmm14, %%xmm6 , %%xmm14 \n\t" + " vsubps %%xmm15, %%xmm7 , %%xmm15 \n\t" + + "3: \n\t" // i = 0 + + " vbroadcastss (%7), %%xmm0 \n\t" // read bb + " vbroadcastss 4(%7), %%xmm1 \n\t" // read b + + " vmulps %%xmm8 , %%xmm0 , %%xmm8 \n\t" // aa * bb + " vmulps %%xmm9 , %%xmm0 , %%xmm9 \n\t" + " vmulps %%xmm10 , %%xmm0 , %%xmm10 \n\t" + " vmulps %%xmm11 , %%xmm0 , %%xmm11 \n\t" + + " vmovups %%xmm8 , (%6) \n\t" // write a + " vmovups %%xmm9 , 16(%6) \n\t" + " vmovups %%xmm10 , 32(%6) \n\t" + " vmovups %%xmm11 , 48(%6) \n\t" + + " vmovups %%xmm8 , (%4) \n\t" // write c0 + " vmovups %%xmm9 , 16(%4) \n\t" + " vmovups %%xmm10 , 32(%4) \n\t" + " vmovups %%xmm11 , 48(%4) \n\t" + + " vfnmaddps %%xmm12 , %%xmm8 , %%xmm1 , %%xmm12 \n\t" // c = c - aa * b + " vfnmaddps %%xmm13 , %%xmm9 , %%xmm1 , %%xmm13 \n\t" + " vfnmaddps %%xmm14 , %%xmm10 , %%xmm1 , %%xmm14 \n\t" + " vfnmaddps %%xmm15 , %%xmm11 , %%xmm1 , %%xmm15 \n\t" + + " \n\t" // i = 1 + " addq $8 , %7 \n\t" // b = b + 2 + " addq $64 , %6 \n\t" // a = a + 16 + + " vbroadcastss 4(%7), %%xmm0 \n\t" // read bb + + " vmulps %%xmm12 , %%xmm0 , %%xmm12 \n\t" // aa * bb + " vmulps %%xmm13 , %%xmm0 , %%xmm13 \n\t" // aa * bb + " vmulps %%xmm14 , %%xmm0 , %%xmm14 \n\t" // aa * bb + " vmulps %%xmm15 , %%xmm0 , %%xmm15 \n\t" // aa * bb + + " vmovups %%xmm12 , (%6) \n\t" // write a + " vmovups %%xmm13 , 16(%6) \n\t" // write a + " vmovups %%xmm14 , 32(%6) \n\t" // write a + " vmovups %%xmm15 , 48(%6) \n\t" // write a + + " vmovups %%xmm12 , (%5) \n\t" // write c1 + " vmovups %%xmm13 , 16(%5) \n\t" + " vmovups %%xmm14 , 32(%5) \n\t" + " vmovups %%xmm15 , 48(%5) \n\t" + + " vzeroupper \n\t" + + : + : + "r" (n1), // 0 + "a" (i), // 1 + "r" (a), // 2 + "r" (b), // 3 + "r" (c), // 4 + "r" (c1), // 5 + "r" (as), // 6 + "r" (bs) // 7 + : "cc", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + + +#ifndef COMPLEX + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa, bb; + + int i, j, k; + + for (i = 0; i < n; i++) { + + bb = *(b + i); + + for (j = 0; j < m; j ++) { + aa = *(c + j + i * ldc); + aa *= bb; + *a = aa; + *(c + j + i * ldc) = aa; + a ++; + + for (k = i + 1; k < n; k ++){ + *(c + j + k * ldc) -= aa * *(b + k); + } + + } + b += n; + } +} + +#else + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa1, aa2; + FLOAT bb1, bb2; + FLOAT cc1, cc2; + + int i, j, k; + + ldc *= 2; + + for (i = 0; i < n; i++) { + + bb1 = *(b + i * 2 + 0); + bb2 = *(b + i * 2 + 1); + + for (j = 0; j < m; j ++) { + aa1 = *(c + j * 2 + 0 + i * ldc); + aa2 = *(c + j * 2 + 1 + i * ldc); + +#ifndef CONJ + cc1 = aa1 * bb1 - aa2 * bb2; + cc2 = aa1 * bb2 + aa2 * bb1; +#else + cc1 = aa1 * bb1 + aa2 * bb2; + cc2 = -aa1 * bb2 + aa2 * bb1; +#endif + + *(a + 0) = cc1; + *(a + 1) = cc2; + *(c + j * 2 + 0 + i * ldc) = cc1; + *(c + j * 2 + 1 + i * ldc) = cc2; + a += 2; + + for (k = i + 1; k < 
n; k ++){ +#ifndef CONJ + *(c + j * 2 + 0 + k * ldc) -= cc1 * *(b + k * 2 + 0) - cc2 * *(b + k * 2 + 1); + *(c + j * 2 + 1 + k * ldc) -= cc1 * *(b + k * 2 + 1) + cc2 * *(b + k * 2 + 0); +#else + *(c + j * 2 + 0 + k * ldc) -= cc1 * *(b + k * 2 + 0) + cc2 * *(b + k * 2 + 1); + *(c + j * 2 + 1 + k * ldc) -= - cc1 * *(b + k * 2 + 1) + cc2 * *(b + k * 2 + 0); +#endif + } + + } + b += n * 2; + } +} + +#endif + + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, +#ifdef COMPLEX + FLOAT dummy2, +#endif + FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){ + + FLOAT *aa, *cc; + BLASLONG kk; + BLASLONG i, j, jj; + +#if 0 + fprintf(stderr, "TRSM RN KERNEL m = %3ld n = %3ld k = %3ld offset = %3ld\n", + m, n, k, offset); +#endif + + jj = 0; + j = (n >> GEMM_UNROLL_N_SHIFT); + kk = -offset; + + while (j > 0) { + + aa = a; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + + if (i > 0) { + do { + + strsm_RN_solve_opt(kk, aa, b, cc, ldc, aa + kk * GEMM_UNROLL_M * COMPSIZE, b + kk * GEMM_UNROLL_N * COMPSIZE); + + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + i --; + } while (i > 0); + } + + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + while (i > 0) { + if (m & i) { + if (kk > 0) { + GEMM_KERNEL(i, GEMM_UNROLL_N, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, b, cc, ldc); + } + solve(i, GEMM_UNROLL_N, + aa + kk * i * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + } + i >>= 1; + } + } + + kk += GEMM_UNROLL_N; + b += GEMM_UNROLL_N * k * COMPSIZE; + c += GEMM_UNROLL_N * ldc * COMPSIZE; + j --; + jj += GEMM_UNROLL_M; + } + + if (n & (GEMM_UNROLL_N - 1)) { + + j = (GEMM_UNROLL_N >> 1); + while (j > 0) { + if (n & j) { + + aa = a; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + + while (i > 0) { + if (kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, j, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, + b, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, j, + aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * j * COMPSIZE, cc, ldc); + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + i --; + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + while (i > 0) { + if (m & i) { + if (kk > 0) { + GEMM_KERNEL(i, j, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, + b, + cc, + ldc); + } + + solve(i, j, + aa + kk * i * COMPSIZE, + b + kk * j * COMPSIZE, cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + } + i >>= 1; + } + } + + b += j * k * COMPSIZE; + c += j * ldc * COMPSIZE; + kk += j; + } + j >>= 1; + } + } + + return 0; +} diff --git a/kernel/x86_64/strsm_kernel_RT_bulldozer.c b/kernel/x86_64/strsm_kernel_RT_bulldozer.c new file mode 100644 index 000000000..73f6e8a95 --- /dev/null +++ b/kernel/x86_64/strsm_kernel_RT_bulldozer.c @@ -0,0 +1,481 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. 
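strsm_RN_solve_opt above solves from the right with only two unrolled steps, since the N unroll for this target is 2: column 0 of the 16x2 tile is scaled by the pre-inverted B(0,0), stored to C and to the packed A panel, and its contribution is removed from column 1 via B(0,1); column 1 is then finished with 1/B(1,1). Restated as scalar C with hypothetical names (bs holds the packed 2x2 factor exactly as the asm reads it):

/* Illustrative sketch only; mirrors the two-step tail of strsm_RN_solve_opt. */
static void rn_tile_solve_ref(float *as,            /* packed A panel, written out   */
                              const float *bs,      /* bs[0]=1/B(0,0), bs[1]=B(0,1),
                                                       bs[3]=1/B(1,1)                */
                              float *c0, float *c1, /* the two columns of C          */
                              float x0[16], float x1[16]) /* the C - A*B tile        */
{
    for (int i = 0; i < 16; i++) {      /* step i = 0                                */
        float v = x0[i] * bs[0];        /* vmulps with the broadcast 1/B(0,0)        */
        as[i] = v;                      /* vmovups to (%6)..48(%6)                   */
        c0[i] = v;                      /* vmovups to (%4)..48(%4)                   */
        x1[i] -= v * bs[1];             /* vfnmaddps: c1 -= x0 * B(0,1)              */
    }
    for (int i = 0; i < 16; i++) {      /* step i = 1                                */
        float v = x1[i] * bs[3];        /* broadcast 1/B(1,1)                        */
        as[16 + i] = v;                 /* next 64-byte slice of the A panel         */
        c1[i] = v;
    }
}

The RT variant that follows runs the same two steps in the opposite order, starting from the last row of the packed B factor, which is why it biases as and bs to the end of the block before entering the asm statement.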
*/ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#include "common.h" + +static FLOAT dm1 = -1.; + +#ifdef CONJ +#define GEMM_KERNEL GEMM_KERNEL_R +#else +#define GEMM_KERNEL GEMM_KERNEL_N +#endif + +#if GEMM_DEFAULT_UNROLL_M == 1 +#define GEMM_UNROLL_M_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 2 +#define GEMM_UNROLL_M_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 4 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 6 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + + +#if GEMM_DEFAULT_UNROLL_M == 8 +#define GEMM_UNROLL_M_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 16 +#define GEMM_UNROLL_M_SHIFT 4 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 1 +#define GEMM_UNROLL_N_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 2 +#define GEMM_UNROLL_N_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 4 +#define GEMM_UNROLL_N_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 8 +#define GEMM_UNROLL_N_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 16 +#define GEMM_UNROLL_N_SHIFT 4 +#endif + + + +static void strsm_RT_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) __attribute__ ((noinline)); + +static void strsm_RT_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) +{ + + FLOAT *c1 = c + ldc ; + BLASLONG n1 = n * 8; + BLASLONG i=0; + + as += (2 - 1) * 16; + bs += (2 - 1) * 2; + + __asm__ __volatile__ + ( + " vzeroupper \n\t" + " prefetcht0 (%4) \n\t" + " prefetcht0 (%5) \n\t" + " vxorps %%xmm8 , %%xmm8 , %%xmm8 \n\t" + " vxorps %%xmm9 , %%xmm9 , %%xmm9 \n\t" + " vxorps %%xmm10, %%xmm10, %%xmm10 \n\t" + " vxorps %%xmm11, %%xmm11, %%xmm11 \n\t" + " vxorps %%xmm12, %%xmm12, %%xmm12 \n\t" + " vxorps %%xmm13, %%xmm13, %%xmm13 \n\t" + " vxorps %%xmm14, %%xmm14, %%xmm14 \n\t" + " vxorps %%xmm15, %%xmm15, %%xmm15 \n\t" + + " cmpq $0, %0 \n\t" + " je 2f \n\t" + + " .align 16 \n\t" + "1: \n\t" + + " vbroadcastss (%3,%1,1), %%xmm0 \n\t" // read b + " vmovups (%2,%1,8), %%xmm4 \n\t" + " vbroadcastss 4(%3,%1,1), %%xmm1 \n\t" + " vmovups 16(%2,%1,8), %%xmm5 \n\t" + " vmovups 32(%2,%1,8), %%xmm6 \n\t" + " vmovups 48(%2,%1,8), %%xmm7 \n\t" + + " vfmaddps %%xmm8 , %%xmm0 , %%xmm4 , %%xmm8 \n\t" + " vfmaddps %%xmm12, %%xmm1 , %%xmm4 , %%xmm12 \n\t" + " vfmaddps %%xmm9 , %%xmm0 , %%xmm5 , %%xmm9 \n\t" + " vfmaddps %%xmm13, %%xmm1 , %%xmm5 , %%xmm13 \n\t" + " vfmaddps %%xmm10, %%xmm0 , %%xmm6 , 
%%xmm10 \n\t" + " vfmaddps %%xmm14, %%xmm1 , %%xmm6 , %%xmm14 \n\t" + " addq $8, %1 \n\t" + " vfmaddps %%xmm11, %%xmm0 , %%xmm7 , %%xmm11 \n\t" + " vfmaddps %%xmm15, %%xmm1 , %%xmm7 , %%xmm15 \n\t" + " cmpq %1, %0 \n\t" + + " jnz 1b \n\t" + + "2: \n\t" + + + " vmovups (%4) , %%xmm0 \n\t" + " vmovups 16(%4) , %%xmm1 \n\t" + " vmovups 32(%4) , %%xmm2 \n\t" + " vmovups 48(%4) , %%xmm3 \n\t" + + " vmovups (%5) , %%xmm4 \n\t" + " vmovups 16(%5) , %%xmm5 \n\t" + " vmovups 32(%5) , %%xmm6 \n\t" + " vmovups 48(%5) , %%xmm7 \n\t" + + " vsubps %%xmm8 , %%xmm0 , %%xmm8 \n\t" + " vsubps %%xmm9 , %%xmm1 , %%xmm9 \n\t" + " vsubps %%xmm10, %%xmm2 , %%xmm10 \n\t" + " vsubps %%xmm11, %%xmm3 , %%xmm11 \n\t" + + " vsubps %%xmm12, %%xmm4 , %%xmm12 \n\t" + " vsubps %%xmm13, %%xmm5 , %%xmm13 \n\t" + " vsubps %%xmm14, %%xmm6 , %%xmm14 \n\t" + " vsubps %%xmm15, %%xmm7 , %%xmm15 \n\t" + + "3: \n\t" // i = 1 + + " vbroadcastss (%7), %%xmm1 \n\t" // read b + " vbroadcastss 4(%7), %%xmm0 \n\t" // read bb + + " vmulps %%xmm12 , %%xmm0 , %%xmm12 \n\t" // aa * bb + " vmulps %%xmm13 , %%xmm0 , %%xmm13 \n\t" // aa * bb + " vmulps %%xmm14 , %%xmm0 , %%xmm14 \n\t" // aa * bb + " vmulps %%xmm15 , %%xmm0 , %%xmm15 \n\t" // aa * bb + + " vmovups %%xmm12 , (%6) \n\t" // write a + " vmovups %%xmm13 , 16(%6) \n\t" // write a + " vmovups %%xmm14 , 32(%6) \n\t" // write a + " vmovups %%xmm15 , 48(%6) \n\t" // write a + + " vmovups %%xmm12 , (%5) \n\t" // write c1 + " vmovups %%xmm13 , 16(%5) \n\t" + " vmovups %%xmm14 , 32(%5) \n\t" + " vmovups %%xmm15 , 48(%5) \n\t" + + " vfnmaddps %%xmm8 , %%xmm12 , %%xmm1 , %%xmm8 \n\t" // c = c - aa * b + " vfnmaddps %%xmm9 , %%xmm13 , %%xmm1 , %%xmm9 \n\t" + " vfnmaddps %%xmm10 , %%xmm14 , %%xmm1 , %%xmm10 \n\t" + " vfnmaddps %%xmm11 , %%xmm15 , %%xmm1 , %%xmm11 \n\t" + + " \n\t" // i = 0 + " subq $8 , %7 \n\t" // b = b - 2 + " subq $64 , %6 \n\t" // a = a - 16 + + " vbroadcastss (%7), %%xmm0 \n\t" // read bb + + " vmulps %%xmm8 , %%xmm0 , %%xmm8 \n\t" // aa * bb + " vmulps %%xmm9 , %%xmm0 , %%xmm9 \n\t" + " vmulps %%xmm10 , %%xmm0 , %%xmm10 \n\t" + " vmulps %%xmm11 , %%xmm0 , %%xmm11 \n\t" + + " vmovups %%xmm8 , (%6) \n\t" // write a + " vmovups %%xmm9 , 16(%6) \n\t" + " vmovups %%xmm10 , 32(%6) \n\t" + " vmovups %%xmm11 , 48(%6) \n\t" + + " vmovups %%xmm8 , (%4) \n\t" // write c0 + " vmovups %%xmm9 , 16(%4) \n\t" + " vmovups %%xmm10 , 32(%4) \n\t" + " vmovups %%xmm11 , 48(%4) \n\t" + + " vzeroupper \n\t" + + : + : + "r" (n1), // 0 + "a" (i), // 1 + "r" (a), // 2 + "r" (b), // 3 + "r" (c), // 4 + "r" (c1), // 5 + "r" (as), // 6 + "r" (bs) // 7 + : "cc", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + + + +#ifndef COMPLEX + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa, bb; + + int i, j, k; + + a += (n - 1) * m; + b += (n - 1) * n; + + for (i = n - 1; i >= 0; i--) { + + bb = *(b + i); + + for (j = 0; j < m; j ++) { + aa = *(c + j + i * ldc); + aa *= bb; + *a = aa; + *(c + j + i * ldc) = aa; + a ++; + + for (k = 0; k < i; k ++){ + *(c + j + k * ldc) -= aa * *(b + k); + } + + } + b -= n; + a -= 2 * m; + } + +} + +#else + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa1, aa2; + FLOAT bb1, bb2; + FLOAT cc1, cc2; + + int i, j, k; + + ldc *= 2; + + a += (n - 1) * m * 2; + b += (n - 1) * n * 2; + + for (i = n - 1; i >= 0; i--) { + + bb1 = *(b + i 
* 2 + 0); + bb2 = *(b + i * 2 + 1); + + for (j = 0; j < m; j ++) { + + aa1 = *(c + j * 2 + 0 + i * ldc); + aa2 = *(c + j * 2 + 1 + i * ldc); + +#ifndef CONJ + cc1 = aa1 * bb1 - aa2 * bb2; + cc2 = aa1 * bb2 + aa2 * bb1; +#else + cc1 = aa1 * bb1 + aa2 * bb2; + cc2 = - aa1 * bb2 + aa2 * bb1; +#endif + + *(a + 0) = cc1; + *(a + 1) = cc2; + + *(c + j * 2 + 0 + i * ldc) = cc1; + *(c + j * 2 + 1 + i * ldc) = cc2; + a += 2; + + for (k = 0; k < i; k ++){ +#ifndef CONJ + *(c + j * 2 + 0 + k * ldc) -= cc1 * *(b + k * 2 + 0) - cc2 * *(b + k * 2 + 1); + *(c + j * 2 + 1 + k * ldc) -= cc1 * *(b + k * 2 + 1) + cc2 * *(b + k * 2 + 0); +#else + *(c + j * 2 + 0 + k * ldc) -= cc1 * *(b + k * 2 + 0) + cc2 * *(b + k * 2 + 1); + *(c + j * 2 + 1 + k * ldc) -= -cc1 * *(b + k * 2 + 1) + cc2 * *(b + k * 2 + 0); +#endif + } + + } + b -= n * 2; + a -= 4 * m; + } + +} + +#endif + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, +#ifdef COMPLEX + FLOAT dummy2, +#endif + FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){ + + BLASLONG i, j; + FLOAT *aa, *cc; + BLASLONG kk; + +#if 0 + fprintf(stderr, "TRSM RT KERNEL m = %3ld n = %3ld k = %3ld offset = %3ld\n", + m, n, k, offset); +#endif + + kk = n - offset; + c += n * ldc * COMPSIZE; + b += n * k * COMPSIZE; + + if (n & (GEMM_UNROLL_N - 1)) { + + j = 1; + while (j < GEMM_UNROLL_N) { + if (n & j) { + + aa = a; + b -= j * k * COMPSIZE; + c -= j * ldc* COMPSIZE; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + if (i > 0) { + + do { + if (k - kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, j, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + GEMM_UNROLL_M * kk * COMPSIZE, + b + j * kk * COMPSIZE, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, j, + aa + (kk - j) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - j) * j * COMPSIZE, + cc, ldc); + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + i --; + } while (i > 0); + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + do { + if (m & i) { + + if (k - kk > 0) { + GEMM_KERNEL(i, j, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + i * kk * COMPSIZE, + b + j * kk * COMPSIZE, + cc, ldc); + } + + solve(i, j, + aa + (kk - j) * i * COMPSIZE, + b + (kk - j) * j * COMPSIZE, + cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + + } + i >>= 1; + } while (i > 0); + } + kk -= j; + } + j <<= 1; + } + } + + j = (n >> GEMM_UNROLL_N_SHIFT); + + if (j > 0) { + + do { + aa = a; + b -= GEMM_UNROLL_N * k * COMPSIZE; + c -= GEMM_UNROLL_N * ldc * COMPSIZE; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + if (i > 0) { + do { + + strsm_RT_solve_opt(k - kk, aa + GEMM_UNROLL_M * kk * COMPSIZE, b + GEMM_UNROLL_N * kk * COMPSIZE, cc, ldc, + aa + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_M * COMPSIZE , b + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_N * COMPSIZE ); + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + i --; + } while (i > 0); + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + do { + if (m & i) { + if (k - kk > 0) { + GEMM_KERNEL(i, GEMM_UNROLL_N, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + i * kk * COMPSIZE, + b + GEMM_UNROLL_N * kk * COMPSIZE, + cc, + ldc); + } + + solve(i, GEMM_UNROLL_N, + aa + (kk - GEMM_UNROLL_N) * i * COMPSIZE, + b + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + } + i >>= 1; + } while (i > 0); + } + + kk -= GEMM_UNROLL_N; + j --; + } while (j > 0); + } + + return 0; +} + + diff --git a/kernel/x86_64/ztrsm_kernel_LN_bulldozer.c b/kernel/x86_64/ztrsm_kernel_LN_bulldozer.c 
new file mode 100644 index 000000000..317ffbb94 --- /dev/null +++ b/kernel/x86_64/ztrsm_kernel_LN_bulldozer.c @@ -0,0 +1,497 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. 
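All of the complex solve() fallbacks in these files, and the vmovddup/vfnmaddpd/vaddsubpd sequences in the optimized ztrsm path below, come down to one operation: multiply a right-hand-side element by the stored triangular factor, whose diagonal has been pre-inverted by the packing stage so no divide is needed, and conjugate that factor in the CONJ build. A minimal helper showing the two sign patterns, with hypothetical names:

/* Illustrative sketch only: the complex multiply used by the solve() fallbacks.
 * aa is the stored (pre-inverted) triangular factor, bb the current RHS value. */
static void cmul_factor(const double aa[2], const double bb[2],
                        double cc[2], int conj_factor)
{
    if (!conj_factor) {                  /* cc = aa * bb                       */
        cc[0] = aa[0] * bb[0] - aa[1] * bb[1];
        cc[1] = aa[0] * bb[1] + aa[1] * bb[0];
    } else {                             /* cc = conj(aa) * bb (CONJ build)    */
        cc[0] = aa[0] * bb[0] + aa[1] * bb[1];
        cc[1] = aa[0] * bb[1] - aa[1] * bb[0];
    }
}

In the right-side kernels (RN/RT) the roles swap and it is the packed B factor that carries the conjugation, which matches the sign flips visible in their CONJ branches.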
*/ +/*********************************************************************/ + +#include "common.h" + +static FLOAT dm1 = -1.; + +#ifdef CONJ +#define GEMM_KERNEL GEMM_KERNEL_L +#else +#define GEMM_KERNEL GEMM_KERNEL_N +#endif + +#if GEMM_DEFAULT_UNROLL_M == 1 +#define GEMM_UNROLL_M_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 2 +#define GEMM_UNROLL_M_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 4 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 6 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 8 +#define GEMM_UNROLL_M_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 16 +#define GEMM_UNROLL_M_SHIFT 4 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 1 +#define GEMM_UNROLL_N_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 2 +#define GEMM_UNROLL_N_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 4 +#define GEMM_UNROLL_N_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 8 +#define GEMM_UNROLL_N_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 16 +#define GEMM_UNROLL_N_SHIFT 4 +#endif + + + +#ifndef CONJ + +static void ztrsm_LN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) __attribute__ ((noinline)); + +static void ztrsm_LN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) +{ + + FLOAT *c1 = c + ldc*2 ; + BLASLONG n1 = n * 4; + BLASLONG i=0; + + __asm__ __volatile__ + ( + " vzeroupper \n\t" + " prefetcht0 (%4) \n\t" + " prefetcht0 (%5) \n\t" + " vxorpd %%xmm8 , %%xmm8 , %%xmm8 \n\t" + " vxorpd %%xmm9 , %%xmm9 , %%xmm9 \n\t" + " vxorpd %%xmm10, %%xmm10, %%xmm10 \n\t" + " vxorpd %%xmm11, %%xmm11, %%xmm11 \n\t" + " vxorpd %%xmm12, %%xmm12, %%xmm12 \n\t" + " vxorpd %%xmm13, %%xmm13, %%xmm13 \n\t" + " vxorpd %%xmm14, %%xmm14, %%xmm14 \n\t" + " vxorpd %%xmm15, %%xmm15, %%xmm15 \n\t" + + " cmpq $0, %0 \n\t" + " je 3f \n\t" + + " .align 16 \n\t" + "1: \n\t" + + " prefetcht0 256(%3,%1,8) \n\t" + " prefetcht0 256(%2,%1,8) \n\t" + " vmovddup (%3,%1,8), %%xmm0 \n\t" // b0 real, b0 real + " vmovddup 8(%3,%1,8), %%xmm1 \n\t" // b0 imag, b0 imag + " vmovups (%2,%1,8), %%xmm4 \n\t" // a0 real , a0 imag + " vmovups 16(%2,%1,8), %%xmm5 \n\t" // a1 real , a1 imag + " vmovddup 16(%3,%1,8), %%xmm2 \n\t" // b1 real, b1 real + " vmovddup 24(%3,%1,8), %%xmm3 \n\t" // b1 imag, b1 imag + + " vfnmaddpd %%xmm8 , %%xmm0 , %%xmm4 , %%xmm8 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm9 , %%xmm1 , %%xmm4 , %%xmm9 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddpd %%xmm10, %%xmm0 , %%xmm5 , %%xmm10 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm11, %%xmm1 , %%xmm5 , %%xmm11 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddpd %%xmm12, %%xmm2 , %%xmm4 , %%xmm12 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm13, %%xmm3 , %%xmm4 , %%xmm13 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddpd %%xmm14, %%xmm2 , %%xmm5 , %%xmm14 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm15, %%xmm3 , %%xmm5 , %%xmm15 \n\t" // a_real * b_imag , a_imag * b_imag + + " addq $4, %1 \n\t" + " cmpq %1, %0 \n\t" + + " jz 2f \n\t" + + " vmovddup (%3,%1,8), %%xmm0 \n\t" // b0 real, b0 real + " vmovddup 8(%3,%1,8), %%xmm1 \n\t" // b0 imag, b0 imag + " vmovups (%2,%1,8), %%xmm4 \n\t" // a0 real , a0 imag + " vmovups 16(%2,%1,8), %%xmm5 \n\t" // a1 real , a1 imag + " vmovddup 16(%3,%1,8), %%xmm2 \n\t" // b1 real, b1 real + " vmovddup 24(%3,%1,8), %%xmm3 \n\t" // b1 imag, b1 imag + + " vfnmaddpd %%xmm8 , %%xmm0 , %%xmm4 , %%xmm8 \n\t" // a_real * b_real , a_imag * 
b_real + " vfnmaddpd %%xmm9 , %%xmm1 , %%xmm4 , %%xmm9 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddpd %%xmm10, %%xmm0 , %%xmm5 , %%xmm10 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm11, %%xmm1 , %%xmm5 , %%xmm11 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddpd %%xmm12, %%xmm2 , %%xmm4 , %%xmm12 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm13, %%xmm3 , %%xmm4 , %%xmm13 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddpd %%xmm14, %%xmm2 , %%xmm5 , %%xmm14 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm15, %%xmm3 , %%xmm5 , %%xmm15 \n\t" // a_real * b_imag , a_imag * b_imag + + " addq $4, %1 \n\t" + " cmpq %1, %0 \n\t" + + " jnz 1b \n\t" + + + "2: \n\t" + + " vshufpd $0x01 , %%xmm9 , %%xmm9, %%xmm9 \n\t" + " vshufpd $0x01 , %%xmm11 , %%xmm11 , %%xmm11 \n\t" + " vshufpd $0x01 , %%xmm13 , %%xmm13 , %%xmm13 \n\t" + " vshufpd $0x01 , %%xmm15 , %%xmm15 , %%xmm15 \n\t" + + " vaddsubpd %%xmm8 , %%xmm9 , %%xmm8 \n\t" + " vaddsubpd %%xmm10, %%xmm11, %%xmm10 \n\t" + " vaddsubpd %%xmm12, %%xmm13, %%xmm12 \n\t" + " vaddsubpd %%xmm14, %%xmm15, %%xmm14 \n\t" + + " vxorpd %%xmm7 , %%xmm7 , %%xmm7 \n\t" + + " vaddsubpd %%xmm8 , %%xmm7 , %%xmm8 \n\t" + " vaddsubpd %%xmm10, %%xmm7 , %%xmm10 \n\t" + " vaddsubpd %%xmm12, %%xmm7 , %%xmm12 \n\t" + " vaddsubpd %%xmm14, %%xmm7 , %%xmm14 \n\t" + + " vmovups (%4) , %%xmm0 \n\t" + " vmovups 16(%4) , %%xmm1 \n\t" + + " vmovups (%5) , %%xmm4 \n\t" + " vmovups 16(%5) , %%xmm5 \n\t" + + " vaddpd %%xmm0 , %%xmm8 , %%xmm8 \n\t" + " vaddpd %%xmm1 , %%xmm10, %%xmm10 \n\t" + " vaddpd %%xmm4 , %%xmm12, %%xmm12 \n\t" + " vaddpd %%xmm5 , %%xmm14, %%xmm14 \n\t" + + " vmovups %%xmm8 , (%4) \n\t" + " vmovups %%xmm10 ,16(%4) \n\t" + + " vmovups %%xmm12 , (%5) \n\t" + " vmovups %%xmm14 ,16(%5) \n\t" + + "3: \n\t" + + " vzeroupper \n\t" + + : + : + "r" (n1), // 0 + "a" (i), // 1 + "r" (a), // 2 + "r" (b), // 3 + "r" (c), // 4 + "r" (c1), // 5 + "r" (as), // 6 + "r" (bs) // 7 + : "cc", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + +#endif + + + +#ifndef COMPLEX + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa, bb; + + int i, j, k; + + a += (m - 1) * m; + b += (m - 1) * n; + + for (i = m - 1; i >= 0; i--) { + + aa = *(a + i); + + for (j = 0; j < n; j ++) { + bb = *(c + i + j * ldc); + bb *= aa; + *b = bb; + *(c + i + j * ldc) = bb; + b ++; + + for (k = 0; k < i; k ++){ + *(c + k + j * ldc) -= bb * *(a + k); + } + + } + a -= m; + b -= 2 * n; + } + +} + +#else + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa1, aa2; + FLOAT bb1, bb2; + FLOAT cc1, cc2; + + int i, j, k; + + ldc *= 2; + a += (m - 1) * m * 2; + b += (m - 1) * n * 2; + + for (i = m - 1; i >= 0; i--) { + + aa1 = *(a + i * 2 + 0); + aa2 = *(a + i * 2 + 1); + + for (j = 0; j < n; j ++) { + bb1 = *(c + i * 2 + 0 + j * ldc); + bb2 = *(c + i * 2 + 1 + j * ldc); + +#ifndef CONJ + cc1 = aa1 * bb1 - aa2 * bb2; + cc2 = aa1 * bb2 + aa2 * bb1; +#else + cc1 = aa1 * bb1 + aa2 * bb2; + cc2 = aa1 * bb2 - aa2 * bb1; +#endif + + + *(b + 0) = cc1; + *(b + 1) = cc2; + *(c + i * 2 + 0 + j * ldc) = cc1; + *(c + i * 2 + 1 + j * ldc) = cc2; + b += 2; + + for (k = 0; k < i; k ++){ +#ifndef CONJ + *(c + k * 2 + 0 + j * ldc) -= cc1 * *(a + k * 2 + 0) - cc2 * *(a + k * 2 + 1); + *(c + k * 2 + 1 + j * ldc) -= cc1 * *(a + k * 2 
+ 1) + cc2 * *(a + k * 2 + 0); +#else + *(c + k * 2 + 0 + j * ldc) -= cc1 * *(a + k * 2 + 0) + cc2 * *(a + k * 2 + 1); + *(c + k * 2 + 1 + j * ldc) -= - cc1 * *(a + k * 2 + 1) + cc2 * *(a + k * 2 + 0); +#endif + } + + } + a -= m * 2; + b -= 4 * n; + } + +} + +#endif + + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, +#ifdef COMPLEX + FLOAT dummy2, +#endif + FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){ + + BLASLONG i, j; + FLOAT *aa, *cc; + BLASLONG kk; + +#if 0 + fprintf(stderr, "TRSM KERNEL LN : m = %3ld n = %3ld k = %3ld offset = %3ld\n", + m, n, k, offset); +#endif + + j = (n >> GEMM_UNROLL_N_SHIFT); + + while (j > 0) { + + kk = m + offset; + + if (m & (GEMM_UNROLL_M - 1)) { + for (i = 1; i < GEMM_UNROLL_M; i *= 2){ + if (m & i) { + aa = a + ((m & ~(i - 1)) - i) * k * COMPSIZE; + cc = c + ((m & ~(i - 1)) - i) * COMPSIZE; + + if (k - kk > 0) { + GEMM_KERNEL(i, GEMM_UNROLL_N, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + i * kk * COMPSIZE, + b + GEMM_UNROLL_N * kk * COMPSIZE, + cc, + ldc); + } + + solve(i, GEMM_UNROLL_N, + aa + (kk - i) * i * COMPSIZE, + b + (kk - i) * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + kk -= i; + } + } + } + + i = (m >> GEMM_UNROLL_M_SHIFT); + if (i > 0) { + aa = a + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * k * COMPSIZE; + cc = c + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * COMPSIZE; + + do { + +#ifdef CONJ + if (k - kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, GEMM_UNROLL_N, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + GEMM_UNROLL_M * kk * COMPSIZE, + b + GEMM_UNROLL_N * kk * COMPSIZE, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, GEMM_UNROLL_N, + aa + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); +#else + + ztrsm_LN_solve_opt(k-kk, aa + GEMM_UNROLL_M * kk * COMPSIZE, b + GEMM_UNROLL_N * kk * COMPSIZE, cc, ldc, + aa + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_M * COMPSIZE, b + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_N * COMPSIZE); + + solve(GEMM_UNROLL_M, GEMM_UNROLL_N, + aa + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); +#endif + + aa -= GEMM_UNROLL_M * k * COMPSIZE; + cc -= GEMM_UNROLL_M * COMPSIZE; + kk -= GEMM_UNROLL_M; + i --; + } while (i > 0); + } + + b += GEMM_UNROLL_N * k * COMPSIZE; + c += GEMM_UNROLL_N * ldc * COMPSIZE; + j --; + } + + if (n & (GEMM_UNROLL_N - 1)) { + + j = (GEMM_UNROLL_N >> 1); + while (j > 0) { + if (n & j) { + + kk = m + offset; + + if (m & (GEMM_UNROLL_M - 1)) { + for (i = 1; i < GEMM_UNROLL_M; i *= 2){ + if (m & i) { + aa = a + ((m & ~(i - 1)) - i) * k * COMPSIZE; + cc = c + ((m & ~(i - 1)) - i) * COMPSIZE; + + if (k - kk > 0) { + GEMM_KERNEL(i, j, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + i * kk * COMPSIZE, + b + j * kk * COMPSIZE, + cc, ldc); + } + + solve(i, j, + aa + (kk - i) * i * COMPSIZE, + b + (kk - i) * j * COMPSIZE, + cc, ldc); + + kk -= i; + } + } + } + + i = (m >> GEMM_UNROLL_M_SHIFT); + if (i > 0) { + aa = a + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * k * COMPSIZE; + cc = c + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * COMPSIZE; + + do { + if (k - kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, j, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + GEMM_UNROLL_M * kk * COMPSIZE, + b + j * kk * COMPSIZE, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, j, + aa + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - GEMM_UNROLL_M) * j * COMPSIZE, + cc, ldc); + + aa -= GEMM_UNROLL_M * k * COMPSIZE; + cc -= GEMM_UNROLL_M * COMPSIZE; + kk -= 
GEMM_UNROLL_M; + i --; + } while (i > 0); + } + + b += j * k * COMPSIZE; + c += j * ldc * COMPSIZE; + } + j >>= 1; + } + } + + return 0; +} diff --git a/kernel/x86_64/ztrsm_kernel_LT_bulldozer.c b/kernel/x86_64/ztrsm_kernel_LT_bulldozer.c new file mode 100644 index 000000000..f240887a1 --- /dev/null +++ b/kernel/x86_64/ztrsm_kernel_LT_bulldozer.c @@ -0,0 +1,480 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. 
*/ +/*********************************************************************/ + +#include "common.h" + +static FLOAT dm1 = -1.; + +#ifdef CONJ +#define GEMM_KERNEL GEMM_KERNEL_L +#else +#define GEMM_KERNEL GEMM_KERNEL_N +#endif + +#if GEMM_DEFAULT_UNROLL_M == 1 +#define GEMM_UNROLL_M_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 2 +#define GEMM_UNROLL_M_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 4 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 6 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 8 +#define GEMM_UNROLL_M_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 16 +#define GEMM_UNROLL_M_SHIFT 4 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 1 +#define GEMM_UNROLL_N_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 2 +#define GEMM_UNROLL_N_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 4 +#define GEMM_UNROLL_N_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 8 +#define GEMM_UNROLL_N_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 16 +#define GEMM_UNROLL_N_SHIFT 4 +#endif + + + +#ifndef CONJ + +static void ztrsm_LT_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) __attribute__ ((noinline)); + +static void ztrsm_LT_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) +{ + + FLOAT *c1 = c + ldc*2 ; + BLASLONG n1 = n * 4; + BLASLONG i=0; + + __asm__ __volatile__ + ( + " vzeroupper \n\t" + " prefetcht0 (%4) \n\t" + " prefetcht0 (%5) \n\t" + " vxorpd %%xmm8 , %%xmm8 , %%xmm8 \n\t" + " vxorpd %%xmm9 , %%xmm9 , %%xmm9 \n\t" + " vxorpd %%xmm10, %%xmm10, %%xmm10 \n\t" + " vxorpd %%xmm11, %%xmm11, %%xmm11 \n\t" + " vxorpd %%xmm12, %%xmm12, %%xmm12 \n\t" + " vxorpd %%xmm13, %%xmm13, %%xmm13 \n\t" + " vxorpd %%xmm14, %%xmm14, %%xmm14 \n\t" + " vxorpd %%xmm15, %%xmm15, %%xmm15 \n\t" + + " cmpq $0, %0 \n\t" + " je 3f \n\t" + + " .align 16 \n\t" + "1: \n\t" + + " prefetcht0 256(%3,%1,8) \n\t" + " prefetcht0 256(%2,%1,8) \n\t" + " vmovddup (%3,%1,8), %%xmm0 \n\t" // b0 real, b0 real + " vmovddup 8(%3,%1,8), %%xmm1 \n\t" // b0 imag, b0 imag + " vmovups (%2,%1,8), %%xmm4 \n\t" // a0 real , a0 imag + " vmovups 16(%2,%1,8), %%xmm5 \n\t" // a1 real , a1 imag + " vmovddup 16(%3,%1,8), %%xmm2 \n\t" // b1 real, b1 real + " vmovddup 24(%3,%1,8), %%xmm3 \n\t" // b1 imag, b1 imag + + " vfnmaddpd %%xmm8 , %%xmm0 , %%xmm4 , %%xmm8 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm9 , %%xmm1 , %%xmm4 , %%xmm9 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddpd %%xmm10, %%xmm0 , %%xmm5 , %%xmm10 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm11, %%xmm1 , %%xmm5 , %%xmm11 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddpd %%xmm12, %%xmm2 , %%xmm4 , %%xmm12 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm13, %%xmm3 , %%xmm4 , %%xmm13 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddpd %%xmm14, %%xmm2 , %%xmm5 , %%xmm14 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm15, %%xmm3 , %%xmm5 , %%xmm15 \n\t" // a_real * b_imag , a_imag * b_imag + + " addq $4, %1 \n\t" + " cmpq %1, %0 \n\t" + + " jz 2f \n\t" + + " vmovddup (%3,%1,8), %%xmm0 \n\t" // b0 real, b0 real + " vmovddup 8(%3,%1,8), %%xmm1 \n\t" // b0 imag, b0 imag + " vmovups (%2,%1,8), %%xmm4 \n\t" // a0 real , a0 imag + " vmovups 16(%2,%1,8), %%xmm5 \n\t" // a1 real , a1 imag + " vmovddup 16(%3,%1,8), %%xmm2 \n\t" // b1 real, b1 real + " vmovddup 24(%3,%1,8), %%xmm3 \n\t" // b1 imag, b1 imag + + " vfnmaddpd %%xmm8 , %%xmm0 , %%xmm4 , %%xmm8 \n\t" // a_real * b_real , a_imag * 
b_real + " vfnmaddpd %%xmm9 , %%xmm1 , %%xmm4 , %%xmm9 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddpd %%xmm10, %%xmm0 , %%xmm5 , %%xmm10 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm11, %%xmm1 , %%xmm5 , %%xmm11 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddpd %%xmm12, %%xmm2 , %%xmm4 , %%xmm12 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm13, %%xmm3 , %%xmm4 , %%xmm13 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddpd %%xmm14, %%xmm2 , %%xmm5 , %%xmm14 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm15, %%xmm3 , %%xmm5 , %%xmm15 \n\t" // a_real * b_imag , a_imag * b_imag + + " addq $4, %1 \n\t" + " cmpq %1, %0 \n\t" + + " jnz 1b \n\t" + + + "2: \n\t" + + " vshufpd $0x01 , %%xmm9 , %%xmm9, %%xmm9 \n\t" + " vshufpd $0x01 , %%xmm11 , %%xmm11 , %%xmm11 \n\t" + " vshufpd $0x01 , %%xmm13 , %%xmm13 , %%xmm13 \n\t" + " vshufpd $0x01 , %%xmm15 , %%xmm15 , %%xmm15 \n\t" + + " vaddsubpd %%xmm8 , %%xmm9 , %%xmm8 \n\t" + " vaddsubpd %%xmm10, %%xmm11, %%xmm10 \n\t" + " vaddsubpd %%xmm12, %%xmm13, %%xmm12 \n\t" + " vaddsubpd %%xmm14, %%xmm15, %%xmm14 \n\t" + + " vxorpd %%xmm7 , %%xmm7 , %%xmm7 \n\t" + + " vaddsubpd %%xmm8 , %%xmm7 , %%xmm8 \n\t" + " vaddsubpd %%xmm10, %%xmm7 , %%xmm10 \n\t" + " vaddsubpd %%xmm12, %%xmm7 , %%xmm12 \n\t" + " vaddsubpd %%xmm14, %%xmm7 , %%xmm14 \n\t" + + " vmovups (%4) , %%xmm0 \n\t" + " vmovups 16(%4) , %%xmm1 \n\t" + + " vmovups (%5) , %%xmm4 \n\t" + " vmovups 16(%5) , %%xmm5 \n\t" + + " vaddpd %%xmm0 , %%xmm8 , %%xmm8 \n\t" + " vaddpd %%xmm1 , %%xmm10, %%xmm10 \n\t" + " vaddpd %%xmm4 , %%xmm12, %%xmm12 \n\t" + " vaddpd %%xmm5 , %%xmm14, %%xmm14 \n\t" + + " vmovups %%xmm8 , (%4) \n\t" + " vmovups %%xmm10 ,16(%4) \n\t" + + " vmovups %%xmm12 , (%5) \n\t" + " vmovups %%xmm14 ,16(%5) \n\t" + + "3: \n\t" + + " vzeroupper \n\t" + + : + : + "r" (n1), // 0 + "a" (i), // 1 + "r" (a), // 2 + "r" (b), // 3 + "r" (c), // 4 + "r" (c1), // 5 + "r" (as), // 6 + "r" (bs) // 7 + : "cc", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + +#endif + + + +#ifndef COMPLEX + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa, bb; + + int i, j, k; + + for (i = 0; i < m; i++) { + + aa = *(a + i); + + for (j = 0; j < n; j ++) { + bb = *(c + i + j * ldc); + bb *= aa; + *b = bb; + *(c + i + j * ldc) = bb; + b ++; + + for (k = i + 1; k < m; k ++){ + *(c + k + j * ldc) -= bb * *(a + k); + } + + } + a += m; + } +} + +#else + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa1, aa2; + FLOAT bb1, bb2; + FLOAT cc1, cc2; + + int i, j, k; + + ldc *= 2; + + for (i = 0; i < m; i++) { + + aa1 = *(a + i * 2 + 0); + aa2 = *(a + i * 2 + 1); + + for (j = 0; j < n; j ++) { + bb1 = *(c + i * 2 + 0 + j * ldc); + bb2 = *(c + i * 2 + 1 + j * ldc); + +#ifndef CONJ + cc1 = aa1 * bb1 - aa2 * bb2; + cc2 = aa1 * bb2 + aa2 * bb1; +#else + cc1 = aa1 * bb1 + aa2 * bb2; + cc2 = aa1 * bb2 - aa2 * bb1; +#endif + + *(b + 0) = cc1; + *(b + 1) = cc2; + *(c + i * 2 + 0 + j * ldc) = cc1; + *(c + i * 2 + 1 + j * ldc) = cc2; + b += 2; + + for (k = i + 1; k < m; k ++){ +#ifndef CONJ + *(c + k * 2 + 0 + j * ldc) -= cc1 * *(a + k * 2 + 0) - cc2 * *(a + k * 2 + 1); + *(c + k * 2 + 1 + j * ldc) -= cc1 * *(a + k * 2 + 1) + cc2 * *(a + k * 2 + 0); +#else + *(c + k * 2 + 0 + j * ldc) -= cc1 * *(a + k * 2 + 0) + cc2 * *(a + k * 
2 + 1); + *(c + k * 2 + 1 + j * ldc) -= -cc1 * *(a + k * 2 + 1) + cc2 * *(a + k * 2 + 0); +#endif + } + + } + a += m * 2; + } +} + +#endif + + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, +#ifdef COMPLEX + FLOAT dummy2, +#endif + FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){ + + FLOAT *aa, *cc; + BLASLONG kk; + BLASLONG i, j, jj; + +#if 0 + fprintf(stderr, "TRSM KERNEL LT : m = %3ld n = %3ld k = %3ld offset = %3ld\n", + m, n, k, offset); +#endif + + jj = 0; + + j = (n >> GEMM_UNROLL_N_SHIFT); + + while (j > 0) { + + kk = offset; + aa = a; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + + while (i > 0) { + +#ifdef CONJ + + if (kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, GEMM_UNROLL_N, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, b, cc, ldc); + } + + solve(GEMM_UNROLL_M, GEMM_UNROLL_N, + aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); +#else + + ztrsm_LT_solve_opt(kk, aa, b, cc, ldc, aa + kk * GEMM_UNROLL_M * COMPSIZE, b + kk * GEMM_UNROLL_N * COMPSIZE); + + solve(GEMM_UNROLL_M, GEMM_UNROLL_N, + aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); +#endif + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + kk += GEMM_UNROLL_M; + i --; + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + while (i > 0) { + if (m & i) { + if (kk > 0) { + GEMM_KERNEL(i, GEMM_UNROLL_N, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, b, cc, ldc); + } + solve(i, GEMM_UNROLL_N, + aa + kk * i * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + kk += i; + } + i >>= 1; + } + } + + b += GEMM_UNROLL_N * k * COMPSIZE; + c += GEMM_UNROLL_N * ldc * COMPSIZE; + j --; + jj += GEMM_UNROLL_M; + } + + if (n & (GEMM_UNROLL_N - 1)) { + + j = (GEMM_UNROLL_N >> 1); + while (j > 0) { + if (n & j) { + + kk = offset; + aa = a; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + + while (i > 0) { + if (kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, j, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, + b, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, j, + aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * j * COMPSIZE, cc, ldc); + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + kk += GEMM_UNROLL_M; + i --; + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + while (i > 0) { + if (m & i) { + if (kk > 0) { + GEMM_KERNEL(i, j, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, + b, + cc, + ldc); + } + + solve(i, j, + aa + kk * i * COMPSIZE, + b + kk * j * COMPSIZE, cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + kk += i; + } + i >>= 1; + } + } + + b += j * k * COMPSIZE; + c += j * ldc * COMPSIZE; + } + j >>= 1; + } + } + + return 0; +} diff --git a/kernel/x86_64/ztrsm_kernel_RN_bulldozer.c b/kernel/x86_64/ztrsm_kernel_RN_bulldozer.c new file mode 100644 index 000000000..798601b16 --- /dev/null +++ b/kernel/x86_64/ztrsm_kernel_RN_bulldozer.c @@ -0,0 +1,476 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. 
Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#include "common.h" + +static FLOAT dm1 = -1.; + +#ifdef CONJ +#define GEMM_KERNEL GEMM_KERNEL_R +#else +#define GEMM_KERNEL GEMM_KERNEL_N +#endif + +#if GEMM_DEFAULT_UNROLL_M == 1 +#define GEMM_UNROLL_M_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 2 +#define GEMM_UNROLL_M_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 4 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 6 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 8 +#define GEMM_UNROLL_M_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 16 +#define GEMM_UNROLL_M_SHIFT 4 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 1 +#define GEMM_UNROLL_N_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 2 +#define GEMM_UNROLL_N_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 4 +#define GEMM_UNROLL_N_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 8 +#define GEMM_UNROLL_N_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 16 +#define GEMM_UNROLL_N_SHIFT 4 +#endif + + +#ifndef CONJ + +static void ztrsm_RN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) __attribute__ ((noinline)); + +static void ztrsm_RN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) +{ + + FLOAT *c1 = c + ldc*2 ; + BLASLONG n1 = n * 4; + BLASLONG i=0; + + __asm__ __volatile__ + ( + " vzeroupper \n\t" + " prefetcht0 (%4) \n\t" + " prefetcht0 (%5) \n\t" + " vxorpd %%xmm8 , %%xmm8 , %%xmm8 \n\t" + " vxorpd %%xmm9 , %%xmm9 , %%xmm9 \n\t" + " vxorpd %%xmm10, %%xmm10, %%xmm10 \n\t" + " vxorpd %%xmm11, %%xmm11, %%xmm11 \n\t" + " vxorpd %%xmm12, %%xmm12, %%xmm12 \n\t" + " vxorpd %%xmm13, %%xmm13, %%xmm13 \n\t" + " vxorpd %%xmm14, %%xmm14, %%xmm14 \n\t" + " vxorpd %%xmm15, %%xmm15, %%xmm15 \n\t" + + " cmpq $0, %0 \n\t" + " je 3f \n\t" + + " .align 16 \n\t" + "1: \n\t" + + " prefetcht0 256(%3,%1,8) \n\t" + " prefetcht0 256(%2,%1,8) \n\t" + " vmovddup (%3,%1,8), %%xmm0 \n\t" // b0 real, b0 real + " vmovddup 8(%3,%1,8), %%xmm1 \n\t" // b0 imag, b0 imag + " vmovups (%2,%1,8), %%xmm4 \n\t" // a0 real , a0 imag + " vmovups 16(%2,%1,8), %%xmm5 \n\t" // a1 real , a1 imag + " 
vmovddup 16(%3,%1,8), %%xmm2 \n\t" // b1 real, b1 real + " vmovddup 24(%3,%1,8), %%xmm3 \n\t" // b1 imag, b1 imag + + " vfnmaddpd %%xmm8 , %%xmm0 , %%xmm4 , %%xmm8 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm9 , %%xmm1 , %%xmm4 , %%xmm9 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddpd %%xmm10, %%xmm0 , %%xmm5 , %%xmm10 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm11, %%xmm1 , %%xmm5 , %%xmm11 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddpd %%xmm12, %%xmm2 , %%xmm4 , %%xmm12 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm13, %%xmm3 , %%xmm4 , %%xmm13 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddpd %%xmm14, %%xmm2 , %%xmm5 , %%xmm14 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm15, %%xmm3 , %%xmm5 , %%xmm15 \n\t" // a_real * b_imag , a_imag * b_imag + + " addq $4, %1 \n\t" + " cmpq %1, %0 \n\t" + + " jz 2f \n\t" + + " vmovddup (%3,%1,8), %%xmm0 \n\t" // b0 real, b0 real + " vmovddup 8(%3,%1,8), %%xmm1 \n\t" // b0 imag, b0 imag + " vmovups (%2,%1,8), %%xmm4 \n\t" // a0 real , a0 imag + " vmovups 16(%2,%1,8), %%xmm5 \n\t" // a1 real , a1 imag + " vmovddup 16(%3,%1,8), %%xmm2 \n\t" // b1 real, b1 real + " vmovddup 24(%3,%1,8), %%xmm3 \n\t" // b1 imag, b1 imag + + " vfnmaddpd %%xmm8 , %%xmm0 , %%xmm4 , %%xmm8 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm9 , %%xmm1 , %%xmm4 , %%xmm9 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddpd %%xmm10, %%xmm0 , %%xmm5 , %%xmm10 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm11, %%xmm1 , %%xmm5 , %%xmm11 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddpd %%xmm12, %%xmm2 , %%xmm4 , %%xmm12 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm13, %%xmm3 , %%xmm4 , %%xmm13 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddpd %%xmm14, %%xmm2 , %%xmm5 , %%xmm14 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm15, %%xmm3 , %%xmm5 , %%xmm15 \n\t" // a_real * b_imag , a_imag * b_imag + + " addq $4, %1 \n\t" + " cmpq %1, %0 \n\t" + + " jnz 1b \n\t" + + + "2: \n\t" + + " vshufpd $0x01 , %%xmm9 , %%xmm9, %%xmm9 \n\t" + " vshufpd $0x01 , %%xmm11 , %%xmm11 , %%xmm11 \n\t" + " vshufpd $0x01 , %%xmm13 , %%xmm13 , %%xmm13 \n\t" + " vshufpd $0x01 , %%xmm15 , %%xmm15 , %%xmm15 \n\t" + + " vaddsubpd %%xmm8 , %%xmm9 , %%xmm8 \n\t" + " vaddsubpd %%xmm10, %%xmm11, %%xmm10 \n\t" + " vaddsubpd %%xmm12, %%xmm13, %%xmm12 \n\t" + " vaddsubpd %%xmm14, %%xmm15, %%xmm14 \n\t" + + " vxorpd %%xmm7 , %%xmm7 , %%xmm7 \n\t" + + " vaddsubpd %%xmm8 , %%xmm7 , %%xmm8 \n\t" + " vaddsubpd %%xmm10, %%xmm7 , %%xmm10 \n\t" + " vaddsubpd %%xmm12, %%xmm7 , %%xmm12 \n\t" + " vaddsubpd %%xmm14, %%xmm7 , %%xmm14 \n\t" + + " vmovups (%4) , %%xmm0 \n\t" + " vmovups 16(%4) , %%xmm1 \n\t" + + " vmovups (%5) , %%xmm4 \n\t" + " vmovups 16(%5) , %%xmm5 \n\t" + + " vaddpd %%xmm0 , %%xmm8 , %%xmm8 \n\t" + " vaddpd %%xmm1 , %%xmm10, %%xmm10 \n\t" + " vaddpd %%xmm4 , %%xmm12, %%xmm12 \n\t" + " vaddpd %%xmm5 , %%xmm14, %%xmm14 \n\t" + + " vmovups %%xmm8 , (%4) \n\t" + " vmovups %%xmm10 ,16(%4) \n\t" + + " vmovups %%xmm12 , (%5) \n\t" + " vmovups %%xmm14 ,16(%5) \n\t" + + "3: \n\t" + + " vzeroupper \n\t" + + : + : + "r" (n1), // 0 + "a" (i), // 1 + "r" (a), // 2 + "r" (b), // 3 + "r" (c), // 4 + "r" (c1), // 5 + "r" (as), // 6 + "r" (bs) // 7 + : "cc", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + +#endif + 
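
The inline-assembly helpers above (ztrsm_RN_solve_opt here, and the matching LN/LT/RT variants in the other files) only perform the rank-k update of one C tile; the triangular solve itself is still done by the scalar solve() routines that follow. As a rough scalar reference of what the vfnmaddpd accumulation plus the vaddsubpd lane-swap sign fix-up compute, assuming GEMM_UNROLL_M == GEMM_UNROLL_N == 2, interleaved (re, im) doubles, and the non-conjugated (#ifndef CONJ) path — ztrsm_tile_update_ref is an illustrative name, not part of the patch:

/* Sketch only: C[0:2, 0:2] -= A_panel * B_panel over n packed k-steps.    */
/* a holds 2 complex values per k-step (the M rows), b holds 2 complex     */
/* values per k-step (the N columns), c/c1 are the two tile columns.       */
static void ztrsm_tile_update_ref(long n, const double *a, const double *b,
                                  double *c, long ldc)
{
    double *c1 = c + ldc * 2;          /* second column of the 2x2 tile     */
    double acc[4][2] = {{0}};          /* (re, im) sums: a0*b0, a1*b0, a0*b1, a1*b1 */

    for (long l = 0; l < n; l++) {     /* each step consumes 4 doubles of a and b */
        const double *ap = a + 4 * l, *bp = b + 4 * l;
        for (int i = 0; i < 2; i++) {          /* row index into a  */
            for (int j = 0; j < 2; j++) {      /* column index into b */
                double ar = ap[2 * i], ai = ap[2 * i + 1];
                double br = bp[2 * j], bi = bp[2 * j + 1];
                acc[2 * j + i][0] += ar * br - ai * bi;   /* Re(a*b) */
                acc[2 * j + i][1] += ar * bi + ai * br;   /* Im(a*b) */
            }
        }
    }
    /* subtract the accumulated products from the tile, as the asm does with
     * its final vaddsubpd / vaddpd / vmovups sequence                       */
    c [0] -= acc[0][0];  c [1] -= acc[0][1];
    c [2] -= acc[1][0];  c [3] -= acc[1][1];
    c1[0] -= acc[2][0];  c1[1] -= acc[2][1];
    c1[2] -= acc[3][0];  c1[3] -= acc[3][1];
}
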
+#ifndef COMPLEX + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa, bb; + + int i, j, k; + + for (i = 0; i < n; i++) { + + bb = *(b + i); + + for (j = 0; j < m; j ++) { + aa = *(c + j + i * ldc); + aa *= bb; + *a = aa; + *(c + j + i * ldc) = aa; + a ++; + + for (k = i + 1; k < n; k ++){ + *(c + j + k * ldc) -= aa * *(b + k); + } + + } + b += n; + } +} + +#else + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa1, aa2; + FLOAT bb1, bb2; + FLOAT cc1, cc2; + + int i, j, k; + + ldc *= 2; + + for (i = 0; i < n; i++) { + + bb1 = *(b + i * 2 + 0); + bb2 = *(b + i * 2 + 1); + + for (j = 0; j < m; j ++) { + aa1 = *(c + j * 2 + 0 + i * ldc); + aa2 = *(c + j * 2 + 1 + i * ldc); + +#ifndef CONJ + cc1 = aa1 * bb1 - aa2 * bb2; + cc2 = aa1 * bb2 + aa2 * bb1; +#else + cc1 = aa1 * bb1 + aa2 * bb2; + cc2 = -aa1 * bb2 + aa2 * bb1; +#endif + + *(a + 0) = cc1; + *(a + 1) = cc2; + *(c + j * 2 + 0 + i * ldc) = cc1; + *(c + j * 2 + 1 + i * ldc) = cc2; + a += 2; + + for (k = i + 1; k < n; k ++){ +#ifndef CONJ + *(c + j * 2 + 0 + k * ldc) -= cc1 * *(b + k * 2 + 0) - cc2 * *(b + k * 2 + 1); + *(c + j * 2 + 1 + k * ldc) -= cc1 * *(b + k * 2 + 1) + cc2 * *(b + k * 2 + 0); +#else + *(c + j * 2 + 0 + k * ldc) -= cc1 * *(b + k * 2 + 0) + cc2 * *(b + k * 2 + 1); + *(c + j * 2 + 1 + k * ldc) -= - cc1 * *(b + k * 2 + 1) + cc2 * *(b + k * 2 + 0); +#endif + } + + } + b += n * 2; + } +} + +#endif + + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, +#ifdef COMPLEX + FLOAT dummy2, +#endif + FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){ + + FLOAT *aa, *cc; + BLASLONG kk; + BLASLONG i, j, jj; + +#if 0 + fprintf(stderr, "TRSM RN KERNEL m = %3ld n = %3ld k = %3ld offset = %3ld\n", + m, n, k, offset); +#endif + + jj = 0; + j = (n >> GEMM_UNROLL_N_SHIFT); + kk = -offset; + + while (j > 0) { + + aa = a; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + + if (i > 0) { + do { + +#ifndef CONJ + + ztrsm_RN_solve_opt(kk, aa, b, cc, ldc, aa + kk * GEMM_UNROLL_M * COMPSIZE, b + kk * GEMM_UNROLL_N * COMPSIZE); + + solve(GEMM_UNROLL_M, GEMM_UNROLL_N, + aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); +#else + + if (kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, GEMM_UNROLL_N, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, b, cc, ldc); + } + + solve(GEMM_UNROLL_M, GEMM_UNROLL_N, + aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); +#endif + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + i --; + } while (i > 0); + } + + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + while (i > 0) { + if (m & i) { + if (kk > 0) { + GEMM_KERNEL(i, GEMM_UNROLL_N, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, b, cc, ldc); + } + solve(i, GEMM_UNROLL_N, + aa + kk * i * COMPSIZE, + b + kk * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + } + i >>= 1; + } + } + + kk += GEMM_UNROLL_N; + b += GEMM_UNROLL_N * k * COMPSIZE; + c += GEMM_UNROLL_N * ldc * COMPSIZE; + j --; + jj += GEMM_UNROLL_M; + } + + if (n & (GEMM_UNROLL_N - 1)) { + + j = (GEMM_UNROLL_N >> 1); + while (j > 0) { + if (n & j) { + + aa = a; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + + while (i > 0) { + if (kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, j, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, + b, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, j, + aa + kk * GEMM_UNROLL_M * COMPSIZE, + b + kk * j * COMPSIZE, cc, ldc); + 
+ aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + i --; + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + while (i > 0) { + if (m & i) { + if (kk > 0) { + GEMM_KERNEL(i, j, kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa, + b, + cc, + ldc); + } + + solve(i, j, + aa + kk * i * COMPSIZE, + b + kk * j * COMPSIZE, cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + } + i >>= 1; + } + } + + b += j * k * COMPSIZE; + c += j * ldc * COMPSIZE; + kk += j; + } + j >>= 1; + } + } + + return 0; +} diff --git a/kernel/x86_64/ztrsm_kernel_RT_bulldozer.c b/kernel/x86_64/ztrsm_kernel_RT_bulldozer.c new file mode 100644 index 000000000..1948cbeed --- /dev/null +++ b/kernel/x86_64/ztrsm_kernel_RT_bulldozer.c @@ -0,0 +1,506 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. 
*/ +/*********************************************************************/ + +#include "common.h" + +static FLOAT dm1 = -1.; + +#ifdef CONJ +#define GEMM_KERNEL GEMM_KERNEL_R +#else +#define GEMM_KERNEL GEMM_KERNEL_N +#endif + +#if GEMM_DEFAULT_UNROLL_M == 1 +#define GEMM_UNROLL_M_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 2 +#define GEMM_UNROLL_M_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 4 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 6 +#define GEMM_UNROLL_M_SHIFT 2 +#endif + + +#if GEMM_DEFAULT_UNROLL_M == 8 +#define GEMM_UNROLL_M_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_M == 16 +#define GEMM_UNROLL_M_SHIFT 4 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 1 +#define GEMM_UNROLL_N_SHIFT 0 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 2 +#define GEMM_UNROLL_N_SHIFT 1 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 4 +#define GEMM_UNROLL_N_SHIFT 2 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 8 +#define GEMM_UNROLL_N_SHIFT 3 +#endif + +#if GEMM_DEFAULT_UNROLL_N == 16 +#define GEMM_UNROLL_N_SHIFT 4 +#endif + + + +#ifndef CONJ + +static void ztrsm_RT_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) __attribute__ ((noinline)); + +static void ztrsm_RT_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) +{ + + FLOAT *c1 = c + ldc*2 ; + BLASLONG n1 = n * 4; + BLASLONG i=0; + + __asm__ __volatile__ + ( + " vzeroupper \n\t" + " prefetcht0 (%4) \n\t" + " prefetcht0 (%5) \n\t" + " vxorpd %%xmm8 , %%xmm8 , %%xmm8 \n\t" + " vxorpd %%xmm9 , %%xmm9 , %%xmm9 \n\t" + " vxorpd %%xmm10, %%xmm10, %%xmm10 \n\t" + " vxorpd %%xmm11, %%xmm11, %%xmm11 \n\t" + " vxorpd %%xmm12, %%xmm12, %%xmm12 \n\t" + " vxorpd %%xmm13, %%xmm13, %%xmm13 \n\t" + " vxorpd %%xmm14, %%xmm14, %%xmm14 \n\t" + " vxorpd %%xmm15, %%xmm15, %%xmm15 \n\t" + + " cmpq $0, %0 \n\t" + " je 3f \n\t" + + " .align 16 \n\t" + "1: \n\t" + + " prefetcht0 256(%3,%1,8) \n\t" + " prefetcht0 256(%2,%1,8) \n\t" + " vmovddup (%3,%1,8), %%xmm0 \n\t" // b0 real, b0 real + " vmovddup 8(%3,%1,8), %%xmm1 \n\t" // b0 imag, b0 imag + " vmovups (%2,%1,8), %%xmm4 \n\t" // a0 real , a0 imag + " vmovups 16(%2,%1,8), %%xmm5 \n\t" // a1 real , a1 imag + " vmovddup 16(%3,%1,8), %%xmm2 \n\t" // b1 real, b1 real + " vmovddup 24(%3,%1,8), %%xmm3 \n\t" // b1 imag, b1 imag + + " vfnmaddpd %%xmm8 , %%xmm0 , %%xmm4 , %%xmm8 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm9 , %%xmm1 , %%xmm4 , %%xmm9 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddpd %%xmm10, %%xmm0 , %%xmm5 , %%xmm10 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm11, %%xmm1 , %%xmm5 , %%xmm11 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddpd %%xmm12, %%xmm2 , %%xmm4 , %%xmm12 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm13, %%xmm3 , %%xmm4 , %%xmm13 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddpd %%xmm14, %%xmm2 , %%xmm5 , %%xmm14 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm15, %%xmm3 , %%xmm5 , %%xmm15 \n\t" // a_real * b_imag , a_imag * b_imag + + " addq $4, %1 \n\t" + " cmpq %1, %0 \n\t" + + " jz 2f \n\t" + + " vmovddup (%3,%1,8), %%xmm0 \n\t" // b0 real, b0 real + " vmovddup 8(%3,%1,8), %%xmm1 \n\t" // b0 imag, b0 imag + " vmovups (%2,%1,8), %%xmm4 \n\t" // a0 real , a0 imag + " vmovups 16(%2,%1,8), %%xmm5 \n\t" // a1 real , a1 imag + " vmovddup 16(%3,%1,8), %%xmm2 \n\t" // b1 real, b1 real + " vmovddup 24(%3,%1,8), %%xmm3 \n\t" // b1 imag, b1 imag + + " vfnmaddpd %%xmm8 , %%xmm0 , %%xmm4 , %%xmm8 \n\t" // a_real * b_real , a_imag * 
b_real + " vfnmaddpd %%xmm9 , %%xmm1 , %%xmm4 , %%xmm9 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddpd %%xmm10, %%xmm0 , %%xmm5 , %%xmm10 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm11, %%xmm1 , %%xmm5 , %%xmm11 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddpd %%xmm12, %%xmm2 , %%xmm4 , %%xmm12 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm13, %%xmm3 , %%xmm4 , %%xmm13 \n\t" // a_real * b_imag , a_imag * b_imag + + " vfnmaddpd %%xmm14, %%xmm2 , %%xmm5 , %%xmm14 \n\t" // a_real * b_real , a_imag * b_real + " vfnmaddpd %%xmm15, %%xmm3 , %%xmm5 , %%xmm15 \n\t" // a_real * b_imag , a_imag * b_imag + + " addq $4, %1 \n\t" + " cmpq %1, %0 \n\t" + + " jnz 1b \n\t" + + + "2: \n\t" + + " vshufpd $0x01 , %%xmm9 , %%xmm9, %%xmm9 \n\t" + " vshufpd $0x01 , %%xmm11 , %%xmm11 , %%xmm11 \n\t" + " vshufpd $0x01 , %%xmm13 , %%xmm13 , %%xmm13 \n\t" + " vshufpd $0x01 , %%xmm15 , %%xmm15 , %%xmm15 \n\t" + + " vaddsubpd %%xmm8 , %%xmm9 , %%xmm8 \n\t" + " vaddsubpd %%xmm10, %%xmm11, %%xmm10 \n\t" + " vaddsubpd %%xmm12, %%xmm13, %%xmm12 \n\t" + " vaddsubpd %%xmm14, %%xmm15, %%xmm14 \n\t" + + " vxorpd %%xmm7 , %%xmm7 , %%xmm7 \n\t" + + " vaddsubpd %%xmm8 , %%xmm7 , %%xmm8 \n\t" + " vaddsubpd %%xmm10, %%xmm7 , %%xmm10 \n\t" + " vaddsubpd %%xmm12, %%xmm7 , %%xmm12 \n\t" + " vaddsubpd %%xmm14, %%xmm7 , %%xmm14 \n\t" + + " vmovups (%4) , %%xmm0 \n\t" + " vmovups 16(%4) , %%xmm1 \n\t" + + " vmovups (%5) , %%xmm4 \n\t" + " vmovups 16(%5) , %%xmm5 \n\t" + + " vaddpd %%xmm0 , %%xmm8 , %%xmm8 \n\t" + " vaddpd %%xmm1 , %%xmm10, %%xmm10 \n\t" + " vaddpd %%xmm4 , %%xmm12, %%xmm12 \n\t" + " vaddpd %%xmm5 , %%xmm14, %%xmm14 \n\t" + + " vmovups %%xmm8 , (%4) \n\t" + " vmovups %%xmm10 ,16(%4) \n\t" + + " vmovups %%xmm12 , (%5) \n\t" + " vmovups %%xmm14 ,16(%5) \n\t" + + "3: \n\t" + + " vzeroupper \n\t" + + : + : + "r" (n1), // 0 + "a" (i), // 1 + "r" (a), // 2 + "r" (b), // 3 + "r" (c), // 4 + "r" (c1), // 5 + "r" (as), // 6 + "r" (bs) // 7 + : "cc", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + +#endif + + +#ifndef COMPLEX + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa, bb; + + int i, j, k; + + a += (n - 1) * m; + b += (n - 1) * n; + + for (i = n - 1; i >= 0; i--) { + + bb = *(b + i); + + for (j = 0; j < m; j ++) { + aa = *(c + j + i * ldc); + aa *= bb; + *a = aa; + *(c + j + i * ldc) = aa; + a ++; + + for (k = 0; k < i; k ++){ + *(c + j + k * ldc) -= aa * *(b + k); + } + + } + b -= n; + a -= 2 * m; + } + +} + +#else + +static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) { + + FLOAT aa1, aa2; + FLOAT bb1, bb2; + FLOAT cc1, cc2; + + int i, j, k; + + ldc *= 2; + + a += (n - 1) * m * 2; + b += (n - 1) * n * 2; + + for (i = n - 1; i >= 0; i--) { + + bb1 = *(b + i * 2 + 0); + bb2 = *(b + i * 2 + 1); + + for (j = 0; j < m; j ++) { + + aa1 = *(c + j * 2 + 0 + i * ldc); + aa2 = *(c + j * 2 + 1 + i * ldc); + +#ifndef CONJ + cc1 = aa1 * bb1 - aa2 * bb2; + cc2 = aa1 * bb2 + aa2 * bb1; +#else + cc1 = aa1 * bb1 + aa2 * bb2; + cc2 = - aa1 * bb2 + aa2 * bb1; +#endif + + *(a + 0) = cc1; + *(a + 1) = cc2; + + *(c + j * 2 + 0 + i * ldc) = cc1; + *(c + j * 2 + 1 + i * ldc) = cc2; + a += 2; + + for (k = 0; k < i; k ++){ +#ifndef CONJ + *(c + j * 2 + 0 + k * ldc) -= cc1 * *(b + k * 2 + 0) - cc2 * *(b + k * 2 + 1); + *(c + j * 2 + 1 + k * ldc) -= cc1 * *(b + k 
* 2 + 1) + cc2 * *(b + k * 2 + 0); +#else + *(c + j * 2 + 0 + k * ldc) -= cc1 * *(b + k * 2 + 0) + cc2 * *(b + k * 2 + 1); + *(c + j * 2 + 1 + k * ldc) -= -cc1 * *(b + k * 2 + 1) + cc2 * *(b + k * 2 + 0); +#endif + } + + } + b -= n * 2; + a -= 4 * m; + } + +} + +#endif + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1, +#ifdef COMPLEX + FLOAT dummy2, +#endif + FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){ + + BLASLONG i, j; + FLOAT *aa, *cc; + BLASLONG kk; + +#if 0 + fprintf(stderr, "TRSM RT KERNEL m = %3ld n = %3ld k = %3ld offset = %3ld\n", + m, n, k, offset); +#endif + + kk = n - offset; + c += n * ldc * COMPSIZE; + b += n * k * COMPSIZE; + + if (n & (GEMM_UNROLL_N - 1)) { + + j = 1; + while (j < GEMM_UNROLL_N) { + if (n & j) { + + aa = a; + b -= j * k * COMPSIZE; + c -= j * ldc* COMPSIZE; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + if (i > 0) { + + do { + if (k - kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, j, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + GEMM_UNROLL_M * kk * COMPSIZE, + b + j * kk * COMPSIZE, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, j, + aa + (kk - j) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - j) * j * COMPSIZE, + cc, ldc); + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + i --; + } while (i > 0); + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + do { + if (m & i) { + + if (k - kk > 0) { + GEMM_KERNEL(i, j, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + i * kk * COMPSIZE, + b + j * kk * COMPSIZE, + cc, ldc); + } + + solve(i, j, + aa + (kk - j) * i * COMPSIZE, + b + (kk - j) * j * COMPSIZE, + cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + + } + i >>= 1; + } while (i > 0); + } + kk -= j; + } + j <<= 1; + } + } + + j = (n >> GEMM_UNROLL_N_SHIFT); + + if (j > 0) { + + do { + aa = a; + b -= GEMM_UNROLL_N * k * COMPSIZE; + c -= GEMM_UNROLL_N * ldc * COMPSIZE; + cc = c; + + i = (m >> GEMM_UNROLL_M_SHIFT); + if (i > 0) { + do { + +#ifndef CONJ + + ztrsm_RT_solve_opt(k-kk, aa + GEMM_UNROLL_M * kk * COMPSIZE, b + GEMM_UNROLL_N * kk * COMPSIZE, cc, ldc, + aa + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_M * COMPSIZE, b + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_N * COMPSIZE); + + solve(GEMM_UNROLL_M, GEMM_UNROLL_N, + aa + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + +#else + if (k - kk > 0) { + GEMM_KERNEL(GEMM_UNROLL_M, GEMM_UNROLL_N, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + GEMM_UNROLL_M * kk * COMPSIZE, + b + GEMM_UNROLL_N * kk * COMPSIZE, + cc, + ldc); + } + + solve(GEMM_UNROLL_M, GEMM_UNROLL_N, + aa + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_M * COMPSIZE, + b + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + +#endif + + aa += GEMM_UNROLL_M * k * COMPSIZE; + cc += GEMM_UNROLL_M * COMPSIZE; + i --; + } while (i > 0); + } + + if (m & (GEMM_UNROLL_M - 1)) { + i = (GEMM_UNROLL_M >> 1); + do { + if (m & i) { + if (k - kk > 0) { + GEMM_KERNEL(i, GEMM_UNROLL_N, k - kk, dm1, +#ifdef COMPLEX + ZERO, +#endif + aa + i * kk * COMPSIZE, + b + GEMM_UNROLL_N * kk * COMPSIZE, + cc, + ldc); + } + + solve(i, GEMM_UNROLL_N, + aa + (kk - GEMM_UNROLL_N) * i * COMPSIZE, + b + (kk - GEMM_UNROLL_N) * GEMM_UNROLL_N * COMPSIZE, + cc, ldc); + + aa += i * k * COMPSIZE; + cc += i * COMPSIZE; + } + i >>= 1; + } while (i > 0); + } + + kk -= GEMM_UNROLL_N; + j --; + } while (j > 0); + } + + return 0; +} + +
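
Across all four kernels the scalar solve() helpers share the same per-element step: the right-hand side is multiplied by the stored pivot, which — by the usual OpenBLAS trsm packing convention, an assumption not visible in this diff — already holds the reciprocal of the diagonal entry; under CONJ the pivot is conjugated. A minimal sketch of that step for one complex element (zsolve_step is an illustrative name, not part of the patch):

/* Sketch only: one complex "division" as performed inside solve().       */
/* inv_diag is the packed pivot, assumed to be the reciprocal of the      */
/* diagonal entry; rhs is updated in place.                                */
static inline void zsolve_step(const double inv_diag[2], double rhs[2], int conj)
{
    double re, im;
    if (!conj) {                       /* cc = pivot * rhs                  */
        re = inv_diag[0] * rhs[0] - inv_diag[1] * rhs[1];
        im = inv_diag[0] * rhs[1] + inv_diag[1] * rhs[0];
    } else {                           /* CONJ path: cc = conj(pivot) * rhs */
        re = inv_diag[0] * rhs[0] + inv_diag[1] * rhs[1];
        im = inv_diag[0] * rhs[1] - inv_diag[1] * rhs[0];
    }
    rhs[0] = re;
    rhs[1] = im;
}

Multiplying by a pre-inverted pivot is also why no division instruction appears anywhere in the solve() loops of these files.
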