/*********************************************************************/
/* Copyright 2009, 2010 The University of Texas at Austin.           */
/* All rights reserved.                                              */
/*                                                                   */
/* Redistribution and use in source and binary forms, with or        */
/* without modification, are permitted provided that the following   */
/* conditions are met:                                               */
/*                                                                   */
/*   1. Redistributions of source code must retain the above         */
/*      copyright notice, this list of conditions and the following  */
/*      disclaimer.                                                  */
/*                                                                   */
/*   2. Redistributions in binary form must reproduce the above      */
/*      copyright notice, this list of conditions and the following  */
/*      disclaimer in the documentation and/or other materials       */
/*      provided with the distribution.                              */
/*                                                                   */
/*    THIS  SOFTWARE IS PROVIDED  BY THE  UNIVERSITY OF  TEXAS AT    */
/*    AUSTIN  ``AS IS''  AND ANY  EXPRESS OR  IMPLIED WARRANTIES,    */
/*    INCLUDING, BUT  NOT LIMITED  TO, THE IMPLIED  WARRANTIES OF    */
/*    MERCHANTABILITY  AND FITNESS FOR  A PARTICULAR  PURPOSE ARE    */
/*    DISCLAIMED.  IN  NO EVENT SHALL THE UNIVERSITY  OF TEXAS AT    */
/*    AUSTIN OR CONTRIBUTORS BE  LIABLE FOR ANY DIRECT, INDIRECT,    */
/*    INCIDENTAL,  SPECIAL, EXEMPLARY,  OR  CONSEQUENTIAL DAMAGES    */
/*    (INCLUDING, BUT  NOT LIMITED TO,  PROCUREMENT OF SUBSTITUTE    */
/*    GOODS  OR  SERVICES; LOSS  OF  USE,  DATA,  OR PROFITS;  OR    */
/*    BUSINESS INTERRUPTION) HOWEVER CAUSED  AND ON ANY THEORY OF    */
/*    LIABILITY, WHETHER  IN CONTRACT, STRICT  LIABILITY, OR TORT    */
/*    (INCLUDING NEGLIGENCE OR OTHERWISE)  ARISING IN ANY WAY OUT    */
/*    OF  THE  USE OF  THIS  SOFTWARE,  EVEN  IF ADVISED  OF  THE    */
/*    POSSIBILITY OF SUCH DAMAGE.                                    */
/*                                                                   */
/* The views and conclusions contained in the software and           */
/* documentation are those of the authors and should not be          */
/* interpreted as representing official policies, either expressed   */
/* or implied, of The University of Texas at Austin.                 */
/*********************************************************************/

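/* Overview: SSE2 double-precision GEMM/TRMM compute kernel for      */
/* 32-bit x86 with 2x4 register blocking. The N loop walks B in      */
/* 4-column panels (.L01), then 2-column (.L30) and 1-column (.L50)  */
/* remainders; within each panel the M loop walks A two rows at a    */
/* time with a one-row remainder. The 16-byte ALPHA operand and the  */
/* ZBASE_SHIFT scaling of LDC suggest a GEMM3M-style kernel: the     */
/* packed A/B panels are real, while each scalar result is scaled    */
/* by the two-element alpha and accumulated into a complex-layout    */
/* C. With TRMMKERNEL defined, OFFSET/KK restrict the K range per    */
/* tile for the triangular update.                                   */
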
#define ASSEMBLER
#include "common.h"

#define STACK 16
#define ARGS 16

#define M	 4 + STACK + ARGS(%esp)
#define N	 8 + STACK + ARGS(%esp)
#define K	12 + STACK + ARGS(%esp)
#define ALPHA	16 + STACK + ARGS(%esp)
#define A	32 + STACK + ARGS(%esp)
#define ARG_B	36 + STACK + ARGS(%esp)
#define C	40 + STACK + ARGS(%esp)
#define ARG_LDC	44 + STACK + ARGS(%esp)

#define J	 0 + STACK(%esp)
#define BX	 4 + STACK(%esp)
#define KK	 8 + STACK(%esp)
#define KKK	12 + STACK(%esp)

#define AA	%edx
#define BB	%ecx
#define LDC	%ebp
#define B	%edi
#define C1	%esi
#define I	%ebx

#ifdef NANO
#define PREFETCHSIZE (8 * 3 + 4)
#define PREFETCHW prefetcht0
#define PREFETCHB prefetcht0
#endif

#ifndef PREFETCH
#define PREFETCH prefetcht0
#endif

#ifndef PREFETCHW
#define PREFETCHW prefetcht2
#endif

#ifndef PREFETCHB
#define PREFETCHB prefetcht2
#endif

#ifndef PREFETCHSIZE
#define PREFETCHSIZE (8 * 21 + 4)
#endif

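/* Entry: build the stack frame, save callee-saved registers, load  */
/* the B/LDC arguments, bias the A/B panel pointers by 16 elements  */
/* (so the hot loops can use small negative displacements), and     */
/* convert LDC to a byte stride.                                    */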
        PROLOGUE

        subl    $ARGS, %esp     # Generate Stack Frame

        pushl   %ebp
        pushl   %edi
        pushl   %esi
        pushl   %ebx

        PROFCODE

        movl    ARG_B,   B
        movl    ARG_LDC, LDC

#ifdef TRMMKERNEL
        movl    OFFSET, %eax
#ifndef LEFT
        negl    %eax
#endif
        movl    %eax, KK
#endif

        subl    $-16 * SIZE, A
        subl    $-16 * SIZE, B

        sall    $ZBASE_SHIFT, LDC

        movl    N,  %eax
        sarl    $2, %eax
        movl    %eax, J
        jle     .L30
        ALIGN_4

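/* .L01: outer loop over 4-column panels of B (N / 4 iterations).   */
/* BX walks the B panel ahead of the compute loop for prefetching;  */
/* C1 and AA restart from C and A for each panel.                   */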
.L01:
#if defined(TRMMKERNEL) && defined(LEFT)
        movl    OFFSET, %eax
        movl    %eax, KK
#endif

        movl    K, %eax
        sall    $BASE_SHIFT + 2, %eax
        leal    (B, %eax), %eax
        movl    %eax, BX

        movl    C, C1
        movl    A, AA

        movl    M, I
        sarl    $1, I
        jle     .L20
        ALIGN_4

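/* .L11: 2x4 tile loop over M / 2 row pairs. xmm4-xmm7 accumulate   */
/* the eight dot products (lanes are untangled at .L18); the TRMM   */
/* path offsets AA/BB by KK and sets the K trip count in KKK.       */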
.L11:
#if !defined(TRMMKERNEL) || \
    (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

        movl    B, BB
#else
        movl    B, BB
        movl    KK, %eax
        leal    (, %eax, SIZE), %eax
        leal    (AA, %eax, 2), AA
        leal    (BB, %eax, 4), BB
#endif

        movl    BX, %eax
        PREFETCHB  -16 * SIZE(%eax)
        subl    $-8 * SIZE, BX

        leal    (C1, LDC, 2), %eax

        movaps  -16 * SIZE(AA), %xmm0
        pxor    %xmm2, %xmm2
        movaps  -16 * SIZE(BB), %xmm1
        pxor    %xmm3, %xmm3

        pxor    %xmm4, %xmm4
        PREFETCHW  1 * SIZE(C1)
        pxor    %xmm5, %xmm5
        PREFETCHW  1 * SIZE(C1, LDC)
        pxor    %xmm6, %xmm6
        PREFETCHW  1 * SIZE(%eax)
        pxor    %xmm7, %xmm7
        PREFETCHW  1 * SIZE(%eax, LDC)

#ifndef TRMMKERNEL
        movl    K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
        movl    K, %eax
        subl    KK, %eax
        movl    %eax, KKK
#else
        movl    KK, %eax
#ifdef LEFT
        addl    $2, %eax
#else
        addl    $4, %eax
#endif
        movl    %eax, KKK
#endif
        sarl    $3, %eax
        je      .L15
        ALIGN_4

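/* .L12: main K loop, unrolled 8x. Each step multiplies one A pair  */
/* against two B pairs, using pshufd $0x4e to swap B lanes so all   */
/* four columns are covered, while prefetching ahead in the A panel.*/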
.L12:
        PREFETCH  (PREFETCHSIZE + 0) * SIZE(AA)

        addpd   %xmm3, %xmm7
        movaps  -14 * SIZE(BB), %xmm3
        addpd   %xmm2, %xmm6
        pshufd  $0x4e, %xmm1, %xmm2
        mulpd   %xmm0, %xmm1
        mulpd   %xmm0, %xmm2

        addpd   %xmm1, %xmm5
        movaps  -12 * SIZE(BB), %xmm1
        addpd   %xmm2, %xmm4
        pshufd  $0x4e, %xmm3, %xmm2
        mulpd   %xmm0, %xmm3
        mulpd   %xmm0, %xmm2
        movaps  -14 * SIZE(AA), %xmm0

        addpd   %xmm3, %xmm7
        movaps  -10 * SIZE(BB), %xmm3
        addpd   %xmm2, %xmm6
        pshufd  $0x4e, %xmm1, %xmm2
        mulpd   %xmm0, %xmm1
        mulpd   %xmm0, %xmm2

        addpd   %xmm1, %xmm5
        movaps  -8 * SIZE(BB), %xmm1
        addpd   %xmm2, %xmm4
        pshufd  $0x4e, %xmm3, %xmm2
        mulpd   %xmm0, %xmm3
        mulpd   %xmm0, %xmm2
        movaps  -12 * SIZE(AA), %xmm0

        addpd   %xmm3, %xmm7
        movaps  -6 * SIZE(BB), %xmm3
        addpd   %xmm2, %xmm6
        pshufd  $0x4e, %xmm1, %xmm2
        mulpd   %xmm0, %xmm1
        mulpd   %xmm0, %xmm2

        addpd   %xmm1, %xmm5
        movaps  -4 * SIZE(BB), %xmm1
        addpd   %xmm2, %xmm4
        pshufd  $0x4e, %xmm3, %xmm2
        mulpd   %xmm0, %xmm3
        mulpd   %xmm0, %xmm2
        movaps  -10 * SIZE(AA), %xmm0

        addpd   %xmm3, %xmm7
        movaps  -2 * SIZE(BB), %xmm3
        addpd   %xmm2, %xmm6
        pshufd  $0x4e, %xmm1, %xmm2
        mulpd   %xmm0, %xmm1
        mulpd   %xmm0, %xmm2

        addpd   %xmm1, %xmm5
        movaps   0 * SIZE(BB), %xmm1
        addpd   %xmm2, %xmm4
        pshufd  $0x4e, %xmm3, %xmm2
        mulpd   %xmm0, %xmm3
        mulpd   %xmm0, %xmm2
        movaps  -8 * SIZE(AA), %xmm0

        PREFETCH  (PREFETCHSIZE + 8) * SIZE(AA)

        addpd   %xmm3, %xmm7
        movaps   2 * SIZE(BB), %xmm3
        addpd   %xmm2, %xmm6
        pshufd  $0x4e, %xmm1, %xmm2
        mulpd   %xmm0, %xmm1
        mulpd   %xmm0, %xmm2

        addpd   %xmm1, %xmm5
        movaps   4 * SIZE(BB), %xmm1
        addpd   %xmm2, %xmm4
        pshufd  $0x4e, %xmm3, %xmm2
        mulpd   %xmm0, %xmm3
        mulpd   %xmm0, %xmm2
        movaps  -6 * SIZE(AA), %xmm0

        addpd   %xmm3, %xmm7
        movaps   6 * SIZE(BB), %xmm3
        addpd   %xmm2, %xmm6
        pshufd  $0x4e, %xmm1, %xmm2
        mulpd   %xmm0, %xmm1
        mulpd   %xmm0, %xmm2

        addpd   %xmm1, %xmm5
        movaps   8 * SIZE(BB), %xmm1
        addpd   %xmm2, %xmm4
        pshufd  $0x4e, %xmm3, %xmm2
        mulpd   %xmm0, %xmm3
        mulpd   %xmm0, %xmm2
        movaps  -4 * SIZE(AA), %xmm0

        addpd   %xmm3, %xmm7
        movaps  10 * SIZE(BB), %xmm3
        addpd   %xmm2, %xmm6
        pshufd  $0x4e, %xmm1, %xmm2
        mulpd   %xmm0, %xmm1
        mulpd   %xmm0, %xmm2

        addpd   %xmm1, %xmm5
        movaps  12 * SIZE(BB), %xmm1
        addpd   %xmm2, %xmm4
        pshufd  $0x4e, %xmm3, %xmm2
        mulpd   %xmm0, %xmm3
        mulpd   %xmm0, %xmm2
        movaps  -2 * SIZE(AA), %xmm0

        addpd   %xmm3, %xmm7
        movaps  14 * SIZE(BB), %xmm3
        addpd   %xmm2, %xmm6
        pshufd  $0x4e, %xmm1, %xmm2
        mulpd   %xmm0, %xmm1
        mulpd   %xmm0, %xmm2

        addpd   %xmm1, %xmm5
        movaps  16 * SIZE(BB), %xmm1
        addpd   %xmm2, %xmm4
        pshufd  $0x4e, %xmm3, %xmm2
        mulpd   %xmm0, %xmm3
        subl    $-32 * SIZE, BB
        mulpd   %xmm0, %xmm2
        movaps   0 * SIZE(AA), %xmm0

        subl    $-16 * SIZE, AA

        subl    $1, %eax
        BRANCH
        jne     .L12
        ALIGN_4

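/* .L15/.L16: tail loop for the K % 8 leftover iterations, one      */
/* column-quad per step.                                            */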
.L15:
#ifndef TRMMKERNEL
        movl    K, %eax
#else
        movl    KKK, %eax
#endif
        andl    $7, %eax
        BRANCH
        je      .L18
        ALIGN_4

.L16:
        addpd   %xmm3, %xmm7
        movaps  -14 * SIZE(BB), %xmm3
        addpd   %xmm2, %xmm6
        pshufd  $0x4e, %xmm1, %xmm2
        mulpd   %xmm0, %xmm1
        mulpd   %xmm0, %xmm2

        addpd   %xmm1, %xmm5
        movaps  -12 * SIZE(BB), %xmm1
        addpd   %xmm2, %xmm4
        pshufd  $0x4e, %xmm3, %xmm2
        mulpd   %xmm0, %xmm3
        mulpd   %xmm0, %xmm2

        movaps  -14 * SIZE(AA), %xmm0

        addl    $2 * SIZE, AA
        addl    $4 * SIZE, BB
        decl    %eax
        jg      .L16
        ALIGN_4

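/* .L18: write-back. Fold in the pending products, restore the      */
/* swapped accumulator lanes, then broadcast each scalar result     */
/* (pshufd $0x44 / unpckhpd), scale it by the two-element ALPHA,    */
/* and add it to C, laid out as interleaved pairs at 0..3 * SIZE.   */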
.L18:
        addpd   %xmm2, %xmm6
        addpd   %xmm3, %xmm7

        movups  ALPHA, %xmm3

        movaps  %xmm4, %xmm0
        movsd   %xmm5, %xmm4
        movsd   %xmm0, %xmm5

        movaps  %xmm6, %xmm0
        movsd   %xmm7, %xmm6
        movsd   %xmm0, %xmm7

        leal    (C1, LDC, 2), %eax

        movsd   0 * SIZE(C1), %xmm0
        movhps  1 * SIZE(C1), %xmm0
        movsd   2 * SIZE(C1), %xmm1
        movhps  3 * SIZE(C1), %xmm1

        pshufd  $0x44, %xmm4, %xmm2
        unpckhpd %xmm4, %xmm4

        mulpd   %xmm3, %xmm2
        addpd   %xmm2, %xmm0
        mulpd   %xmm3, %xmm4
        addpd   %xmm4, %xmm1

        movlps  %xmm0, 0 * SIZE(C1)
        movhps  %xmm0, 1 * SIZE(C1)
        movlps  %xmm1, 2 * SIZE(C1)
        movhps  %xmm1, 3 * SIZE(C1)

        movsd   0 * SIZE(C1, LDC), %xmm0
        movhps  1 * SIZE(C1, LDC), %xmm0
        movsd   2 * SIZE(C1, LDC), %xmm1
        movhps  3 * SIZE(C1, LDC), %xmm1

        pshufd  $0x44, %xmm5, %xmm2
        unpckhpd %xmm5, %xmm5

        mulpd   %xmm3, %xmm2
        addpd   %xmm2, %xmm0
        mulpd   %xmm3, %xmm5
        addpd   %xmm5, %xmm1

        movlps  %xmm0, 0 * SIZE(C1, LDC)
        movhps  %xmm0, 1 * SIZE(C1, LDC)
        movlps  %xmm1, 2 * SIZE(C1, LDC)
        movhps  %xmm1, 3 * SIZE(C1, LDC)

        movsd   0 * SIZE(%eax), %xmm0
        movhps  1 * SIZE(%eax), %xmm0
        movsd   2 * SIZE(%eax), %xmm1
        movhps  3 * SIZE(%eax), %xmm1

        pshufd  $0x44, %xmm6, %xmm2
        unpckhpd %xmm6, %xmm6

        mulpd   %xmm3, %xmm2
        addpd   %xmm2, %xmm0
        mulpd   %xmm3, %xmm6
        addpd   %xmm6, %xmm1

        movlps  %xmm0, 0 * SIZE(%eax)
        movhps  %xmm0, 1 * SIZE(%eax)
        movlps  %xmm1, 2 * SIZE(%eax)
        movhps  %xmm1, 3 * SIZE(%eax)

        movsd   0 * SIZE(%eax, LDC), %xmm0
        movhps  1 * SIZE(%eax, LDC), %xmm0
        movsd   2 * SIZE(%eax, LDC), %xmm1
        movhps  3 * SIZE(%eax, LDC), %xmm1

        pshufd  $0x44, %xmm7, %xmm2
        unpckhpd %xmm7, %xmm7

        mulpd   %xmm3, %xmm2
        addpd   %xmm2, %xmm0
        mulpd   %xmm3, %xmm7
        addpd   %xmm7, %xmm1

        movlps  %xmm0, 0 * SIZE(%eax, LDC)
        movhps  %xmm0, 1 * SIZE(%eax, LDC)
        movlps  %xmm1, 2 * SIZE(%eax, LDC)
        movhps  %xmm1, 3 * SIZE(%eax, LDC)

        addl    $4 * SIZE, C1
        decl    I
        jg      .L11
        ALIGN_4

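/* .L20: leftover row when M is odd: a 1x4 tile, advancing A one    */
/* element per K step.                                              */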
.L20:
        movl    M, I
        testl   $1, I
        jle     .L29

#if !defined(TRMMKERNEL) || \
    (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

        movl    B, BB
#else
        movl    B, BB
        movl    KK, %eax
        leal    (, %eax, SIZE), %eax
        addl    %eax, AA
        leal    (BB, %eax, 4), BB
#endif

        movaps  -16 * SIZE(AA), %xmm0
        pxor    %xmm4, %xmm4
        movaps  -16 * SIZE(BB), %xmm2
        pxor    %xmm5, %xmm5
        movaps  -14 * SIZE(BB), %xmm3
        pxor    %xmm6, %xmm6
        pxor    %xmm7, %xmm7

#ifndef TRMMKERNEL
        movl    K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
        movl    K, %eax
        subl    KK, %eax
        movl    %eax, KKK
#else
        movl    KK, %eax
#ifdef LEFT
        addl    $1, %eax
#else
        addl    $4, %eax
#endif
        movl    %eax, KKK
#endif
        sarl    $3, %eax
        je      .L25
        ALIGN_4

.L22:
        PREFETCH  (PREFETCHSIZE + 0) * SIZE(AA)

        pshufd  $0x44, %xmm0, %xmm1
        mulpd   %xmm1, %xmm2
        mulpd   %xmm1, %xmm3

        addpd   %xmm2, %xmm4
        movaps  -12 * SIZE(BB), %xmm2
        addpd   %xmm3, %xmm5
        movaps  -10 * SIZE(BB), %xmm3

        pshufd  $0xee, %xmm0, %xmm1
        movaps  -14 * SIZE(AA), %xmm0
        mulpd   %xmm1, %xmm2
        mulpd   %xmm1, %xmm3

        addpd   %xmm2, %xmm6
        movaps  -8 * SIZE(BB), %xmm2
        addpd   %xmm3, %xmm7
        movaps  -6 * SIZE(BB), %xmm3

        pshufd  $0x44, %xmm0, %xmm1
        mulpd   %xmm1, %xmm2
        mulpd   %xmm1, %xmm3

        addpd   %xmm2, %xmm4
        movaps  -4 * SIZE(BB), %xmm2
        addpd   %xmm3, %xmm5
        movaps  -2 * SIZE(BB), %xmm3

        pshufd  $0xee, %xmm0, %xmm1
        movaps  -12 * SIZE(AA), %xmm0
        mulpd   %xmm1, %xmm2
        mulpd   %xmm1, %xmm3

        addpd   %xmm2, %xmm6
        movaps   0 * SIZE(BB), %xmm2
        addpd   %xmm3, %xmm7
        movaps   2 * SIZE(BB), %xmm3

        pshufd  $0x44, %xmm0, %xmm1
        mulpd   %xmm1, %xmm2
        mulpd   %xmm1, %xmm3

        addpd   %xmm2, %xmm4
        movaps   4 * SIZE(BB), %xmm2
        addpd   %xmm3, %xmm5
        movaps   6 * SIZE(BB), %xmm3

        pshufd  $0xee, %xmm0, %xmm1
        movaps  -10 * SIZE(AA), %xmm0
        mulpd   %xmm1, %xmm2
        mulpd   %xmm1, %xmm3

        addpd   %xmm2, %xmm6
        movaps   8 * SIZE(BB), %xmm2
        addpd   %xmm3, %xmm7
        movaps  10 * SIZE(BB), %xmm3

        pshufd  $0x44, %xmm0, %xmm1
        mulpd   %xmm1, %xmm2
        mulpd   %xmm1, %xmm3

        addpd   %xmm2, %xmm4
        movaps  12 * SIZE(BB), %xmm2
        addpd   %xmm3, %xmm5
        movaps  14 * SIZE(BB), %xmm3

        pshufd  $0xee, %xmm0, %xmm1
        movaps  -8 * SIZE(AA), %xmm0
        mulpd   %xmm1, %xmm2
        mulpd   %xmm1, %xmm3

        addpd   %xmm2, %xmm6
        movaps  16 * SIZE(BB), %xmm2
        addpd   %xmm3, %xmm7
        movaps  18 * SIZE(BB), %xmm3

        subl    $ -8 * SIZE, AA
        subl    $-32 * SIZE, BB

        subl    $1, %eax
        jne     .L22
        ALIGN_4

.L25:
#ifndef TRMMKERNEL
        movl    K, %eax
#else
        movl    KKK, %eax
#endif
        andl    $7, %eax
        BRANCH
        je      .L28
        ALIGN_4

.L26:
        pshufd  $0x44, %xmm0, %xmm1
        movsd   -15 * SIZE(AA), %xmm0
        mulpd   %xmm1, %xmm2
        mulpd   %xmm1, %xmm3

        addpd   %xmm2, %xmm4
        movaps  -12 * SIZE(BB), %xmm2
        addpd   %xmm3, %xmm5
        movaps  -10 * SIZE(BB), %xmm3

        addl    $1 * SIZE, AA
        addl    $4 * SIZE, BB
        decl    %eax
        jg      .L26
        ALIGN_4

.L28:
        movups  ALPHA, %xmm3

        addpd   %xmm6, %xmm4
        addpd   %xmm7, %xmm5

        leal    (C1, LDC, 2), %eax

        movsd   0 * SIZE(C1), %xmm0
        movhps  1 * SIZE(C1), %xmm0
        movsd   0 * SIZE(C1, LDC), %xmm1
        movhps  1 * SIZE(C1, LDC), %xmm1

        pshufd  $0x44, %xmm4, %xmm2
        unpckhpd %xmm4, %xmm4

        mulpd   %xmm3, %xmm2
        addpd   %xmm2, %xmm0
        mulpd   %xmm3, %xmm4
        addpd   %xmm4, %xmm1

        movlps  %xmm0, 0 * SIZE(C1)
        movhps  %xmm0, 1 * SIZE(C1)
        movlps  %xmm1, 0 * SIZE(C1, LDC)
        movhps  %xmm1, 1 * SIZE(C1, LDC)

        movsd   0 * SIZE(%eax), %xmm0
        movhps  1 * SIZE(%eax), %xmm0
        movsd   0 * SIZE(%eax, LDC), %xmm1
        movhps  1 * SIZE(%eax, LDC), %xmm1

        pshufd  $0x44, %xmm5, %xmm2
        unpckhpd %xmm5, %xmm5

        mulpd   %xmm3, %xmm2
        addpd   %xmm2, %xmm0
        mulpd   %xmm3, %xmm5
        addpd   %xmm5, %xmm1

        movlps  %xmm0, 0 * SIZE(%eax)
        movhps  %xmm0, 1 * SIZE(%eax)
        movlps  %xmm1, 0 * SIZE(%eax, LDC)
        movhps  %xmm1, 1 * SIZE(%eax, LDC)
        ALIGN_4

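/* .L29: panel done. Commit BB as the new B, advance C by four      */
/* columns, and (for right-side TRMM) move KK past this panel.      */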
.L29:
#if defined(TRMMKERNEL) && !defined(LEFT)
        addl    $4, KK
#endif

        movl    BB, B

        leal    (, LDC, 4), %eax
        addl    %eax, C
        decl    J
        jg      .L01
        ALIGN_4

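/* .L30: the same scheme for a remaining pair of columns (N & 2):   */
/* 2x2 tiles, with the odd-row remainder handled at .L40.           */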
.L30:
        movl    N, %eax
        testl   $2, %eax
        jle     .L50

#if defined(TRMMKERNEL) && defined(LEFT)
        movl    OFFSET, %eax
        movl    %eax, KK
#endif

        movl    C, C1
        movl    A, AA

        movl    M, I
        sarl    $1, I
        jle     .L40
        ALIGN_4

.L31:
#if !defined(TRMMKERNEL) || \
    (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

        movl    B, BB
#else
        movl    B, BB
        movl    KK, %eax
        leal    (, %eax, SIZE), %eax
        leal    (AA, %eax, 2), AA
        leal    (BB, %eax, 2), BB
#endif

        movaps  -16 * SIZE(AA), %xmm0
        pxor    %xmm4, %xmm4
        movaps  -16 * SIZE(BB), %xmm1
        pxor    %xmm5, %xmm5
        PREFETCHW  1 * SIZE(C1)
        pxor    %xmm6, %xmm6
        PREFETCHW  1 * SIZE(C1, LDC)
        pxor    %xmm7, %xmm7

#ifndef TRMMKERNEL
        movl    K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
        movl    K, %eax
        subl    KK, %eax
        movl    %eax, KKK
#else
        movl    KK, %eax
#ifdef LEFT
        addl    $2, %eax
#else
        addl    $2, %eax
#endif
        movl    %eax, KKK
#endif
        sarl    $3, %eax
        je      .L35
        ALIGN_4

.L32:
        PREFETCH  (PREFETCHSIZE + 0) * SIZE(AA)

        pshufd  $0x4e, %xmm1, %xmm2
        mulpd   %xmm0, %xmm1
        mulpd   %xmm0, %xmm2
        movaps  -14 * SIZE(AA), %xmm0

        addpd   %xmm1, %xmm5
        movaps  -14 * SIZE(BB), %xmm1
        addpd   %xmm2, %xmm4

        pshufd  $0x4e, %xmm1, %xmm2
        mulpd   %xmm0, %xmm1
        mulpd   %xmm0, %xmm2
        movaps  -12 * SIZE(AA), %xmm0

        addpd   %xmm1, %xmm7
        movaps  -12 * SIZE(BB), %xmm1
        addpd   %xmm2, %xmm6

        pshufd  $0x4e, %xmm1, %xmm2
        mulpd   %xmm0, %xmm1
        mulpd   %xmm0, %xmm2
        movaps  -10 * SIZE(AA), %xmm0

        addpd   %xmm1, %xmm5
        movaps  -10 * SIZE(BB), %xmm1
        addpd   %xmm2, %xmm4

        pshufd  $0x4e, %xmm1, %xmm2
        mulpd   %xmm0, %xmm1
        mulpd   %xmm0, %xmm2
        movaps  -8 * SIZE(AA), %xmm0

        addpd   %xmm1, %xmm7
        movaps  -8 * SIZE(BB), %xmm1
        addpd   %xmm2, %xmm6

        PREFETCH  (PREFETCHSIZE + 8) * SIZE(AA)

        pshufd  $0x4e, %xmm1, %xmm2
        mulpd   %xmm0, %xmm1
        mulpd   %xmm0, %xmm2
        movaps  -6 * SIZE(AA), %xmm0

        addpd   %xmm1, %xmm5
        movaps  -6 * SIZE(BB), %xmm1
        addpd   %xmm2, %xmm4

        pshufd  $0x4e, %xmm1, %xmm2
        mulpd   %xmm0, %xmm1
        mulpd   %xmm0, %xmm2
        movaps  -4 * SIZE(AA), %xmm0

        addpd   %xmm1, %xmm7
        movaps  -4 * SIZE(BB), %xmm1
        addpd   %xmm2, %xmm6

        pshufd  $0x4e, %xmm1, %xmm2
        mulpd   %xmm0, %xmm1
        mulpd   %xmm0, %xmm2
        movaps  -2 * SIZE(AA), %xmm0

        addpd   %xmm1, %xmm5
        movaps  -2 * SIZE(BB), %xmm1
        addpd   %xmm2, %xmm4

        pshufd  $0x4e, %xmm1, %xmm2
        mulpd   %xmm0, %xmm1
        mulpd   %xmm0, %xmm2
        movaps   0 * SIZE(AA), %xmm0

        addpd   %xmm1, %xmm7
        movaps   0 * SIZE(BB), %xmm1
        addpd   %xmm2, %xmm6

        subl    $-16 * SIZE, AA
        subl    $-16 * SIZE, BB

        subl    $1, %eax
        jne     .L32
        ALIGN_4

.L35:
#ifndef TRMMKERNEL
        movl    K, %eax
#else
        movl    KKK, %eax
#endif
        andl    $7, %eax
        BRANCH
        je      .L38
        ALIGN_4

.L36:
        pshufd  $0x4e, %xmm1, %xmm2
        mulpd   %xmm0, %xmm1
        mulpd   %xmm0, %xmm2
        movaps  -14 * SIZE(AA), %xmm0

        addpd   %xmm1, %xmm5
        movaps  -14 * SIZE(BB), %xmm1
        addpd   %xmm2, %xmm4

        addl    $2 * SIZE, AA
        addl    $2 * SIZE, BB
        decl    %eax
        jg      .L36
        ALIGN_4

.L38:
        movups  ALPHA, %xmm3

        addpd   %xmm6, %xmm4
        addpd   %xmm7, %xmm5

        movaps  %xmm4, %xmm0
        movsd   %xmm5, %xmm4
        movsd   %xmm0, %xmm5

        movsd   0 * SIZE(C1), %xmm0
        movhps  1 * SIZE(C1), %xmm0
        movsd   2 * SIZE(C1), %xmm1
        movhps  3 * SIZE(C1), %xmm1

        pshufd  $0x44, %xmm4, %xmm2
        unpckhpd %xmm4, %xmm4

        mulpd   %xmm3, %xmm2
        addpd   %xmm2, %xmm0
        mulpd   %xmm3, %xmm4
        addpd   %xmm4, %xmm1

        movlps  %xmm0, 0 * SIZE(C1)
        movhps  %xmm0, 1 * SIZE(C1)
        movlps  %xmm1, 2 * SIZE(C1)
        movhps  %xmm1, 3 * SIZE(C1)

        movsd   0 * SIZE(C1, LDC), %xmm0
        movhps  1 * SIZE(C1, LDC), %xmm0
        movsd   2 * SIZE(C1, LDC), %xmm1
        movhps  3 * SIZE(C1, LDC), %xmm1

        pshufd  $0x44, %xmm5, %xmm2
        unpckhpd %xmm5, %xmm5

        mulpd   %xmm3, %xmm2
        addpd   %xmm2, %xmm0
        mulpd   %xmm3, %xmm5
        addpd   %xmm5, %xmm1

        movlps  %xmm0, 0 * SIZE(C1, LDC)
        movhps  %xmm0, 1 * SIZE(C1, LDC)
        movlps  %xmm1, 2 * SIZE(C1, LDC)
        movhps  %xmm1, 3 * SIZE(C1, LDC)

        addl    $4 * SIZE, C1
        decl    I
        jg      .L31
        ALIGN_4

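/* .L40: leftover row for the two-column panel (1x2 tile).          */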
.L40:
        movl    M, I
        testl   $1, I
        jle     .L49

#if !defined(TRMMKERNEL) || \
    (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

        movl    B, BB
#else
        movl    B, BB
        movl    KK, %eax
        leal    (, %eax, SIZE), %eax
        addl    %eax, AA
        leal    (BB, %eax, 2), BB
#endif

        movaps  -16 * SIZE(AA), %xmm0
        pxor    %xmm4, %xmm4
        movaps  -16 * SIZE(BB), %xmm2
        pxor    %xmm5, %xmm5

#ifndef TRMMKERNEL
        movl    K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
        movl    K, %eax
        subl    KK, %eax
        movl    %eax, KKK
#else
        movl    KK, %eax
#ifdef LEFT
        addl    $1, %eax
#else
        addl    $2, %eax
#endif
        movl    %eax, KKK
#endif
        sarl    $3, %eax
        je      .L45
        ALIGN_4

.L42:
        PREFETCH  (PREFETCHSIZE + 0) * SIZE(AA)

        pshufd  $0x44, %xmm0, %xmm1
        mulpd   %xmm1, %xmm2

        addpd   %xmm2, %xmm4
        movaps  -14 * SIZE(BB), %xmm2

        pshufd  $0xee, %xmm0, %xmm1
        movaps  -14 * SIZE(AA), %xmm0
        mulpd   %xmm1, %xmm2

        addpd   %xmm2, %xmm5
        movaps  -12 * SIZE(BB), %xmm2

        pshufd  $0x44, %xmm0, %xmm1
        mulpd   %xmm1, %xmm2

        addpd   %xmm2, %xmm4
        movaps  -10 * SIZE(BB), %xmm2

        pshufd  $0xee, %xmm0, %xmm1
        movaps  -12 * SIZE(AA), %xmm0
        mulpd   %xmm1, %xmm2

        addpd   %xmm2, %xmm5
        movaps  -8 * SIZE(BB), %xmm2

        pshufd  $0x44, %xmm0, %xmm1
        mulpd   %xmm1, %xmm2

        addpd   %xmm2, %xmm4
        movaps  -6 * SIZE(BB), %xmm2

        pshufd  $0xee, %xmm0, %xmm1
        movaps  -10 * SIZE(AA), %xmm0
        mulpd   %xmm1, %xmm2

        addpd   %xmm2, %xmm5
        movaps  -4 * SIZE(BB), %xmm2

        pshufd  $0x44, %xmm0, %xmm1
        mulpd   %xmm1, %xmm2

        addpd   %xmm2, %xmm4
        movaps  -2 * SIZE(BB), %xmm2

        pshufd  $0xee, %xmm0, %xmm1
        movaps  -8 * SIZE(AA), %xmm0
        mulpd   %xmm1, %xmm2

        addpd   %xmm2, %xmm5
        movaps   0 * SIZE(BB), %xmm2

        subl    $ -8 * SIZE, AA
        subl    $-16 * SIZE, BB

        subl    $1, %eax
        jne     .L42
        ALIGN_4

.L45:
#ifndef TRMMKERNEL
        movl    K, %eax
#else
        movl    KKK, %eax
#endif
        andl    $7, %eax
        BRANCH
        je      .L48
        ALIGN_4

.L46:
        pshufd  $0x44, %xmm0, %xmm1
        movsd   -15 * SIZE(AA), %xmm0
        mulpd   %xmm1, %xmm2

        addpd   %xmm2, %xmm4
        movaps  -14 * SIZE(BB), %xmm2

        addl    $1 * SIZE, AA
        addl    $2 * SIZE, BB
        decl    %eax
        jg      .L46
        ALIGN_4

.L48:
        movups  ALPHA, %xmm3

        addpd   %xmm5, %xmm4

        movsd   0 * SIZE(C1), %xmm0
        movhps  1 * SIZE(C1), %xmm0
        movsd   0 * SIZE(C1, LDC), %xmm1
        movhps  1 * SIZE(C1, LDC), %xmm1

        pshufd  $0x44, %xmm4, %xmm2
        unpckhpd %xmm4, %xmm4

        mulpd   %xmm3, %xmm2
        addpd   %xmm2, %xmm0
        mulpd   %xmm3, %xmm4
        addpd   %xmm4, %xmm1

        movlps  %xmm0, 0 * SIZE(C1)
        movhps  %xmm0, 1 * SIZE(C1)
        movlps  %xmm1, 0 * SIZE(C1, LDC)
        movhps  %xmm1, 1 * SIZE(C1, LDC)
        ALIGN_4

.L49:
#if defined(TRMMKERNEL) && !defined(LEFT)
        addl    $2, KK
#endif

        movl    BB, B

        leal    (, LDC, 2), %eax
        addl    %eax, C
        ALIGN_4

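/* .L50: final single column (N & 1): 2x1 tiles, then a 1x1         */
/* remainder at .L60, whose paired accumulator is reduced with      */
/* haddpd before the store.                                         */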
.L50:
        movl    N, %eax
        testl   $1, %eax
        jle     .L999

#if defined(TRMMKERNEL) && defined(LEFT)
        movl    OFFSET, %eax
        movl    %eax, KK
#endif

        movl    C, C1
        movl    A, AA

        movl    M, I
        sarl    $1, I
        jle     .L60
        ALIGN_4

.L51:
#if !defined(TRMMKERNEL) || \
    (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

        movl    B, BB
#else
        movl    B, BB
        movl    KK, %eax
        leal    (, %eax, SIZE), %eax
        leal    (AA, %eax, 2), AA
        addl    %eax, BB
#endif

        movaps  -16 * SIZE(AA), %xmm0
        pxor    %xmm4, %xmm4
        movaps  -16 * SIZE(BB), %xmm1
        pxor    %xmm5, %xmm5
        PREFETCHW  1 * SIZE(C1)

#ifndef TRMMKERNEL
        movl    K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
        movl    K, %eax
        subl    KK, %eax
        movl    %eax, KKK
#else
        movl    KK, %eax
#ifdef LEFT
        addl    $2, %eax
#else
        addl    $1, %eax
#endif
        movl    %eax, KKK
#endif
        sarl    $3, %eax
        je      .L55
        ALIGN_4

.L52:
        PREFETCH  (PREFETCHSIZE + 0) * SIZE(AA)

        pshufd  $0x44, %xmm1, %xmm2
        mulpd   %xmm0, %xmm2
        movaps  -14 * SIZE(AA), %xmm0
        addpd   %xmm2, %xmm4

        pshufd  $0xee, %xmm1, %xmm2
        movaps  -14 * SIZE(BB), %xmm1
        mulpd   %xmm0, %xmm2
        movaps  -12 * SIZE(AA), %xmm0
        addpd   %xmm2, %xmm5

        pshufd  $0x44, %xmm1, %xmm2
        mulpd   %xmm0, %xmm2
        movaps  -10 * SIZE(AA), %xmm0
        addpd   %xmm2, %xmm4

        pshufd  $0xee, %xmm1, %xmm2
        movaps  -12 * SIZE(BB), %xmm1
        mulpd   %xmm0, %xmm2
        movaps  -8 * SIZE(AA), %xmm0
        addpd   %xmm2, %xmm5

        PREFETCH  (PREFETCHSIZE + 8) * SIZE(AA)

        pshufd  $0x44, %xmm1, %xmm2
        mulpd   %xmm0, %xmm2
        movaps  -6 * SIZE(AA), %xmm0
        addpd   %xmm2, %xmm4

        pshufd  $0xee, %xmm1, %xmm2
        movaps  -10 * SIZE(BB), %xmm1
        mulpd   %xmm0, %xmm2
        movaps  -4 * SIZE(AA), %xmm0
        addpd   %xmm2, %xmm5

        pshufd  $0x44, %xmm1, %xmm2
        mulpd   %xmm0, %xmm2
        movaps  -2 * SIZE(AA), %xmm0
        addpd   %xmm2, %xmm4

        pshufd  $0xee, %xmm1, %xmm2
        movaps  -8 * SIZE(BB), %xmm1
        mulpd   %xmm0, %xmm2
        movaps   0 * SIZE(AA), %xmm0
        addpd   %xmm2, %xmm5

        subl    $-16 * SIZE, AA
        subl    $ -8 * SIZE, BB

        subl    $1, %eax
        jne     .L52
        ALIGN_4

.L55:
#ifndef TRMMKERNEL
        movl    K, %eax
#else
        movl    KKK, %eax
#endif
        andl    $7, %eax
        BRANCH
        je      .L58
        ALIGN_4

.L56:
        pshufd  $0x44, %xmm1, %xmm2
        movsd   -15 * SIZE(BB), %xmm1
        mulpd   %xmm0, %xmm2
        movaps  -14 * SIZE(AA), %xmm0
        addpd   %xmm2, %xmm4

        addl    $2 * SIZE, AA
        addl    $1 * SIZE, BB
        decl    %eax
        jg      .L56
        ALIGN_4

.L58:
        movups  ALPHA, %xmm3

        addpd   %xmm5, %xmm4

        movsd   0 * SIZE(C1), %xmm0
        movhps  1 * SIZE(C1), %xmm0
        movsd   2 * SIZE(C1), %xmm1
        movhps  3 * SIZE(C1), %xmm1

        pshufd  $0x44, %xmm4, %xmm2
        unpckhpd %xmm4, %xmm4

        mulpd   %xmm3, %xmm2
        addpd   %xmm2, %xmm0
        mulpd   %xmm3, %xmm4
        addpd   %xmm4, %xmm1

        movlps  %xmm0, 0 * SIZE(C1)
        movhps  %xmm0, 1 * SIZE(C1)
        movlps  %xmm1, 2 * SIZE(C1)
        movhps  %xmm1, 3 * SIZE(C1)

        addl    $4 * SIZE, C1
        decl    I
        jg      .L51
        ALIGN_4

.L60:
        movl    M, I
        testl   $1, I
        jle     .L999

#if !defined(TRMMKERNEL) || \
    (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))

        movl    B, BB
#else
        movl    B, BB
        movl    KK, %eax
        leal    (, %eax, SIZE), %eax
        addl    %eax, AA
        addl    %eax, BB
#endif

        movaps  -16 * SIZE(AA), %xmm0
        pxor    %xmm4, %xmm4
        movaps  -16 * SIZE(BB), %xmm2
        pxor    %xmm5, %xmm5

#ifndef TRMMKERNEL
        movl    K, %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
        movl    K, %eax
        subl    KK, %eax
        movl    %eax, KKK
#else
        movl    KK, %eax
#ifdef LEFT
        addl    $1, %eax
#else
        addl    $1, %eax
#endif
        movl    %eax, KKK
#endif
        sarl    $3, %eax
        je      .L65
        ALIGN_4

.L62:
        PREFETCH  (PREFETCHSIZE + 0) * SIZE(AA)

        mulpd   %xmm0, %xmm2
        movaps  -14 * SIZE(AA), %xmm0
        addpd   %xmm2, %xmm4
        movaps  -14 * SIZE(BB), %xmm2

        mulpd   %xmm0, %xmm2
        movaps  -12 * SIZE(AA), %xmm0
        addpd   %xmm2, %xmm5
        movaps  -12 * SIZE(BB), %xmm2

        mulpd   %xmm0, %xmm2
        movaps  -10 * SIZE(AA), %xmm0
        addpd   %xmm2, %xmm4
        movaps  -10 * SIZE(BB), %xmm2

        mulpd   %xmm0, %xmm2
        movaps  -8 * SIZE(AA), %xmm0
        addpd   %xmm2, %xmm5
        movaps  -8 * SIZE(BB), %xmm2

        subl    $-8 * SIZE, AA
        subl    $-8 * SIZE, BB

        subl    $1, %eax
        jne     .L62
        ALIGN_4

.L65:
#ifndef TRMMKERNEL
        movl    K, %eax
#else
        movl    KKK, %eax
#endif
        andl    $7, %eax
        BRANCH
        je      .L68
        ALIGN_4

.L66:
        mulsd   %xmm0, %xmm2
        movsd   -15 * SIZE(AA), %xmm0
        addsd   %xmm2, %xmm4
        movsd   -15 * SIZE(BB), %xmm2

        addl    $1 * SIZE, AA
        addl    $1 * SIZE, BB
        decl    %eax
        jg      .L66
        ALIGN_4

.L68:
        movups  ALPHA, %xmm3

        addpd   %xmm5, %xmm4

        haddpd  %xmm4, %xmm4

        movsd   0 * SIZE(C1), %xmm0
        movhps  1 * SIZE(C1), %xmm0

        pshufd  $0x44, %xmm4, %xmm2

        mulpd   %xmm3, %xmm2
        addpd   %xmm2, %xmm0

        movlps  %xmm0, 0 * SIZE(C1)
        movhps  %xmm0, 1 * SIZE(C1)
        ALIGN_4

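/* .L999: restore callee-saved registers, release the frame, and    */
/* return.                                                          */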
.L999:
        popl    %ebx
        popl    %esi
        popl    %edi
        popl    %ebp

        addl    $ARGS, %esp
        ret

        EPILOGUE