loongarch64: Refine axpby optimization functions.

Hao Chen authored on 2023-12-29 15:08:10 +08:00; committed by Shiyou Yin
parent 1ec5dded43
commit edabb93668
6 changed files with 931 additions and 1281 deletions
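For orientation, the kernels touched below all implement the BLAS-extension axpby operation, y := alpha*x + beta*y over strided vectors; alpha == 0 and beta == 0 get their own branches so an operand is never read unnecessarily. A minimal scalar sketch in C, for reference only (the function name and signature are illustrative, not the OpenBLAS interface):

```c
/* Scalar reference for what the LSX/LASX kernels below vectorize.
 * Illustrative only: name and signature are not OpenBLAS', and the
 * real kernels branch separately on alpha == 0 and beta == 0. */
static void axpby_ref(long n, float alpha, const float *x, long incx,
                      float beta, float *y, long incy)
{
    for (long i = 0; i < n; i++)
        y[i * incy] = alpha * x[i * incx] + beta * y[i * incy];
}
```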


@ -54,8 +54,8 @@ DAXPYKERNEL = axpy_lsx.S
CAXPYKERNEL = caxpy_lsx.S
ZAXPYKERNEL = caxpy_lsx.S
-SAXPBYKERNEL = saxpby_lsx.S
+SAXPBYKERNEL = axpby_lsx.S
-DAXPBYKERNEL = daxpby_lsx.S
+DAXPBYKERNEL = axpby_lsx.S
SSUMKERNEL = sum_lsx.S
DSUMKERNEL = sum_lsx.S


@ -54,8 +54,8 @@ DAXPYKERNEL = axpy_lasx.S
CAXPYKERNEL = caxpy_lasx.S
ZAXPYKERNEL = caxpy_lasx.S
-SAXPBYKERNEL = saxpby_lasx.S
+SAXPBYKERNEL = axpby_lasx.S
-DAXPBYKERNEL = daxpby_lasx.S
+DAXPBYKERNEL = axpby_lasx.S
SSUMKERNEL = sum_lasx.S
DSUMKERNEL = sum_lasx.S
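Both hunks route the single- and double-precision AXPBY kernels to one shared source; the element width is then chosen at build time, which is what the #ifdef DOUBLE blocks in the new axpby sources key off. A hedged sketch of the width-generic macro pattern this relies on; the real definitions live in OpenBLAS' loongarch64 common headers, and the exact expansions shown here are assumptions inferred from the replaced instructions in the diff:

```c
/* Assumed shape of the FLOAT-width helper macros (CMPEQ, MTG, LD, ...)
 * used by the unified kernels; expansions below are illustrative. */
#ifdef DOUBLE
#define CMPEQ fcmp.ceq.d   /* floating-point equality compare */
#define MTG   movfr2gr.d   /* move FP register to general register */
#else
#define CMPEQ fcmp.ceq.s
#define MTG   movfr2gr.s
#endif
```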


@ -1,6 +1,33 @@
/***************************************************************************
Copyright (c) 2023, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#define ASSEMBLER
#include "common.h" #include "common.h"
#define N $r4 #define N $r4
#define ALPHA $f0 #define ALPHA $f0
#define X $r5 #define X $r5
@ -32,16 +59,22 @@
bge $r0, N, .L999
li.d TEMP, 1
movgr2fr.d a1, $r0
ffint.s.l a1, a1
slli.d TEMP, TEMP, BASE_SHIFT
slli.d INCX, INCX, BASE_SHIFT
slli.d INCY, INCY, BASE_SHIFT
MTG t1, ALPHA
MTG t2, BETA
MTG t3, a1
#ifdef DOUBLE
xvreplgr2vr.d VXA, t1
xvreplgr2vr.d VXB, t2
xvreplgr2vr.d VXZ, t3
#else
xvreplgr2vr.w VXA, t1
xvreplgr2vr.w VXB, t2
xvreplgr2vr.w VXZ, t3
#endif
srai.d I, N, 3
bne INCX, TEMP, .L20
bne INCY, TEMP, .L12 // INCX==1 and INCY!=1
@ -52,21 +85,22 @@
.L11:
bge $r0, I, .L997
CMPEQ $fcc0, ALPHA, a1
bcnez $fcc0, .L110
CMPEQ $fcc0, BETA, a1
bcnez $fcc0, .L112 // ALPHA!=0 BETA==0
b .L111 // ALPHA!=0 BETA!=0
.align 3
.L110:
CMPEQ $fcc0, BETA, a1
bcnez $fcc0, .L114 // ALPHA==0 BETA==0
b .L113 // ALPHA==0 BETA!=0
.align 3
.L111: // ALPHA!=0 BETA!=0
xvld VX0, X, 0 * SIZE
#ifdef DOUBLE
xvld VX2, Y, 0 * SIZE
xvld VX1, X, 4 * SIZE
xvld VX3, Y, 4 * SIZE
@ -77,6 +111,13 @@
addi.d I, I, -1
xvst VX2, Y, 0 * SIZE
xvst VX3, Y, 4 * SIZE
#else
xvld VX2, Y, 0 * SIZE
xvfmul.s VX0, VX0, VXA
addi.d I, I, -1
xvfmadd.s VX2, VX2, VXB, VX0
xvst VX2, Y, 0 * SIZE
#endif
addi.d X, X, 8 * SIZE
addi.d Y, Y, 8 * SIZE
blt $r0, I, .L111
@ -85,34 +126,46 @@
.L112: // ALPHA!=0 BETA==0
xvld VX0, X, 0 * SIZE
#ifdef DOUBLE
xvld VX1, X, 4 * SIZE
xvfmul.d VX0, VX0, VXA
xvfmul.d VX1, VX1, VXA
xvst VX0, Y, 0 * SIZE
xvst VX1, Y, 4 * SIZE
#else
xvfmul.s VX0, VX0, VXA
addi.d I, I, -1
xvst VX0, Y, 0 * SIZE
#endif
addi.d X, X, 8 * SIZE
addi.d Y, Y, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L112
b .L997
.align 3
.L113: // ALPHA==0 BETA!=0
xvld VX2, Y, 0 * SIZE
#ifdef DOUBLE
xvld VX3, Y, 4 * SIZE
xvfmul.d VX2, VX2, VXB
xvfmul.d VX3, VX3, VXB
xvst VX2, Y, 0 * SIZE
xvst VX3, Y, 4 * SIZE
#else
xvfmul.s VX2, VX2, VXB
xvst VX2, Y, 0 * SIZE
#endif
addi.d I, I, -1
addi.d Y, Y, 8 * SIZE
blt $r0, I, .L113
b .L997
.align 3
.L114: // ALPHA==0 BETA==0
xvst VXZ, Y, 0 * SIZE
#ifdef DOUBLE
xvst VXZ, Y, 4 * SIZE
#endif
addi.d Y, Y, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L114
@ -122,21 +175,22 @@
.L12: // INCX==1 and INCY!=1
bge $r0, I, .L997
move YY, Y
CMPEQ $fcc0, ALPHA, a1
bcnez $fcc0, .L120
CMPEQ $fcc0, BETA, a1
bcnez $fcc0, .L122 // ALPHA!=0 BETA==0
b .L121 // ALPHA!=0 BETA!=0
.align 3
.L120:
CMPEQ $fcc0, BETA, a1
bcnez $fcc0, .L124 // ALPHA==0 BETA==0
b .L123 // ALPHA==0 BETA!=0
.align 3
.L121: // ALPHA!=0 BETA!=0
xvld VX0, X, 0 * SIZE
#ifdef DOUBLE
ld.d t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.d t2, Y, 0 * SIZE
@ -182,14 +236,59 @@
xvstelm.d VX3, YY, 0, 2
add.d YY, YY, INCY
xvstelm.d VX3, YY, 0, 3
#else
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
add.d Y, Y, INCY
xvinsgr2vr.w VX2, t1, 0
xvinsgr2vr.w VX2, t2, 1
xvinsgr2vr.w VX2, t3, 2
xvinsgr2vr.w VX2, t4, 3
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
xvinsgr2vr.w VX2, t1, 4
xvinsgr2vr.w VX2, t2, 5
xvinsgr2vr.w VX2, t3, 6
xvinsgr2vr.w VX2, t4, 7
add.d Y, Y, INCY
xvfmul.s VX0, VX0, VXA
xvfmadd.s VX2, VX2, VXB, VX0
xvstelm.w VX2, YY, 0, 0
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 1
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 2
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 3
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 4
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 5
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 6
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 7
#endif
add.d YY, YY, INCY
addi.d X, X, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L121
b .L997
.align 3
.L122: // ALPHA!=0 BETA==0
xvld VX0, X, 0 * SIZE
#ifdef DOUBLE
xvld VX1, X, 4 * SIZE
xvfmul.d VX0, VX0, VXA
xvfmul.d VX1, VX1, VXA
@ -208,14 +307,33 @@
xvstelm.d VX1, YY, 0, 2
add.d YY, YY, INCY
xvstelm.d VX1, YY, 0, 3
#else
xvfmul.s VX0, VX0, VXA
addi.d I, I, -1
xvstelm.w VX0, YY, 0, 0
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 1
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 2
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 3
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 4
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 5
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 6
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 7
#endif
add.d YY, YY, INCY
addi.d X, X, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L122
b .L997
.align 3
.L123: // ALPHA==0 BETA!=0
#ifdef DOUBLE
ld.d t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.d t2, Y, 0 * SIZE
@ -250,7 +368,6 @@
xvstelm.d VX2, YY, 0, 3
add.d YY, YY, INCY
xvfmul.d VX3, VX3, VXB
addi.d I, I, -1
xvstelm.d VX3, YY, 0, 0
add.d YY, YY, INCY
xvstelm.d VX3, YY, 0, 1
@ -258,12 +375,56 @@
xvstelm.d VX3, YY, 0, 2
add.d YY, YY, INCY
xvstelm.d VX3, YY, 0, 3
#else
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
add.d Y, Y, INCY
xvinsgr2vr.w VX2, t1, 0
xvinsgr2vr.w VX2, t2, 1
xvinsgr2vr.w VX2, t3, 2
xvinsgr2vr.w VX2, t4, 3
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
xvinsgr2vr.w VX2, t1, 4
xvinsgr2vr.w VX2, t2, 5
xvinsgr2vr.w VX2, t3, 6
xvinsgr2vr.w VX2, t4, 7
add.d Y, Y, INCY
xvfmul.s VX2, VX2, VXB
xvstelm.w VX2, YY, 0, 0
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 1
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 2
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 3
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 4
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 5
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 6
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 7
#endif
add.d YY, YY, INCY
addi.d I, I, -1
blt $r0, I, .L123
b .L997
.align 3
.L124: // ALPHA==0 BETA==0
#ifdef DOUBLE
xvstelm.d VXZ, YY, 0, 0
add.d YY, YY, INCY
xvstelm.d VXZ, YY, 0, 1
@ -279,6 +440,23 @@
xvstelm.d VXZ, YY, 0, 2
add.d YY, YY, INCY
xvstelm.d VXZ, YY, 0, 3
#else
xvstelm.w VXZ, YY, 0, 0
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 1
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 2
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 3
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 4
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 5
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 6
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 7
#endif
add.d YY, YY, INCY
addi.d I, I, -1
blt $r0, I, .L124
@ -287,21 +465,22 @@
.L21:// INCX!=1 and INCY==1
bge $r0, I, .L997
CMPEQ $fcc0, ALPHA, a1
bcnez $fcc0, .L210
CMPEQ $fcc0, BETA, a1
bcnez $fcc0, .L212 // ALPHA!=0 BETA==0
b .L211 // ALPHA!=0 BETA!=0
.align 3
.L210:
CMPEQ $fcc0, BETA, a1
bcnez $fcc0, .L214 // ALPHA==0 BETA==0
b .L213 // ALPHA==0 BETA!=0
.align 3
.L211: // ALPHA!=0 BETA!=0
xvld VX2, Y, 0 * SIZE
#ifdef DOUBLE
ld.d t1, X, 0 * SIZE
add.d X, X, INCX
ld.d t2, X, 0 * SIZE
@ -334,12 +513,43 @@
xvfmadd.d VX3, VX3, VXB, VX1
addi.d I, I, -1
xvst VX3, Y, 4 * SIZE
#else
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
add.d X, X, INCX
xvinsgr2vr.w VX0, t1, 0
xvinsgr2vr.w VX0, t2, 1
xvinsgr2vr.w VX0, t3, 2
xvinsgr2vr.w VX0, t4, 3
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
xvinsgr2vr.w VX0, t1, 4
xvinsgr2vr.w VX0, t2, 5
xvinsgr2vr.w VX0, t3, 6
xvinsgr2vr.w VX0, t4, 7
add.d X, X, INCX
xvfmul.s VX0, VXA, VX0
xvfmadd.s VX2, VX2, VXB, VX0
addi.d I, I, -1
xvst VX2, Y, 0 * SIZE
#endif
addi.d Y, Y, 8 * SIZE
blt $r0, I, .L211
b .L997
.align 3
.L212: // ALPHA!=0 BETA==0
#ifdef DOUBLE
ld.d t1, X, 0 * SIZE
add.d X, X, INCX
ld.d t2, X, 0 * SIZE
@ -369,6 +579,35 @@
xvfmul.d VX1, VX1, VXA
addi.d I, I, -1
xvst VX1, Y, 4 * SIZE
#else
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
add.d X, X, INCX
xvinsgr2vr.w VX0, t1, 0
xvinsgr2vr.w VX0, t2, 1
xvinsgr2vr.w VX0, t3, 2
xvinsgr2vr.w VX0, t4, 3
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
xvinsgr2vr.w VX0, t1, 4
xvinsgr2vr.w VX0, t2, 5
xvinsgr2vr.w VX0, t3, 6
xvinsgr2vr.w VX0, t4, 7
add.d X, X, INCX
xvfmul.s VX0, VXA, VX0
addi.d I, I, -1
xvst VX0, Y, 0 * SIZE
#endif
addi.d Y, Y, 8 * SIZE
blt $r0, I, .L212
b .L997
@ -376,20 +615,27 @@
.L213: // ALPHA==0 BETA!=0
xvld VX2, Y, 0 * SIZE
#ifdef DOUBLE
xvld VX3, Y, 4 * SIZE
xvfmul.d VX2, VX2, VXB
xvfmul.d VX3, VX3, VXB
addi.d I, I, -1
xvst VX2, Y, 0 * SIZE
xvst VX3, Y, 4 * SIZE
#else
xvfmul.s VX2, VX2, VXB
xvst VX2, Y, 0 * SIZE
#endif
addi.d Y, Y, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L213
b .L997
.align 3
.L214: // ALPHA==0 BETA==0
xvst VXZ, Y, 0 * SIZE
#ifdef DOUBLE
xvst VXZ, Y, 4 * SIZE
#endif
addi.d Y, Y, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L214
@ -399,20 +645,21 @@
.L22:
bge $r0, I, .L997
move YY, Y
CMPEQ $fcc0, ALPHA, a1
bcnez $fcc0, .L220
CMPEQ $fcc0, BETA, a1
bcnez $fcc0, .L222 // ALPHA!=0 BETA==0
b .L221 // ALPHA!=0 BETA!=0
.align 3
.L220:
CMPEQ $fcc0, BETA, a1
bcnez $fcc0, .L224 // ALPHA==0 BETA==0
b .L223 // ALPHA==0 BETA!=0
.align 3
.L221: // ALPHA!=0 BETA!=0
#ifdef DOUBLE
ld.d t1, X, 0 * SIZE
add.d X, X, INCX
ld.d t2, X, 0 * SIZE
@ -481,12 +728,81 @@
xvstelm.d VX3, YY, 0, 2
add.d YY, YY, INCY
xvstelm.d VX3, YY, 0, 3
#else
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
add.d X, X, INCX
xvinsgr2vr.w VX0, t1, 0
xvinsgr2vr.w VX0, t2, 1
xvinsgr2vr.w VX0, t3, 2
xvinsgr2vr.w VX0, t4, 3
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
xvinsgr2vr.w VX0, t1, 4
xvinsgr2vr.w VX0, t2, 5
xvinsgr2vr.w VX0, t3, 6
xvinsgr2vr.w VX0, t4, 7
add.d X, X, INCX
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
xvinsgr2vr.w VX2, t1, 0
xvinsgr2vr.w VX2, t2, 1
xvinsgr2vr.w VX2, t3, 2
xvinsgr2vr.w VX2, t4, 3
add.d Y, Y, INCY
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
xvinsgr2vr.w VX2, t1, 4
xvinsgr2vr.w VX2, t2, 5
xvinsgr2vr.w VX2, t3, 6
xvinsgr2vr.w VX2, t4, 7
add.d Y, Y, INCY
xvfmul.s VX0, VX0, VXA
xvfmadd.s VX2, VX2, VXB, VX0
addi.d I, I, -1
xvstelm.w VX2, YY, 0, 0
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 1
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 2
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 3
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 4
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 5
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 6
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 7
#endif
add.d YY, YY, INCY
blt $r0, I, .L221
b .L997
.align 3
.L222: // ALPHA!=0 BETA==0
#ifdef DOUBLE
ld.d t1, X, 0 * SIZE
add.d X, X, INCX
ld.d t2, X, 0 * SIZE
@ -529,12 +845,56 @@
xvstelm.d VX1, YY, 0, 2
add.d YY, YY, INCY
xvstelm.d VX1, YY, 0, 3
#else
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
xvinsgr2vr.w VX0, t1, 0
xvinsgr2vr.w VX0, t2, 1
xvinsgr2vr.w VX0, t3, 2
xvinsgr2vr.w VX0, t4, 3
add.d X, X, INCX
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
xvinsgr2vr.w VX0, t1, 4
xvinsgr2vr.w VX0, t2, 5
xvinsgr2vr.w VX0, t3, 6
xvinsgr2vr.w VX0, t4, 7
add.d X, X, INCX
xvfmul.s VX0, VX0, VXA
addi.d I, I, -1
xvstelm.w VX0, YY, 0, 0
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 1
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 2
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 3
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 4
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 5
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 6
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 7
#endif
add.d YY, YY, INCY
blt $r0, I, .L222
b .L997
.align 3
.L223: // ALPHA==0 BETA!=0
#ifdef DOUBLE
ld.d t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.d t2, Y, 0 * SIZE
@ -577,12 +937,56 @@
xvstelm.d VX3, YY, 0, 2
add.d YY, YY, INCY
xvstelm.d VX3, YY, 0, 3
#else
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
add.d Y, Y, INCY
xvinsgr2vr.w VX2, t1, 0
xvinsgr2vr.w VX2, t2, 1
xvinsgr2vr.w VX2, t3, 2
xvinsgr2vr.w VX2, t4, 3
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
xvinsgr2vr.w VX2, t1, 4
xvinsgr2vr.w VX2, t2, 5
xvinsgr2vr.w VX2, t3, 6
xvinsgr2vr.w VX2, t4, 7
add.d Y, Y, INCY
xvfmul.s VX2, VX2, VXB
addi.d I, I, -1
xvstelm.w VX2, YY, 0, 0
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 1
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 2
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 3
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 4
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 5
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 6
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 7
#endif
add.d YY, YY, INCY
blt $r0, I, .L223
b .L997
.align 3
.L224: // ALPHA==0 BETA==0
#ifdef DOUBLE
xvstelm.d VXZ, YY, 0, 0
add.d YY, YY, INCY
xvstelm.d VXZ, YY, 0, 1
@ -598,6 +1002,23 @@
xvstelm.d VXZ, YY, 0, 2
add.d YY, YY, INCY
xvstelm.d VXZ, YY, 0, 3
#else
xvstelm.w VXZ, YY, 0, 0
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 1
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 2
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 3
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 4
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 5
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 6
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 7
#endif
add.d YY, YY, INCY
addi.d I, I, -1
blt $r0, I, .L224
@ -610,12 +1031,12 @@
.align 3
.L998:
LD $f12, X, 0 * SIZE
LD $f13, Y, 0 * SIZE
addi.d I, I, -1
MUL $f12, $f12, ALPHA
MADD $f13, $f13, BETA, $f12
ST $f13, Y, 0 * SIZE
add.d X, X, INCX
add.d Y, Y, INCY
blt $r0, I, .L998
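The kernel above processes eight elements per iteration (srai.d I, N, 3) and finishes the remaining N & 7 elements in the scalar .L998 loop through the width-generic LD/MUL/MADD/ST macros. The same block-plus-tail structure, written out as plain C for the unit-stride case (names are illustrative, not the kernel's interface):

```c
/* Block-plus-tail structure of the vector kernel, unit stride assumed. */
static void axpby_blocked(long n, float alpha, const float *x,
                          float beta, float *y)
{
    long i = n >> 3;                       /* srai.d I, N, 3 */
    long k = 0;

    for (; i > 0; i--, k += 8)             /* vector body, 8 per pass */
        for (int j = 0; j < 8; j++)
            y[k + j] = alpha * x[k + j] + beta * y[k + j];

    for (long r = n & 7; r > 0; r--, k++)  /* scalar tail (.L998) */
        y[k] = alpha * x[k] + beta * y[k];
}
```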


@ -1,6 +1,33 @@
/***************************************************************************
Copyright (c) 2023, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#define ASSEMBLER
#include "common.h" #include "common.h"
#define N $r4 #define N $r4
#define ALPHA $f0 #define ALPHA $f0
#define X $r5 #define X $r5
@ -32,16 +59,22 @@
bge $r0, N, .L999
li.d TEMP, 1
movgr2fr.d a1, $r0
ffint.s.l a1, a1
slli.d TEMP, TEMP, BASE_SHIFT
slli.d INCX, INCX, BASE_SHIFT
slli.d INCY, INCY, BASE_SHIFT
MTG t1, ALPHA
MTG t2, BETA
MTG t3, a1
#ifdef DOUBLE
vreplgr2vr.d VXA, t1
vreplgr2vr.d VXB, t2
vreplgr2vr.d VXZ, t3
#else
vreplgr2vr.w VXA, t1
vreplgr2vr.w VXB, t2
vreplgr2vr.w VXZ, t3
#endif
srai.d I, N, 3
bne INCX, TEMP, .L20
bne INCY, TEMP, .L12 // INCX==1 and INCY!=1
@ -52,15 +85,15 @@
.L11:
bge $r0, I, .L997
CMPEQ $fcc0, ALPHA, a1
bcnez $fcc0, .L110
CMPEQ $fcc0, BETA, a1
bcnez $fcc0, .L112 // ALPHA!=0 BETA==0
b .L111 // ALPHA!=0 BETA!=0
.align 3
.L110:
CMPEQ $fcc0, BETA, a1
bcnez $fcc0, .L114 // ALPHA==0 BETA==0
b .L113 // ALPHA==0 BETA!=0
.align 3
@ -68,6 +101,7 @@
.L111: // ALPHA!=0 BETA!=0
vld VX0, X, 0 * SIZE
vld VX2, Y, 0 * SIZE
#ifdef DOUBLE
vld VX1, X, 2 * SIZE
vld VX3, Y, 2 * SIZE
vfmul.d VX0, VX0, VXA
@ -86,6 +120,16 @@
vfmadd.d VX3, VX3, VXB, VX1
vst VX2, Y, 4 * SIZE
vst VX3, Y, 6 * SIZE
#else
vld VX1, X, 4 * SIZE
vld VX3, Y, 4 * SIZE
vfmul.s VX0, VX0, VXA
vfmul.s VX1, VX1, VXA
vfmadd.s VX2, VX2, VXB, VX0
vfmadd.s VX3, VX3, VXB, VX1
vst VX2, Y, 0 * SIZE
vst VX3, Y, 4 * SIZE
#endif
addi.d X, X, 8 * SIZE
addi.d Y, Y, 8 * SIZE
addi.d I, I, -1
@ -95,6 +139,7 @@
.L112: // ALPHA!=0 BETA==0
vld VX0, X, 0 * SIZE
#ifdef DOUBLE
vld VX1, X, 2 * SIZE
vfmul.d VX0, VX0, VXA
vfmul.d VX1, VX1, VXA
@ -106,6 +151,13 @@
vfmul.d VX3, VX3, VXA
vst VX2, Y, 4 * SIZE
vst VX3, Y, 6 * SIZE
#else
vld VX1, X, 4 * SIZE
vfmul.s VX0, VX0, VXA
vfmul.s VX1, VX1, VXA
vst VX0, Y, 0 * SIZE
vst VX1, Y, 4 * SIZE
#endif
addi.d X, X, 8 * SIZE
addi.d Y, Y, 8 * SIZE
addi.d I, I, -1
@ -113,7 +165,8 @@
b .L997
.align 3
.L113: // ALPHA==0 BETA!=0
#ifdef DOUBLE
vld VX0, Y, 0 * SIZE
vld VX1, Y, 2 * SIZE
vfmul.d VX0, VX0, VXB
@ -126,6 +179,14 @@
vfmul.d VX3, VX3, VXB
vst VX2, Y, 4 * SIZE
vst VX3, Y, 6 * SIZE
#else
vld VX2, Y, 0 * SIZE
vld VX3, Y, 4 * SIZE
vfmul.s VX2, VX2, VXB
vfmul.s VX3, VX3, VXB
vst VX2, Y, 0 * SIZE
vst VX3, Y, 4 * SIZE
#endif
addi.d Y, Y, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L113
@ -134,9 +195,13 @@
.L114: // ALPHA==0 BETA==0
vst VXZ, Y, 0 * SIZE
#ifdef DOUBLE
vst VXZ, Y, 2 * SIZE
vst VXZ, Y, 4 * SIZE
vst VXZ, Y, 6 * SIZE
#else
vst VXZ, Y, 4 * SIZE
#endif
addi.d Y, Y, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L114
@ -146,21 +211,22 @@
.L12: // INCX==1 and INCY!=1
bge $r0, I, .L997
move YY, Y
CMPEQ $fcc0, ALPHA, a1
bcnez $fcc0, .L120
CMPEQ $fcc0, BETA, a1
bcnez $fcc0, .L122 // ALPHA!=0 BETA==0
b .L121 // ALPHA!=0 BETA!=0
.align 3
.L120:
CMPEQ $fcc0, BETA, a1
bcnez $fcc0, .L124 // ALPHA==0 BETA==0
b .L123 // ALPHA==0 BETA!=0
.align 3
.L121: // ALPHA!=0 BETA!=0
vld VX0, X, 0 * SIZE
#ifdef DOUBLE
ld.d t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.d t2, Y, 0 * SIZE
@ -212,6 +278,53 @@
vstelm.d VX3, YY, 0, 0
add.d YY, YY, INCY
vstelm.d VX3, YY, 0, 1
#else
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
vinsgr2vr.w VX2, t1, 0
vinsgr2vr.w VX2, t2, 1
vinsgr2vr.w VX2, t3, 2
vinsgr2vr.w VX2, t4, 3
add.d Y, Y, INCY
vfmul.s VX0, VX0, VXA
vld VX1, X, 4 * SIZE
vfmadd.s VX2, VX2, VXB, VX0
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
add.d Y, Y, INCY
vinsgr2vr.w VX3, t1, 0
vinsgr2vr.w VX3, t2, 1
vinsgr2vr.w VX3, t3, 2
vinsgr2vr.w VX3, t4, 3
vstelm.w VX2, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VX2, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VX2, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VX2, YY, 0, 3
add.d YY, YY, INCY
vfmul.s VX1, VX1, VXA
vfmadd.s VX3, VX3, VXB, VX1
addi.d I, I, -1
vstelm.w VX3, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VX3, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VX3, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VX3, YY, 0, 3
#endif
add.d YY, YY, INCY
addi.d X, X, 8 * SIZE
blt $r0, I, .L121
@ -220,6 +333,7 @@
.L122: // ALPHA!=0 BETA==0
vld VX0, X, 0 * SIZE
#ifdef DOUBLE
vld VX1, X, 2 * SIZE
vfmul.d VX0, VX0, VXA
vfmul.d VX1, VX1, VXA
@ -242,6 +356,26 @@
vstelm.d VX1, YY, 0, 0
add.d YY, YY, INCY
vstelm.d VX1, YY, 0, 1
#else
vld VX1, X, 4 * SIZE
vfmul.s VX0, VX0, VXA
vfmul.s VX1, VX1, VXA
vstelm.w VX0, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VX0, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VX0, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VX0, YY, 0, 3
add.d YY, YY, INCY
vstelm.w VX1, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VX1, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VX1, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VX1, YY, 0, 3
#endif
add.d YY, YY, INCY
addi.d X, X, 8 * SIZE
addi.d I, I, -1
@ -250,6 +384,7 @@
.align 3
.L123: // ALPHA==0 BETA!=0
#ifdef DOUBLE
ld.d t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.d t2, Y, 0 * SIZE
@ -294,12 +429,57 @@
vstelm.d VX3, YY, 0, 0
add.d YY, YY, INCY
vstelm.d VX3, YY, 0, 1
#else
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
vinsgr2vr.w VX2, t1, 0
vinsgr2vr.w VX2, t2, 1
vinsgr2vr.w VX2, t3, 2
vinsgr2vr.w VX2, t4, 3
add.d Y, Y, INCY
vfmul.s VX2, VX2, VXB
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
add.d Y, Y, INCY
vinsgr2vr.w VX3, t1, 0
vinsgr2vr.w VX3, t2, 1
vinsgr2vr.w VX3, t3, 2
vinsgr2vr.w VX3, t4, 3
vstelm.w VX2, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VX2, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VX2, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VX2, YY, 0, 3
add.d YY, YY, INCY
vfmul.s VX3, VX3, VXB
addi.d I, I, -1
vstelm.w VX3, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VX3, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VX3, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VX3, YY, 0, 3
#endif
add.d YY, YY, INCY
blt $r0, I, .L123
b .L997
.align 3
.L124: // ALPHA==0 BETA==0
#ifdef DOUBLE
vstelm.d VXZ, YY, 0, 0
add.d YY, YY, INCY
vstelm.d VXZ, YY, 0, 1
@ -315,6 +495,23 @@
vstelm.d VXZ, YY, 0, 0
add.d YY, YY, INCY
vstelm.d VXZ, YY, 0, 1
#else
vstelm.w VXZ, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 3
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 3
#endif
add.d YY, YY, INCY
addi.d I, I, -1
blt $r0, I, .L124
@ -323,21 +520,22 @@
.L21:// INCX!=1 and INCY==1
bge $r0, I, .L997
CMPEQ $fcc0, ALPHA, a1
bcnez $fcc0, .L210
CMPEQ $fcc0, BETA, a1
bcnez $fcc0, .L212 // ALPHA!=0 BETA==0
b .L211 // ALPHA!=0 BETA!=0
.align 3
.L210:
CMPEQ $fcc0, BETA, a1
bcnez $fcc0, .L214 // ALPHA==0 BETA==0
b .L213 // ALPHA==0 BETA!=0
.align 3
.L211: // ALPHA!=0 BETA!=0
vld VX2, Y, 0 * SIZE
#ifdef DOUBLE
ld.d t1, X, 0 * SIZE
add.d X, X, INCX
ld.d t2, X, 0 * SIZE
@ -378,12 +576,47 @@
vfmadd.d VX3, VX3, VXB, VX1
addi.d I, I, -1
vst VX3, Y, 6 * SIZE
#else
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
vinsgr2vr.w VX0, t1, 0
vinsgr2vr.w VX0, t2, 1
vinsgr2vr.w VX0, t3, 2
vinsgr2vr.w VX0, t4, 3
add.d X, X, INCX
vfmul.s VX0, VXA, VX0
vld VX3, Y, 4 * SIZE
vfmadd.s VX2, VX2, VXB, VX0
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
add.d X, X, INCX
vinsgr2vr.w VX1, t1, 0
vinsgr2vr.w VX1, t2, 1
vinsgr2vr.w VX1, t3, 2
vinsgr2vr.w VX1, t4, 3
vst VX2, Y, 0 * SIZE
vfmul.s VX1, VX1, VXA
vfmadd.s VX3, VX3, VXB, VX1
addi.d I, I, -1
vst VX3, Y, 4 * SIZE
#endif
addi.d Y, Y, 8 * SIZE
blt $r0, I, .L211
b .L997
.align 3
.L212: // ALPHA!=0 BETA==0
#ifdef DOUBLE
ld.d t1, X, 0 * SIZE
add.d X, X, INCX
ld.d t2, X, 0 * SIZE
@ -417,6 +650,37 @@
vfmul.d VX1, VX1, VXA
addi.d I, I, -1
vst VX1, Y, 6 * SIZE
#else
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
vinsgr2vr.w VX0, t1, 0
vinsgr2vr.w VX0, t2, 1
vinsgr2vr.w VX0, t3, 2
vinsgr2vr.w VX0, t4, 3
add.d X, X, INCX
vfmul.s VX0, VXA, VX0
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
add.d X, X, INCX
vinsgr2vr.w VX1, t1, 0
vinsgr2vr.w VX1, t2, 1
vinsgr2vr.w VX1, t3, 2
vinsgr2vr.w VX1, t4, 3
vst VX0, Y, 0 * SIZE
vfmul.s VX1, VX1, VXA
addi.d I, I, -1
vst VX1, Y, 4 * SIZE
#endif
addi.d Y, Y, 8 * SIZE
blt $r0, I, .L212
b .L997
@ -424,6 +688,7 @@
.L213: // ALPHA==0 BETA!=0
vld VX2, Y, 0 * SIZE
#ifdef DOUBLE
vld VX3, Y, 2 * SIZE
vfmul.d VX2, VX2, VXB
vfmul.d VX3, VX3, VXB
@ -433,19 +698,30 @@
vld VX3, Y, 6 * SIZE
vfmul.d VX2, VX2, VXB
vfmul.d VX3, VX3, VXB
addi.d I, I, -1
vst VX2, Y, 4 * SIZE
vst VX3, Y, 6 * SIZE
#else
vld VX3, Y, 4 * SIZE
vfmul.s VX2, VX2, VXB
vfmul.s VX3, VX3, VXB
vst VX2, Y, 0 * SIZE
vst VX3, Y, 4 * SIZE
#endif
addi.d Y, Y, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L213
b .L997
.align 3
.L214: // ALPHA==0 BETA==0
vst VXZ, Y, 0 * SIZE
#ifdef DOUBLE
vst VXZ, Y, 2 * SIZE
vst VXZ, Y, 4 * SIZE
vst VXZ, Y, 6 * SIZE
#else
vst VXZ, Y, 4 * SIZE
#endif
addi.d Y, Y, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L214
@ -455,20 +731,21 @@
.L22:
bge $r0, I, .L997
move YY, Y
CMPEQ $fcc0, ALPHA, a1
bcnez $fcc0, .L220
CMPEQ $fcc0, BETA, a1
bcnez $fcc0, .L222 // ALPHA!=0 BETA==0
b .L221 // ALPHA!=0 BETA!=0
.align 3
.L220:
CMPEQ $fcc0, BETA, a1
bcnez $fcc0, .L224 // ALPHA==0 BETA==0
b .L223 // ALPHA==0 BETA!=0
.align 3
.L221: // ALPHA!=0 BETA!=0
#ifdef DOUBLE
ld.d t1, X, 0 * SIZE
add.d X, X, INCX
ld.d t2, X, 0 * SIZE
@ -541,12 +818,83 @@
vstelm.d VX3, YY, 0, 0
add.d YY, YY, INCY
vstelm.d VX3, YY, 0, 1
#else
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
add.d X, X, INCX
vinsgr2vr.w VX0, t1, 0
vinsgr2vr.w VX0, t2, 1
vinsgr2vr.w VX0, t3, 2
vinsgr2vr.w VX0, t4, 3
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
vinsgr2vr.w VX2, t1, 0
vinsgr2vr.w VX2, t2, 1
vinsgr2vr.w VX2, t3, 2
vinsgr2vr.w VX2, t4, 3
add.d Y, Y, INCY
vfmul.s VX0, VX0, VXA
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
add.d X, X, INCX
vfmadd.s VX2, VX2, VXB, VX0
vinsgr2vr.w VX1, t1, 0
vinsgr2vr.w VX1, t2, 1
vinsgr2vr.w VX1, t3, 2
vinsgr2vr.w VX1, t4, 3
vstelm.w VX2, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VX2, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VX2, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VX2, YY, 0, 3
add.d YY, YY, INCY
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
vinsgr2vr.w VX3, t1, 0
vinsgr2vr.w VX3, t2, 1
vinsgr2vr.w VX3, t3, 2
vinsgr2vr.w VX3, t4, 3
add.d Y, Y, INCY
vfmul.s VX1, VX1, VXA
addi.d I, I, -1
vfmadd.s VX3, VX3, VXB, VX1
vstelm.w VX3, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VX3, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VX3, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VX3, YY, 0, 3
#endif
add.d YY, YY, INCY
blt $r0, I, .L221
b .L997
.align 3
.L222: // ALPHA!=0 BETA==0
#ifdef DOUBLE
ld.d t1, X, 0 * SIZE
add.d X, X, INCX
ld.d t2, X, 0 * SIZE
@ -591,12 +939,57 @@
vstelm.d VX1, YY, 0, 0
add.d YY, YY, INCY
vstelm.d VX1, YY, 0, 1
#else
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
vinsgr2vr.w VX0, t1, 0
vinsgr2vr.w VX0, t2, 1
vinsgr2vr.w VX0, t3, 2
vinsgr2vr.w VX0, t4, 3
add.d X, X, INCX
vfmul.s VX0, VX0, VXA
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
add.d X, X, INCX
vinsgr2vr.w VX1, t1, 0
vinsgr2vr.w VX1, t2, 1
vinsgr2vr.w VX1, t3, 2
vinsgr2vr.w VX1, t4, 3
vstelm.w VX0, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VX0, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VX0, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VX0, YY, 0, 3
add.d YY, YY, INCY
vfmul.s VX1, VX1, VXA
addi.d I, I, -1
vstelm.w VX1, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VX1, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VX1, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VX1, YY, 0, 3
#endif
add.d YY, YY, INCY
blt $r0, I, .L222
b .L997
.align 3
.L223: // ALPHA==0 BETA!=0
#ifdef DOUBLE
ld.d t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.d t2, Y, 0 * SIZE
@ -641,12 +1034,57 @@
vstelm.d VX3, YY, 0, 0
add.d YY, YY, INCY
vstelm.d VX3, YY, 0, 1
#else
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
vinsgr2vr.w VX2, t1, 0
vinsgr2vr.w VX2, t2, 1
vinsgr2vr.w VX2, t3, 2
vinsgr2vr.w VX2, t4, 3
add.d Y, Y, INCY
vfmul.s VX2, VX2, VXB
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
add.d Y, Y, INCY
vinsgr2vr.w VX3, t1, 0
vinsgr2vr.w VX3, t2, 1
vinsgr2vr.w VX3, t3, 2
vinsgr2vr.w VX3, t4, 3
vstelm.w VX2, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VX2, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VX2, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VX2, YY, 0, 3
add.d YY, YY, INCY
vfmul.s VX3, VX3, VXB
addi.d I, I, -1
vstelm.w VX3, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VX3, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VX3, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VX3, YY, 0, 3
#endif
add.d YY, YY, INCY
blt $r0, I, .L223
b .L997
.align 3
.L224: // ALPHA==0 BETA==0
#ifdef DOUBLE
vstelm.d VXZ, YY, 0, 0
add.d YY, YY, INCY
vstelm.d VXZ, YY, 0, 1
@ -662,6 +1100,23 @@
vstelm.d VXZ, YY, 0, 0
add.d YY, YY, INCY
vstelm.d VXZ, YY, 0, 1
#else
vstelm.w VXZ, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 3
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 3
#endif
add.d YY, YY, INCY
addi.d I, I, -1
blt $r0, I, .L224
@ -674,12 +1129,12 @@
.align 3
.L998:
LD $f12, X, 0 * SIZE
LD $f13, Y, 0 * SIZE
addi.d I, I, -1
MUL $f12, $f12, ALPHA
MADD $f13, $f13, BETA, $f12
ST $f13, Y, 0 * SIZE
add.d X, X, INCX
add.d Y, Y, INCY
blt $r0, I, .L998


@ -1,597 +0,0 @@
#define ASSEMBLER
#include "common.h"
#define N $r4
#define ALPHA $f0
#define X $r5
#define INCX $r6
#define BETA $f1
#define Y $r7
#define INCY $r8
#define I $r12
#define TEMP $r13
#define t1 $r14
#define t2 $r16
#define t3 $r15
#define t4 $r17
#define XX $r18
#define YY $r19
#define a1 $f12
#define a2 $f13
#define VX0 $xr8
#define VX1 $xr20
#define VX2 $xr21
#define VX3 $xr22
#define VXA $xr23
#define VXB $xr9
#define VXZ $xr19
PROLOGUE
bge $r0, N, .L999
li.d TEMP, 1
movgr2fr.d a1, $r0
ffint.s.l a1, a1
slli.d TEMP, TEMP, BASE_SHIFT
slli.d INCX, INCX, BASE_SHIFT
slli.d INCY, INCY, BASE_SHIFT
movfr2gr.s t1, ALPHA
xvreplgr2vr.w VXA, t1
movfr2gr.s t2, BETA
xvreplgr2vr.w VXB, t2
movfr2gr.s t3, a1
xvreplgr2vr.w VXZ, t3
srai.d I, N, 3
bne INCX, TEMP, .L20
bne INCY, TEMP, .L12 // INCX==1 and INCY!=1
b .L11 // INCX==1 and INCY==1
.L20:
bne INCY, TEMP, .L22 // INCX!=1 and INCY!=1
b .L21 // INCX!=1 and INCY==1
.L11:
bge $r0, I, .L997
fcmp.ceq.s $fcc0, ALPHA, a1
bcnez $fcc0, .L110
fcmp.ceq.s $fcc0, BETA, a1
bcnez $fcc0, .L112 // ALPHA!=0 BETA==0
b .L111 // ALPHA!=0 BETA!=0
.align 3
.L110:
fcmp.ceq.s $fcc0, BETA, a1
bcnez $fcc0, .L114 // ALPHA==0 BETA==0
b .L113 // ALPHA==0 BETA!=0
.align 3
.L111: // ALPHA!=0 BETA!=0
xvld VX0, X, 0 * SIZE
xvld VX2, Y, 0 * SIZE
xvfmul.s VX0, VX0, VXA
addi.d I, I, -1
xvfmadd.s VX2, VX2, VXB, VX0
xvst VX2, Y, 0 * SIZE
addi.d X, X, 8 * SIZE
addi.d Y, Y, 8 * SIZE
blt $r0, I, .L111
b .L997
.align 3
.L112: // ALPHA!=0 BETA==0
xvld VX0, X, 0 * SIZE
xvfmul.s VX0, VX0, VXA
addi.d I, I, -1
xvst VX0, Y, 0 * SIZE
addi.d X, X, 8 * SIZE
addi.d Y, Y, 8 * SIZE
blt $r0, I, .L112
b .L997
.align 3
.L113: // ALPHA==0 BETA!=0
xvld VX2, Y, 0 * SIZE
xvfmul.s VX2, VX2, VXB
addi.d I, I, -1
xvst VX2, Y, 0 * SIZE
addi.d Y, Y, 8 * SIZE
blt $r0, I, .L113
b .L997
.align 3
.L114: // ALPHA==0 BETA==0
xvst VXZ, Y, 0 * SIZE
addi.d Y, Y, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L114
b .L997
.align 3
.L12: // INCX==1 and INCY!=1
bge $r0, I, .L997
move YY, Y
fcmp.ceq.s $fcc0, ALPHA, a1
bcnez $fcc0, .L120
fcmp.ceq.s $fcc0, BETA, a1
bcnez $fcc0, .L122 // ALPHA!=0 BETA==0
b .L121 // ALPHA!=0 BETA!=0
.align 3
.L120:
fcmp.ceq.s $fcc0, BETA, a1
bcnez $fcc0, .L124 // ALPHA==0 BETA==0
b .L123 // ALPHA==0 BETA!=0
.align 3
.L121: // ALPHA!=0 BETA!=0
xvld VX0, X, 0 * SIZE
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
add.d Y, Y, INCY
xvinsgr2vr.w VX2, t1, 0
xvinsgr2vr.w VX2, t2, 1
xvinsgr2vr.w VX2, t3, 2
xvinsgr2vr.w VX2, t4, 3
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
xvinsgr2vr.w VX2, t1, 4
xvinsgr2vr.w VX2, t2, 5
xvinsgr2vr.w VX2, t3, 6
xvinsgr2vr.w VX2, t4, 7
add.d Y, Y, INCY
xvfmul.s VX0, VX0, VXA
xvfmadd.s VX2, VX2, VXB, VX0
xvstelm.w VX2, YY, 0, 0
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 1
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 2
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 3
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 4
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 5
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 6
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 7
add.d YY, YY, INCY
addi.d X, X, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L121
b .L997
.align 3
.L122: // ALPHA!=0 BETA==0
xvld VX0, X, 0 * SIZE
xvfmul.s VX0, VX0, VXA
addi.d I, I, -1
xvstelm.w VX0, YY, 0, 0
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 1
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 2
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 3
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 4
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 5
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 6
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 7
add.d YY, YY, INCY
addi.d X, X, 8 * SIZE
blt $r0, I, .L122
b .L997
.align 3
.L123: // ALPHA==0 BETA!=0
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
add.d Y, Y, INCY
xvinsgr2vr.w VX2, t1, 0
xvinsgr2vr.w VX2, t2, 1
xvinsgr2vr.w VX2, t3, 2
xvinsgr2vr.w VX2, t4, 3
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
xvinsgr2vr.w VX2, t1, 4
xvinsgr2vr.w VX2, t2, 5
xvinsgr2vr.w VX2, t3, 6
xvinsgr2vr.w VX2, t4, 7
add.d Y, Y, INCY
xvfmul.s VX2, VX2, VXB
xvstelm.w VX2, YY, 0, 0
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 1
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 2
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 3
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 4
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 5
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 6
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 7
add.d YY, YY, INCY
addi.d I, I, -1
blt $r0, I, .L123
b .L997
.align 3
.L124: // ALPHA==0 BETA==0
xvstelm.w VXZ, YY, 0, 0
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 1
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 2
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 3
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 4
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 5
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 6
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 7
add.d YY, YY, INCY
addi.d I, I, -1
blt $r0, I, .L124
b .L997
.align 3
.L21:// INCX!=1 and INCY==1
bge $r0, I, .L997
fcmp.ceq.s $fcc0, ALPHA, a1
bcnez $fcc0, .L210
fcmp.ceq.s $fcc0, BETA, a1
bcnez $fcc0, .L212 // ALPHA!=0 BETA==0
b .L211 // ALPHA!=0 BETA!=0
.align 3
.L210:
fcmp.ceq.s $fcc0, BETA, a1
bcnez $fcc0, .L214 // ALPHA==0 BETA==0
b .L213 // ALPHA==0 BETA!=0
.align 3
.L211: // ALPHA!=0 BETA!=0
xvld VX2, Y, 0 * SIZE
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
add.d X, X, INCX
xvinsgr2vr.w VX0, t1, 0
xvinsgr2vr.w VX0, t2, 1
xvinsgr2vr.w VX0, t3, 2
xvinsgr2vr.w VX0, t4, 3
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
xvinsgr2vr.w VX0, t1, 4
xvinsgr2vr.w VX0, t2, 5
xvinsgr2vr.w VX0, t3, 6
xvinsgr2vr.w VX0, t4, 7
add.d X, X, INCX
xvfmul.s VX0, VXA, VX0
xvfmadd.s VX2, VX2, VXB, VX0
addi.d I, I, -1
xvst VX2, Y, 0 * SIZE
addi.d Y, Y, 8 * SIZE
blt $r0, I, .L211
b .L997
.align 3
.L212: // ALPHA!=0 BETA==0
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
add.d X, X, INCX
xvinsgr2vr.w VX0, t1, 0
xvinsgr2vr.w VX0, t2, 1
xvinsgr2vr.w VX0, t3, 2
xvinsgr2vr.w VX0, t4, 3
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
xvinsgr2vr.w VX0, t1, 4
xvinsgr2vr.w VX0, t2, 5
xvinsgr2vr.w VX0, t3, 6
xvinsgr2vr.w VX0, t4, 7
add.d X, X, INCX
xvfmul.s VX0, VXA, VX0
addi.d I, I, -1
xvst VX0, Y, 0 * SIZE
addi.d Y, Y, 8 * SIZE
blt $r0, I, .L212
b .L997
.align 3
.L213: // ALPHA==0 BETA!=0
xvld VX2, Y, 0 * SIZE
xvfmul.s VX2, VX2, VXB
xvst VX2, Y, 0 * SIZE
addi.d Y, Y, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L213
b .L997
.align 3
.L214: // ALPHA==0 BETA==0
xvst VXZ, Y, 0 * SIZE
addi.d Y, Y, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L214
b .L997
.align 3
.L22:
bge $r0, I, .L997
move YY, Y
fcmp.ceq.s $fcc0, ALPHA, a1
bcnez $fcc0, .L220
fcmp.ceq.s $fcc0, BETA, a1
bcnez $fcc0, .L222 // ALPHA!=0 BETA==0
b .L221 // ALPHA!=0 BETA!=0
.align 3
.L220:
fcmp.ceq.s $fcc0, BETA, a1
bcnez $fcc0, .L224 // ALPHA==0 BETA==0
b .L223 // ALPHA==0 BETA!=0
.align 3
.L221: // ALPHA!=0 BETA!=0
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
add.d X, X, INCX
xvinsgr2vr.w VX0, t1, 0
xvinsgr2vr.w VX0, t2, 1
xvinsgr2vr.w VX0, t3, 2
xvinsgr2vr.w VX0, t4, 3
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
xvinsgr2vr.w VX0, t1, 4
xvinsgr2vr.w VX0, t2, 5
xvinsgr2vr.w VX0, t3, 6
xvinsgr2vr.w VX0, t4, 7
add.d X, X, INCX
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
xvinsgr2vr.w VX2, t1, 0
xvinsgr2vr.w VX2, t2, 1
xvinsgr2vr.w VX2, t3, 2
xvinsgr2vr.w VX2, t4, 3
add.d Y, Y, INCY
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
xvinsgr2vr.w VX2, t1, 4
xvinsgr2vr.w VX2, t2, 5
xvinsgr2vr.w VX2, t3, 6
xvinsgr2vr.w VX2, t4, 7
add.d Y, Y, INCY
xvfmul.s VX0, VX0, VXA
xvfmadd.s VX2, VX2, VXB, VX0
addi.d I, I, -1
xvstelm.w VX2, YY, 0, 0
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 1
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 2
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 3
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 4
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 5
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 6
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 7
add.d YY, YY, INCY
blt $r0, I, .L221
b .L997
.align 3
.L222: // ALPHA!=0 BETA==0
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
xvinsgr2vr.w VX0, t1, 0
xvinsgr2vr.w VX0, t2, 1
xvinsgr2vr.w VX0, t3, 2
xvinsgr2vr.w VX0, t4, 3
add.d X, X, INCX
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
xvinsgr2vr.w VX0, t1, 4
xvinsgr2vr.w VX0, t2, 5
xvinsgr2vr.w VX0, t3, 6
xvinsgr2vr.w VX0, t4, 7
add.d X, X, INCX
xvfmul.s VX0, VX0, VXA
addi.d I, I, -1
xvstelm.w VX0, YY, 0, 0
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 1
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 2
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 3
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 4
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 5
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 6
add.d YY, YY, INCY
xvstelm.w VX0, YY, 0, 7
add.d YY, YY, INCY
blt $r0, I, .L222
b .L997
.align 3
.L223: // ALPHA==0 BETA!=0
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
add.d Y, Y, INCY
xvinsgr2vr.w VX2, t1, 0
xvinsgr2vr.w VX2, t2, 1
xvinsgr2vr.w VX2, t3, 2
xvinsgr2vr.w VX2, t4, 3
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
xvinsgr2vr.w VX2, t1, 4
xvinsgr2vr.w VX2, t2, 5
xvinsgr2vr.w VX2, t3, 6
xvinsgr2vr.w VX2, t4, 7
add.d Y, Y, INCY
xvfmul.s VX2, VX2, VXB
addi.d I, I, -1
xvstelm.w VX2, YY, 0, 0
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 1
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 2
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 3
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 4
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 5
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 6
add.d YY, YY, INCY
xvstelm.w VX2, YY, 0, 7
add.d YY, YY, INCY
blt $r0, I, .L223
b .L997
.align 3
.L224: // ALPHA==0 BETA==0
xvstelm.w VXZ, YY, 0, 0
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 1
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 2
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 3
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 4
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 5
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 6
add.d YY, YY, INCY
xvstelm.w VXZ, YY, 0, 7
add.d YY, YY, INCY
addi.d I, I, -1
blt $r0, I, .L224
b .L997
.align 3
.L997:
andi I, N, 7
bge $r0, I, .L999
.align 3
.L998:
fld.s $f12, X, 0 * SIZE
fld.s $f13, Y, 0 * SIZE
addi.d I, I, -1
fmul.s $f12, $f12, ALPHA
fmadd.s $f13, $f13, BETA, $f12
fst.s $f13, Y, 0 * SIZE
add.d X, X, INCX
add.d Y, Y, INCY
blt $r0, I, .L998
.align 3
.L999:
move $r4, $r12
jirl $r0, $r1, 0x0
.align 3
EPILOGUE


@ -1,629 +0,0 @@
#define ASSEMBLER
#include "common.h"
#define N $r4
#define ALPHA $f0
#define X $r5
#define INCX $r6
#define BETA $f1
#define Y $r7
#define INCY $r8
#define I $r12
#define TEMP $r13
#define t1 $r14
#define t2 $r16
#define t3 $r15
#define t4 $r17
#define XX $r18
#define YY $r19
#define a1 $f12
#define a2 $f13
#define VX0 $vr8
#define VX1 $vr20
#define VX2 $vr21
#define VX3 $vr22
#define VXA $vr23
#define VXB $vr9
#define VXZ $vr19
PROLOGUE
bge $r0, N, .L999
li.d TEMP, 1
movgr2fr.d a1, $r0
ffint.s.l a1, a1
slli.d TEMP, TEMP, BASE_SHIFT
slli.d INCX, INCX, BASE_SHIFT
slli.d INCY, INCY, BASE_SHIFT
movfr2gr.s t1, ALPHA
vreplgr2vr.w VXA, t1
movfr2gr.s t2, BETA
vreplgr2vr.w VXB, t2
movfr2gr.s t3, a1
vreplgr2vr.w VXZ, t3
srai.d I, N, 3
bne INCX, TEMP, .L20
bne INCY, TEMP, .L12 // INCX==1 and INCY!=1
b .L11 // INCX==1 and INCY==1
.L20:
bne INCY, TEMP, .L22 // INCX!=1 and INCY!=1
b .L21 // INCX!=1 and INCY==1
.L11:
bge $r0, I, .L997
fcmp.ceq.s $fcc0, ALPHA, a1
bcnez $fcc0, .L110
fcmp.ceq.s $fcc0, BETA, a1
bcnez $fcc0, .L112 // ALPHA!=0 BETA==0
b .L111 // ALPHA!=0 BETA!=0
.align 3
.L110:
fcmp.ceq.s $fcc0, BETA, a1
bcnez $fcc0, .L114 // ALPHA==0 BETA==0
b .L113 // ALPHA==0 BETA!=0
.align 3
.L111: // ALPHA!=0 BETA!=0
vld VX0, X, 0 * SIZE
vld VX2, Y, 0 * SIZE
vld VX1, X, 4 * SIZE
vld VX3, Y, 4 * SIZE
vfmul.s VX0, VX0, VXA
vfmul.s VX1, VX1, VXA
vfmadd.s VX2, VX2, VXB, VX0
vfmadd.s VX3, VX3, VXB, VX1
vst VX2, Y, 0 * SIZE
vst VX3, Y, 4 * SIZE
addi.d X, X, 8 * SIZE
addi.d Y, Y, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L111
b .L997
.align 3
.L112: // ALPHA!=0 BETA==0
vld VX0, X, 0 * SIZE
vld VX1, X, 4 * SIZE
vfmul.s VX0, VX0, VXA
vfmul.s VX1, VX1, VXA
vst VX0, Y, 0 * SIZE
vst VX1, Y, 4 * SIZE
addi.d X, X, 8 * SIZE
addi.d Y, Y, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L112
b .L997
.align 3
.L113: // ALPHA==0 BETA!=0
vld VX2, Y, 0 * SIZE
vld VX3, Y, 4 * SIZE
vfmul.s VX2, VX2, VXB
vfmul.s VX3, VX3, VXB
vst VX2, Y, 0 * SIZE
vst VX3, Y, 4 * SIZE
addi.d Y, Y, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L113
b .L997
.align 3
.L114: // ALPHA==0 BETA==0
vst VXZ, Y, 0 * SIZE
vst VXZ, Y, 4 * SIZE
addi.d Y, Y, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L114
b .L997
.align 3
.L12: // INCX==1 and INCY!=1
bge $r0, I, .L997
move YY, Y
fcmp.ceq.s $fcc0, ALPHA, a1
bcnez $fcc0, .L120
fcmp.ceq.s $fcc0, BETA, a1
bcnez $fcc0, .L122 // ALPHA!=0 BETA==0
b .L121 // ALPHA!=0 BETA!=0
.align 3
.L120:
fcmp.ceq.s $fcc0, BETA, a1
bcnez $fcc0, .L124 // ALPHA==0 BETA==0
b .L123 // ALPHA==0 BETA!=0
.align 3
.L121: // ALPHA!=0 BETA!=0
vld VX0, X, 0 * SIZE
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
vinsgr2vr.w VX2, t1, 0
vinsgr2vr.w VX2, t2, 1
vinsgr2vr.w VX2, t3, 2
vinsgr2vr.w VX2, t4, 3
add.d Y, Y, INCY
vfmul.s VX0, VX0, VXA
vld VX1, X, 4 * SIZE
vfmadd.s VX2, VX2, VXB, VX0
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
add.d Y, Y, INCY
vinsgr2vr.w VX3, t1, 0
vinsgr2vr.w VX3, t2, 1
vinsgr2vr.w VX3, t3, 2
vinsgr2vr.w VX3, t4, 3
vstelm.w VX2, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VX2, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VX2, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VX2, YY, 0, 3
add.d YY, YY, INCY
vfmul.s VX1, VX1, VXA
vfmadd.s VX3, VX3, VXB, VX1
addi.d I, I, -1
vstelm.w VX3, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VX3, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VX3, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VX3, YY, 0, 3
add.d YY, YY, INCY
addi.d X, X, 8 * SIZE
blt $r0, I, .L121
b .L997
.align 3
.L122: // ALPHA!=0 BETA==0
vld VX0, X, 0 * SIZE
vld VX1, X, 4 * SIZE
vfmul.s VX0, VX0, VXA
vfmul.s VX1, VX1, VXA
vstelm.w VX0, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VX0, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VX0, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VX0, YY, 0, 3
add.d YY, YY, INCY
vstelm.w VX1, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VX1, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VX1, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VX1, YY, 0, 3
add.d YY, YY, INCY
addi.d X, X, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L122
b .L997
.align 3
.L123: // ALPHA==0 BETA!=0
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
vinsgr2vr.w VX2, t1, 0
vinsgr2vr.w VX2, t2, 1
vinsgr2vr.w VX2, t3, 2
vinsgr2vr.w VX2, t4, 3
add.d Y, Y, INCY
vfmul.s VX2, VX2, VXB
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
add.d Y, Y, INCY
vinsgr2vr.w VX3, t1, 0
vinsgr2vr.w VX3, t2, 1
vinsgr2vr.w VX3, t3, 2
vinsgr2vr.w VX3, t4, 3
vstelm.w VX2, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VX2, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VX2, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VX2, YY, 0, 3
add.d YY, YY, INCY
vfmul.s VX3, VX3, VXB
addi.d I, I, -1
vstelm.w VX3, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VX3, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VX3, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VX3, YY, 0, 3
add.d YY, YY, INCY
blt $r0, I, .L123
b .L997
.align 3
.L124: // ALPHA==0 BETA==0
vstelm.w VXZ, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 3
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 3
add.d YY, YY, INCY
addi.d I, I, -1
blt $r0, I, .L124
b .L997
.align 3
.L21: // INCX!=1 and INCY==1
bge $r0, I, .L997
fcmp.ceq.s $fcc0, ALPHA, a1
bcnez $fcc0, .L210
fcmp.ceq.s $fcc0, BETA, a1
bcnez $fcc0, .L212 // ALPHA!=0 BETA==0
b .L211 // ALPHA!=0 BETA!=0
.align 3
.L210:
fcmp.ceq.s $fcc0, BETA, a1
bcnez $fcc0, .L214 // ALPHA==0 BETA==0
b .L213 // ALPHA==0 BETA!=0
.align 3
.L211: // ALPHA!=0 BETA!=0
vld VX2, Y, 0 * SIZE
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
vinsgr2vr.w VX0, t1, 0
vinsgr2vr.w VX0, t2, 1
vinsgr2vr.w VX0, t3, 2
vinsgr2vr.w VX0, t4, 3
add.d X, X, INCX
vfmul.s VX0, VXA, VX0
vld VX3, Y, 4 * SIZE
vfmadd.s VX2, VX2, VXB, VX0
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
add.d X, X, INCX
vinsgr2vr.w VX1, t1, 0
vinsgr2vr.w VX1, t2, 1
vinsgr2vr.w VX1, t3, 2
vinsgr2vr.w VX1, t4, 3
vst VX2, Y, 0 * SIZE
vfmul.s VX1, VX1, VXA
vfmadd.s VX3, VX3, VXB, VX1
addi.d I, I, -1
vst VX3, Y, 4 * SIZE
addi.d Y, Y, 8 * SIZE
blt $r0, I, .L211
b .L997
.align 3
.L212: // ALPHA!=0 BETA==0
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
vinsgr2vr.w VX0, t1, 0
vinsgr2vr.w VX0, t2, 1
vinsgr2vr.w VX0, t3, 2
vinsgr2vr.w VX0, t4, 3
add.d X, X, INCX
vfmul.s VX0, VXA, VX0
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
add.d X, X, INCX
vinsgr2vr.w VX1, t1, 0
vinsgr2vr.w VX1, t2, 1
vinsgr2vr.w VX1, t3, 2
vinsgr2vr.w VX1, t4, 3
vst VX0, Y, 0 * SIZE
vfmul.s VX1, VX1, VXA
addi.d I, I, -1
vst VX1, Y, 4 * SIZE
addi.d Y, Y, 8 * SIZE
blt $r0, I, .L212
b .L997
.align 3
.L213: // ALPHA==0 BETA!=0
vld VX2, Y, 0 * SIZE
vld VX3, Y, 4 * SIZE
vfmul.s VX2, VX2, VXB
vfmul.s VX3, VX3, VXB
vst VX2, Y, 0 * SIZE
vst VX3, Y, 4 * SIZE
addi.d Y, Y, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L213
b .L997
.align 3
.L214: // ALPHA==0 BETA==0
vst VXZ, Y, 0 * SIZE
vst VXZ, Y, 4 * SIZE
addi.d Y, Y, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L214
b .L997
.align 3
.L22: // INCX!=1 and INCY!=1
bge $r0, I, .L997
move YY, Y
fcmp.ceq.s $fcc0, ALPHA, a1
bcnez $fcc0, .L220
fcmp.ceq.s $fcc0, BETA, a1
bcnez $fcc0, .L222 // ALPHA!=0 BETA==0
b .L221 // ALPHA!=0 BETA!=0
.align 3
.L220:
fcmp.ceq.s $fcc0, BETA, a1
bcnez $fcc0, .L224 // ALPHA==0 BETA==0
b .L223 // ALPHA==0 BETA!=0
.align 3
.L221: // ALPHA!=0 BETA!=0
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
add.d X, X, INCX
vinsgr2vr.w VX0, t1, 0
vinsgr2vr.w VX0, t2, 1
vinsgr2vr.w VX0, t3, 2
vinsgr2vr.w VX0, t4, 3
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
vinsgr2vr.w VX2, t1, 0
vinsgr2vr.w VX2, t2, 1
vinsgr2vr.w VX2, t3, 2
vinsgr2vr.w VX2, t4, 3
add.d Y, Y, INCY
vfmul.s VX0, VX0, VXA
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
add.d X, X, INCX
vfmadd.s VX2, VX2, VXB, VX0
vinsgr2vr.w VX1, t1, 0
vinsgr2vr.w VX1, t2, 1
vinsgr2vr.w VX1, t3, 2
vinsgr2vr.w VX1, t4, 3
vstelm.w VX2, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VX2, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VX2, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VX2, YY, 0, 3
add.d YY, YY, INCY
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
vinsgr2vr.w VX3, t1, 0
vinsgr2vr.w VX3, t2, 1
vinsgr2vr.w VX3, t3, 2
vinsgr2vr.w VX3, t4, 3
add.d Y, Y, INCY
vfmul.s VX1, VX1, VXA
addi.d I, I, -1
vfmadd.s VX3, VX3, VXB, VX1
vstelm.w VX3, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VX3, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VX3, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VX3, YY, 0, 3
add.d YY, YY, INCY
blt $r0, I, .L221
b .L997
.align 3
.L222: // ALPHA!=0 BETA==0
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
vinsgr2vr.w VX0, t1, 0
vinsgr2vr.w VX0, t2, 1
vinsgr2vr.w VX0, t3, 2
vinsgr2vr.w VX0, t4, 3
add.d X, X, INCX
vfmul.s VX0, VX0, VXA
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
add.d X, X, INCX
vinsgr2vr.w VX1, t1, 0
vinsgr2vr.w VX1, t2, 1
vinsgr2vr.w VX1, t3, 2
vinsgr2vr.w VX1, t4, 3
vstelm.w VX0, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VX0, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VX0, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VX0, YY, 0, 3
add.d YY, YY, INCY
vfmul.s VX1, VX1, VXA
addi.d I, I, -1
vstelm.w VX1, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VX1, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VX1, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VX1, YY, 0, 3
add.d YY, YY, INCY
blt $r0, I, .L222
b .L997
.align 3
.L223: // ALPHA==0 BETA!=0
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
vinsgr2vr.w VX2, t1, 0
vinsgr2vr.w VX2, t2, 1
vinsgr2vr.w VX2, t3, 2
vinsgr2vr.w VX2, t4, 3
add.d Y, Y, INCY
vfmul.s VX2, VX2, VXB
ld.w t1, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t2, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
add.d Y, Y, INCY
ld.w t4, Y, 0 * SIZE
add.d Y, Y, INCY
vinsgr2vr.w VX3, t1, 0
vinsgr2vr.w VX3, t2, 1
vinsgr2vr.w VX3, t3, 2
vinsgr2vr.w VX3, t4, 3
vstelm.w VX2, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VX2, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VX2, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VX2, YY, 0, 3
add.d YY, YY, INCY
vfmul.s VX3, VX3, VXB
addi.d I, I, -1
vstelm.w VX3, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VX3, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VX3, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VX3, YY, 0, 3
add.d YY, YY, INCY
blt $r0, I, .L223
b .L997
.align 3
.L224: // ALPHA==0 BETA==0
vstelm.w VXZ, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 3
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 0
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 1
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 2
add.d YY, YY, INCY
vstelm.w VXZ, YY, 0, 3
add.d YY, YY, INCY
addi.d I, I, -1
blt $r0, I, .L224
b .L997
.align 3
.L997:
andi I, N, 7
bge $r0, I, .L999
.align 3
.L998:
fld.s $f12, X, 0 * SIZE
fld.s $f13, Y, 0 * SIZE
addi.d I, I, -1
fmul.s $f12, $f12, ALPHA
fmadd.s $f13, $f13, BETA, $f12
fst.s $f13, Y, 0 * SIZE
add.d X, X, INCX
add.d Y, Y, INCY
blt $r0, I, .L998
.align 3
.L999:
move $r4, $r12
jirl $r0, $r1, 0x0
.align 3
EPILOGUE
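
The loop structure is the same in both the LSX and LASX variants: the main loop processes 8 elements per iteration (srai.d I, N, 3), the .L997/.L998 tail handles the remaining N & 7 elements with scalar fld/fmul/fmadd/fst, and the non-unit-stride paths gather with ld.w + vinsgr2vr.w and scatter with vstelm.w while stepping Y/YY by INCY. A hypothetical C sketch of that blocking for the unit-stride ALPHA!=0, BETA!=0 case follows; the names are illustrative, not taken from the source.

    /* Hypothetical sketch of the blocking used by the unit-stride path:
     * 8 floats per iteration (two 4-lane LSX vectors, or one 8-lane LASX
     * vector), then a scalar loop for the remaining n & 7 elements --
     * the role played by .L997/.L998 above. */
    static void axpby_blocked(long n, float alpha, const float *x,
                              float beta, float *y)
    {
        if (n <= 0) return;

        long vec_iters = n >> 3;   /* srai.d I, N, 3 */
        long tail      = n & 7;    /* andi  I, N, 7  */

        for (long i = 0; i < vec_iters; i++) {
            /* In the assembly this body is vld/xvld + vfmul + vfmadd + vst/xvst. */
            for (int k = 0; k < 8; k++)
                y[k] = alpha * x[k] + beta * y[k];
            x += 8;
            y += 8;
        }
        for (long i = 0; i < tail; i++)            /* scalar remainder */
            y[i] = alpha * x[i] + beta * y[i];
    }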