loongarch64: Add zgemm and cgemm optimization

pengxu authored 2023-12-29 15:10:01 +08:00, committed by Shiyou Yin
parent 546f13558c
commit a5d0d21378
14 changed files with 4621 additions and 5 deletions

View File

@@ -144,6 +144,7 @@ static inline int WhereAmI(void){
#define XVCMPLT xvfcmp.clt.d
#define XVMUL xvfmul.d
#define XVMSUB xvfmsub.d
#define XVNMSUB xvfnmsub.d
#define VFSUB vfsub.d
#define VFADD vfadd.d
@@ -158,6 +159,7 @@ static inline int WhereAmI(void){
#define VCMPLT vfcmp.clt.d
#define VMUL vfmul.d
#define VMSUB vfmsub.d
#define VNMSUB vfnmsub.d
#else
@@ -198,6 +200,7 @@ static inline int WhereAmI(void){
#define XVCMPLT xvfcmp.clt.s
#define XVMUL xvfmul.s
#define XVMSUB xvfmsub.s
#define XVNMSUB xvfnmsub.s
#define VFSUB vfsub.s
#define VFADD vfadd.s
@@ -212,6 +215,7 @@ static inline int WhereAmI(void){
#define VCMPLT vfcmp.clt.s
#define VMUL vfmul.s
#define VMSUB vfmsub.s
#define VNMSUB vfnmsub.s
#endif /* defined(DOUBLE) */
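
These hunks map the generic XVNMSUB/VNMSUB macros used by the new kernels onto LoongArch xvfnmsub/vfnmsub. Per vector lane the LoongArch fnmsub computes -(a*b - c), so the macro behaves as a fused "d = d - a*b"; a minimal C model of one lane (hypothetical helper, double-precision case):

/* NMSUB d, a, b, d  ==>  d = -(a*b - d) = d - a*b, per lane */
static inline double nmsub_lane(double a, double b, double d)
{
    return d - a * b;
}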

View File

@@ -95,4 +95,16 @@ DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
DTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
DTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
CGEMMKERNEL = cgemm_kernel_2x2_lsx.S
CGEMMONCOPY = cgemm_ncopy_2_lsx.S
CGEMMOTCOPY = cgemm_tcopy_2_lsx.S
CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX)
CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX)
CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
endif

View File

@@ -107,13 +107,35 @@ SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX)
SGEMVNKERNEL = sgemv_n_8_lasx.S
SGEMVTKERNEL = sgemv_t_8_lasx.S
CGEMMKERNEL = cgemm_kernel_2x2_lsx.S
CGEMMONCOPY = cgemm_ncopy_2_lsx.S
CGEMMOTCOPY = cgemm_tcopy_2_lsx.S
CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX)
CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX)
CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
ZGEMMKERNEL = zgemm_kernel_2x2_lasx.S
ZGEMMONCOPY = zgemm_ncopy_2_lasx.S
ZGEMMOTCOPY = zgemm_tcopy_2_lasx.S
ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX)
ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX)
ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
DTRSMKERNEL_LN = dtrsm_kernel_LN_16x4_lasx.S
DTRSMKERNEL_LT = dtrsm_kernel_LT_16x4_lasx.S
DTRSMKERNEL_RN = dtrsm_kernel_RN_16x4_lasx.S
DTRSMKERNEL_RT = dtrsm_kernel_RT_16x4_lasx.S
endif
STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
endif

View File

@@ -0,0 +1,857 @@
/*******************************************************************************
Copyright (c) 2023, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#define ASSEMBLER
#include "common.h"
/* Function parameters */
#define M $r4 // param 1: bm
#define N $r5 // param 2: bn
#define K $r6 // param 3: bk
#define ALPHA_R $f0 // param 4: alphar
#define ALPHA_I $f1 // param 5: alphai
#define A $r7 // param 6: ba
#define B $r8 // param 7: bb
#define C $r9 // param 8: bc
#define LDC $r10 // param 9: ldc
#if defined (TRMMKERNEL)
#define OFFSET $r11 // param 10: offset
#endif
#define OFF $r26
#define I $r12
#define J $r13
#define L $r14
#define TL $r15
#define A0 $r16
#define B0 $r17
#define C0 $r18
#define C1 $r19
#define C2 $r20
#define C3 $r23
#define T0 $r24
#define T1 $r25
#define a1 $f2
#define a2 $f3
#define a3 $f4
#define a4 $f5
#define a5 $f6
#define a6 $f7
#define a7 $f8
#define a8 $f9
#define b1 $f10
#define b2 $f11
#define b3 $f12
#define b4 $f13
#define b5 $f14
#define b6 $f15
#define b7 $f16
#define b8 $f17
#define c11 $f18
#define c12 $f19
#define c21 $f20
#define c22 $f21
#define c31 $f22
#define c32 $f23
#define c41 $f24
#define c42 $f25
/* LASX vectors */
#define U0 $xr30
#define U1 $xr31
#define U2 $xr2
#define U3 $xr3
#define U4 $xr4
#define U5 $xr5
#define U6 $xr6
#define U7 $xr7
#define U8 $xr8
#define U9 $xr9
#define U10 $xr10
#define U11 $xr11
#define U12 $xr12
#define U13 $xr13
#define U14 $xr14
#define U15 $xr15
#define D0 $xr16
#define D1 $xr17
#define D2 $xr18
#define D3 $xr19
#define D4 $xr20
#define D5 $xr21
#define D6 $xr22
#define D7 $xr23
#define D8 $xr24
#define D9 $xr25
#define D10 $xr26
#define D11 $xr27
#define VALPHAR $xr28
#define VALPHAI $xr29
#if defined(NN) || defined(NT) || defined(TN) || defined(TT)
#define XVMADD1 XVFMADD
#define XVMADD2 XVFMADD
#define XVMADD3 XVNMSUB
#define XVMADD4 XVFMADD
#define VMADD1 VFMADD
#define VMADD2 VFMADD
#define VMADD3 VNMSUB
#define VMADD4 VFMADD
#define XVFADD1 XVFADD
#define XVFADD2 XVFADD
#define XVFADD3 XVFSUB
#define XVFADD4 XVFADD
#define MADD1 MADD
#define MADD2 MADD
#define MADD3 NMSUB
#define MADD4 MADD
#endif
#if defined(NR) || defined(NC) || defined(TR) || defined(TC)
#define XVMADD1 XVFMADD
#define XVMADD2 XVFMADD
#define XVMADD3 XVFMADD
#define XVMADD4 XVNMSUB
#define VMADD1 VFMADD
#define VMADD2 VFMADD
#define VMADD3 VFMADD
#define VMADD4 VNMSUB
#define XVFADD1 XVFADD
#define XVFADD2 XVFADD
#define XVFADD3 XVFADD
#define XVFADD4 XVFSUB
#define MADD1 MADD
#define MADD2 MADD
#define MADD3 MADD
#define MADD4 NMSUB
#endif
#if defined(RN) || defined(RT) || defined(CN) || defined(CT)
#define XVMADD1 XVFMADD
#define XVMADD2 XVNMSUB
#define XVMADD3 XVFMADD
#define XVMADD4 XVFMADD
#define VMADD1 VFMADD
#define VMADD2 VNMSUB
#define VMADD3 VFMADD
#define VMADD4 VFMADD
#define XVFADD1 XVFADD
#define XVFADD2 XVFSUB
#define XVFADD3 XVFADD
#define XVFADD4 XVFADD
#define MADD1 MADD
#define MADD2 NMSUB
#define MADD3 MADD
#define MADD4 MADD
#endif
#if defined(RR) || defined(RC) || defined(CR) || defined(CC)
#define XVMADD1 XVFMADD
#define XVMADD2 XVNMSUB
#define XVMADD3 XVNMSUB
#define XVMADD4 XVNMSUB
#define VMADD1 VFMADD
#define VMADD2 VNMSUB
#define VMADD3 VNMSUB
#define VMADD4 VNMSUB
#define XVFADD1 XVFADD
#define XVFADD2 XVFSUB
#define XVFADD3 XVFSUB
#define XVFADD4 XVFSUB
#define MADD1 MADD
#define MADD2 NMSUB
#define MADD3 NMSUB
#define MADD4 NMSUB
#endif
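
The MADD1..MADD4 (and XVMADD*/VMADD*) selections above encode the four sign patterns of the complex multiply-accumulate for the transpose/conjugate variants. A minimal C sketch of the scheme (cmadd is a hypothetical helper, not part of the kernel):

/* One k-step: (ar + ai*i) * (br + bi*i) accumulated into (cr, ci).
   conj_a models the RN/RT/CN/CT cases, conj_b the NR/NC/TR/TC cases. */
static inline void cmadd(float ar, float ai, float br, float bi,
                         int conj_a, int conj_b, float *cr, float *ci)
{
    if (conj_a) ai = -ai;
    if (conj_b) bi = -bi;
    *cr += ar * br - ai * bi;   /* MADD1, then MADD3 */
    *ci += ai * br + ar * bi;   /* MADD2, then MADD4 */
}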
PROLOGUE
addi.d $sp, $sp, -128
SDARG $r23, $sp, 0
SDARG $r24, $sp, 8
SDARG $r25, $sp, 16
SDARG $r26, $sp, 24
SDARG $r27, $sp, 32
ST $f23, $sp, 40
ST $f24, $sp, 48
ST $f25, $sp, 56
ST $f26, $sp, 64
ST $f27, $sp, 72
ST $f28, $sp, 80
ST $f29, $sp, 88
ST $f30, $sp, 96
ST $f31, $sp, 104
ST ALPHA_R,$sp, 112
ST ALPHA_I,$sp, 120
xvldrepl.w VALPHAR, $sp, 112
xvldrepl.w VALPHAI, $sp, 120
#if defined (TRMMKERNEL) && !defined(LEFT)
sub.d OFF, $r0, OFFSET
#else
xor OFF, OFF, OFF
#endif
slli.d LDC, LDC, 2
move J, $r0
srai.d T0, N, 1
beq J, T0, .L19
.L10: /* for(j=0; j<bn/2; j+=1) */
move C0, C
slli.d TL, LDC, 1
add.d C1, C0, TL
move A0, A //ptrba
#if defined(TRMMKERNEL) && defined(LEFT)
move OFF, OFFSET
#endif
move I, $r0
srai.d T0, M, 1
beq I, T0, .L150
.L11: /* for(i=0; i<bm/2; i+=1) */
move B0, B //ptrbb
move TL, K /* TL = bk */
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
move B0, B //ptrbb
#else
slli.d C3, OFF, 0x04
add.d A0, A0, C3
add.d B0, B, C3
#endif
#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
sub.d TL, K, OFF //temp
#elif defined(LEFT)
addi.d TL, OFF, 2
#else
addi.d TL, OFF, 2
#endif
#endif // #if defined(TRMMKERNEL)
xvxor.v U0, U0, U0
xvxor.v U1, U1, U1
move L, $r0 //cycle param k
srai.d C2, TL, 2
beq L, C2, .L130
blt C2, L, .L130
.L12: /* for(k=0; k<bk/4; k+=1) */
xvld D0, A0, 0x00 //a 0-7
xvld D1, A0, 0x20 //a 8-15
xvld D2, B0, 0x00 //b 0-7
xvld D3, B0, 0x20 //b 8-15
xvand.v D4, D0, D0
xvpermi.q D4, D1, 0x02 //a 0 1 2 3 8 9 10 11
xvand.v D5, D4, D4
xvshuf4i.w D4, D4, 0x88 //a 0 2 0 2 8 10 8 10
xvshuf4i.w D5, D5, 0xdd //a 1 3 1 3 9 11 9 11
xvand.v D6, D1, D1
xvpermi.q D6, D0, 0x31 //a 4 5 6 7 12 13 14 15
xvand.v D7, D6, D6
xvshuf4i.w D6, D6, 0x88 //a 4 6 4 6 12 14 12 14
xvshuf4i.w D7, D7, 0xdd //a 5 7 5 7 13 15 13 15
xvand.v D8, D2, D2
xvpermi.q D8, D3, 0x02 //b 0 1 2 3 8 9 10 11
xvand.v D9, D8, D8
xvshuf4i.w D8, D8, 0xa0 //b 0 0 2 2 8 8 10 10
xvshuf4i.w D9, D9, 0xf5 //b 1 1 3 3 9 9 11 11
xvand.v D10, D3, D3
xvpermi.q D10, D2, 0x31 //b 4 5 6 7 12 13 14 15
xvand.v D11, D10, D10
xvshuf4i.w D10, D10, 0xa0 //b 4 4 6 6 12 12 14 14
xvshuf4i.w D11, D11, 0xf5 //b 5 5 7 7 13 13 15 15
XVMADD1 U0, D4, D8, U0 //res0 2 4 6 0 2 4 6
XVMADD2 U1, D5, D8, U1 //res1 3 5 7 1 3 5 7
xvpermi.q U0, U0, 0x01
xvpermi.q U1, U1, 0x01
XVMADD1 U0, D4, D8, U0
XVMADD2 U1, D5, D8, U1
XVMADD3 U0, D5, D9, U0
XVMADD4 U1, D4, D9, U1
xvpermi.q U0, U0, 0x01
xvpermi.q U1, U1, 0x01
XVMADD3 U0, D5, D9, U0
XVMADD4 U1, D4, D9, U1
XVMADD1 U0, D6, D10, U0 //res0 2 4 6 0 2 4 6
XVMADD2 U1, D7, D10, U1 //res1 3 5 7 1 3 5 7
xvpermi.q U0, U0, 0x01
xvpermi.q U1, U1, 0x01
XVMADD1 U0, D6, D10, U0
XVMADD2 U1, D7, D10, U1
XVMADD3 U0, D7, D11, U0
XVMADD4 U1, D6, D11, U1
xvpermi.q U0, U0, 0x01
xvpermi.q U1, U1, 0x01
XVMADD3 U0, D7, D11, U0
XVMADD4 U1, D6, D11, U1
addi.d A0, A0, 0x40
addi.d B0, B0, 0x40
addi.d L, L, 1
blt L, C2, .L12
.L130:
move L, $r0
andi C2, TL, 3
beq L, C2, .L14
.L13: /* for(k=0; k<(bk&3); k+=1) */
vld $vr16, A0, 0x00 //a0 a1 a2 a3
vld $vr17, B0, 0x00 //b0 b1 b2 b3
vshuf4i.w $vr20, $vr17, 0xa0 //b0 b0 b2 b2
vshuf4i.w $vr21, $vr17, 0xf5 //b1 b1 b3 b3
vshuf4i.w $vr18, $vr16, 0x88 //a0 a2 a0 a2
vshuf4i.w $vr19, $vr16, 0xdd //a1 a3 a1 a3
VMADD1 $vr30, $vr18, $vr20, $vr30 //res0 2 4 6
VMADD2 $vr31, $vr19, $vr20, $vr31 //res1 3 5 7
VMADD3 $vr30, $vr19, $vr21, $vr30
VMADD4 $vr31, $vr18, $vr21, $vr31
addi.d A0, A0, 0x10
addi.d B0, B0, 0x10
addi.d L, L, 1
blt L, C2, .L13
.L14:
#if defined(TRMMKERNEL)
vld $vr8, C0, 0x00 //0 1 2 3
vld $vr9, C1, 0x00 //4 5 6 7
vpackev.w $vr10, $vr9, $vr8 //0 4 2 6
vpermi.w $vr10, $vr10, 0xd8 //0 2 4 6
vpackod.w $vr11, $vr9, $vr8 //1 5 3 7
vpermi.w $vr11, $vr11, 0xd8 //1 3 5 7
vfmul.s $vr10, $vr30, $vr28
vfmul.s $vr11, $vr31, $vr28
VNMSUB $vr10, $vr31, $vr29, $vr10
VFMADD $vr11, $vr30, $vr29, $vr11
vilvl.w $vr8, $vr11, $vr10 //0 1 2 3
vilvh.w $vr9, $vr11, $vr10 //4 5 6 7
vst $vr8, C0, 0x00
vst $vr9, C1, 0x00
#else
vld $vr8, C0, 0x00 //0 1 2 3
vld $vr9, C1, 0x00 //4 5 6 7
vpackev.w $vr10, $vr9, $vr8 //0 4 2 6
vpermi.w $vr10, $vr10, 0xd8 //0 2 4 6
vpackod.w $vr11, $vr9, $vr8 //1 5 3 7
vpermi.w $vr11, $vr11, 0xd8 //1 3 5 7
VFMADD $vr10, $vr30, $vr28, $vr10
VFMADD $vr11, $vr31, $vr28, $vr11
VNMSUB $vr10, $vr31, $vr29, $vr10
VFMADD $vr11, $vr30, $vr29, $vr11
vilvl.w $vr8, $vr11, $vr10 //0 1 2 3
vilvh.w $vr9, $vr11, $vr10 //4 5 6 7
vst $vr8, C0, 0x00
vst $vr9, C1, 0x00
#endif
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
sub.d TL, K, OFF
#ifdef LEFT
addi.d TL, TL, -2
#else
addi.d TL, TL, -2
#endif
slli.d C3, TL, 0x04
add.d A0, A0, C3
add.d B0, B0, C3
#endif
#ifdef LEFT
addi.d OFF, OFF, 2
#endif
#endif // #if defined(TRMMKERNEL)
addi.d C0, C0, 0x10
addi.d C1, C1, 0x10
addi.d I, I, 1
blt I, T0, .L11
.L150:
move I, $r0
andi T0, M, 1
beq I, T0, .L18
.L15: /* for(i=0; i<(bm&1); i+=1) */
move B0, B //ptrbb
move TL, K /* TL = bk */
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) ||(!defined(LEFT) && !defined(TRANSA))
move B0, B //ptrbb
#else
slli.d C3, OFF, 0x03
add.d A0, A0, C3
slli.d C3, OFF, 0x04
add.d B0, B, C3
#endif
#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
sub.d TL, K, OFF
#elif defined(LEFT)
addi.d TL, OFF, 1
#else
addi.d TL, OFF, 2
#endif
#endif // #if defined(TRMMKERNEL)
MTC c11, $r0
MTC c12, $r0
MTC c21, $r0
MTC c22, $r0
move L, $r0 //cycle param k
beq L, TL, .L17
blt TL, L, .L17
.L16: /* for (k=0; k<bk; k+=1) */
LD a1, A0, 0x00 //load0
LD b1, B0, 0x00 //load1
MADD1 c11, a1, b1, c11 //res0
LD a2, A0, 0x04 //load2
MADD2 c12, a2, b1, c12 //res1
LD b2, B0, 0x04 //load3
MADD3 c11, a2, b2, c11
MADD4 c12, a1, b2, c12
LD b3, B0, 0x08 //load4
MADD1 c21, a1, b3, c21 //res2
MADD2 c22, a2, b3, c22 //res3
LD b4, B0, 0x0c //load5
MADD3 c21, a2, b4, c21
MADD4 c22, a1, b4, c22
addi.d A0, A0, 0x08
addi.d B0, B0, 0x10
addi.d L, L, 1
blt L, TL, .L16
.L17:
#if defined(TRMMKERNEL)
MUL a5, c11, ALPHA_R
MUL a6, c12, ALPHA_I
SUB a5, a5, a6
ST a5, C0, 0x00
MUL a5, c12, ALPHA_R
MUL a6, c11, ALPHA_I
ADD a6, a5, a6
ST a6, C0, 0x04
MUL b5, c21, ALPHA_R
MUL b6, c22, ALPHA_I
SUB b5, b5, b6
ST b5, C1, 0x00
MUL b5, c22, ALPHA_R
MUL b6, c21, ALPHA_I
ADD b6, b5, b6
ST b6, C1, 0x04
#else
LD a5, C0, 0x00 //C0[0]
LD a6, C0, 0x04 //C0[1]
LD b5, C1, 0x00 //C1[0]
LD b6, C1, 0x04 //C1[1]
MADD a5, c11, ALPHA_R, a5
MADD a6, c12, ALPHA_R, a6
NMSUB a5, c12, ALPHA_I, a5
MADD a6, c11, ALPHA_I, a6
ST a5, C0, 0x00
ST a6, C0, 0x04
MADD b5, c21, ALPHA_R, b5
MADD b6, c22, ALPHA_R, b6
NMSUB b5, c22, ALPHA_I, b5
MADD b6, c21, ALPHA_I, b6
ST b5, C1, 0x00
ST b6, C1, 0x04
#endif
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
sub.d TL, K, OFF
#ifdef LEFT
addi.d TL, TL, -1
#else
addi.d TL, TL, -2
#endif
slli.d C3, TL, 0x03
add.d A0, A0, C3
slli.d C3, TL, 0x04
add.d B0, B0, C3
#endif
#ifdef LEFT
addi.d OFF, OFF, 1
#endif
#endif // #if defined(TRMMKERNEL)
addi.d C0, C0, 0x08
addi.d C1, C1, 0x08
addi.d I, I, 1
blt I, T0, .L15
.L18:
#if defined(TRMMKERNEL) && !defined(LEFT)
addi.d OFF, OFF, 2
#endif
slli.d L, K, 0x04
add.d B, B, L
slli.d I, LDC, 0x02
add.d C, C, I
addi.d J, J, 1
srai.d T0, N, 1
blt J, T0, .L10
.L19:
move J, $r0
andi T0, N, 1
beq J, T0, .L30
.L20: /* for (j=0; j<(bn&1); j+=1) */
#if defined(TRMMKERNEL) && defined(LEFT)
move OFF, OFFSET
#endif
move C0, C
move A0, A //ptrba
move I, $r0
srai.d T0, M, 1
beq I, T0, .L24
.L21: /* for (i=0; i<bm/2; i+=1) */
move B0, B //ptrbb
move TL, K /* TL = bk */
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) ||(!defined(LEFT) && !defined(TRANSA))
move B0, B //ptrbb
#else
slli.d C3, OFF, 0x04
add.d A0, A0, C3
slli.d C3, OFF, 0x03
add.d B0, B, C3
#endif
#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
sub.d TL, K, OFF
#elif defined(LEFT)
addi.d TL, OFF, 2
#else
addi.d TL, OFF, 1
#endif
#endif // #if defined(TRMMKERNEL)
MTC c11, $r0
MTC c12, $r0
MTC c21, $r0
MTC c22, $r0
move L, $r0 //cycle param k
beq L, TL, .L23
blt TL, L, .L23
.L22: /* for (k=0; k<bk; k+=1) */
LD a1, A0, 0x00 //load0
LD b1, B0, 0x00 //load1
MADD1 c11, a1, b1, c11 //res0
LD a2, A0, 0x04 //load2
MADD2 c12, a2, b1, c12 //res1
LD b2, B0, 0x04 //load3
MADD3 c11, a2, b2, c11
MADD4 c12, a1, b2, c12
LD a3, A0, 0x08 //load4
MADD1 c21, a3, b1, c21 //res2
LD a4, A0, 0x0c //load5
MADD2 c22, a4, b1, c22 //res3
MADD3 c21, a4, b2, c21
MADD4 c22, a3, b2, c22
addi.d A0, A0, 0x10
addi.d B0, B0, 0x08
addi.d L, L, 1
blt L, TL, .L22
.L23:
#if defined(TRMMKERNEL)
MUL a5, c11, ALPHA_R
MUL a6, c12, ALPHA_I
SUB a5, a5, a6
ST a5, C0, 0x00
MUL a5, c12, ALPHA_R
MUL a6, c11, ALPHA_I
ADD a6, a5, a6
ST a6, C0, 0x04
MUL a7, c21, ALPHA_R
MUL a8, c22, ALPHA_I
SUB a7, a7, a8
ST a7, C0, 0x08
MUL a7, c22, ALPHA_R
MUL a8, c21, ALPHA_I
ADD a8, a7, a8
ST a8, C0, 0x0c
#else
LD a5, C0, 0x00 //C0[0]
LD a6, C0, 0x04 //C0[1]
LD a7, C0, 0x08 //C0[2]
LD a8, C0, 0x0c //C0[3]
MADD a5, c11, ALPHA_R, a5
MADD a6, c12, ALPHA_R, a6
NMSUB a5, c12, ALPHA_I, a5
MADD a6, c11, ALPHA_I, a6
MADD a7, c21, ALPHA_R, a7
MADD a8, c22, ALPHA_R, a8
NMSUB a7, c22, ALPHA_I, a7
MADD a8, c21, ALPHA_I, a8
ST a5, C0, 0x00
ST a6, C0, 0x04
ST a7, C0, 0x08
ST a8, C0, 0x0c
#endif
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
sub.d TL, K, OFF
#ifdef LEFT
addi.d TL, TL, -2
#else
addi.d TL, TL, -1
#endif
slli.d C3, TL, 0x04
add.d A0, A0, C3
slli.d C3, TL, 0x03
add.d B0, B0, C3
#endif
#ifdef LEFT
addi.d OFF, OFF, 2
#endif
#endif // #if defined(TRMMKERNEL)
addi.d C0, C0, 0x10
addi.d I, I, 1
blt I, T0, .L21
.L24:
move I, $r0
andi T1, M, 1 //bm&1
beq I, T1, .L28
.L25: /* for (i=0; i<(bm&1); i+=1) */
move B0, B //ptrbb
move TL, K /* TL = bk */
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) ||(!defined(LEFT) && !defined(TRANSA))
move B0, B //ptrbb
#else
slli.d C3, OFF, 0x03
add.d A0, A0, C3
add.d B0, B, C3
#endif
#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
sub.d TL, K, OFF
#elif defined(LEFT)
addi.d TL, OFF, 1
#else
addi.d TL, OFF, 1
#endif
#endif // #if defined(TRMMKERNEL)
MTC c11, $r0
MTC c12, $r0
move L, $r0 //cycle param k
beq L, TL, .L27
blt TL, L, .L27
.L26: /* for (k=0; k<bk; k+=1) */
LD a1, A0, 0x00 //load0
LD b1, B0, 0x00 //load1
MADD1 c11, a1, b1, c11 //res0
LD a2, A0, 0x04 //load2
MADD2 c12, a2, b1, c12 //res1
LD b2, B0, 0x04 //load3
MADD3 c11, a2, b2, c11
MADD4 c12, a1, b2, c12
addi.d A0, A0, 0x08
addi.d B0, B0, 0x08
addi.d L, L, 1
blt L, TL, .L26
.L27:
#if defined(TRMMKERNEL)
MUL a5, c11, ALPHA_R
MUL a6, c12, ALPHA_I
SUB a5, a5, a6
ST a5, C0, 0x00
MUL a5, c12, ALPHA_R
MUL a6, c11, ALPHA_I
ADD a6, a5, a6
ST a6, C0, 0x04
#else
LD a5, C0, 0x00 //C0[0]
LD a6, C0, 0x04 //C0[1]
MADD a5, c11, ALPHA_R, a5
MADD a6, c12, ALPHA_R, a6
NMSUB a5, c12, ALPHA_I, a5
MADD a6, c11, ALPHA_I, a6
ST a5, C0, 0x00
ST a6, C0, 0x04
#endif
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
sub.d TL, K, OFF
#ifdef LEFT
addi.d TL, TL, -1
#else
addi.d TL, TL, -1
#endif
slli.d C3, TL, 0x03
add.d A0, A0, C3
add.d B0, B0, C3
#endif
#ifdef LEFT
addi.d OFF, OFF, 1
#endif
#endif // #if defined(TRMMKERNEL)
addi.d C0, C0, 0x08
addi.d I, I, 1
blt I, T1, .L25
.L28:
slli.d L, K, 3
add.d B, B, L
slli.d I, LDC, 1
add.d C, C, I
addi.d J, J, 1
andi T0, N, 1
blt J, T0, .L20
.L30:
LDARG $r23, $sp, 0
LDARG $r24, $sp, 8
LDARG $r25, $sp, 16
LDARG $r26, $sp, 24
LDARG $r27, $sp, 32
LD $f23, $sp, 40
LD $f24, $sp, 48
LD $f25, $sp, 56
LD $f26, $sp, 64
LD $f27, $sp, 72
LD $f28, $sp, 80
LD $f29, $sp, 88
LD $f30, $sp, 96
LD $f31, $sp, 104
addi.d $sp, $sp, 128
jirl $r0, $r1, 0x0
EPILOGUE
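
For reference, a hedged C model of the offset bookkeeping the #if defined(TRMMKERNEL) blocks in this kernel implement for the 2x2 complex micro-tile; left/transa stand in for the LEFT/TRANSA build flags, and the names follow the generic kernels (an illustrative sketch, not the actual implementation):

static long trmm_setup(long bk, long off, int left, int transa,
                       float **ptrba, float **ptrbb, float *bb)
{
    if ((left && transa) || (!left && !transa)) {
        *ptrbb = bb;                  /* use the packed B panel from its start */
    } else {
        *ptrba += off * 2 * 2;        /* skip off k-steps: 2 complex = 4 floats */
        *ptrbb  = bb + off * 2 * 2;   /* per step for a width-2 panel           */
    }
    if ((left && !transa) || (!left && transa))
        return bk - off;              /* trailing part of the triangle */
    return off + 2;                   /* off + MR (== off + NR == 2)   */
}

After each tile the kernel recomputes the remaining count the same way to advance the panel pointers, and bumps OFF by MR (or NR) as the #ifdef LEFT blocks show.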

View File

@@ -0,0 +1,812 @@
/*******************************************************************************
Copyright (c) 2023, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#define ASSEMBLER
#include "common.h"
/* Function parameters */
#define M $r4 // param 1: bm
#define N $r5 // param 2: bn
#define K $r6 // param 3: bk
#define ALPHA_R $f0 // param 4: alphar
#define ALPHA_I $f1 // param 5: alphai
#define A $r7 // param 6: ba
#define B $r8 // param 7: bb
#define C $r9 // param 8: bc
#define LDC $r10 // param 9: ldc
#if defined (TRMMKERNEL)
#define OFFSET $r11 // param 10: offset
#endif
#define OFF $r26
#define I $r12
#define J $r13
#define L $r14
#define TL $r15
#define A0 $r16
#define B0 $r17
#define C0 $r18
#define C1 $r19
#define C2 $r20
#define C3 $r23
#define T0 $r24
#define T1 $r25
#define a1 $f2
#define a2 $f3
#define a3 $f4
#define a4 $f5
#define a5 $f6
#define a6 $f7
#define a7 $f8
#define a8 $f9
#define b1 $f10
#define b2 $f11
#define b3 $f12
#define b4 $f13
#define b5 $f14
#define b6 $f15
#define b7 $f16
#define b8 $f17
#define c11 $f18
#define c12 $f19
#define c21 $f20
#define c22 $f21
#define c31 $f22
#define c32 $f23
#define c41 $f24
#define c42 $f25
/* LSX vectors */
#define U0 $vr30
#define U1 $vr31
#define U2 $vr2
#define U3 $vr3
#define U4 $vr4
#define U5 $vr5
#define U6 $vr6
#define U7 $vr7
#define U8 $vr8
#define U9 $vr9
#define U10 $vr10
#define U11 $vr11
#define U12 $vr12
#define U13 $vr13
#define U14 $vr14
#define U15 $vr15
#define D0 $vr16
#define D1 $vr17
#define D2 $vr18
#define D3 $vr19
#define D4 $vr20
#define D5 $vr21
#define D6 $vr22
#define D7 $vr23
#define D8 $vr24
#define D9 $vr25
#define D10 $vr26
#define D11 $vr27
#define VALPHAR $vr28
#define VALPHAI $vr29
#if defined(NN) || defined(NT) || defined(TN) || defined(TT)
#define VMADD1 VFMADD
#define VMADD2 VFMADD
#define VMADD3 VNMSUB
#define VMADD4 VFMADD
#define MADD1 MADD
#define MADD2 MADD
#define MADD3 NMSUB
#define MADD4 MADD
#endif
#if defined(NR) || defined(NC) || defined(TR) || defined(TC)
#define VMADD1 VFMADD
#define VMADD2 VFMADD
#define VMADD3 VFMADD
#define VMADD4 VNMSUB
#define MADD1 MADD
#define MADD2 MADD
#define MADD3 MADD
#define MADD4 NMSUB
#endif
#if defined(RN) || defined(RT) || defined(CN) || defined(CT)
#define VMADD1 VFMADD
#define VMADD2 VNMSUB
#define VMADD3 VFMADD
#define VMADD4 VFMADD
#define MADD1 MADD
#define MADD2 NMSUB
#define MADD3 MADD
#define MADD4 MADD
#endif
#if defined(RR) || defined(RC) || defined(CR) || defined(CC)
#define VMADD1 VFMADD
#define VMADD2 VNMSUB
#define VMADD3 VNMSUB
#define VMADD4 VNMSUB
#define MADD1 MADD
#define MADD2 NMSUB
#define MADD3 NMSUB
#define MADD4 NMSUB
#endif
PROLOGUE
addi.d $sp, $sp, -128
SDARG $r23, $sp, 0
SDARG $r24, $sp, 8
SDARG $r25, $sp, 16
SDARG $r26, $sp, 24
SDARG $r27, $sp, 32
ST $f23, $sp, 40
ST $f24, $sp, 48
ST $f25, $sp, 56
ST $f26, $sp, 64
ST $f27, $sp, 72
ST $f28, $sp, 80
ST $f29, $sp, 88
ST $f30, $sp, 96
ST $f31, $sp, 104
ST ALPHA_R,$sp, 112
ST ALPHA_I,$sp, 120
vldrepl.w VALPHAR, $sp, 112
vldrepl.w VALPHAI, $sp, 120
#if defined (TRMMKERNEL) && !defined(LEFT)
sub.d OFF, $r0, OFFSET
#else
xor OFF, OFF, OFF
#endif
slli.d LDC, LDC, 2
move J, $r0
srai.d T0, N, 1
beq J, T0, .L19
.L10: /* for(j=0; j<bn/2; j+=1) */
move C0, C
slli.d TL, LDC, 1
add.d C1, C0, TL
move A0, A //ptrba
#if defined(TRMMKERNEL) && defined(LEFT)
move OFF, OFFSET
#endif
move I, $r0
srai.d T0, M, 1
beq I, T0, .L150
.L11: /* for(i=0; i<bm/2; i+=1) */
move B0, B //ptrbb
move TL, K /* TL = bk */
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
move B0, B //ptrbb
#else
slli.d C3, OFF, 0x04
add.d A0, A0, C3
add.d B0, B, C3
#endif
#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
sub.d TL, K, OFF //temp
#elif defined(LEFT)
addi.d TL, OFF, 2
#else
addi.d TL, OFF, 2
#endif
#endif // #if defined(TRMMKERNEL)
vxor.v U0, U0, U0
vxor.v U1, U1, U1
move L, $r0 //cycle param k
srai.d C2, TL, 2
beq L, C2, .L130
blt C2, L, .L130
.L12: /* for(k=0; k<bk/4; k+=1) */
vld D0, A0, 0x00 //a0 a1 a2 a3
vld D1, B0, 0x00 //b0 b1 b2 b3
vshuf4i.w D4, D1, 0xa0 //b0 b0 b2 b2
vshuf4i.w D5, D1, 0xf5 //b1 b1 b3 b3
vshuf4i.w D2, D0, 0x88 //a0 a2 a0 a2
vshuf4i.w D3, D0, 0xdd //a1 a3 a1 a3
VMADD1 U0, D2, D4, U0 //res0 2 4 6
VMADD2 U1, D3, D4, U1 //res1 3 5 7
VMADD3 U0, D3, D5, U0
VMADD4 U1, D2, D5, U1
vld D0, A0, 0x10 //a0 a1 a2 a3
vld D1, B0, 0x10 //b0 b1 b2 b3
vshuf4i.w D4, D1, 0xa0 //b0 b0 b2 b2
vshuf4i.w D5, D1, 0xf5 //b1 b1 b3 b3
vshuf4i.w D2, D0, 0x88 //a0 a2 a0 a2
vshuf4i.w D3, D0, 0xdd //a1 a3 a1 a3
VMADD1 U0, D2, D4, U0 //res0 2 4 6
VMADD2 U1, D3, D4, U1 //res1 3 5 7
VMADD3 U0, D3, D5, U0
VMADD4 U1, D2, D5, U1
vld D0, A0, 0x20 //a0 a1 a2 a3
vld D1, B0, 0x20 //b0 b1 b2 b3
vshuf4i.w D4, D1, 0xa0 //b0 b0 b2 b2
vshuf4i.w D5, D1, 0xf5 //b1 b1 b3 b3
vshuf4i.w D2, D0, 0x88 //a0 a2 a0 a2
vshuf4i.w D3, D0, 0xdd //a1 a3 a1 a3
VMADD1 U0, D2, D4, U0 //res0 2 4 6
VMADD2 U1, D3, D4, U1 //res1 3 5 7
VMADD3 U0, D3, D5, U0
VMADD4 U1, D2, D5, U1
vld D0, A0, 0x30 //a0 a1 a2 a3
vld D1, B0, 0x30 //b0 b1 b2 b3
vshuf4i.w D4, D1, 0xa0 //b0 b0 b2 b2
vshuf4i.w D5, D1, 0xf5 //b1 b1 b3 b3
vshuf4i.w D2, D0, 0x88 //a0 a2 a0 a2
vshuf4i.w D3, D0, 0xdd //a1 a3 a1 a3
VMADD1 U0, D2, D4, U0 //res0 2 4 6
VMADD2 U1, D3, D4, U1 //res1 3 5 7
VMADD3 U0, D3, D5, U0
VMADD4 U1, D2, D5, U1
addi.d A0, A0, 0x40
addi.d B0, B0, 0x40
addi.d L, L, 1
blt L, C2, .L12
.L130:
move L, $r0
andi C2, TL, 3
beq L, C2, .L14
.L13: /* for(k=0; k<(bk&3); k+=1) */
vld D0, A0, 0x00 //a0 a1 a2 a3
vld D1, B0, 0x00 //b0 b1 b2 b3
vshuf4i.w D4, D1, 0xa0 //b0 b0 b2 b2
vshuf4i.w D5, D1, 0xf5 //b1 b1 b3 b3
vshuf4i.w D2, D0, 0x88 //a0 a2 a0 a2
vshuf4i.w D3, D0, 0xdd //a1 a3 a1 a3
VMADD1 U0, D2, D4, U0 //res0 2 4 6
VMADD2 U1, D3, D4, U1 //res1 3 5 7
VMADD3 U0, D3, D5, U0
VMADD4 U1, D2, D5, U1
addi.d A0, A0, 0x10
addi.d B0, B0, 0x10
addi.d L, L, 1
blt L, C2, .L13
.L14:
#if defined(TRMMKERNEL)
vld U8, C0, 0x00 //0 1 2 3
vld U9, C1, 0x00 //4 5 6 7
vpackev.w U10, U9, U8 //0 4 2 6
vpermi.w U10, U10, 0xd8 //0 2 4 6
vpackod.w U11, U9, U8 //1 5 3 7
vpermi.w U11, U11, 0xd8 //1 3 5 7
vfmul.s U10, U0, VALPHAR
vfmul.s U11, U1, VALPHAR
VNMSUB U10, U1, VALPHAI, U10
VFMADD U11, U0, VALPHAI, U11
vilvl.w U8, U11, U10 //0 1 2 3
vilvh.w U9, U11, U10 //4 5 6 7
vst U8, C0, 0x00
vst U9, C1, 0x00
#else
vld U8, C0, 0x00 //0 1 2 3
vld U9, C1, 0x00 //4 5 6 7
vpackev.w U10, U9, U8 //0 4 2 6
vpermi.w U10, U10, 0xd8 //0 2 4 6
vpackod.w U11, U9, U8 //1 5 3 7
vpermi.w U11, U11, 0xd8 //1 3 5 7
VFMADD U10, U0, VALPHAR, U10
VFMADD U11, U1, VALPHAR, U11
VNMSUB U10, U1, VALPHAI, U10
VFMADD U11, U0, VALPHAI, U11
vilvl.w U8, U11, U10 //0 1 2 3
vilvh.w U9, U11, U10 //4 5 6 7
vst U8, C0, 0x00
vst U9, C1, 0x00
#endif
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
sub.d TL, K, OFF
#ifdef LEFT
addi.d TL, TL, -2
#else
addi.d TL, TL, -2
#endif
slli.d C3, TL, 0x04
add.d A0, A0, C3
add.d B0, B0, C3
#endif
#ifdef LEFT
addi.d OFF, OFF, 2
#endif
#endif // #if defined(TRMMKERNEL)
addi.d C0, C0, 0x10
addi.d C1, C1, 0x10
addi.d I, I, 1
blt I, T0, .L11
.L150:
move I, $r0
andi T0, M, 1
beq I, T0, .L18
.L15: /* for(i=0; i<(bm&1); i+=1) */
move B0, B //ptrbb
move TL, K /* TL = bk */
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) ||(!defined(LEFT) && !defined(TRANSA))
move B0, B //ptrbb
#else
slli.d C3, OFF, 0x03
add.d A0, A0, C3
slli.d C3, OFF, 0x04
add.d B0, B, C3
#endif
#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
sub.d TL, K, OFF
#elif defined(LEFT)
addi.d TL, OFF, 1
#else
addi.d TL, OFF, 2
#endif
#endif // #if defined(TRMMKERNEL)
MTC c11, $r0
MTC c12, $r0
MTC c21, $r0
MTC c22, $r0
move L, $r0 //cycle param k
beq L, TL, .L17
blt TL, L, .L17
.L16: /* for (k=0; k<bk; k+=1) */
LD a1, A0, 0x00 //load0
LD b1, B0, 0x00 //load1
MADD1 c11, a1, b1, c11 //res0
LD a2, A0, 0x04 //load2
MADD2 c12, a2, b1, c12 //res1
LD b2, B0, 0x04 //load3
MADD3 c11, a2, b2, c11
MADD4 c12, a1, b2, c12
LD b3, B0, 0x08 //load4
MADD1 c21, a1, b3, c21 //res2
MADD2 c22, a2, b3, c22 //res3
LD b4, B0, 0x0c //load5
MADD3 c21, a2, b4, c21
MADD4 c22, a1, b4, c22
addi.d A0, A0, 0x08
addi.d B0, B0, 0x10
addi.d L, L, 1
blt L, TL, .L16
.L17:
#if defined(TRMMKERNEL)
MUL a5, c11, ALPHA_R
MUL a6, c12, ALPHA_I
SUB a5, a5, a6
ST a5, C0, 0x00
MUL a5, c12, ALPHA_R
MUL a6, c11, ALPHA_I
ADD a6, a5, a6
ST a6, C0, 0x04
MUL b5, c21, ALPHA_R
MUL b6, c22, ALPHA_I
SUB b5, b5, b6
ST b5, C1, 0x00
MUL b5, c22, ALPHA_R
MUL b6, c21, ALPHA_I
ADD b6, b5, b6
ST b6, C1, 0x04
#else
LD a5, C0, 0x00 //C0[0]
LD a6, C0, 0x04 //C0[1]
LD b5, C1, 0x00 //C1[0]
LD b6, C1, 0x04 //C1[1]
MADD a5, c11, ALPHA_R, a5
MADD a6, c12, ALPHA_R, a6
NMSUB a5, c12, ALPHA_I, a5
MADD a6, c11, ALPHA_I, a6
ST a5, C0, 0x00
ST a6, C0, 0x04
MADD b5, c21, ALPHA_R, b5
MADD b6, c22, ALPHA_R, b6
NMSUB b5, c22, ALPHA_I, b5
MADD b6, c21, ALPHA_I, b6
ST b5, C1, 0x00
ST b6, C1, 0x04
#endif
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
sub.d TL, K, OFF
#ifdef LEFT
addi.d TL, TL, -1
#else
addi.d TL, TL, -2
#endif
slli.d C3, TL, 0x03
add.d A0, A0, C3
slli.d C3, TL, 0x04
add.d B0, B0, C3
#endif
#ifdef LEFT
addi.d OFF, OFF, 1
#endif
#endif // #if defined(TRMMKERNEL)
addi.d C0, C0, 0x08
addi.d C1, C1, 0x08
addi.d I, I, 1
blt I, T0, .L15
.L18:
#if defined(TRMMKERNEL) && !defined(LEFT)
addi.d OFF, OFF, 2
#endif
slli.d L, K, 0x04
add.d B, B, L
slli.d I, LDC, 0x02
add.d C, C, I
addi.d J, J, 1
srai.d T0, N, 1
blt J, T0, .L10
.L19:
move J, $r0
andi T0, N, 1
beq J, T0, .L30
.L20: /* for (j=0; j<(bn&1); j+=1) */
#if defined(TRMMKERNEL) && defined(LEFT)
move OFF, OFFSET
#endif
move C0, C
move A0, A //ptrba
move I, $r0
srai.d T0, M, 1
beq I, T0, .L24
.L21: /* for (i=0; i<bm/2; i+=1) */
move B0, B //ptrbb
move TL, K /* TL = bk */
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) ||(!defined(LEFT) && !defined(TRANSA))
move B0, B //ptrbb
#else
slli.d C3, OFF, 0x04
add.d A0, A0, C3
slli.d C3, OFF, 0x03
add.d B0, B, C3
#endif
#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
sub.d TL, K, OFF
#elif defined(LEFT)
addi.d TL, OFF, 2
#else
addi.d TL, OFF, 1
#endif
#endif // #if defined(TRMMKERNEL)
MTC c11, $r0
MTC c12, $r0
MTC c21, $r0
MTC c22, $r0
move L, $r0 //cycle param k
beq L, TL, .L23
blt TL, L, .L23
.L22: /* for (k=0; k<bk; k+=1) */
LD a1, A0, 0x00 //load0
LD b1, B0, 0x00 //load1
MADD1 c11, a1, b1, c11 //res0
LD a2, A0, 0x04 //load2
MADD2 c12, a2, b1, c12 //res1
LD b2, B0, 0x04 //load3
MADD3 c11, a2, b2, c11
MADD4 c12, a1, b2, c12
LD a3, A0, 0x08 //load4
MADD1 c21, a3, b1, c21 //res2
LD a4, A0, 0x0c //load5
MADD2 c22, a4, b1, c22 //res3
MADD3 c21, a4, b2, c21
MADD4 c22, a3, b2, c22
addi.d A0, A0, 0x10
addi.d B0, B0, 0x08
addi.d L, L, 1
blt L, TL, .L22
.L23:
#if defined(TRMMKERNEL)
MUL a5, c11, ALPHA_R
MUL a6, c12, ALPHA_I
SUB a5, a5, a6
ST a5, C0, 0x00
MUL a5, c12, ALPHA_R
MUL a6, c11, ALPHA_I
ADD a6, a5, a6
ST a6, C0, 0x04
MUL a7, c21, ALPHA_R
MUL a8, c22, ALPHA_I
SUB a7, a7, a8
ST a7, C0, 0x08
MUL a7, c22, ALPHA_R
MUL a8, c21, ALPHA_I
ADD a8, a7, a8
ST a8, C0, 0x0c
#else
LD a5, C0, 0x00 //C0[0]
LD a6, C0, 0x04 //C0[1]
LD a7, C0, 0x08 //C0[2]
LD a8, C0, 0x0c //C0[3]
MADD a5, c11, ALPHA_R, a5
MADD a6, c12, ALPHA_R, a6
NMSUB a5, c12, ALPHA_I, a5
MADD a6, c11, ALPHA_I, a6
MADD a7, c21, ALPHA_R, a7
MADD a8, c22, ALPHA_R, a8
NMSUB a7, c22, ALPHA_I, a7
MADD a8, c21, ALPHA_I, a8
ST a5, C0, 0x00
ST a6, C0, 0x04
ST a7, C0, 0x08
ST a8, C0, 0x0c
#endif
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
sub.d TL, K, OFF
#ifdef LEFT
addi.d TL, TL, -2
#else
addi.d TL, TL, -1
#endif
slli.d C3, TL, 0x04
add.d A0, A0, C3
slli.d C3, TL, 0x03
add.d B0, B0, C3
#endif
#ifdef LEFT
addi.d OFF, OFF, 2
#endif
#endif // #if defined(TRMMKERNEL)
addi.d C0, C0, 0x10
addi.d I, I, 1
blt I, T0, .L21
.L24:
move I, $r0
andi T1, M, 1 //bm&1
beq I, T1, .L28
.L25: /* for (i=0; i<(bm&1); i+=1) */
move B0, B //ptrbb
move TL, K /* TL = bk */
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) ||(!defined(LEFT) && !defined(TRANSA))
move B0, B //ptrbb
#else
slli.d C3, OFF, 0x03
add.d A0, A0, C3
add.d B0, B, C3
#endif
#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
sub.d TL, K, OFF
#elif defined(LEFT)
addi.d TL, OFF, 1
#else
addi.d TL, OFF, 1
#endif
#endif // #if defined(TRMMKERNEL)
MTC c11, $r0
MTC c12, $r0
move L, $r0 //cycle param k
beq L, TL, .L27
blt TL, L, .L27
.L26: /* for (k=0; k<bk; k+=1) */
LD a1, A0, 0x00 //load0
LD b1, B0, 0x00 //load1
MADD1 c11, a1, b1, c11 //res0
LD a2, A0, 0x04 //load2
MADD2 c12, a2, b1, c12 //res1
LD b2, B0, 0x04 //load3
MADD3 c11, a2, b2, c11
MADD4 c12, a1, b2, c12
addi.d A0, A0, 0x08
addi.d B0, B0, 0x08
addi.d L, L, 1
blt L, TL, .L26
.L27:
#if defined(TRMMKERNEL)
MUL a5, c11, ALPHA_R
MUL a6, c12, ALPHA_I
SUB a5, a5, a6
ST a5, C0, 0x00
MUL a5, c12, ALPHA_R
MUL a6, c11, ALPHA_I
ADD a6, a5, a6
ST a6, C0, 0x04
#else
LD a5, C0, 0x00 //C0[0]
LD a6, C0, 0x04 //C0[1]
MADD a5, c11, ALPHA_R, a5
MADD a6, c12, ALPHA_R, a6
NMSUB a5, c12, ALPHA_I, a5
MADD a6, c11, ALPHA_I, a6
ST a5, C0, 0x00
ST a6, C0, 0x04
#endif
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
sub.d TL, K, OFF
#ifdef LEFT
addi.d TL, TL, -1
#else
addi.d TL, TL, -1
#endif
slli.d C3, TL, 0x03
add.d A0, A0, C3
add.d B0, B0, C3
#endif
#ifdef LEFT
addi.d OFF, OFF, 1
#endif
#endif // #if defined(TRMMKERNEL)
addi.d C0, C0, 0x08
addi.d I, I, 1
blt I, T1, .L25
.L28:
slli.d L, K, 3
add.d B, B, L
slli.d I, LDC, 1
add.d C, C, I
addi.d J, J, 1
andi T0, N, 1
blt J, T0, .L20
.L30:
LDARG $r23, $sp, 0
LDARG $r24, $sp, 8
LDARG $r25, $sp, 16
LDARG $r26, $sp, 24
LDARG $r27, $sp, 32
LD $f23, $sp, 40
LD $f24, $sp, 48
LD $f25, $sp, 56
LD $f26, $sp, 64
LD $f27, $sp, 72
LD $f28, $sp, 80
LD $f29, $sp, 88
LD $f30, $sp, 96
LD $f31, $sp, 104
addi.d $sp, $sp, 128
jirl $r0, $r1, 0x0
EPILOGUE
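
The .L14 store sequences in these kernels deinterleave the accumulated real and imaginary lanes (vpackev/vpackod plus vpermi), apply alpha, and re-interleave (vilvl/vilvh). Per complex element this is the standard update, sketched in C (GEMM path; the TRMM path stores alpha*res without reading C first):

static void alpha_update(float alphar, float alphai,
                         float res_r, float res_i, float *c)
{
    c[0] += alphar * res_r - alphai * res_i;   /* real part      */
    c[1] += alphar * res_i + alphai * res_r;   /* imaginary part */
}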

View File

@@ -0,0 +1,193 @@
/*******************************************************************************
Copyright (c) 2021, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#define ASSEMBLER
#include "common.h"
/* Function parameters */
#define M $r4 // param 1: m
#define N $r5 // param 2: n
#define SRC $r6 // param 3: src
#define LDA $r7 // param 4: lda
#define DST $r8 // param 5: dst
#define I $r9
#define J $r10
#define S1 $r12
#define S2 $r13
#define S3 $r14
#define S4 $r15
#define S5 $r16
#define S6 $r17
#define S7 $r18
#define S8 $r19
#define TD $r20
#define TS $r11
#define TL $r7
#define T0 $r23
#define ZERO $r0
#define F0 $f0
#define F1 $f1
#define F2 $f2
#define F3 $f3
#define F4 $f4
#define F5 $f5
#define F6 $f6
#define F7 $f7
/* LASX vectors */
#define U0 $xr0
#define U1 $xr1
#define U2 $xr2
#define U3 $xr3
#define U4 $xr4
#define U5 $xr5
#define U6 $xr6
#define U7 $xr7
#define D0 $xr8
#define D1 $xr9
#define D2 $xr10
#define D3 $xr11
#define D4 $xr12
#define D5 $xr13
#define D6 $xr14
#define D7 $xr15
#define D8 $xr16
PROLOGUE
addi.d $sp, $sp, -8
SDARG $r23, $sp, 0
move TD, DST //boffset
move TS, SRC //aoffset
slli.d TL, LDA, 0x02 //lda
slli.d TL, TL, 0x01
slli.d T0, TL, 0x01
srai.d I, N, 0x01
beq I, ZERO, .L_N0
.L_J1: /* if (i > 0) I-- */
move S1, TS //a_offset1
add.d S2, TS, TL //a_offset2
srai.d J, M, 0x02
add.d TS, TS, T0
beq J, ZERO, .L_I3
.L_I1: /* if (j > 0) J-- */
xvld U0, S1, 0x00
xvld U1, S1, 0x00 //second copy of the row: xvpermi.q below clobbers U0
xvld U2, S2, 0x00
xvpermi.q U0, U2, 0x02
xvpermi.q U2, U1, 0x31
xvpermi.d U0, U0, 0xd8
xvpermi.d U2, U2, 0xd8
xvst U0, TD, 0x00
xvst U2, TD, 0x20
addi.d S1, S1, 0x20 // a_offset1
addi.d S2, S2, 0x20
addi.d TD, TD, 0x40 // b_offset
addi.d J, J, -1
blt ZERO, J, .L_I1
.L_I3:
andi J, M, 0x03
beq J, ZERO, .L_II20
.L_II1: /* j = (m & 3) if (j > 0) */
fld.s F0, S1, 0x00
fld.s F1, S1, 0x04
fld.s F2, S2, 0x00
fld.s F3, S2, 0x04
fst.s F0, TD, 0x00
fst.s F1, TD, 0x04
fst.s F2, TD, 0x08
fst.s F3, TD, 0x0c
addi.d S1, S1, 0x08
addi.d S2, S2, 0x08
addi.d TD, TD, 0x10
addi.d J, J, -1
blt ZERO, J, .L_II1
.L_II20:
addi.d I, I, -1
blt ZERO, I, .L_J1
.L_N0: /* if(n&1)*/
andi I, N, 0x01
beq ZERO, I, .L_N00
.L_N1:
srai.d J, M, 0x02
beq ZERO, J, .L_N10
.L_N11: /* j = (m >> 2) if (j > 0) */
xvld U0, TS, 0x00
xvst U0, TD, 0x00
addi.d TS, TS, 0x20 // a_offset
addi.d TD, TD, 0x20 // b_offset
addi.d J, J, -1
blt ZERO, J, .L_N11
.L_N10:
andi J, M, 0x03
beq J, ZERO, .L_N00
.L_N12: /* j = (m & 3) if (j > 0) */
fld.s F0, TS, 0x00
fld.s F1, TS, 0x04
fst.s F0, TD, 0x00
fst.s F1, TD, 0x04
addi.d TS, TS, 0x08 // a_offset
addi.d TD, TD, 0x08 // b_offset
addi.d J, J, -1
blt ZERO, J, .L_N12
.L_N00:
LDARG $r23, $sp, 0
addi.d $sp, $sp, 8
jirl $r0, $r1, 0x00
EPILOGUE
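
A hedged C reference for the packing this routine performs, derived from the assembly above (a is read column-major with leading dimension lda; two adjacent columns are interleaved element by element into the packed panel):

#include <complex.h>

void cgemm_ncopy_2_ref(long m, long n, float complex *a, long lda,
                       float complex *b)
{
    long i, j;
    for (j = 0; j + 1 < n; j += 2) {            /* column pairs (.L_J1)    */
        float complex *a1 = a + (j + 0) * lda;
        float complex *a2 = a + (j + 1) * lda;
        for (i = 0; i < m; i++) {
            *b++ = a1[i];                       /* element from column j   */
            *b++ = a2[i];                       /* element from column j+1 */
        }
    }
    if (n & 1) {                                /* leftover column (.L_N1) */
        float complex *a1 = a + (n - 1) * lda;
        for (i = 0; i < m; i++)
            *b++ = a1[i];
    }
}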

View File

@@ -0,0 +1,202 @@
/*******************************************************************************
Copyright (c) 2021, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#define ASSEMBLER
#include "common.h"
/* Function parameters */
#define M $r4 // param 1: m
#define N $r5 // param 2: n
#define SRC $r6 // param 3: src
#define LDA $r7 // param 4: lda
#define DST $r8 // param 5: dst
#define I $r9
#define J $r10
#define S1 $r12
#define S2 $r13
#define S3 $r14
#define S4 $r15
#define S5 $r16
#define S6 $r17
#define S7 $r18
#define S8 $r19
#define TD $r20
#define TS $r11
#define TL $r7
#define T0 $r23
#define ZERO $r0
#define F0 $f0
#define F1 $f1
#define F2 $f2
#define F3 $f3
#define F4 $f4
#define F5 $f5
#define F6 $f6
#define F7 $f7
/* LSX vectors */
#define U0 $vr0
#define U1 $vr1
#define U2 $vr2
#define U3 $vr3
#define U4 $vr4
#define U5 $vr5
#define U6 $vr6
#define U7 $vr7
#define D0 $vr8
#define D1 $vr9
#define D2 $vr10
#define D3 $vr11
#define D4 $vr12
#define D5 $vr13
#define D6 $vr14
#define D7 $vr15
#define D8 $vr16
PROLOGUE
addi.d $sp, $sp, -8
SDARG $r23, $sp, 0
move TD, DST //boffset
move TS, SRC //aoffset
slli.d TL, LDA, 0x02 //lda
slli.d TL, TL, 0x01
slli.d T0, TL, 0x01
srai.d I, N, 0x01
beq I, ZERO, .L_N0
.L_J1: /* if (i > 0) I-- */
move S1, TS //a_offset1
add.d S2, TS, TL //a_offset2
srai.d J, M, 0x02
add.d TS, TS, T0
beq J, ZERO, .L_I3
.L_I1: /* if (j > 0) J-- */
vld U0, S1, 0x00
vld U1, S1, 0x10
vld U2, S2, 0x00
vld U3, S2, 0x10
vand.v D0, U2, U2
vand.v D1, U3, U3
vand.v D2, U2, U2
vand.v D3, U3, U3
vpermi.w D0, U0, 0x44
vpermi.w D2, U0, 0xee
vpermi.w D1, U1, 0x44
vpermi.w D3, U1, 0xee
vst D0, TD, 0x00
vst D2, TD, 0x10
vst D1, TD, 0x20
vst D3, TD, 0x30
addi.d S1, S1, 0x20 // a_offset1
addi.d S2, S2, 0x20
addi.d TD, TD, 0x40 // b_offset
addi.d J, J, -1
blt ZERO, J, .L_I1
.L_I3:
andi J, M, 0x03
beq J, ZERO, .L_II20
.L_II1: /* j = (m & 3) if (j > 0) */
fld.s F0, S1, 0x00
fld.s F1, S1, 0x04
fld.s F2, S2, 0x00
fld.s F3, S2, 0x04
fst.s F0, TD, 0x00
fst.s F1, TD, 0x04
fst.s F2, TD, 0x08
fst.s F3, TD, 0x0c
addi.d S1, S1, 0x08
addi.d S2, S2, 0x08
addi.d TD, TD, 0x10
addi.d J, J, -1
blt ZERO, J, .L_II1
.L_II20:
addi.d I, I, -1
blt ZERO, I, .L_J1
.L_N0: /* if(n&1)*/
andi I, N, 0x01
beq ZERO, I, .L_N00
.L_N1:
srai.d J, M, 0x02
beq ZERO, J, .L_N10
.L_N11: /* j = (m >> 2) if (j > 0) */
vld U0, TS, 0x00
vld U1, TS, 0x10
vst U0, TD, 0x00
vst U1, TD, 0x10
addi.d TS, TS, 0x20 // a_offset
addi.d TD, TD, 0x20 // b_offset
addi.d J, J, -1
blt ZERO, J, .L_N11
.L_N10:
andi J, M, 0x03
beq J, ZERO, .L_N00
.L_N12: /* j = (m & 3) if (j > 0) */
fld.s F0, TS, 0x00
fld.s F1, TS, 0x04
fst.s F0, TD, 0x00
fst.s F1, TD, 0x04
addi.d TS, TS, 0x08 // a_offset
addi.d TD, TD, 0x08 // b_offset
addi.d J, J, -1
blt ZERO, J, .L_N12
.L_N00:
LDARG $r23, $sp, 0
addi.d $sp, $sp, 8
jirl $r0, $r1, 0x00
EPILOGUE

View File

@@ -0,0 +1,218 @@
/*******************************************************************************
Copyright (c) 2021, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#define ASSEMBLER
#include "common.h"
/* Function parameters */
#define M $r4 // param 1: m
#define N $r5 // param 2: n
#define SRC $r6 // param 3: src
#define LDA $r7 // param 4: lda
#define DST $r8 // param 5: dst
#define I $r9
#define J $r10
#define S1 $r12
#define S2 $r13
#define S3 $r14
#define S4 $r15
#define TD $r16
#define TS $r17
#define TL $r7
#define T0 $r18
#define S8 $r19
#define S9 $r20
#define S10 $r23
#define ZERO $r0
#define F0 $f0
#define F1 $f1
#define F2 $f2
#define F3 $f3
#define F4 $f4
#define F5 $f5
#define F6 $f6
#define F7 $f7
/* LASX vectors */
#define U0 $xr0
#define U1 $xr1
#define U2 $xr2
#define U3 $xr3
#define U4 $xr4
#define U5 $xr5
#define U6 $xr6
#define U7 $xr7
#define D0 $xr8
#define D1 $xr9
#define D2 $xr10
#define D3 $xr11
#define D4 $xr12
#define D5 $xr13
#define D6 $xr14
#define D7 $xr15
PROLOGUE
addi.d $sp, $sp, -8
SDARG $r23, $sp, 0
move TS, SRC //aoffset
move TD, DST //boffset
slli.d TL, LDA, 0x02 //lda
slli.d TL, TL, 0x01
ori T0, ZERO, 0x01
andn T0, N, T0
mul.d T0, M, T0
slli.d T0, T0, 0x01
slli.d T0, T0, 0x02
add.d S9, DST, T0 //boffset2
srai.d J, M, 0x01 //j
beq J, ZERO, .L_M1
.L_J1: /* if(j>0) j--*/
move S1, TS //aoffset1
slli.d T0, TL, 0x01
add.d S2, S1, TL //aoffset2
add.d TS, TS, T0
move S8, TD //boffset1
addi.d TD, TD, 0x20
srai.d I, N, 0x02
beq ZERO, I, .L_JN1
.L_JI1: /* if(i>0) i--*/
xvld U0, S1, 0x00
xvld U1, S1, 0x00 //second copy: xvpermi.q below clobbers U0
xvld U2, S2, 0x00
xvpermi.q U0, U2, 0x02
xvpermi.q U2, U1, 0x31
xvst U0, S8, 0x00
slli.d T0, M, 0x04
add.d S8, S8, T0
xvst U2, S8, 0x00
add.d S8, S8, T0
addi.d S1, S1, 0x20
addi.d S2, S2, 0x20
addi.d I, I, -1
blt ZERO, I, .L_JI1
.L_JN1: /* if(n&2) */
andi I, N, 0x02
beq ZERO, I, .L_JN2
vld $vr0, S1, 0x00
vld $vr1, S2, 0x00
vst $vr0, S8, 0x00
vst $vr1, S8, 0x10
addi.d S1, S1, 0x10
addi.d S2, S2, 0x10
.L_JN2: /* if(n&1) */
andi I, N, 0x01
beq ZERO, I, .L_J0
fld.s F0, S1, 0x00
fld.s F1, S1, 0x04
fld.s F2, S2, 0x00
fld.s F3, S2, 0x04
fst.s F0, S9, 0x00
fst.s F1, S9, 0x04
fst.s F2, S9, 0x08
fst.s F3, S9, 0x0c
addi.d S9, S9, 0x10
.L_J0:
addi.d J, J, -1
blt ZERO, J, .L_J1
.L_M1: /* if(m&1) */
andi I, M, 0x01
beq ZERO, I, .L_M0
srai.d I, N, 0x02
beq ZERO, I, .L_M1N1
.L_M1I1: /* if(i>0) */
vld $vr0, TS, 0x00
vld $vr1, TS, 0x10
vst $vr0, TD, 0x00
slli.d T0, M, 0x04
add.d TD, TD, T0
vst $vr1, TD, 0x00
add.d TD, TD, T0
addi.d TS, TS, 0x20
addi.d I, I, -1
blt ZERO, I, .L_M1I1
.L_M1N1: /* if(n&2) */
andi I, N, 0x02
beq ZERO, I, .L_M1N2
vld $vr0, TS, 0x00
vst $vr0, TD, 0x00
addi.d TS, TS, 0x10
.L_M1N2: /* if(n&1) */
andi I, N, 0x01
beq ZERO, I, .L_M0
fld.s F0, TS, 0x00
fld.s F1, TS, 0x04
fst.s F0, S9, 0x00
fst.s F1, S9, 0x04
.L_M0:
LDARG $r23, $sp, 0
addi.d $sp, $sp, 8
jirl $r0, $r1, 0x00
EPILOGUE
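
A hedged C model of the layout this transpose-copy produces, derived from the assembly above (the source is read with n contiguous, i.e. the transposed access pattern; 2x2 complex blocks are written with stride 2*m between block columns, and the odd-n tail is packed at b + 2*m*(n & ~1), the S9/boffset2 area). Function and index names are illustrative:

#include <complex.h>

void cgemm_tcopy_2_ref(long m, long n, float complex *a, long lda,
                       float complex *b)
{
    float complex *btail = b + 2 * m * (n & ~1L);      /* odd-n tail (S9) */
    long i, j;
    for (j = 0; j + 1 < m; j += 2) {                   /* pairs (.L_J1)   */
        for (i = 0; i + 1 < n; i += 2) {
            float complex *blk = b + (i / 2) * 2 * m + (j / 2) * 4;
            blk[0] = a[(j + 0) * lda + i];
            blk[1] = a[(j + 0) * lda + i + 1];
            blk[2] = a[(j + 1) * lda + i];
            blk[3] = a[(j + 1) * lda + i + 1];
        }
        if (n & 1) {                                   /* .L_JN2 tail     */
            *btail++ = a[(j + 0) * lda + n - 1];
            *btail++ = a[(j + 1) * lda + n - 1];
        }
    }
    if (m & 1) {                                       /* .L_M1 tail row  */
        for (i = 0; i + 1 < n; i += 2) {
            float complex *blk = b + (i / 2) * 2 * m + (m / 2) * 4;
            blk[0] = a[(m - 1) * lda + i];
            blk[1] = a[(m - 1) * lda + i + 1];
        }
        if (n & 1)
            *btail = a[(m - 1) * lda + n - 1];
    }
}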

View File

@@ -0,0 +1,218 @@
/*******************************************************************************
Copyright (c) 2021, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#define ASSEMBLER
#include "common.h"
/* Function parameters */
#define M $r4 // param 1: m
#define N $r5 // param 2: n
#define SRC $r6 // param 3: src
#define LDA $r7 // param 4: lda
#define DST $r8 // param 5: dst
#define I $r9
#define J $r10
#define S1 $r12
#define S2 $r13
#define S3 $r14
#define S4 $r15
#define TD $r16
#define TS $r17
#define TL $r7
#define T0 $r18
#define S8 $r19
#define S9 $r20
#define S10 $r23
#define ZERO $r0
#define F0 $f0
#define F1 $f1
#define F2 $f2
#define F3 $f3
#define F4 $f4
#define F5 $f5
#define F6 $f6
#define F7 $f7
/* LSX vectors */
#define U0 $vr0
#define U1 $vr1
#define U2 $vr2
#define U3 $vr3
#define U4 $vr4
#define U5 $vr5
#define U6 $vr6
#define U7 $vr7
#define D0 $vr8
#define D1 $vr9
#define D2 $vr10
#define D3 $vr11
#define D4 $vr12
#define D5 $vr13
#define D6 $vr14
#define D7 $vr15
PROLOGUE
addi.d $sp, $sp, -8
SDARG $r23, $sp, 0
move TS, SRC //aoffset
move TD, DST //boffset
slli.d TL, LDA, 0x02 //lda
slli.d TL, TL, 0x01
ori T0, ZERO, 0x01
andn T0, N, T0
mul.d T0, M, T0
slli.d T0, T0, 0x01
slli.d T0, T0, 0x02
add.d S9, DST, T0 //boffset2
srai.d J, M, 0x01 //j
beq J, ZERO, .L_M1
.L_J1: /* if(j>0) j--*/
move S1, TS //aoffset1
slli.d T0, TL, 0x01
add.d S2, S1, TL //aoffset2
add.d TS, TS, T0
move S8, TD //boffset1
addi.d TD, TD, 0x20
srai.d I, N, 0x02
beq ZERO, I, .L_JN1
.L_JI1: /* if(i>0) i--*/
vld U0, S1, 0x00
vld U1, S1, 0x10
vld U2, S2, 0x00
vld U3, S2, 0x10
vst U0, S8, 0x00
vst U2, S8, 0x10
slli.d T0, M, 0x04
add.d S8, S8, T0
vst U1, S8, 0x00
vst U3, S8, 0x10
add.d S8, S8, T0
addi.d S1, S1, 0x20
addi.d S2, S2, 0x20
addi.d I, I, -1
blt ZERO, I, .L_JI1
.L_JN1: /* if(n&2) */
andi I, N, 0x02
beq ZERO, I, .L_JN2
vld U0, S1, 0x00
vld U1, S2, 0x00
vst U0, S8, 0x00
vst U1, S8, 0x10
addi.d S1, S1, 0x10
addi.d S2, S2, 0x10
.L_JN2: /* if(n&1) */
andi I, N, 0x01
beq ZERO, I, .L_J0
fld.s F0, S1, 0x00
fld.s F1, S1, 0x04
fld.s F2, S2, 0x00
fld.s F3, S2, 0x04
fst.s F0, S9, 0x00
fst.s F1, S9, 0x04
fst.s F2, S9, 0x08
fst.s F3, S9, 0x0c
addi.d S9, S9, 0x10
.L_J0:
addi.d J, J, -1
blt ZERO, J, .L_J1
.L_M1: /* if(m&1) */
andi I, M, 0x01
beq ZERO, I, .L_M0
srai.d I, N, 0x02
beq ZERO, I, .L_M1N1
.L_M1I1: /* if(i>0) */
vld U0, TS, 0x00
vld U1, TS, 0x10
vst U0, TD, 0x00
slli.d T0, M, 0x04
add.d TD, TD, T0
vst U1, TD, 0x00
add.d TD, TD, T0
addi.d TS, TS, 0x20
addi.d I, I, -1
blt ZERO, I, .L_M1I1
.L_M1N1: /* if(n&2) */
andi I, N, 0x02
beq ZERO, I, .L_M1N2
vld U0, TS, 0x00
vst U0, TD, 0x00
addi.d TS, TS, 0x10
.L_M1N2: /* if(n&1) */
andi I, N, 0x01
beq ZERO, I, .L_M0
fld.s F0, TS, 0x00
fld.s F1, TS, 0x04
fst.s F0, S9, 0x00
fst.s F1, S9, 0x04
.L_M0:
LDARG $r23, $sp, 0
addi.d $sp, $sp, 8
jirl $r0, $r1, 0x00
EPILOGUE

View File

@@ -0,0 +1,848 @@
/*******************************************************************************
Copyright (c) 2023, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#define ASSEMBLER
#include "common.h"
/* Function parameters */
#define M $r4 // param 1: bm
#define N $r5 // param 2: bn
#define K $r6 // param 3: bk
#define ALPHA_R $f0 // param 4: alphar
#define ALPHA_I $f1 // param 5: alphai
#define A $r7 // param 6: ba
#define B $r8 // param 7: bb
#define C $r9 // param 8: bc
#define LDC $r10 // param 9: ldc
#if defined (TRMMKERNEL)
#define OFFSET $r11 // param 10: offset
#endif
#define OFF $r26
#define I $r12
#define J $r13
#define L $r14
#define TL $r15
#define A0 $r16
#define B0 $r17
#define C0 $r18
#define C1 $r19
#define C2 $r20
#define C3 $r23
#define T0 $r24
#define T1 $r25
#define a1 $f2
#define a2 $f3
#define a3 $f4
#define a4 $f5
#define a5 $f6
#define a6 $f7
#define a7 $f8
#define a8 $f9
#define b1 $f10
#define b2 $f11
#define b3 $f12
#define b4 $f13
#define b5 $f14
#define b6 $f15
#define b7 $f16
#define b8 $f17
#define c11 $f18
#define c12 $f19
#define c21 $f20
#define c22 $f21
#define c31 $f22
#define c32 $f23
#define c41 $f24
#define c42 $f25
#define c51 $f26
#define c52 $f27
#define c61 $f28
#define c62 $f29
#if defined(NN) || defined(NT) || defined(TN) || defined(TT)
#define MADD1 MADD
#define MADD2 MADD
#define MADD3 NMSUB
#define MADD4 MADD
#endif
#if defined(NR) || defined(NC) || defined(TR) || defined(TC)
#define MADD1 MADD
#define MADD2 MADD
#define MADD3 MADD
#define MADD4 NMSUB
#endif
#if defined(RN) || defined(RT) || defined(CN) || defined(CT)
#define MADD1 MADD
#define MADD2 NMSUB
#define MADD3 MADD
#define MADD4 MADD
#endif
#if defined(RR) || defined(RC) || defined(CR) || defined(CC)
#define MADD1 MADD
#define MADD2 NMSUB
#define MADD3 NMSUB
#define MADD4 NMSUB
#endif
PROLOGUE
addi.d $sp, $sp, -88
SDARG $r23, $sp, 0
SDARG $r24, $sp, 8
SDARG $r25, $sp, 16
SDARG $r26, $sp, 24
ST $f23, $sp, 32
ST $f24, $sp, 40
ST $f25, $sp, 48
ST $f26, $sp, 56
ST $f27, $sp, 64
ST $f28, $sp, 72
ST $f29, $sp, 80
#if defined (TRMMKERNEL) && !defined(LEFT)
sub.d OFF, $r0, OFFSET
#else
xor OFF, OFF, OFF
#endif
slli.d LDC, LDC, BASE_SHIFT
move J, $r0
srai.d T0, N, 1
beq J, T0, .L19
.L10: /* for(j=0; j<bn/2; j+=1) */
move C0, C
slli.d TL, LDC, 1
add.d C1, C0, TL
move A0, A //ptrba
#if defined(TRMMKERNEL) && defined(LEFT)
move OFF, OFFSET
#endif
move I, $r0
srai.d T0, M, 1
beq I, T0, .L150
.L11: /* for(i=0; i<bm/2; i+=1) */
move B0, B //ptrbb
move TL, K /* TL = bk */
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
move B0, B //ptrbb
#else
slli.d C3, OFF, 0x05
add.d A0, A0, C3
add.d B0, B, C3
#endif
#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
sub.d TL, K, OFF //temp
#elif defined(LEFT)
addi.d TL, OFF, 2
#else
addi.d TL, OFF, 2
#endif
#endif // #if defined(TRMMKERNEL)
MTC c11, $r0
MOV c12, c11
MOV c21, c11
MOV c22, c11
MOV c31, c11
MOV c32, c11
MOV c41, c11
MOV c42, c11
move L, $r0 //cycle param k
srai.d C2, TL, 2
beq L, C2, .L130
blt C2, L, .L130
.L12: /* for(k=0; k<bk/4; k+=1) */
LD a1, A0, 0x00 //load0
LD b1, B0, 0x00 //load1
MADD1 c11, a1, b1, c11 //res0
LD a2, A0, 0x08 //load2
MADD2 c12, a2, b1, c12 //res1
LD b2, B0, 0x08 //load3
MADD3 c11, a2, b2, c11
MADD4 c12, a1, b2, c12
LD a3, A0, 0x10 //load4
MADD1 c21, a3, b1, c21 //res2
LD a4, A0, 0x18 //load5
MADD2 c22, a4, b1, c22 //res3
MADD3 c21, a4, b2, c21
MADD4 c22, a3, b2, c22
LD b3, B0, 0x10 //load6
MADD1 c31, a1, b3, c31 //res4
MADD2 c32, a2, b3, c32 //res5
LD b4, B0, 0x18 //load7
MADD3 c31, a2, b4, c31
MADD4 c32, a1, b4, c32
MADD1 c41, a3, b3, c41 //res6
MADD2 c42, a4, b3, c42 //res7
MADD3 c41, a4, b4, c41
MADD4 c42, a3, b4, c42
LD a5, A0, 0x20 //load8
LD b5, B0, 0x20 //load9
MADD1 c11, a5, b5, c11
LD a6, A0, 0x28 //load10
MADD2 c12, a6, b5, c12
LD b6, B0, 0x28 //load11
MADD3 c11, a6, b6, c11
MADD4 c12, a5, b6, c12
LD a7, A0, 0x30 //load12
MADD1 c21, a7, b5, c21
LD a8, A0, 0x38 //load13
MADD2 c22, a8, b5, c22
MADD3 c21, a8, b6, c21
MADD4 c22, a7, b6, c22
LD b7, B0, 0x30 //load14
MADD1 c31, a5, b7, c31
MADD2 c32, a6, b7, c32
LD b8, B0, 0x38 //load15
MADD3 c31, a6, b8, c31
MADD4 c32, a5, b8, c32
MADD1 c41, a7, b7, c41
MADD2 c42, a8, b7, c42
MADD3 c41, a8, b8, c41
MADD4 c42, a7, b8, c42
LD a1, A0, 0x40 //load0
LD b1, B0, 0x40 //load1
MADD1 c11, a1, b1, c11 //res0
LD a2, A0, 0x48 //load2
MADD2 c12, a2, b1, c12 //res1
LD b2, B0, 0x48 //load3
MADD3 c11, a2, b2, c11
MADD4 c12, a1, b2, c12
LD a3, A0, 0x50 //load4
MADD1 c21, a3, b1, c21 //res2
LD a4, A0, 0x58 //load5
MADD2 c22, a4, b1, c22 //res3
MADD3 c21, a4, b2, c21
MADD4 c22, a3, b2, c22
LD b3, B0, 0x50 //load6
MADD1 c31, a1, b3, c31 //res4
MADD2 c32, a2, b3, c32 //res5
LD b4, B0, 0x58 //load7
MADD3 c31, a2, b4, c31
MADD4 c32, a1, b4, c32
MADD1 c41, a3, b3, c41 //res6
MADD2 c42, a4, b3, c42 //res7
MADD3 c41, a4, b4, c41
MADD4 c42, a3, b4, c42
LD a5, A0, 0x60 //load8
LD b5, B0, 0x60 //load9
MADD1 c11, a5, b5, c11
LD a6, A0, 0x68 //load10
MADD2 c12, a6, b5, c12
LD b6, B0, 0x68 //load11
MADD3 c11, a6, b6, c11
MADD4 c12, a5, b6, c12
LD a7, A0, 0x70 //load12
MADD1 c21, a7, b5, c21
LD a8, A0, 0x78 //load13
MADD2 c22, a8, b5, c22
MADD3 c21, a8, b6, c21
MADD4 c22, a7, b6, c22
LD b7, B0, 0x70 //load14
MADD1 c31, a5, b7, c31
MADD2 c32, a6, b7, c32
LD b8, B0, 0x78 //load15
MADD3 c31, a6, b8, c31
MADD4 c32, a5, b8, c32
MADD1 c41, a7, b7, c41
MADD2 c42, a8, b7, c42
MADD3 c41, a8, b8, c41
MADD4 c42, a7, b8, c42
addi.d A0, A0, 0x80
addi.d B0, B0, 0x80
addi.d L, L, 1
blt L, C2, .L12
.L130:
move L, $r0
andi C2, TL, 3
beq L, C2, .L14
.L13: /* for(k=0; k<(bk&3); k+=1) */
LD a1, A0, 0x00 //load0
LD b1, B0, 0x00 //load1
MADD1 c11, a1, b1, c11 //res0
LD a2, A0, 0x08 //load2
MADD2 c12, a2, b1, c12 //res1
LD b2, B0, 0x08 //load3
MADD3 c11, a2, b2, c11
MADD4 c12, a1, b2, c12
LD a3, A0, 0x10 //load4
MADD1 c21, a3, b1, c21 //res2
LD a4, A0, 0x18 //load5
MADD2 c22, a4, b1, c22 //res3
MADD3 c21, a4, b2, c21
MADD4 c22, a3, b2, c22
LD b3, B0, 0x10 //load6
MADD1 c31, a1, b3, c31 //res4
MADD2 c32, a2, b3, c32 //res5
LD b4, B0, 0x18 //load7
MADD3 c31, a2, b4, c31
MADD4 c32, a1, b4, c32
MADD1 c41, a3, b3, c41 //res6
MADD2 c42, a4, b3, c42 //res7
MADD3 c41, a4, b4, c41
MADD4 c42, a3, b4, c42
addi.d A0, A0, 0x20
addi.d B0, B0, 0x20
addi.d L, L, 1
blt L, C2, .L13
.L14:
#if defined(TRMMKERNEL)
MUL a5, c11, ALPHA_R
MUL a6, c12, ALPHA_I
SUB a5, a5, a6
ST a5, C0, 0x00
MUL a5, c12, ALPHA_R
MUL a6, c11, ALPHA_I
ADD a6, a5, a6
ST a6, C0, 0x08
MUL a7, c21, ALPHA_R
MUL a8, c22, ALPHA_I
SUB a7, a7, a8
ST a7, C0, 0x10
MUL a7, c22, ALPHA_R
MUL a8, c21, ALPHA_I
ADD a8, a7, a8
ST a8, C0, 0x18
MUL b5, c31, ALPHA_R
MUL b6, c32, ALPHA_I
SUB b5, b5, b6
ST b5, C1, 0x00
MUL b5, c32, ALPHA_R
MUL b6, c31, ALPHA_I
ADD b6, b5, b6
ST b6, C1, 0x08
MUL b7, c41, ALPHA_R
MUL b8, c42, ALPHA_I
SUB b7, b7, b8
ST b7, C1, 0x10
MUL b7, c42, ALPHA_R
MUL b8, c41, ALPHA_I
ADD b8, b7, b8
ST b8, C1, 0x18
#else
LD a5, C0, 0x00 //C0[0]
LD a6, C0, 0x08 //C0[1]
LD a7, C0, 0x10 //C0[2]
LD a8, C0, 0x18 //C0[3]
LD b5, C1, 0x00 //C1[0]
LD b6, C1, 0x08 //C1[1]
LD b7, C1, 0x10 //C1[2]
LD b8, C1, 0x18 //C1[3]
MADD a5, c11, ALPHA_R, a5
MADD a6, c12, ALPHA_R, a6
NMSUB a5, c12, ALPHA_I, a5
MADD a6, c11, ALPHA_I, a6
ST a5, C0, 0x00
ST a6, C0, 0x08
MADD a7, c21, ALPHA_R, a7
MADD a8, c22, ALPHA_R, a8
NMSUB a7, c22, ALPHA_I, a7
MADD a8, c21, ALPHA_I, a8
ST a7, C0, 0x10
ST a8, C0, 0x18
MADD b5, c31, ALPHA_R, b5
MADD b6, c32, ALPHA_R, b6
NMSUB b5, c32, ALPHA_I, b5
MADD b6, c31, ALPHA_I, b6
ST b5, C1, 0x00
ST b6, C1, 0x08
MADD b7, c41, ALPHA_R, b7
MADD b8, c42, ALPHA_R, b8
NMSUB b7, c42, ALPHA_I, b7
MADD b8, c41, ALPHA_I, b8
ST b7, C1, 0x10
ST b8, C1, 0x18
#endif
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
sub.d TL, K, OFF
#ifdef LEFT
addi.d TL, TL, -2
#else
addi.d TL, TL, -2
#endif
slli.d C3, TL, 0x05
add.d A0, A0, C3
add.d B0, B0, C3
#endif
#ifdef LEFT
addi.d OFF, OFF, 2
#endif
#endif // #if defined(TRMMKERNEL)
addi.d C0, C0, 0x20
addi.d C1, C1, 0x20
addi.d I, I, 1
blt I, T0, .L11
.L150:
move I, $r0
andi T0, M, 1
beq I, T0, .L18
.L15: /* for(i=0; i<(bm&1); i+=1) */
move B0, B //ptrbb
move TL, K /* TL = bk */
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) ||(!defined(LEFT) && !defined(TRANSA))
move B0, B //ptrbb
#else
slli.d C3, OFF, 0x04
add.d A0, A0, C3
slli.d C3, OFF, 0x05
add.d B0, B, C3
#endif
#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
sub.d TL, K, OFF
#elif defined(LEFT)
addi.d TL, OFF, 1
#else
addi.d TL, OFF, 2
#endif
#endif // #if defined(TRMMKERNEL)
MTC c11, $r0
MTC c12, $r0
MTC c21, $r0
MTC c22, $r0
move L, $r0 //cycle param k
beq L, TL, .L17
blt TL, L, .L17
.L16: /* for (k=0; k<bk; k+=1) */
LD a1, A0, 0x00 //load0
LD b1, B0, 0x00 //load1
MADD1 c11, a1, b1, c11 //res0
LD a2, A0, 0x08 //load2
MADD2 c12, a2, b1, c12 //res1
LD b2, B0, 0x08 //load3
MADD3 c11, a2, b2, c11
MADD4 c12, a1, b2, c12
LD b3, B0, 0x10 //load4
MADD1 c21, a1, b3, c21 //res2
MADD2 c22, a2, b3, c22 //res3
LD b4, B0, 0x18 //load5
MADD3 c21, a2, b4, c21
MADD4 c22, a1, b4, c22
addi.d A0, A0, 0x10
addi.d B0, B0, 0x20
addi.d L, L, 1
blt L, TL, .L16
.L17:
#if defined(TRMMKERNEL)
MUL a5, c11, ALPHA_R
MUL a6, c12, ALPHA_I
SUB a5, a5, a6
ST a5, C0, 0x00
MUL a5, c12, ALPHA_R
MUL a6, c11, ALPHA_I
ADD a6, a5, a6
ST a6, C0, 0x08
MUL b5, c21, ALPHA_R
MUL b6, c22, ALPHA_I
SUB b5, b5, b6
ST b5, C1, 0x00
MUL b5, c22, ALPHA_R
MUL b6, c21, ALPHA_I
ADD b6, b5, b6
ST b6, C1, 0x08
#else
LD a5, C0, 0x00 //C0[0]
LD a6, C0, 0x08 //C0[1]
LD b5, C1, 0x00 //C1[0]
LD b6, C1, 0x08 //C1[1]
MADD a5, c11, ALPHA_R, a5
MADD a6, c12, ALPHA_R, a6
NMSUB a5, c12, ALPHA_I, a5
MADD a6, c11, ALPHA_I, a6
ST a5, C0, 0x00
ST a6, C0, 0x08
MADD b5, c21, ALPHA_R, b5
MADD b6, c22, ALPHA_R, b6
NMSUB b5, c22, ALPHA_I, b5
MADD b6, c21, ALPHA_I, b6
ST b5, C1, 0x00
ST b6, C1, 0x08
#endif
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
sub.d TL, K, OFF
#ifdef LEFT
addi.d TL, TL, -1
#else
addi.d TL, TL, -2
#endif
slli.d C3, TL, 0x04
add.d A0, A0, C3
slli.d C3, TL, 0x05
add.d B0, B0, C3
#endif
#ifdef LEFT
addi.d OFF, OFF, 1
#endif
#endif // #if defined(TRMMKERNEL)
addi.d C0, C0, 0x10
addi.d C1, C1, 0x10
addi.d I, I, 1
blt I, T0, .L15
.L18:
#if defined(TRMMKERNEL) && !defined(LEFT)
addi.d OFF, OFF, 2
#endif
slli.d L, K, 0x05
add.d B, B, L
slli.d I, LDC, 0x02
add.d C, C, I
addi.d J, J, 1
srai.d T0, N, 1
blt J, T0, .L10
.L19:
move J, $r0
andi T0, N, 1
beq J, T0, .L30
.L20: /* for (j=0; j<(bn&1); j+=1) */
#if defined(TRMMKERNEL) && defined(LEFT)
move OFF, OFFSET
#endif
move C0, C
move A0, A //ptrba
move I, $r0
srai.d T0, M, 1
beq I, T0, .L24
.L21: /* for (i=0; i<bm/2; i+=1) */
move B0, B //ptrbb
move TL, K /* TL = bk */
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) ||(!defined(LEFT) && !defined(TRANSA))
move B0, B //ptrbb
#else
slli.d C3, OFF, 0x05
add.d A0, A0, C3
slli.d C3, OFF, 0x04
add.d B0, B, C3
#endif
#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
sub.d TL, K, OFF
#elif defined(LEFT)
addi.d TL, OFF, 2
#else
addi.d TL, OFF, 1
#endif
#endif // #if defined(TRMMKERNEL)
MTC c11, $r0
MTC c12, $r0
MTC c21, $r0
MTC c22, $r0
move L, $r0 //cycle param k
beq L, TL, .L23
blt TL, L, .L23
.L22: /* for (k=0; k<bk; k+=1) */
LD a1, A0, 0x00 //load0
LD b1, B0, 0x00 //load1
MADD1 c11, a1, b1, c11 //res0
LD a2, A0, 0x08 //load2
MADD2 c12, a2, b1, c12 //res1
LD b2, B0, 0x08 //load3
MADD3 c11, a2, b2, c11
MADD4 c12, a1, b2, c12
LD a3, A0, 0x10 //load4
MADD1 c21, a3, b1, c21 //res2
LD a4, A0, 0x18 //load5
MADD2 c22, a4, b1, c22 //res3
MADD3 c21, a4, b2, c21
MADD4 c22, a3, b2, c22
addi.d A0, A0, 0x20
addi.d B0, B0, 0x10
addi.d L, L, 1
blt L, TL, .L22
.L23:
#if defined(TRMMKERNEL)
MUL a5, c11, ALPHA_R
MUL a6, c12, ALPHA_I
SUB a5, a5, a6
ST a5, C0, 0x00
MUL a5, c12, ALPHA_R
MUL a6, c11, ALPHA_I
ADD a6, a5, a6
ST a6, C0, 0x08
MUL a7, c21, ALPHA_R
MUL a8, c22, ALPHA_I
SUB a7, a7, a8
ST a7, C0, 0x10
MUL a7, c22, ALPHA_R
MUL a8, c21, ALPHA_I
ADD a8, a7, a8
ST a8, C0, 0x18
#else
LD a5, C0, 0x00 //C0[0]
LD a6, C0, 0x08 //C0[1]
LD a7, C0, 0x10 //C0[2]
LD a8, C0, 0x18 //C0[3]
MADD a5, c11, ALPHA_R, a5
MADD a6, c12, ALPHA_R, a6
NMSUB a5, c12, ALPHA_I, a5
MADD a6, c11, ALPHA_I, a6
MADD a7, c21, ALPHA_R, a7
MADD a8, c22, ALPHA_R, a8
NMSUB a7, c22, ALPHA_I, a7
MADD a8, c21, ALPHA_I, a8
ST a5, C0, 0x00
ST a6, C0, 0x08
ST a7, C0, 0x10
ST a8, C0, 0x18
#endif
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
sub.d TL, K, OFF
#ifdef LEFT
addi.d TL, TL, -2
#else
addi.d TL, TL, -1
#endif
slli.d C3, TL, 0x05
add.d A0, A0, C3
slli.d C3, TL, 0x04
add.d B0, B0, C3
#endif
#ifdef LEFT
addi.d OFF, OFF, 2
#endif
#endif // #if defined(TRMMKERNEL)
addi.d C0, C0, 0x20
addi.d I, I, 1
blt I, T0, .L21
.L24:
move I, $r0
andi T1, M, 1 //bm&1
beq I, T1, .L28
.L25: /* for (i=0; i<(bm&1); i+=1) */
move B0, B //ptrbb
move TL, K /* TL = bk */
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) ||(!defined(LEFT) && !defined(TRANSA))
move B0, B //ptrbb
#else
slli.d C3, OFF, 0x04
add.d A0, A0, C3
add.d B0, B, C3
#endif
#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
sub.d TL, K, OFF
#elif defined(LEFT)
addi.d TL, OFF, 1
#else
addi.d TL, OFF, 1
#endif
#endif // #if defined(TRMMKERNEL)
MTC c11, $r0
MTC c12, $r0
move L, $r0 //cycle param k
beq L, TL, .L27
blt TL, L, .L27
.L26: /* for (k=0; k<bk; k+=1) */
LD a1, A0, 0x00 //load0
LD b1, B0, 0x00 //load1
MADD1 c11, a1, b1, c11 //res0
LD a2, A0, 0x08 //load2
MADD2 c12, a2, b1, c12 //res1
LD b2, B0, 0x08 //load3
MADD3 c11, a2, b2, c11
MADD4 c12, a1, b2, c12
addi.d A0, A0, 0x10
addi.d B0, B0, 0x10
addi.d L, L, 1
blt L, TL, .L26
.L27:
#if defined(TRMMKERNEL)
MUL a5, c11, ALPHA_R
MUL a6, c12, ALPHA_I
SUB a5, a5, a6
ST a5, C0, 0x00
MUL a5, c12, ALPHA_R
MUL a6, c11, ALPHA_I
ADD a6, a5, a6
ST a6, C0, 0x08
#else
LD a5, C0, 0x00 //C0[0]
LD a6, C0, 0x08 //C0[1]
MADD a5, c11, ALPHA_R, a5
MADD a6, c12, ALPHA_R, a6
NMSUB a5, c12, ALPHA_I, a5
MADD a6, c11, ALPHA_I, a6
ST a5, C0, 0x00
ST a6, C0, 0x08
#endif
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
sub.d TL, K, OFF
#ifdef LEFT
addi.d TL, TL, -1
#else
addi.d TL, TL, -1
#endif
slli.d C3, TL, 0x04
add.d A0, A0, C3
add.d B0, B0, C3
#endif
#ifdef LEFT
addi.d OFF, OFF, 1
#endif
#endif // #if defined(TRMMKERNEL)
addi.d C0, C0, 0x10
addi.d I, I, 1
blt I, T1, .L25
.L28:
slli.d L, K, 4
add.d B, B, L
slli.d I, LDC, 1
add.d C, C, I
addi.d J, J, 1
andi T0, N, 1
blt J, T0, .L20
.L30:
LDARG $r23, $sp, 0
LDARG $r24, $sp, 8
LDARG $r25, $sp, 16
LDARG $r26, $sp, 24
LD $f23, $sp, 32
LD $f24, $sp, 40
LD $f25, $sp, 48
LD $f26, $sp, 56
LD $f27, $sp, 64
LD $f28, $sp, 72
LD $f29, $sp, 80
addi.d $sp, $sp, 88
jirl $r0, $r1, 0x0
EPILOGUE
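For orientation, here is a hedged scalar C model (not part of the commit) of the main 2x2 path of the kernel above for the plain NN case, ignoring the TRMM variant; the names mirror the register comments, and FLOAT_C standing in for the build's element type is an assumption:

#include <complex.h>

typedef double _Complex FLOAT_C; /* assumption: element type follows the build */

/* Sketch of the .L10/.L11/.L12 nest: C(:, j..j+1) += alpha * A_panel * B_panel,
 * panels packed two columns/rows wide, ldc counted in complex elements. */
static void gemm_2x2_ref(long bm, long bn, long bk, FLOAT_C alpha,
                         const FLOAT_C *ba, const FLOAT_C *bb,
                         FLOAT_C *c, long ldc)
{
    for (long j = 0; j + 1 < bn; j += 2) {           /* .L10 */
        const FLOAT_C *pa = ba;                      /* A0 = ptrba */
        for (long i = 0; i + 1 < bm; i += 2) {       /* .L11 */
            const FLOAT_C *pb = bb + j * bk;         /* B0 = ptrbb */
            FLOAT_C c11 = 0, c21 = 0, c31 = 0, c41 = 0;
            for (long k = 0; k < bk; k++) {          /* .L12 body + .L13 tail */
                FLOAT_C a1 = pa[2 * k], a2 = pa[2 * k + 1];
                FLOAT_C b1 = pb[2 * k], b2 = pb[2 * k + 1];
                c11 += a1 * b1;  c21 += a2 * b1;     /* column j   */
                c31 += a1 * b2;  c41 += a2 * b2;     /* column j+1 */
            }
            c[j * ldc + i]           += alpha * c11; /* the MADD/NMSUB epilogue */
            c[j * ldc + i + 1]       += alpha * c21;
            c[(j + 1) * ldc + i]     += alpha * c31;
            c[(j + 1) * ldc + i + 1] += alpha * c41;
            pa += 2 * bk;
        }
        /* bm&1 and bn&1 edge tiles (.L15, .L20..L26) follow the same pattern. */
    }
}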

View File

@ -0,0 +1,822 @@
/*******************************************************************************
Copyright (c) 2023, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#define ASSEMBLER
#include "common.h"
/* Function parameters */
#define M $r4 // param 1: bm
#define N $r5 // param 2: bn
#define K $r6 // param 3: bk
#define ALPHA_R $f0 // param 4: alphar
#define ALPHA_I $f1 // param 5: alphai
#define A $r7 // param 6: ba
#define B $r8 // param 7: bb
#define C $r9 // param 8: bc
#define LDC $r10 // param 9: ldc
#if defined (TRMMKERNEL)
#define OFFSET $r11 // param 10: offset
#endif
#define OFF $r26
#define I $r12
#define J $r13
#define L $r14
#define TL $r15
#define A0 $r16
#define B0 $r17
#define C0 $r18
#define C1 $r19
#define C2 $r20
#define C3 $r23
#define T0 $r24
#define T1 $r25
#define a1 $f2
#define a2 $f3
#define a3 $f4
#define a4 $f5
#define a5 $f6
#define a6 $f7
#define a7 $f8
#define a8 $f9
#define b1 $f10
#define b2 $f11
#define b3 $f12
#define b4 $f13
#define b5 $f14
#define b6 $f15
#define b7 $f16
#define b8 $f17
#define c11 $f18
#define c12 $f19
#define c21 $f20
#define c22 $f21
#define c31 $f22
#define c32 $f23
#define c41 $f24
#define c42 $f25
/* LASX vectors */
#define U0 $xr30
#define U1 $xr31
#define U2 $xr2
#define U3 $xr3
#define U4 $xr4
#define U5 $xr5
#define U6 $xr6
#define U7 $xr7
#define U8 $xr8
#define U9 $xr9
#define U10 $xr10
#define U11 $xr11
#define U12 $xr12
#define U13 $xr13
#define U14 $xr14
#define U15 $xr15
#define D0 $xr16
#define D1 $xr17
#define D2 $xr18
#define D3 $xr19
#define D4 $xr20
#define D5 $xr21
#define D6 $xr22
#define D7 $xr23
#define D8 $xr24
#define D9 $xr25
#define D10 $xr26
#define D11 $xr27
#define D12 $xr28
#define D13 $xr29
#define VALPHAR $xr28
#define VALPHAI $xr29
#if defined(NN) || defined(NT) || defined(TN) || defined(TT)
#define XVMADD1 XVFMADD
#define XVMADD2 XVFMADD
#define XVMADD3 XVNMSUB
#define XVMADD4 XVFMADD
#define MADD1 MADD
#define MADD2 MADD
#define MADD3 NMSUB
#define MADD4 MADD
#endif
#if defined(NR) || defined(NC) || defined(TR) || defined(TC)
#define XVMADD1 XVFMADD
#define XVMADD2 XVFMADD
#define XVMADD3 XVFMADD
#define XVMADD4 XVNMSUB
#define MADD1 MADD
#define MADD2 MADD
#define MADD3 MADD
#define MADD4 NMSUB
#endif
#if defined(RN) || defined(RT) || defined(CN) || defined(CT)
#define XVMADD1 XVFMADD
#define XVMADD2 XVNMSUB
#define XVMADD3 XVFMADD
#define XVMADD4 XVFMADD
#define MADD1 MADD
#define MADD2 NMSUB
#define MADD3 MADD
#define MADD4 MADD
#endif
#if defined(RR) || defined(RC) || defined(CR) || defined(CC)
#define XVMADD1 XVFMADD
#define XVMADD2 XVNMSUB
#define XVMADD3 XVNMSUB
#define XVMADD4 XVNMSUB
#define MADD1 MADD
#define MADD2 NMSUB
#define MADD3 NMSUB
#define MADD4 NMSUB
#endif
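// XVMADD1..XVMADD4 are the lane-wise LASX counterparts of MADD1..MADD4: the
// same four sign patterns, applied by one xvfmadd/xvfnmsub per step to the
// packed real accumulator (U0) and imaginary accumulator (U1), i.e. four
// complex results at a time. The scalar MADD1..MADD4 still serve the
// bm&1 / bn&1 edge loops further down.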
PROLOGUE
addi.d $sp, $sp, -128
SDARG $r23, $sp, 0
SDARG $r24, $sp, 8
SDARG $r25, $sp, 16
SDARG $r26, $sp, 24
SDARG $r27, $sp, 32
ST $f23, $sp, 40
ST $f24, $sp, 48
ST $f25, $sp, 56
ST $f26, $sp, 64
ST $f27, $sp, 72
ST $f28, $sp, 80
ST $f29, $sp, 88
ST $f30, $sp, 96
ST $f31, $sp, 104
ST ALPHA_R,$sp, 112
ST ALPHA_I,$sp, 120
xvldrepl.d VALPHAR, $sp, 112
xvldrepl.d VALPHAI, $sp, 120
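// alpha is spilled to the stack so xvldrepl.d can broadcast alphar/alphai
// across all four lanes for the vectorized epilogue.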
#if defined (TRMMKERNEL) && !defined(LEFT)
sub.d OFF, $r0, OFFSET
#else
xor OFF, OFF, OFF
#endif
slli.d LDC, LDC, BASE_SHIFT
move J, $r0
srai.d T0, N, 1
beq J, T0, .L19
.L10: /* for(j=0; j<bn/2; j+=1) */
move C0, C
slli.d TL, LDC, 1
add.d C1, C0, TL
move A0, A //ptrba
#if defined(TRMMKERNEL) && defined(LEFT)
move OFF, OFFSET
#endif
move I, $r0
srai.d T0, M, 1
beq I, T0, .L150
.L11: /* for(i=0; i<bm/2; i+=1) */
move B0, B //ptrbb
move TL, K /* TL = bk */
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
move B0, B //ptrbb
#else
slli.d C3, OFF, 0x05
add.d A0, A0, C3
add.d B0, B, C3
#endif
#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
sub.d TL, K, OFF //temp
#elif defined(LEFT)
addi.d TL, OFF, 2
#else
addi.d TL, OFF, 2
#endif
#endif // #if defined(TRMMKERNEL)
xvxor.v U0, U0, U0
move L, $r0 //cycle param k
srai.d C2, TL, 2
xvxor.v U1, U1, U1
beq L, C2, .L130
blt C2, L, .L130
.L12: /* for(k=0; k<bk/4; k+=1) */
xvld D0, A0, 0x00 //a0 a1 a2 a3
xvld D1, B0, 0x00 //b0 b1 b2 b3
xvpermi.d D4, D1, 0xa0 //b0 b0 b2 b2
xvpermi.d D5, D1, 0xf5 //b1 b1 b3 b3
xvpermi.d D2, D0, 0x88 //a0 a2 a0 a2
xvpermi.d D3, D0, 0xdd //a1 a3 a1 a3
XVMADD1 U0, D2, D4, U0 //res0 2 4 6
XVMADD2 U1, D3, D4, U1 //res1 3 5 7
XVMADD3 U0, D3, D5, U0
XVMADD4 U1, D2, D5, U1
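// Lane bookkeeping for the group above: xvpermi.d 0x88/0xdd split the A
// vector into real lanes (a0 a2 a0 a2) and imaginary lanes (a1 a3 a1 a3),
// while 0xa0/0xf5 broadcast the real/imaginary parts of the two B elements;
// U0 therefore accumulates the four real results and U1 the four imaginary
// ones, de-interleaved until the .L14 epilogue repacks them.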
xvld D0, A0, 0x20 //a0 a1 a2 a3
xvld D1, B0, 0x20 //b0 b1 b2 b3
xvpermi.d D4, D1, 0xa0 //b0 b0 b2 b2
xvpermi.d D5, D1, 0xf5 //b1 b1 b3 b3
xvpermi.d D2, D0, 0x88 //a0 a2 a0 a2
xvpermi.d D3, D0, 0xdd //a1 a3 a1 a3
XVMADD1 U0, D2, D4, U0 //res0 2 4 6
XVMADD2 U1, D3, D4, U1 //res1 3 5 7
XVMADD3 U0, D3, D5, U0
XVMADD4 U1, D2, D5, U1
xvld D0, A0, 0x40 //a0 a1 a2 a3
xvld D1, B0, 0x40 //b0 b1 b2 b3
xvpermi.d D4, D1, 0xa0 //b0 b0 b2 b2
xvpermi.d D5, D1, 0xf5 //b1 b1 b3 b3
xvpermi.d D2, D0, 0x88 //a0 a2 a0 a2
xvpermi.d D3, D0, 0xdd //a1 a3 a1 a3
XVMADD1 U0, D2, D4, U0 //res0 2 4 6
XVMADD2 U1, D3, D4, U1 //res1 3 5 7
XVMADD3 U0, D3, D5, U0
XVMADD4 U1, D2, D5, U1
xvld D0, A0, 0x60 //a0 a1 a2 a3
xvld D1, B0, 0x60 //b0 b1 b2 b3
xvpermi.d D4, D1, 0xa0 //b0 b0 b2 b2
xvpermi.d D5, D1, 0xf5 //b1 b1 b3 b3
xvpermi.d D2, D0, 0x88 //a0 a2 a0 a2
xvpermi.d D3, D0, 0xdd //a1 a3 a1 a3
XVMADD1 U0, D2, D4, U0 //res0 2 4 6
XVMADD2 U1, D3, D4, U1 //res1 3 5 7
XVMADD3 U0, D3, D5, U0
XVMADD4 U1, D2, D5, U1
addi.d A0, A0, 0x80
addi.d B0, B0, 0x80
addi.d L, L, 1
blt L, C2, .L12
.L130:
move L, $r0
andi C2, TL, 3
beq L, C2, .L14
.L13: /* for(k=0; k<(bk&3); k+=1) */
xvld D0, A0, 0x00 //a0 a1 a2 a3
xvld D1, B0, 0x00 //b0 b1 b2 b3
xvpermi.d D4, D1, 0xa0 //b0 b0 b2 b2
xvpermi.d D5, D1, 0xf5 //b1 b1 b3 b3
xvpermi.d D2, D0, 0x88 //a0 a2 a0 a2
xvpermi.d D3, D0, 0xdd //a1 a3 a1 a3
XVMADD1 U0, D2, D4, U0 //res0 2 4 6
XVMADD2 U1, D3, D4, U1 //res1 3 5 7
XVMADD3 U0, D3, D5, U0
XVMADD4 U1, D2, D5, U1
addi.d A0, A0, 0x20
addi.d B0, B0, 0x20
addi.d L, L, 1
blt L, C2, .L13
.L14:
#if defined(TRMMKERNEL)
xvld U8, C0, 0x00 //0 1 2 3
xvld U9, C1, 0x00 //4 5 6 7
xvpackev.d U10, U9, U8
xvpermi.d U10, U10, 0xd8 //0 2 4 6
xvpackod.d U11, U9, U8
xvpermi.d U11, U11, 0xd8 //1 3 5 7
xvfmul.d U10, U0, VALPHAR
xvfmul.d U11, U1, VALPHAR
XVNMSUB U10, U1, VALPHAI, U10
XVFMADD U11, U0, VALPHAI, U11
xvand.v U8, U10, U10 //0 2 4 6
xvpermi.q U8, U11, 0x02 //0 2 1 3
xvpermi.d U8, U8, 0xd8 //0 1 2 3
xvand.v U9, U11, U11 //1 3 5 7
xvpermi.q U9, U10, 0x31 //4 6 5 7
xvpermi.d U9, U9, 0xd8 //4 5 6 7
xvst U8, C0, 0x00
xvst U9, C1, 0x00
#else
xvld U8, C0, 0x00 //0 1 2 3
xvld U9, C1, 0x00 //4 5 6 7
xvpackev.d U10, U9, U8
xvpermi.d U10, U10, 0xd8 //0 2 4 6
xvpackod.d U11, U9, U8
xvpermi.d U11, U11, 0xd8 //1 3 5 7
XVFMADD U10, U0, VALPHAR, U10
XVFMADD U11, U1, VALPHAR, U11
XVNMSUB U10, U1, VALPHAI, U10
XVFMADD U11, U0, VALPHAI, U11
xvand.v U8, U10, U10 //0 2 4 6
xvpermi.q U8, U11, 0x02 //0 2 1 3
xvpermi.d U8, U8, 0xd8 //0 1 2 3
xvand.v U9, U11, U11 //1 3 5 7
xvpermi.q U9, U10, 0x31 //4 6 5 7
xvpermi.d U9, U9, 0xd8 //4 5 6 7
xvst U8, C0, 0x00
xvst U9, C1, 0x00
#endif
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
sub.d TL, K, OFF
#ifdef LEFT
addi.d TL, TL, -2
#else
addi.d TL, TL, -2
#endif
slli.d C3, TL, 0x05
add.d A0, A0, C3
add.d B0, B0, C3
#endif
#ifdef LEFT
addi.d OFF, OFF, 2
#endif
#endif // #if defined(TRMMKERNEL)
addi.d C0, C0, 0x20
addi.d C1, C1, 0x20
addi.d I, I, 1
blt I, T0, .L11
.L150:
move I, $r0
andi T0, M, 1
beq I, T0, .L18
.L15: /* for(i=0; i<(bm&1); i+=1) */
move B0, B //ptrbb
move TL, K /* TL = bk */
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) ||(!defined(LEFT) && !defined(TRANSA))
move B0, B //ptrbb
#else
slli.d C3, OFF, 0x04
add.d A0, A0, C3
slli.d C3, OFF, 0x05
add.d B0, B, C3
#endif
#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
sub.d TL, K, OFF
#elif defined(LEFT)
addi.d TL, OFF, 1
#else
addi.d TL, OFF, 2
#endif
#endif // #if defined(TRMMKERNEL)
MTC c11, $r0
MTC c12, $r0
MTC c21, $r0
MTC c22, $r0
move L, $r0 //cycle param k
beq L, TL, .L17
blt TL, L, .L17
.L16: /* for (k=0; k<bk; k+=1) */
LD a1, A0, 0x00 //load0
LD b1, B0, 0x00 //load1
LD a2, A0, 0x08 //load2
LD b2, B0, 0x08 //load3
LD b3, B0, 0x10 //load4
LD b4, B0, 0x18 //load5
MADD1 c11, a1, b1, c11 //res0
MADD2 c12, a2, b1, c12 //res1
MADD3 c11, a2, b2, c11
MADD4 c12, a1, b2, c12
MADD1 c21, a1, b3, c21 //res2
MADD2 c22, a2, b3, c22 //res3
MADD3 c21, a2, b4, c21
MADD4 c22, a1, b4, c22
addi.d A0, A0, 0x10
addi.d B0, B0, 0x20
addi.d L, L, 1
blt L, TL, .L16
.L17:
#if defined(TRMMKERNEL)
MUL a5, c11, ALPHA_R
MUL a6, c12, ALPHA_I
SUB a5, a5, a6
ST a5, C0, 0x00
MUL a5, c12, ALPHA_R
MUL a6, c11, ALPHA_I
ADD a6, a5, a6
ST a6, C0, 0x08
MUL b5, c21, ALPHA_R
MUL b6, c22, ALPHA_I
SUB b5, b5, b6
ST b5, C1, 0x00
MUL b5, c22, ALPHA_R
MUL b6, c21, ALPHA_I
ADD b6, b5, b6
ST b6, C1, 0x08
#else
LD a5, C0, 0x00 //C0[0]
LD a6, C0, 0x08 //C0[1]
LD b5, C1, 0x00 //C1[0]
LD b6, C1, 0x08 //C1[1]
MADD a5, c11, ALPHA_R, a5
MADD a6, c12, ALPHA_R, a6
NMSUB a5, c12, ALPHA_I, a5
MADD a6, c11, ALPHA_I, a6
MADD b5, c21, ALPHA_R, b5
MADD b6, c22, ALPHA_R, b6
NMSUB b5, c22, ALPHA_I, b5
MADD b6, c21, ALPHA_I, b6
ST a5, C0, 0x00
ST a6, C0, 0x08
ST b5, C1, 0x00
ST b6, C1, 0x08
#endif
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
sub.d TL, K, OFF
#ifdef LEFT
addi.d TL, TL, -1
#else
addi.d TL, TL, -2
#endif
slli.d C3, TL, 0x04
add.d A0, A0, C3
slli.d C3, TL, 0x05
add.d B0, B0, C3
#endif
#ifdef LEFT
addi.d OFF, OFF, 1
#endif
#endif // #if defined(TRMMKERNEL)
addi.d C0, C0, 0x10
addi.d C1, C1, 0x10
addi.d I, I, 1
blt I, T0, .L15
.L18:
#if defined(TRMMKERNEL) && !defined(LEFT)
addi.d OFF, OFF, 2
#endif
slli.d L, K, 0x05
add.d B, B, L
slli.d I, LDC, 0x02
add.d C, C, I
addi.d J, J, 1
srai.d T0, N, 1
blt J, T0, .L10
.L19:
move J, $r0
andi T0, N, 1
beq J, T0, .L30
.L20: /* for (j=0; j<(bn&1); j+=1) */
#if defined(TRMMKERNEL) && defined(LEFT)
move OFF, OFFSET
#endif
move C0, C
move A0, A //ptrba
move I, $r0
srai.d T0, M, 1
beq I, T0, .L24
.L21: /* for (i=0; i<bm/2; i+=1) */
move B0, B //ptrbb
move TL, K /* TL = bk */
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) ||(!defined(LEFT) && !defined(TRANSA))
move B0, B //ptrbb
#else
slli.d C3, OFF, 0x05
add.d A0, A0, C3
slli.d C3, OFF, 0x04
add.d B0, B, C3
#endif
#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
sub.d TL, K, OFF
#elif defined(LEFT)
addi.d TL, OFF, 2
#else
addi.d TL, OFF, 1
#endif
#endif // #if defined(TRMMKERNEL)
MTC c11, $r0
MTC c12, $r0
MTC c21, $r0
MTC c22, $r0
move L, $r0 //cycle param k
beq L, TL, .L23
blt TL, L, .L23
.L22: /* for (k=0; k<bk; k+=1) */
LD a1, A0, 0x00 //load0
LD b1, B0, 0x00 //load1
LD a2, A0, 0x08 //load2
LD b2, B0, 0x08 //load3
LD a3, A0, 0x10 //load4
LD a4, A0, 0x18 //load5
MADD1 c11, a1, b1, c11 //res0
MADD2 c12, a2, b1, c12 //res1
MADD3 c11, a2, b2, c11
MADD4 c12, a1, b2, c12
MADD1 c21, a3, b1, c21 //res2
MADD2 c22, a4, b1, c22 //res3
MADD3 c21, a4, b2, c21
MADD4 c22, a3, b2, c22
addi.d A0, A0, 0x20
addi.d B0, B0, 0x10
addi.d L, L, 1
blt L, TL, .L22
.L23:
#if defined(TRMMKERNEL)
MUL a5, c11, ALPHA_R
MUL a6, c12, ALPHA_I
SUB a5, a5, a6
ST a5, C0, 0x00
MUL a5, c12, ALPHA_R
MUL a6, c11, ALPHA_I
ADD a6, a5, a6
ST a6, C0, 0x08
MUL a7, c21, ALPHA_R
MUL a8, c22, ALPHA_I
SUB a7, a7, a8
ST a7, C0, 0x10
MUL a7, c22, ALPHA_R
MUL a8, c21, ALPHA_I
ADD a8, a7, a8
ST a8, C0, 0x18
#else
LD a5, C0, 0x00 //C0[0]
LD a6, C0, 0x08 //C0[1]
LD a7, C0, 0x10 //C0[2]
LD a8, C0, 0x18 //C0[3]
MADD a5, c11, ALPHA_R, a5
MADD a6, c12, ALPHA_R, a6
NMSUB a5, c12, ALPHA_I, a5
MADD a6, c11, ALPHA_I, a6
MADD a7, c21, ALPHA_R, a7
MADD a8, c22, ALPHA_R, a8
NMSUB a7, c22, ALPHA_I, a7
MADD a8, c21, ALPHA_I, a8
ST a5, C0, 0x00
ST a6, C0, 0x08
ST a7, C0, 0x10
ST a8, C0, 0x18
#endif
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
sub.d TL, K, OFF
#ifdef LEFT
addi.d TL, TL, -2
#else
addi.d TL, TL, -1
#endif
slli.d C3, TL, 0x05
add.d A0, A0, C3
slli.d C3, TL, 0x04
add.d B0, B0, C3
#endif
#ifdef LEFT
addi.d OFF, OFF, 2
#endif
#endif // #if defined(TRMMKERNEL)
addi.d C0, C0, 0x20
addi.d I, I, 1
blt I, T0, .L21
.L24:
move I, $r0
andi T1, M, 1 //bm&1
beq I, T1, .L28
.L25: /* for (i=0; i<(bm&1); i+=1) */
move B0, B //ptrbb
move TL, K /* TL = bk */
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) ||(!defined(LEFT) && !defined(TRANSA))
move B0, B //ptrbb
#else
slli.d C3, OFF, 0x04
add.d A0, A0, C3
add.d B0, B, C3
#endif
#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
sub.d TL, K, OFF
#elif defined(LEFT)
addi.d TL, OFF, 1
#else
addi.d TL, OFF, 1
#endif
#endif // #if defined(TRMMKERNEL)
MTC c11, $r0
MTC c12, $r0
move L, $r0 //cycle param k
beq L, TL, .L27
blt TL, L, .L27
.L26: /* for (k=0; k<bk; k+=1) */
LD a1, A0, 0x00 //load0
LD b1, B0, 0x00 //load1
LD a2, A0, 0x08 //load2
LD b2, B0, 0x08 //load3
MADD1 c11, a1, b1, c11 //res0
MADD2 c12, a2, b1, c12 //res1
MADD3 c11, a2, b2, c11
MADD4 c12, a1, b2, c12
addi.d A0, A0, 0x10
addi.d B0, B0, 0x10
addi.d L, L, 1
blt L, TL, .L26
.L27:
#if defined(TRMMKERNEL)
MUL a5, c11, ALPHA_R
MUL a6, c12, ALPHA_I
SUB a5, a5, a6
ST a5, C0, 0x00
MUL a5, c12, ALPHA_R
MUL a6, c11, ALPHA_I
ADD a6, a5, a6
ST a6, C0, 0x08
#else
LD a5, C0, 0x00 //C0[0]
LD a6, C0, 0x08 //C0[1]
MADD a5, c11, ALPHA_R, a5
MADD a6, c12, ALPHA_R, a6
NMSUB a5, c12, ALPHA_I, a5
MADD a6, c11, ALPHA_I, a6
ST a5, C0, 0x00
ST a6, C0, 0x08
#endif
#if defined(TRMMKERNEL)
#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
sub.d TL, K, OFF
#ifdef LEFT
addi.d TL, TL, -1
#else
addi.d TL, TL, -1
#endif
slli.d C3, TL, 0x04
add.d A0, A0, C3
add.d B0, B0, C3
#endif
#ifdef LEFT
addi.d OFF, OFF, 1
#endif
#endif // #if defined(TRMMKERNEL)
addi.d C0, C0, 0x10
addi.d I, I, 1
blt I, T1, .L25
.L28:
slli.d L, K, 4
add.d B, B, L
slli.d I, LDC, 1
add.d C, C, I
addi.d J, J, 1
andi T0, N, 1
blt J, T0, .L20
.L30:
LDARG $r23, $sp, 0
LDARG $r24, $sp, 8
LDARG $r25, $sp, 16
LDARG $r26, $sp, 24
LDARG $r27, $sp, 32
LD $f23, $sp, 40
LD $f24, $sp, 48
LD $f25, $sp, 56
LD $f26, $sp, 64
LD $f27, $sp, 72
LD $f28, $sp, 80
LD $f29, $sp, 88
LD $f30, $sp, 96
LD $f31, $sp, 104
addi.d $sp, $sp, 128
jirl $r0, $r1, 0x0
EPILOGUE
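The .L14 epilogue above re-interleaves the de-interleaved accumulators before storing. As a hedged scalar model (names and layout taken from the lane comments, not part of the commit), the non-TRMM path computes:

/* u0[0..3] = real parts, u1[0..3] = imaginary parts of the four results;
 * c0/c1 each hold two (re, im) pairs in memory order. */
static void zgemm_2x2_epilogue_ref(double *c0, double *c1,
                                   const double u0[4], const double u1[4],
                                   double alpha_r, double alpha_i)
{
    for (int l = 0; l < 2; l++) {
        c0[2 * l]     += alpha_r * u0[l]     - alpha_i * u1[l];
        c0[2 * l + 1] += alpha_r * u1[l]     + alpha_i * u0[l];
        c1[2 * l]     += alpha_r * u0[l + 2] - alpha_i * u1[l + 2];
        c1[2 * l + 1] += alpha_r * u1[l + 2] + alpha_i * u0[l + 2];
    }
}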

View File

@ -0,0 +1,196 @@
/*******************************************************************************
Copyright (c) 2021, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#define ASSEMBLER
#include "common.h"
/* Function parameters */
#define M $r4 // param 1: m
#define N $r5 // param 2: n
#define SRC $r6 // param 3: src
#define LDA $r7 // param 4: lda
#define DST $r8 // param 5: dst
#define I $r9
#define J $r10
#define S1 $r12
#define S2 $r13
#define S3 $r14
#define S4 $r15
#define S5 $r16
#define S6 $r17
#define S7 $r18
#define S8 $r19
#define TD $r20
#define TS $r11
#define TL $r7
#define T0 $r23
#define ZERO $r0
#define F0 $f0
#define F1 $f1
#define F2 $f2
#define F3 $f3
#define F4 $f4
#define F5 $f5
#define F6 $f6
#define F7 $f7
/* LASX vectors */
#define U0 $xr0
#define U1 $xr1
#define U2 $xr2
#define U3 $xr3
#define U4 $xr4
#define U5 $xr5
#define U6 $xr6
#define U7 $xr7
#define D0 $xr8
#define D1 $xr9
#define D2 $xr10
#define D3 $xr11
#define D4 $xr12
#define D5 $xr13
#define D6 $xr14
#define D7 $xr15
#define D8 $xr16
PROLOGUE
addi.d $sp, $sp, -8
SDARG $r23, $sp, 0
move TD, DST //boffset
move TS, SRC //aoffset
slli.d TL, LDA, 0x03 //lda
slli.d TL, TL, 0x01
slli.d T0, TL, 0x01
srai.d I, N, 0x01
beq I, ZERO, .L_N0
.L_J1: /* if (i > 0) I-- */
move S1, TS //a_offset1
add.d S2, TS, TL //a_offset2
srai.d J, M, 0x02
add.d TS, TS, T0
beq J, ZERO, .L_I3
.L_I1: /* if (j > 0) J-- */
xvld U0, S1, 0x00
xvld U1, S1, 0x20
xvld U2, S2, 0x00
xvld U3, S2, 0x20
xvand.v D0, U0, U0
xvand.v D1, U1, U1
xvand.v D2, U2, U2
xvand.v D3, U3, U3
xvpermi.q D0, U2, 0x02
xvpermi.q D2, U0, 0x31
xvpermi.q D1, U3, 0x02
xvpermi.q D3, U1, 0x31
xvst D0, TD, 0x00
xvst D2, TD, 0x20
xvst D1, TD, 0x40
xvst D3, TD, 0x60
addi.d S1, S1, 0x40 // a_offset1
addi.d S2, S2, 0x40
addi.d TD, TD, 0x80 // b_offset
addi.d J, J, -1
blt ZERO, J, .L_I1
.L_I3:
andi J, M, 0x03
beq J, ZERO, .L_II20
.L_II1: /* j = (m & 3) if (j > 0) */
vld $vr0, S1, 0x00
vld $vr1, S2, 0x00
vst $vr0, TD, 0x00
vst $vr1, TD, 0x10
addi.d S1, S1, 0x10
addi.d S2, S2, 0x10
addi.d TD, TD, 0x20
addi.d J, J, -1
blt ZERO, J, .L_II1
.L_II20:
addi.d I, I, -1
blt ZERO, I, .L_J1
.L_N0: /* if(n&1)*/
andi I, N, 0x01
beq ZERO, I, .L_N00
.L_N1:
srai.d J, M, 0x02
beq ZERO, J, .L_N10
.L_N11: /* j = (m >> 2) if (j > 0) */
xvld U0, TS, 0x00
xvld U1, TS, 0x20
xvst U0, TD, 0x00
xvst U1, TD, 0x20
addi.d TS, TS, 0x40 // a_offset
addi.d TD, TD, 0x40 // b_offset
addi.d J, J, -1
blt ZERO, J, .L_N11
.L_N10:
andi J, M, 0x03
beq J, ZERO, .L_N00
.L_N12: /* j = (m & 3) if (j > 0) */
vld $vr0, TS, 0x00
vst $vr0, TD, 0x00
addi.d TS, TS, 0x10 // a_offset
addi.d TD, TD, 0x10 // b_offset
addi.d J, J, -1
blt ZERO, J, .L_N12
.L_N00:
LDARG $r23, $sp, 0
addi.d $sp, $sp, 8
jirl $r0, $r1, 0x00
EPILOGUE
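As a hedged C reference (not part of the commit) for this packing routine: for every column pair it emits the two column entries of each row back to back, and copies a leftover odd column verbatim; a is column-major and lda is counted in complex elements:

#include <complex.h>

static void zgemm_ncopy_2_ref(long m, long n,
                              const double complex *a, long lda,
                              double complex *b)
{
    long j;
    for (j = 0; j + 1 < n; j += 2)        /* .L_J1, vectorized 4 rows at a time */
        for (long i = 0; i < m; i++) {
            *b++ = a[i + j * lda];        /* a_offset1 */
            *b++ = a[i + (j + 1) * lda];  /* a_offset2 */
        }
    if (n & 1)                            /* .L_N1: trailing column, copied as-is */
        for (long i = 0; i < m; i++)
            *b++ = a[i + j * lda];
}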

View File

@ -0,0 +1,212 @@
/*******************************************************************************
Copyright (c) 2021, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#define ASSEMBLER
#include "common.h"
/* Function parameters */
#define M $r4 // param 1: m
#define N $r5 // param 2: n
#define SRC $r6 // param 3: src
#define LDA $r7 // param 4: lda
#define DST $r8 // param 5: dst
#define I $r9
#define J $r10
#define S1 $r12
#define S2 $r13
#define S3 $r14
#define S4 $r15
#define TD $r16
#define TS $r17
#define TL $r7
#define T0 $r18
#define S8 $r19
#define S9 $r20
#define S10 $r23
#define ZERO $r0
#define F0 $f0
#define F1 $f1
#define F2 $f2
#define F3 $f3
#define F4 $f4
#define F5 $f5
#define F6 $f6
#define F7 $f7
/* LASX vectors */
#define U0 $xr0
#define U1 $xr1
#define U2 $xr2
#define U3 $xr3
#define U4 $xr4
#define U5 $xr5
#define U6 $xr6
#define U7 $xr7
#define D0 $xr8
#define D1 $xr9
#define D2 $xr10
#define D3 $xr11
#define D4 $xr12
#define D5 $xr13
#define D6 $xr14
#define D7 $xr15
PROLOGUE
addi.d $sp, $sp, -8
SDARG $r23, $sp, 0
move TS, SRC //aoffset
move TD, DST //boffset
slli.d TL, LDA, 0x03 //lda
slli.d TL, TL, 0x01
ori T0, ZERO, 0x01
andn T0, N, T0
mul.d T0, M, T0
slli.d T0, T0, 0x01
slli.d T0, T0, 0x03
add.d S9, DST, T0 //boffset2
srai.d J, M, 0x01 //j
beq J, ZERO, .L_M1
.L_J1: /* if(j>0) j--*/
move S1, TS //aoffset1
slli.d T0, TL, 0x01
add.d S2, S1, TL //aoffset2
add.d TS, TS, T0
move S8, TD //boffset1
addi.d TD, TD, 0x40
srai.d I, N, 0x02
beq ZERO, I, .L_JN1
.L_JI1: /* if(i>0) i--*/
xvld U0, S1, 0x00
xvld U1, S1, 0x20
xvld U2, S2, 0x00
xvld U3, S2, 0x20
xvst U0, S8, 0x00
xvst U2, S8, 0x20
slli.d T0, M, 0x05
add.d S8, S8, T0
xvst U1, S8, 0x00
xvst U3, S8, 0x20
add.d S8, S8, T0
addi.d S1, S1, 0x40
addi.d S2, S2, 0x40
addi.d I, I, -1
blt ZERO, I, .L_JI1
.L_JN1: /* if(n&2) */
andi I, N, 0x02
beq ZERO, I, .L_JN2
xvld U0, S1, 0x00
xvld U1, S2, 0x00
xvst U0, S8, 0x00
xvst U1, S8, 0x20
addi.d S1, S1, 0x20
addi.d S2, S2, 0x20
.L_JN2: /* if(n&1) */
andi I, N, 0x01
beq ZERO, I, .L_J0
vld $vr0, S1, 0x00
vld $vr1, S2, 0x00
vst $vr0, S9, 0x00
vst $vr1, S9, 0x10
addi.d S9, S9, 0x20
.L_J0:
addi.d J, J, -1
blt ZERO, J, .L_J1
.L_M1: /* if(m&1) */
andi I, M, 0x01
beq ZERO, I, .L_M0
srai.d I, N, 0x02
beq ZERO, I, .L_M1N1
.L_M1I1: /* if(i>0) */
xvld U0, TS, 0x00
xvld U1, TS, 0x20
xvst U0, TD, 0x00
slli.d T0, M, 0x05
add.d TD, TD, T0
xvst U1, TD, 0x00
add.d TD, TD, T0
addi.d TS, TS, 0x40
addi.d I, I, -1
blt ZERO, I, .L_M1I1
.L_M1N1: /* if(n&2) */
andi I, N, 0x02
beq ZERO, I, .L_M1N2
xvld U0, TS, 0x00
xvst U0, TD, 0x00
addi.d TS, TS, 0x20
.L_M1N2: /* if(n&1) */
andi I, N, 0x01
beq ZERO, I, .L_M0
vld $vr0, TS, 0x00
vst $vr0, S9, 0x00
.L_M0:
LDARG $r23, $sp, 0
addi.d $sp, $sp, 8
jirl $r0, $r1, 0x00
EPILOGUE
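Likewise, a hedged C reference (not part of the commit) for this transposed copy, mirroring generic/zgemm_tcopy_2.c: a(i, :) is a contiguous run of n elements with consecutive i separated by lda, 2x2 blocks land in b in column-block order, and an odd trailing column goes to the tail region at b + m*(n & ~1), matching the S9/boffset2 arithmetic above:

#include <complex.h>

static void zgemm_tcopy_2_ref(long m, long n,
                              const double complex *a, long lda,
                              double complex *b)
{
    double complex *btail = b + m * (n & ~1L);      /* S9 / boffset2 */
    long i;
    for (i = 0; i + 1 < m; i += 2) {                /* .L_J1 */
        const double complex *a1 = a + i * lda;     /* aoffset1 */
        const double complex *a2 = a1 + lda;        /* aoffset2 */
        double complex *b1 = b + 2 * i;             /* S8: +4 elements per row pair */
        for (long jj = 0; jj + 1 < n; jj += 2) {
            b1[0] = a1[jj];  b1[1] = a1[jj + 1];
            b1[2] = a2[jj];  b1[3] = a2[jj + 1];
            b1 += 2 * m;                            /* next 2-column block */
        }
        if (n & 1) { *btail++ = a1[n - 1]; *btail++ = a2[n - 1]; }
    }
    if (m & 1) {                                    /* .L_M1 */
        const double complex *a1 = a + i * lda;
        double complex *b1 = b + 2 * i;
        for (long jj = 0; jj + 1 < n; jj += 2) {
            b1[0] = a1[jj];  b1[1] = a1[jj + 1];
            b1 += 2 * m;
        }
        if (n & 1) *btail = a1[n - 1];
    }
}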

View File

@ -2853,13 +2853,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#endif
#define QGEMM_DEFAULT_UNROLL_N 2
-#define CGEMM_DEFAULT_UNROLL_N 4
-#define ZGEMM_DEFAULT_UNROLL_N 4
+#define CGEMM_DEFAULT_UNROLL_N 2
+#define ZGEMM_DEFAULT_UNROLL_N 2
#define XGEMM_DEFAULT_UNROLL_N 1
#define QGEMM_DEFAULT_UNROLL_M 2
-#define CGEMM_DEFAULT_UNROLL_M 1
-#define ZGEMM_DEFAULT_UNROLL_M 1
+#define CGEMM_DEFAULT_UNROLL_M 2
+#define ZGEMM_DEFAULT_UNROLL_M 2
#define XGEMM_DEFAULT_UNROLL_M 1
#define SGEMM_DEFAULT_P 256