Merge pull request #3491 from gxw-loongson/develop

loongarch64: Optimize dgemm_kernel
This commit is contained in:
Martin Kroeker 2021-12-22 08:34:12 +01:00 committed by GitHub
commit 253670383f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 6177 additions and 6 deletions

View File

@ -1 +1,14 @@
# LoongArch64 LASX-optimized DGEMM: hand-written 16x4 kernel plus the
# matching packing (copy) routines for the A and B panels.
DGEMMKERNEL = dgemm_kernel_16x4.S
# M-direction ("inner") panel packing, 16-wide.
DGEMMINCOPY = dgemm_ncopy_16.S
DGEMMITCOPY = dgemm_tcopy_16.S
# N-direction ("outer") panel packing, 4-wide.
DGEMMONCOPY = dgemm_ncopy_4.S
DGEMMOTCOPY = dgemm_tcopy_4.S
DGEMMINCOPYOBJ = dgemm_incopy.o
DGEMMITCOPYOBJ = dgemm_itcopy.o
DGEMMONCOPYOBJ = dgemm_oncopy.o
DGEMMOTCOPYOBJ = dgemm_otcopy.o
# Triangular-solve kernels remain on the generic C implementations.
DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
DTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
DTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,691 @@
/*******************************************************************************
Copyright (c) 2021, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#define ASSEMBLER
#include "common.h"
/* Function parameters */
#define M $r4 // param 1: m
#define N $r5 // param 2: n
#define SRC $r6 // param 3: src
#define LDA $r7 // param 4: lda
#define DST $r8 // param 5: dst
#define I $r9 // inner (M-direction) loop counter
#define J $r10 // outer (N-direction) loop counter
#define S1 $r12 // S1..S16: 16 consecutive lda-strided source pointers
#define S2 $r13
#define S3 $r14
#define S4 $r15
#define S5 $r16
#define S6 $r17
#define S7 $r18
#define S8 $r19
#define S9 $r20
#define S10 $r23
#define S11 $r24
#define S12 $r25
#define S13 $r26
#define S14 $r27
#define S15 $r28
#define S16 $r29
#define TD $r30 // running destination (write) pointer
#define TS $r31 // source block base; advances by 16*lda per outer iteration
/* CAUTION: TL aliases LDA ($r7) and T0 aliases SRC ($r6). Both original
 * values are consumed in the prologue before these aliases are written. */
#define TL $r7 // lda in bytes (lda << 3, sizeof(double) == 8)
#define T0 $r6 // 2 * lda in bytes
#define ZERO $r0
#define F0 $f0 // F0..F7: scalar temporaries for the remainder paths
#define F1 $f1
#define F2 $f2
#define F3 $f3
#define F4 $f4
#define F5 $f5
#define F6 $f6
#define F7 $f7
/* LASX vectors */
#define U0 $xr0 // U0..U15: raw 4-double loads (one per source pointer),
#define U1 $xr1 // also reused as scratch copies during the permutes
#define U2 $xr2
#define U3 $xr3
#define U4 $xr4
#define U5 $xr5
#define U6 $xr6
#define U7 $xr7
#define U8 $xr8
#define U9 $xr9
#define U10 $xr10
#define U11 $xr11
#define U12 $xr12
#define U13 $xr13
#define U14 $xr14
#define U15 $xr15
#define D0 $xr16 // D0..D15: interleaved results; the "// n" tags below give
#define D1 $xr17 // each vector's position in the stored output sequence
#define D2 $xr18
#define D3 $xr19
#define D4 $xr20
#define D5 $xr21
#define D6 $xr22
#define D7 $xr23
#define D8 $xr24
#define D9 $xr25
#define D10 $xr26
#define D11 $xr27
#define D12 $xr28
#define D13 $xr29
#define D14 $xr30
#define D15 $xr31
/*-----------------------------------------------------------------------
 * void CNAME(BLASLONG m, BLASLONG n, FLOAT *src, BLASLONG lda, FLOAT *dst)
 *
 * LASX (256-bit) double-precision copy/pack routine. The outer loop peels
 * N in blocks of 16: S1..S16 walk 16 consecutive lda-strided source
 * vectors. The inner loop consumes 8 doubles from each of the 16 pointers
 * per pass and interleaves them with xvpackev/xvpackod + xvpermi.q (an
 * in-register transpose), so dst receives element-interleaved 16-wide
 * groups. Remainder paths handle the leftover N%16 in chunks of 8/4/2/1
 * vectors and the leftover M%8 one element at a time.
 * NOTE(review): this appears to be dgemm_ncopy_16.S (DGEMMINCOPY in the
 * accompanying Makefile) -- confirm against the generic dgemm_ncopy_16.c
 * reference for the exact expected output layout.
 *---------------------------------------------------------------------*/
PROLOGUE
// Save all callee-saved GPRs (r23-r31) and FPRs (f23-f31); 0x90-byte frame.
addi.d $sp, $sp, -0x90
SDARG $r23, $sp, 0x00
SDARG $r24, $sp, 0x08
SDARG $r25, $sp, 0x10
SDARG $r26, $sp, 0x18
SDARG $r27, $sp, 0x20
SDARG $r28, $sp, 0x28
SDARG $r29, $sp, 0x30
SDARG $r30, $sp, 0x38
SDARG $r31, $sp, 0x40
ST $f23, $sp, 0x48
ST $f24, $sp, 0x50
ST $f25, $sp, 0x58
ST $f26, $sp, 0x60
ST $f27, $sp, 0x68
ST $f28, $sp, 0x70
ST $f29, $sp, 0x78
ST $f30, $sp, 0x80
ST $f31, $sp, 0x88
move TD, DST
move TS, SRC // TS must be copied before T0 (same register as SRC) is written
slli.d TL, LDA, 0x03 // TL = lda * 8 (row stride in bytes)
slli.d T0, TL, 0x01 // T0 = 2 * lda * 8
srai.d J, N, 0x04 // J = N / 16 full 16-vector blocks
beq J, ZERO, .L_N8
.L_J1: /* J-- */
// Set up S1..S16 = TS + k*TL, k = 0..15, then advance TS by 16*lda.
move S1, TS
add.d S2, TS, TL
srai.d I, M, 0x03 // I = M / 8 full 8-element passes
add.d S3, S2, TL
addi.d J, J, -1
add.d S4, S3, TL
add.d S5, S3, T0
add.d S6, S4, T0
add.d S7, S5, T0
add.d S8, S6, T0
add.d S9, S7, T0
add.d S10, S8, T0
add.d S11, S9, T0
add.d S12, S10, T0
add.d S13, S11, T0
add.d S14, S12, T0
add.d S15, S13, T0
add.d S16, S14, T0
add.d TS, S15, T0
beq I, ZERO, .L_I7
.L_I1: /* I-- */
// First half of the pass: doubles 0..3 from each of the 16 pointers.
xvld U0, S1, 0x00
xvld U1, S2, 0x00
xvld U2, S3, 0x00
xvld U3, S4, 0x00
xvld U4, S5, 0x00
xvld U5, S6, 0x00
xvld U6, S7, 0x00
xvld U7, S8, 0x00
xvld U8, S9, 0x00
xvld U9, S10, 0x00
xvld U10, S11, 0x00
xvld U11, S12, 0x00
xvld U12, S13, 0x00
xvld U13, S14, 0x00
xvld U14, S15, 0x00
xvld U15, S16, 0x00
// Pairwise interleave of 64-bit lanes: D(even) = even lanes of the two
// inputs, D(odd) = odd lanes.
xvpackev.d D0, U1, U0
xvpackod.d D1, U1, U0
xvpackev.d D2, U3, U2
xvpackod.d D3, U3, U2
xvpackev.d D4, U5, U4
xvpackod.d D5, U5, U4
xvpackev.d D6, U7, U6
xvpackod.d D7, U7, U6
xvpackev.d D8, U9, U8
xvpackod.d D9, U9, U8
xvpackev.d D10, U11, U10
xvpackod.d D11, U11, U10
xvpackev.d D12, U13, U12
xvpackod.d D13, U13, U12
xvpackev.d D14, U15, U14
xvpackod.d D15, U15, U14
// xvand.v x, y, y is a register copy: preserve D* before xvpermi.q
// overwrites it. xvpermi.q xd, xj, 0x02 -> {low128 = xd.lo, high128 =
// xj.lo}; 0x31 -> {low128 = xj.hi, high128 = xd.hi} (merge the low /
// high 128-bit halves of the two operands).
xvand.v U0, D0, D0
xvpermi.q D0, D2, 0x02 // 0
xvand.v U4, D4, D4
xvpermi.q D4, D6, 0x02 // 1
xvand.v U1, D1, D1
xvpermi.q D1, D3, 0x02 // 4
xvand.v U5, D5, D5
xvpermi.q D5, D7, 0x02 // 5
xvpermi.q D2, U0, 0x31 // 8
xvpermi.q D6, U4, 0x31 // 9
xvpermi.q D3, U1, 0x31 // 12
xvpermi.q D7, U5, 0x31 // 13
xvand.v U8, D8, D8
xvpermi.q D8, D10, 0x02 // 2
xvand.v U12, D12, D12
xvpermi.q D12, D14, 0x02 // 3
xvand.v U9, D9, D9
xvpermi.q D9, D11, 0x02 // 6
xvand.v U13, D13, D13
xvpermi.q D13, D15, 0x02 // 7
xvpermi.q D10, U8, 0x31 // 10
xvpermi.q D14, U12, 0x31 // 11
xvpermi.q D11, U9, 0x31 // 14
xvpermi.q D15, U13, 0x31 // 15
// Store the 16 result vectors in interleaved order (tags above).
xvst D0, TD, 0x00 // 0
xvst D4, TD, 0x20 // 1
xvst D8, TD, 0x40 // 2
xvst D12, TD, 0x60 // 3
xvst D1, TD, 0x80 // 4
xvst D5, TD, 0xA0 // 5
xvst D9, TD, 0xC0 // 6
xvst D13, TD, 0xE0 // 7
addi.d TD, TD, 0x100
xvst D2, TD, 0x00 // 8
xvst D6, TD, 0x20 // 9
xvst D10, TD, 0x40 // 10
xvst D14, TD, 0x60 // 11
xvst D3, TD, 0x80 // 12
xvst D7, TD, 0xA0 // 13
xvst D11, TD, 0xC0 // 14
xvst D15, TD, 0xE0 // 15
addi.d TD, TD, 0x100
// Second half of the pass: identical interleave for doubles 4..7
// (byte offset 0x20) of each of the 16 pointers.
xvld U0, S1, 0x20
xvld U1, S2, 0x20
xvld U2, S3, 0x20
xvld U3, S4, 0x20
xvld U4, S5, 0x20
xvld U5, S6, 0x20
xvld U6, S7, 0x20
xvld U7, S8, 0x20
xvld U8, S9, 0x20
xvld U9, S10, 0x20
xvld U10, S11, 0x20
xvld U11, S12, 0x20
xvld U12, S13, 0x20
xvld U13, S14, 0x20
xvld U14, S15, 0x20
xvld U15, S16, 0x20
xvpackev.d D0, U1, U0
xvpackod.d D1, U1, U0
xvpackev.d D2, U3, U2
xvpackod.d D3, U3, U2
xvpackev.d D4, U5, U4
xvpackod.d D5, U5, U4
xvpackev.d D6, U7, U6
xvpackod.d D7, U7, U6
xvpackev.d D8, U9, U8
xvpackod.d D9, U9, U8
xvpackev.d D10, U11, U10
xvpackod.d D11, U11, U10
xvpackev.d D12, U13, U12
xvpackod.d D13, U13, U12
xvpackev.d D14, U15, U14
xvpackod.d D15, U15, U14
xvand.v U0, D0, D0
xvpermi.q D0, D2, 0x02 // 0
xvand.v U4, D4, D4
xvpermi.q D4, D6, 0x02 // 1
xvand.v U1, D1, D1
xvpermi.q D1, D3, 0x02 // 4
xvand.v U5, D5, D5
xvpermi.q D5, D7, 0x02 // 5
xvpermi.q D2, U0, 0x31 // 8
xvpermi.q D6, U4, 0x31 // 9
xvpermi.q D3, U1, 0x31 // 12
xvpermi.q D7, U5, 0x31 // 13
xvand.v U8, D8, D8
xvpermi.q D8, D10, 0x02 // 2
xvand.v U12, D12, D12
xvpermi.q D12, D14, 0x02 // 3
xvand.v U9, D9, D9
xvpermi.q D9, D11, 0x02 // 6
xvand.v U13, D13, D13
xvpermi.q D13, D15, 0x02 // 7
xvpermi.q D10, U8, 0x31 // 10
xvpermi.q D14, U12, 0x31 // 11
xvpermi.q D11, U9, 0x31 // 14
xvpermi.q D15, U13, 0x31 // 15
xvst D0, TD, 0x00 // 0
xvst D4, TD, 0x20 // 1
xvst D8, TD, 0x40 // 2
xvst D12, TD, 0x60 // 3
xvst D1, TD, 0x80 // 4
xvst D5, TD, 0xA0 // 5
xvst D9, TD, 0xC0 // 6
xvst D13, TD, 0xE0 // 7
addi.d TD, TD, 0x100
xvst D2, TD, 0x00 // 8
xvst D6, TD, 0x20 // 9
xvst D10, TD, 0x40 // 10
xvst D14, TD, 0x60 // 11
xvst D3, TD, 0x80 // 12
xvst D7, TD, 0xA0 // 13
xvst D11, TD, 0xC0 // 14
xvst D15, TD, 0xE0 // 15
addi.d TD, TD, 0x100
// Advance all 16 source pointers by the 8 doubles just consumed.
addi.d S1, S1, 0x40
addi.d S2, S2, 0x40
addi.d S3, S3, 0x40
addi.d S4, S4, 0x40
addi.d S5, S5, 0x40
addi.d S6, S6, 0x40
addi.d S7, S7, 0x40
addi.d S8, S8, 0x40
addi.d S9, S9, 0x40
addi.d S10, S10, 0x40
addi.d S11, S11, 0x40
addi.d S12, S12, 0x40
addi.d S13, S13, 0x40
addi.d S14, S14, 0x40
addi.d S15, S15, 0x40
addi.d S16, S16, 0x40
addi.d I, I, -1
blt ZERO, I, .L_I1
.L_I7:
// Remainder M % 8: move one double from each of the 16 pointers per pass.
andi I, M, 0x07
beq I, ZERO, .L_I0
.L_II1: /* I-- */
fld.d F0, S1, 0x00
fld.d F1, S2, 0x00
fld.d F2, S3, 0x00
fld.d F3, S4, 0x00
fld.d F4, S5, 0x00
fld.d F5, S6, 0x00
fld.d F6, S7, 0x00
fld.d F7, S8, 0x00
fst.d F0, TD, 0x00
addi.d S1, S1, 0x08
fst.d F1, TD, 0x08
addi.d S2, S2, 0x08
fst.d F2, TD, 0x10
addi.d S3, S3, 0x08
fst.d F3, TD, 0x18
addi.d S4, S4, 0x08
fst.d F4, TD, 0x20
addi.d S5, S5, 0x08
fst.d F5, TD, 0x28
addi.d S6, S6, 0x08
fst.d F6, TD, 0x30
addi.d S7, S7, 0x08
fst.d F7, TD, 0x38
addi.d S8, S8, 0x08
addi.d TD, TD, 0x40
fld.d F0, S9, 0x00
fld.d F1, S10, 0x00
fld.d F2, S11, 0x00
fld.d F3, S12, 0x00
fld.d F4, S13, 0x00
fld.d F5, S14, 0x00
fld.d F6, S15, 0x00
fld.d F7, S16, 0x00
fst.d F0, TD, 0x00
addi.d S9, S9, 0x08
fst.d F1, TD, 0x08
addi.d S10, S10, 0x08
fst.d F2, TD, 0x10
addi.d S11, S11, 0x08
fst.d F3, TD, 0x18
addi.d S12, S12, 0x08
fst.d F4, TD, 0x20
addi.d S13, S13, 0x08
fst.d F5, TD, 0x28
addi.d S14, S14, 0x08
fst.d F6, TD, 0x30
addi.d S15, S15, 0x08
fst.d F7, TD, 0x38
addi.d S16, S16, 0x08
addi.d TD, TD, 0x40
addi.d I, I, -1
blt ZERO, I, .L_II1
.L_I0:
blt ZERO, J, .L_J1
.L_N8:
// Remainder N & 8: same scheme with 8 lda-strided pointers (S1..S8).
andi J, N, 0x08
beq ZERO, J, .L_N4
move S1, TS
add.d S2, TS, TL
srai.d I, M, 0x03
add.d S3, S2, TL
add.d S4, S2, T0
add.d S5, S3, T0
add.d S6, S4, T0
add.d S7, S5, T0
add.d S8, S6, T0
add.d TS, S7, T0 // TS += 8 * lda
beq I, ZERO, .L_8I3
.L_8I1: /* I-- */
xvld U0, S1, 0x00
xvld U1, S2, 0x00
xvld U2, S3, 0x00
xvld U3, S4, 0x00
xvld U4, S5, 0x00
xvld U5, S6, 0x00
xvld U6, S7, 0x00
xvld U7, S8, 0x00
xvpackev.d D0, U1, U0
xvpackod.d D1, U1, U0
xvpackev.d D2, U3, U2
xvpackod.d D3, U3, U2
xvpackev.d D4, U5, U4
xvpackod.d D5, U5, U4
xvpackev.d D6, U7, U6
xvpackod.d D7, U7, U6
xvand.v U0, D0, D0 // copy D0 before its low/high halves are split
xvpermi.q D0, D2, 0x02 // 0
xvand.v U4, D4, D4
xvpermi.q D4, D6, 0x02 // 1
xvand.v U1, D1, D1
xvpermi.q D1, D3, 0x02 // 2
xvand.v U5, D5, D5
xvpermi.q D5, D7, 0x02 // 3
xvpermi.q D2, U0, 0x31 // 4
xvpermi.q D6, U4, 0x31 // 5
xvpermi.q D3, U1, 0x31 // 6
xvpermi.q D7, U5, 0x31 // 7
xvst D0, TD, 0x00
xvst D4, TD, 0x20
xvst D1, TD, 0x40
xvst D5, TD, 0x60
xvst D2, TD, 0x80
xvst D6, TD, 0xA0
xvst D3, TD, 0xC0
xvst D7, TD, 0xE0
addi.d TD, TD, 0x100
// Doubles 4..7 of each pointer, same interleave.
xvld U0, S1, 0x20
xvld U1, S2, 0x20
xvld U2, S3, 0x20
xvld U3, S4, 0x20
xvld U4, S5, 0x20
xvld U5, S6, 0x20
xvld U6, S7, 0x20
xvld U7, S8, 0x20
xvpackev.d D0, U1, U0
xvpackod.d D1, U1, U0
xvpackev.d D2, U3, U2
xvpackod.d D3, U3, U2
xvpackev.d D4, U5, U4
xvpackod.d D5, U5, U4
xvpackev.d D6, U7, U6
xvpackod.d D7, U7, U6
xvand.v U0, D0, D0
xvpermi.q D0, D2, 0x02 // 0
xvand.v U4, D4, D4
xvpermi.q D4, D6, 0x02 // 1
xvand.v U1, D1, D1
xvpermi.q D1, D3, 0x02 // 2
xvand.v U5, D5, D5
xvpermi.q D5, D7, 0x02 // 3
xvpermi.q D2, U0, 0x31 // 4
xvpermi.q D6, U4, 0x31 // 5
xvpermi.q D3, U1, 0x31 // 6
xvpermi.q D7, U5, 0x31 // 7
xvst D0, TD, 0x00
xvst D4, TD, 0x20
xvst D1, TD, 0x40
xvst D5, TD, 0x60
xvst D2, TD, 0x80
xvst D6, TD, 0xA0
xvst D3, TD, 0xC0
xvst D7, TD, 0xE0
addi.d TD, TD, 0x100
addi.d S1, S1, 0x40
addi.d S2, S2, 0x40
addi.d S3, S3, 0x40
addi.d S4, S4, 0x40
addi.d S5, S5, 0x40
addi.d S6, S6, 0x40
addi.d S7, S7, 0x40
addi.d S8, S8, 0x40
addi.d I, I, -1
blt ZERO, I, .L_8I1
.L_8I3:
// Remainder M % 8 for the 8-pointer block, one double at a time.
andi I, M, 0x07
beq I, ZERO, .L_N4
.L_8I11:
fld.d F0, S1, 0x00
fld.d F1, S2, 0x00
fld.d F2, S3, 0x00
fld.d F3, S4, 0x00
fld.d F4, S5, 0x00
fld.d F5, S6, 0x00
fld.d F6, S7, 0x00
fld.d F7, S8, 0x00
fst.d F0, TD, 0x00
addi.d S1, S1, 0x08
fst.d F1, TD, 0x08
addi.d S2, S2, 0x08
fst.d F2, TD, 0x10
addi.d S3, S3, 0x08
fst.d F3, TD, 0x18
addi.d S4, S4, 0x08
fst.d F4, TD, 0x20
addi.d S5, S5, 0x08
fst.d F5, TD, 0x28
addi.d S6, S6, 0x08
fst.d F6, TD, 0x30
addi.d S7, S7, 0x08
fst.d F7, TD, 0x38
addi.d S8, S8, 0x08
addi.d TD, TD, 0x40
addi.d I, I, -1
blt ZERO, I, .L_8I11
.L_N4:
// Remainder N & 4: 4 pointers, 4 doubles per pass.
andi J, N, 0x04
beq ZERO, J, .L_N2
move S1, TS
add.d S2, TS, TL
srai.d I, M, 0x02
add.d S3, S2, TL
add.d S4, S2, T0
add.d TS, S3, T0 // TS += 4 * lda
beq I, ZERO, .L_I3
.L_4I1: /* I-- */
xvld U0, S1, 0x00
xvld U1, S2, 0x00
xvld U2, S3, 0x00
xvld U3, S4, 0x00
xvpackev.d D0, U1, U0
xvpackod.d D1, U1, U0
xvpackev.d D2, U3, U2
xvpackod.d D3, U3, U2
xvand.v U0, D0, D0
xvpermi.q D0, D2, 0x02 // 0
xvand.v U1, D1, D1
xvpermi.q D1, D3, 0x02 // 1
xvpermi.q D2, U0, 0x31 // 2
xvpermi.q D3, U1, 0x31 // 3
xvst D0, TD, 0x00
xvst D1, TD, 0x20
xvst D2, TD, 0x40
xvst D3, TD, 0x60
addi.d S1, S1, 0x20
addi.d S2, S2, 0x20
addi.d S3, S3, 0x20
addi.d S4, S4, 0x20
addi.d TD, TD, 0x80
addi.d I, I, -1
blt ZERO, I, .L_4I1
.L_I3:
// Remainder M % 4 for the 4-pointer block.
andi I, M, 0x03
beq I, ZERO, .L_N2
.L_4II1:
fld.d F0, S1, 0x00
fld.d F1, S2, 0x00
fld.d F2, S3, 0x00
fld.d F3, S4, 0x00
fst.d F0, TD, 0x00
addi.d S1, S1, 0x08
fst.d F1, TD, 0x08
addi.d S2, S2, 0x08
fst.d F2, TD, 0x10
addi.d S3, S3, 0x08
fst.d F3, TD, 0x18
addi.d S4, S4, 0x08
addi.d TD, TD, 0x20
addi.d I, I, -1
blt ZERO, I, .L_4II1
.L_N2:
// Remainder N & 2: 2 pointers, 2 doubles per pass.
andi J, N, 0x02
beq ZERO, J, .L_N1
move S1, TS
add.d S2, TS, TL
srai.d I, M, 0x01
add.d TS, S2, TL // TS += 2 * lda
beq I, ZERO, .L_NI1
.L_2I1: /* I-- */
// NOTE(review): xvld reads 32 bytes from each pointer but only the low
// 16 bytes (2 doubles) are consumed and stored (S1/S2 advance by 0x10);
// the extra read may run past the end of a vector -- confirm the source
// buffer is padded or sized to tolerate this.
xvld U0, S1, 0x00
xvld U1, S2, 0x00
xvpackev.d D0, U1, U0
xvpackod.d D1, U1, U0
xvpermi.q D0, D1, 0x02 // 0
xvst D0, TD, 0x00
addi.d S1, S1, 0x10
addi.d S2, S2, 0x10
addi.d TD, TD, 0x20
addi.d I, I, -1
blt ZERO, I, .L_2I1
.L_NI1:
// Remainder M & 1 for the 2-pointer block.
andi I, M, 0x01
beq I, ZERO, .L_N1
fld.d F0, S1, 0x00
fld.d F1, S2, 0x00
fst.d F0, TD, 0x00
addi.d S1, S1, 0x08
fst.d F1, TD, 0x08
addi.d S2, S2, 0x08
addi.d TD, TD, 0x10
.L_N1:
// Remainder N & 1: straight scalar copy of the last vector (M doubles).
move S1, TS
beq ZERO, M, .L_N0
.L_M1:
fld.d F0, S1, 0x00
addi.d S1, S1, 0x08
fst.d F0, TD, 0x00
addi.d TD, TD, 0x08
addi.d M, M, -1
blt ZERO, M, .L_M1
.L_N0:
// Restore callee-saved registers and return.
LDARG $r23, $sp, 0x00
LDARG $r24, $sp, 0x08
LDARG $r25, $sp, 0x10
LDARG $r26, $sp, 0x18
LDARG $r27, $sp, 0x20
LDARG $r28, $sp, 0x28
LDARG $r29, $sp, 0x30
LDARG $r30, $sp, 0x38
LDARG $r31, $sp, 0x40
LD $f23, $sp, 0x48
LD $f24, $sp, 0x50
LD $f25, $sp, 0x58
LD $f26, $sp, 0x60
LD $f27, $sp, 0x68
LD $f28, $sp, 0x70
LD $f29, $sp, 0x78
LD $f30, $sp, 0x80
LD $f31, $sp, 0x88
addi.d $sp, $sp, 0x90
jirl $r0, $r1, 0x00 // return via $r1 (ra)
EPILOGUE

View File

@ -0,0 +1,237 @@
/*******************************************************************************
Copyright (c) 2021, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#define ASSEMBLER
#include "common.h"
/* Function parameters */
#define M $r4 // param 1: m
#define N $r5 // param 2: n
#define SRC $r6 // param 3: src
#define LDA $r7 // param 4: lda
#define DST $r8 // param 5: dst
#define I $r9 // inner (M-direction) loop counter
#define J $r10 // outer (N-direction) loop counter
#define S1 $r12 // S1..S4: up to 4 lda-strided source pointers
#define S2 $r13
#define S3 $r14
#define S4 $r15
#define S5 $r16
#define S6 $r17
#define S7 $r18
#define S8 $r19
#define TD $r20 // running destination (write) pointer
#define TS $r11 // source block base; advances per outer iteration
/* CAUTION: TL aliases LDA ($r7); LDA is consumed before TL is written. */
#define TL $r7 // lda in bytes (lda << 3)
#define T0 $r23 // 2 * lda in bytes (callee-saved, hence the stack frame)
#define ZERO $r0
#define F0 $f0 // F0..F3 used by the scalar remainder paths
#define F1 $f1
#define F2 $f2
#define F3 $f3
#define F4 $f4
#define F5 $f5
#define F6 $f6
#define F7 $f7
/* LASX vectors */
#define U0 $xr0 // U0..U3: raw 4-double loads
#define U1 $xr1
#define U2 $xr2
#define U3 $xr3
#define U4 $xr4
#define U5 $xr5
#define U6 $xr6
#define U7 $xr7
#define D0 $xr14 // D0..D3: interleaved results to be stored
#define D1 $xr8
#define D2 $xr9
#define D3 $xr10
#define D4 $xr11
#define D5 $xr12
#define D6 $xr13
#define D7 $xr15
/*-----------------------------------------------------------------------
 * void CNAME(BLASLONG m, BLASLONG n, FLOAT *src, BLASLONG lda, FLOAT *dst)
 *
 * 4-wide variant of the LASX pack routine: the outer loop peels N in
 * blocks of 4 lda-strided vectors and the inner loop interleaves 4
 * doubles from each via xvpackev/xvpackod + xvpermi.q. Remainders N & 2
 * and N & 1 follow, each with their own M tail loops.
 * NOTE(review): appears to be dgemm_ncopy_4.S (DGEMMONCOPY) -- confirm
 * the output layout against the generic dgemm_ncopy_4.c.
 *---------------------------------------------------------------------*/
PROLOGUE
addi.d $sp, $sp, -8
SDARG $r23, $sp, 0 // only $r23 (T0) needs saving
move TD, DST
move TS, SRC
slli.d TL, LDA, 0x03 // TL = lda * 8 (stride in bytes)
slli.d T0, TL, 0x01 // T0 = 2 * lda * 8
srai.d J, N, 0x02 // J = N / 4
beq J, ZERO, .L_N2
.L_J1: /* J-- */
// S1..S4 = TS + k*TL, k = 0..3; TS advances by 4*lda.
move S1, TS
add.d S2, TS, TL
srai.d I, M, 0x02 // I = M / 4
add.d S3, S2, TL
add.d S4, S2, T0
add.d TS, S3, T0
addi.d J, J, -1
beq I, ZERO, .L_I3
.L_I1: /* I-- */
// 4 doubles from each of 4 pointers, interleaved into 4 output vectors.
xvld U0, S1, 0x00
xvld U1, S2, 0x00
xvld U2, S3, 0x00
xvld U3, S4, 0x00
xvpackev.d D0, U1, U0 // even/odd 64-bit lane interleave of the pairs
xvpackod.d D1, U1, U0
xvpackev.d D2, U3, U2
xvpackod.d D3, U3, U2
xvand.v U0, D0, D0 // register copy: keep D0 before the half-merge
xvpermi.q D0, D2, 0x02 // 0   (0x02: {dst.lo, src.lo})
xvand.v U1, D1, D1
xvpermi.q D1, D3, 0x02 // 1
xvpermi.q D2, U0, 0x31 // 2   (0x31: {src.hi, dst.hi})
xvpermi.q D3, U1, 0x31 // 3
xvst D0, TD, 0x00
xvst D1, TD, 0x20
xvst D2, TD, 0x40
xvst D3, TD, 0x60
addi.d S1, S1, 0x20
addi.d S2, S2, 0x20
addi.d S3, S3, 0x20
addi.d S4, S4, 0x20
addi.d TD, TD, 0x80
addi.d I, I, -1
blt ZERO, I, .L_I1
.L_I3:
// Remainder M % 4: one double from each pointer per pass.
andi I, M, 0x03
beq I, ZERO, .L_I0
.L_II1:
fld.d F0, S1, 0x00
fld.d F1, S2, 0x00
fld.d F2, S3, 0x00
fld.d F3, S4, 0x00
fst.d F0, TD, 0x00
addi.d S1, S1, 0x08
fst.d F1, TD, 0x08
addi.d S2, S2, 0x08
fst.d F2, TD, 0x10
addi.d S3, S3, 0x08
fst.d F3, TD, 0x18
addi.d S4, S4, 0x08
addi.d TD, TD, 0x20
addi.d I, I, -1
blt ZERO, I, .L_II1
.L_I0:
blt ZERO, J, .L_J1
.L_N2:
// Remainder N & 2: 2 pointers, 4 doubles per pass.
andi J, N, 0x02
beq ZERO, J, .L_N1
move S1, TS
add.d S2, TS, TL
srai.d I, M, 0x02
add.d TS, S2, TL // TS += 2 * lda
beq I, ZERO, .L_2I3
.L_2I1: /* I-- */
xvld U0, S1, 0x00
xvld U1, S2, 0x00
xvpackev.d D0, U1, U0
xvpackod.d D1, U1, U0
xvand.v U0, D0, D0
xvpermi.q D0, D1, 0x02 // 0
xvpermi.q D1, U0, 0x31 // 1
xvst D0, TD, 0x00
xvst D1, TD, 0x20
addi.d S1, S1, 0x20
addi.d S2, S2, 0x20
addi.d TD, TD, 0x40
addi.d I, I, -1
blt ZERO, I, .L_2I1
.L_2I3:
// Remainder M % 4 for the 2-pointer block.
andi I, M, 0x03
beq ZERO, I, .L_N1
.L_2II1: /* I-- */
fld.d F0, S1, 0x00
fld.d F1, S2, 0x00
fst.d F0, TD, 0x00
addi.d I, I, -1
fst.d F1, TD, 0x08
addi.d S1, S1, 0x08
addi.d S2, S2, 0x08
addi.d TD, TD, 0x10
blt ZERO, I, .L_2II1
.L_N1:
// Remainder N & 1: plain copy of the last vector, 4 doubles at a time
// then scalar for M % 4.
andi J, N, 0x01
beq ZERO, J, .L_N0
move S1, TS
srai.d I, M, 0x02
beq ZERO, I, .L_1I3
.L_1I1:
xvld U0, S1, 0x00
addi.d S1, S1, 0x20
xvst U0, TD, 0x00
addi.d I, I, -1
addi.d TD, TD, 0x20
blt ZERO, I, .L_1I1
.L_1I3:
andi I, M, 0x03
beq ZERO, I, .L_N0
.L_1II1:
fld.d F0, S1, 0x00
addi.d S1, S1, 0x08
fst.d F0, TD, 0x00
addi.d I, I, -1
addi.d TD, TD, 0x08
blt ZERO, I, .L_1II1
.L_N0:
LDARG $r23, $sp, 0
addi.d $sp, $sp, 8
jirl $r0, $r1, 0x00 // return via $r1 (ra)
EPILOGUE

View File

@ -0,0 +1,710 @@
/*******************************************************************************
Copyright (c) 2021, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#define ASSEMBLER
#include "common.h"
/* Function parameters */
#define M $r4 // param 1: m
#define N $r5 // param 2: n
#define SRC $r6 // param 3: src
#define LDA $r7 // param 4: lda
#define DST $r8 // param 5: dst
#define I $r9 // inner (N-direction) loop counter
#define J $r10 // outer (M-direction) loop counter
#define S0 $r11 // source row-block base; advances by 8/4/2 rows per pass
#define S1 $r12 // S1..S8: up to 8 consecutive lda-strided row pointers
#define S2 $r13
#define S3 $r14
#define S4 $r15
#define S5 $r16
#define S6 $r17
#define S7 $r18
#define S8 $r19
#define P0 $r20 // base of the current row block in the 16-wide dst region
#define P1 $r23 // write cursor inside the 16-wide region
#define P2 $r24 // write cursor for the  8-wide remainder region
#define P3 $r25 // write cursor for the  4-wide remainder region
#define P4 $r26 // write cursor for the  2-wide remainder region
#define P5 $r27 // write cursor for the  1-wide remainder region
#define T0 $r28 // scratch
#define T1 $r29 // scratch; later M*128 = byte stride between 16-col chunks
/* CAUTION: TL aliases LDA ($r7); LDA is consumed before TL is written. */
#define TL $r7 // lda in bytes (lda << 3)
#define ZERO $r0
#define F0 $f0 // F0..F7: scalar temporaries for the N & 1 / small-row paths
#define F1 $f1
#define F2 $f2
#define F3 $f3
#define F4 $f4
#define F5 $f5
#define F6 $f6
#define F7 $f7
/* LASX vectors */
#define U0 $xr0 // U0..U7: straight 4-double load/store staging
#define U1 $xr1
#define U2 $xr2
#define U3 $xr3
#define U4 $xr4
#define U5 $xr5
#define U6 $xr6
#define U7 $xr7
/*-----------------------------------------------------------------------
 * void CNAME(BLASLONG m, BLASLONG n, FLOAT *src, BLASLONG lda, FLOAT *dst)
 *
 * Row-major ("transposed") panel copy: data is already contiguous along
 * N, so rows are copied without any in-register transpose. The outer
 * loop peels M in blocks of 8 rows; within a block, N is peeled as
 * 16/8/4/2/1-wide chunks. Each chunk width has its own destination
 * region whose base (P2..P5) is precomputed below from DST, so the
 * packed buffer is laid out as: all 16-wide panels, then the 8-wide
 * remainder, then 4-, 2-, and 1-wide remainders.
 * NOTE(review): appears to be dgemm_tcopy_16.S (DGEMMITCOPY) -- confirm
 * the region layout against the generic dgemm_tcopy_16.c.
 *---------------------------------------------------------------------*/
PROLOGUE
addi.d $sp, $sp, -56
SDARG $r23, $sp, 0 // save callee-saved r23..r29 (P1..P5, T0, T1)
SDARG $r24, $sp, 8
SDARG $r25, $sp, 16
SDARG $r26, $sp, 24
SDARG $r27, $sp, 32
SDARG $r28, $sp, 40
SDARG $r29, $sp, 48
move S0, SRC
move P0, DST
// P2 = DST + M * (N & ~15) * 8 : first byte after all 16-wide panels.
// P3 = DST + M * (N & ~7)  * 8 : first byte after the 8-wide remainder.
srai.d T0, N, 0x04
srai.d T1, N, 0x03
slli.d T0, T0, 0x04
slli.d T1, T1, 0x03
mul.d P2, M, T0
mul.d P3, M, T1
slli.d P2, P2, 0x03
slli.d P3, P3, 0x03
add.d P2, DST, P2
add.d P3, DST, P3
// P4 = DST + M * (N & ~3) * 8 ; P5 = DST + M * (N & ~1) * 8 : likewise
// for the 4-wide and 2-wide remainder regions.
srai.d T0, N, 0x02
srai.d T1, N, 0x01
slli.d T0, T0, 0x02
slli.d T1, T1, 0x01
mul.d P4, M, T0
mul.d P5, M, T1
slli.d P4, P4, 0x03
slli.d P5, P5, 0x03
add.d P4, DST, P4
add.d P5, DST, P5
slli.d TL, LDA, 0x03 // TL = lda * 8 (row stride in bytes)
srai.d J, M, 0x03 // J = M / 8 row blocks
slli.d T0, TL, 0x01 // T0 = 2 * lda * 8
slli.d T1, M, 0x07 // T1 = M*16 doubles = byte stride between 16-col chunks
beq ZERO, J, .L_M7
.L_J1: /* J-- */
// S1..S8 = S0 + k*TL, k = 0..7 (8 consecutive rows); S0 += 8 rows.
move S1, S0
add.d S2, S0, TL
add.d S3, S1, T0
add.d S4, S2, T0
add.d S5, S3, T0
add.d S6, S4, T0
add.d S7, S5, T0
add.d S8, S6, T0
add.d S0, S7, T0
move P1, P0
addi.d P0, P0, 0x400 // next row block: 8 rows * 16 cols * 8 B = 0x400
srai.d I, N, 0x04 // I = N / 16 chunks for this row block
addi.d J, J, -1
beq ZERO, I, .L_N15
.L_I1: /* I-- */
// Copy a contiguous 8x16 tile (16 doubles = 4 xvld per row), rows S1..S8
// stored back to back at P1.
xvld U0, S1, 0x00
xvld U1, S1, 0x20
xvld U2, S1, 0x40
xvld U3, S1, 0x60
xvld U4, S2, 0x00
xvld U5, S2, 0x20
xvld U6, S2, 0x40
xvld U7, S2, 0x60
xvst U0, P1, 0x00
xvst U1, P1, 0x20
xvst U2, P1, 0x40
xvst U3, P1, 0x60
xvst U4, P1, 0x80
xvst U5, P1, 0xA0
xvst U6, P1, 0xC0
xvst U7, P1, 0xE0
xvld U0, S3, 0x00
xvld U1, S3, 0x20
xvld U2, S3, 0x40
xvld U3, S3, 0x60
xvld U4, S4, 0x00
xvld U5, S4, 0x20
xvld U6, S4, 0x40
xvld U7, S4, 0x60
xvst U0, P1, 0x100
xvst U1, P1, 0x120
xvst U2, P1, 0x140
xvst U3, P1, 0x160
xvst U4, P1, 0x180
xvst U5, P1, 0x1A0
xvst U6, P1, 0x1C0
xvst U7, P1, 0x1E0
xvld U0, S5, 0x00
xvld U1, S5, 0x20
xvld U2, S5, 0x40
xvld U3, S5, 0x60
xvld U4, S6, 0x00
xvld U5, S6, 0x20
xvld U6, S6, 0x40
xvld U7, S6, 0x60
xvst U0, P1, 0x200
xvst U1, P1, 0x220
xvst U2, P1, 0x240
xvst U3, P1, 0x260
xvst U4, P1, 0x280
xvst U5, P1, 0x2A0
xvst U6, P1, 0x2C0
xvst U7, P1, 0x2E0
xvld U0, S7, 0x00
xvld U1, S7, 0x20
xvld U2, S7, 0x40
xvld U3, S7, 0x60
xvld U4, S8, 0x00
xvld U5, S8, 0x20
xvld U6, S8, 0x40
xvld U7, S8, 0x60
xvst U0, P1, 0x300
xvst U1, P1, 0x320
xvst U2, P1, 0x340
xvst U3, P1, 0x360
xvst U4, P1, 0x380
xvst U5, P1, 0x3A0
xvst U6, P1, 0x3C0
xvst U7, P1, 0x3E0
addi.d S1, S1, 0x80
addi.d S2, S2, 0x80
addi.d S3, S3, 0x80
addi.d S4, S4, 0x80
addi.d S5, S5, 0x80
addi.d S6, S6, 0x80
addi.d S7, S7, 0x80
addi.d S8, S8, 0x80
addi.d I, I, -1
add.d P1, P1, T1 // jump to the same row block in the next 16-col chunk
blt ZERO, I, .L_I1
.L_N15:
// Remainder N & 8: 8x8 tile into the 8-wide region at P2.
andi I, N, 0x08
beq ZERO, I, .L_N7
xvld U0, S1, 0x00
xvld U1, S1, 0x20
xvld U2, S2, 0x00
xvld U3, S2, 0x20
xvld U4, S3, 0x00
xvld U5, S3, 0x20
xvld U6, S4, 0x00
xvld U7, S4, 0x20
xvst U0, P2, 0x00
xvst U1, P2, 0x20
xvst U2, P2, 0x40
xvst U3, P2, 0x60
xvst U4, P2, 0x80
xvst U5, P2, 0xA0
xvst U6, P2, 0xC0
xvst U7, P2, 0xE0
xvld U0, S5, 0x00
xvld U1, S5, 0x20
xvld U2, S6, 0x00
xvld U3, S6, 0x20
xvld U4, S7, 0x00
xvld U5, S7, 0x20
xvld U6, S8, 0x00
xvld U7, S8, 0x20
xvst U0, P2, 0x100
xvst U1, P2, 0x120
xvst U2, P2, 0x140
xvst U3, P2, 0x160
xvst U4, P2, 0x180
xvst U5, P2, 0x1A0
xvst U6, P2, 0x1C0
xvst U7, P2, 0x1E0
addi.d S1, S1, 0x40
addi.d S2, S2, 0x40
addi.d S3, S3, 0x40
addi.d S4, S4, 0x40
addi.d S5, S5, 0x40
addi.d S6, S6, 0x40
addi.d S7, S7, 0x40
addi.d S8, S8, 0x40
addi.d P2, P2, 0x200 // 8 rows * 8 cols * 8 B
.L_N7:
// Remainder N & 4: 8x4 tile into the 4-wide region at P3.
andi I, N, 0x04
beq ZERO, I, .L_N3
xvld U0, S1, 0x00
xvld U1, S2, 0x00
xvld U2, S3, 0x00
xvld U3, S4, 0x00
xvld U4, S5, 0x00
xvld U5, S6, 0x00
xvld U6, S7, 0x00
xvld U7, S8, 0x00
xvst U0, P3, 0x00
xvst U1, P3, 0x20
xvst U2, P3, 0x40
xvst U3, P3, 0x60
xvst U4, P3, 0x80
xvst U5, P3, 0xA0
xvst U6, P3, 0xC0
xvst U7, P3, 0xE0
addi.d S1, S1, 0x20
addi.d S2, S2, 0x20
addi.d S3, S3, 0x20
addi.d S4, S4, 0x20
addi.d S5, S5, 0x20
addi.d S6, S6, 0x20
addi.d S7, S7, 0x20
addi.d S8, S8, 0x20
addi.d P3, P3, 0x100 // 8 rows * 4 cols * 8 B
.L_N3:
// Remainder N & 2: merge two rows' 2-double chunks per output vector.
// NOTE(review): each xvld reads 32 bytes but only the low 16 are used
// (pointers advance by 0x10); confirm source rows tolerate the over-read.
andi I, N, 0x02
beq ZERO, I, .L_N1
xvld U0, S1, 0x00
xvld U1, S2, 0x00
xvld U2, S3, 0x00
xvld U3, S4, 0x00
xvld U4, S5, 0x00
xvld U5, S6, 0x00
xvld U6, S7, 0x00
xvld U7, S8, 0x00
xvpermi.q U0, U1, 0x02 // {row_k lo128, row_{k+1} lo128}
xvpermi.q U2, U3, 0x02
xvpermi.q U4, U5, 0x02
xvpermi.q U6, U7, 0x02
xvst U0, P4, 0x00
xvst U2, P4, 0x20
xvst U4, P4, 0x40
xvst U6, P4, 0x60
addi.d S1, S1, 0x10
addi.d S2, S2, 0x10
addi.d S3, S3, 0x10
addi.d S4, S4, 0x10
addi.d S5, S5, 0x10
addi.d S6, S6, 0x10
addi.d S7, S7, 0x10
addi.d S8, S8, 0x10
addi.d P4, P4, 0x80 // 8 rows * 2 cols * 8 B
.L_N1:
// Remainder N & 1: one scalar from each of the 8 rows into P5.
andi I, N, 0x01
beq ZERO, I, .L_N0
fld.d F0, S1, 0x00
fld.d F1, S2, 0x00
fld.d F2, S3, 0x00
fld.d F3, S4, 0x00
fld.d F4, S5, 0x00
fld.d F5, S6, 0x00
fld.d F6, S7, 0x00
fld.d F7, S8, 0x00
fst.d F0, P5, 0x00
fst.d F1, P5, 0x08
fst.d F2, P5, 0x10
fst.d F3, P5, 0x18
fst.d F4, P5, 0x20
fst.d F5, P5, 0x28
fst.d F6, P5, 0x30
fst.d F7, P5, 0x38
addi.d S1, S1, 0x08
addi.d S2, S2, 0x08
addi.d S3, S3, 0x08
addi.d S4, S4, 0x08
addi.d S5, S5, 0x08
addi.d S6, S6, 0x08
addi.d S7, S7, 0x08
addi.d S8, S8, 0x08
addi.d P5, P5, 0x40
.L_N0:
blt ZERO, J, .L_J1
.L_M7:
// Remainder M & 4: same N peeling with a 4-row block (S1..S4).
andi J, M, 0x04
beq ZERO, J, .L_M3
move S1, S0
add.d S2, S0, TL
add.d S3, S1, T0
add.d S4, S2, T0
add.d S0, S3, T0 // S0 += 4 rows
move P1, P0
addi.d P0, P0, 0x200 // 4 rows * 16 cols * 8 B
srai.d I, N, 0x04
beq ZERO, I, .L_4N15
.L_4I1: /* I-- */
xvld U0, S1, 0x00
xvld U1, S1, 0x20
xvld U2, S1, 0x40
xvld U3, S1, 0x60
xvld U4, S2, 0x00
xvld U5, S2, 0x20
xvld U6, S2, 0x40
xvld U7, S2, 0x60
xvst U0, P1, 0x00
xvst U1, P1, 0x20
xvst U2, P1, 0x40
xvst U3, P1, 0x60
xvst U4, P1, 0x80
xvst U5, P1, 0xA0
xvst U6, P1, 0xC0
xvst U7, P1, 0xE0
xvld U0, S3, 0x00
xvld U1, S3, 0x20
xvld U2, S3, 0x40
xvld U3, S3, 0x60
xvld U4, S4, 0x00
xvld U5, S4, 0x20
xvld U6, S4, 0x40
xvld U7, S4, 0x60
xvst U0, P1, 0x100
xvst U1, P1, 0x120
xvst U2, P1, 0x140
xvst U3, P1, 0x160
xvst U4, P1, 0x180
xvst U5, P1, 0x1A0
xvst U6, P1, 0x1C0
xvst U7, P1, 0x1E0
addi.d S1, S1, 0x80
addi.d S2, S2, 0x80
addi.d S3, S3, 0x80
addi.d S4, S4, 0x80
addi.d I, I, -1
add.d P1, P1, T1 // next 16-col chunk (stride M*128 bytes)
blt ZERO, I, .L_4I1
.L_4N15:
andi I, N, 0x08
beq ZERO, I, .L_4N7
xvld U0, S1, 0x00
xvld U1, S1, 0x20
xvld U2, S2, 0x00
xvld U3, S2, 0x20
xvld U4, S3, 0x00
xvld U5, S3, 0x20
xvld U6, S4, 0x00
xvld U7, S4, 0x20
xvst U0, P2, 0x00
xvst U1, P2, 0x20
xvst U2, P2, 0x40
xvst U3, P2, 0x60
xvst U4, P2, 0x80
xvst U5, P2, 0xA0
xvst U6, P2, 0xC0
xvst U7, P2, 0xE0
addi.d S1, S1, 0x40
addi.d S2, S2, 0x40
addi.d S3, S3, 0x40
addi.d S4, S4, 0x40
addi.d P2, P2, 0x100
.L_4N7:
andi I, N, 0x04
beq ZERO, I, .L_4N3
xvld U0, S1, 0x00
xvld U1, S2, 0x00
xvld U2, S3, 0x00
xvld U3, S4, 0x00
xvst U0, P3, 0x00
xvst U1, P3, 0x20
xvst U2, P3, 0x40
xvst U3, P3, 0x60
addi.d S1, S1, 0x20
addi.d S2, S2, 0x20
addi.d S3, S3, 0x20
addi.d S4, S4, 0x20
addi.d P3, P3, 0x80
.L_4N3:
andi I, N, 0x02
beq ZERO, I, .L_4N1
xvld U0, S1, 0x00
xvld U1, S2, 0x00
xvld U2, S3, 0x00
xvld U3, S4, 0x00
xvpermi.q U0, U1, 0x02
xvpermi.q U2, U3, 0x02
xvst U0, P4, 0x00
xvst U2, P4, 0x20
addi.d S1, S1, 0x10
addi.d S2, S2, 0x10
addi.d S3, S3, 0x10
addi.d S4, S4, 0x10
addi.d P4, P4, 0x40
.L_4N1:
andi I, N, 0x01
beq ZERO, I, .L_M3
fld.d F0, S1, 0x00
fld.d F1, S2, 0x00
fld.d F2, S3, 0x00
fld.d F3, S4, 0x00
fst.d F0, P5, 0x00
fst.d F1, P5, 0x08
fst.d F2, P5, 0x10
fst.d F3, P5, 0x18
addi.d S1, S1, 0x08
addi.d S2, S2, 0x08
addi.d S3, S3, 0x08
addi.d S4, S4, 0x08
addi.d P5, P5, 0x20
.L_M3:
// Remainder M & 2: 2-row block (S1, S2).
andi J, M, 0x02
beq ZERO, J, .L_M1
move S1, S0
add.d S2, S0, TL
add.d S0, S0, T0 // S0 += 2 rows
move P1, P0
addi.d P0, P0, 0x100 // 2 rows * 16 cols * 8 B
srai.d I, N, 0x04
beq ZERO, I, .L_2N15
.L_2I1: /* I-- */
xvld U0, S1, 0x00
xvld U1, S1, 0x20
xvld U2, S1, 0x40
xvld U3, S1, 0x60
xvld U4, S2, 0x00
xvld U5, S2, 0x20
xvld U6, S2, 0x40
xvld U7, S2, 0x60
xvst U0, P1, 0x00
xvst U1, P1, 0x20
xvst U2, P1, 0x40
xvst U3, P1, 0x60
xvst U4, P1, 0x80
xvst U5, P1, 0xA0
xvst U6, P1, 0xC0
xvst U7, P1, 0xE0
addi.d S1, S1, 0x80
addi.d S2, S2, 0x80
addi.d I, I, -1
add.d P1, P1, T1
blt ZERO, I, .L_2I1
.L_2N15:
andi I, N, 0x08
beq ZERO, I, .L_2N7
xvld U0, S1, 0x00
xvld U1, S1, 0x20
xvld U2, S2, 0x00
xvld U3, S2, 0x20
xvst U0, P2, 0x00
xvst U1, P2, 0x20
xvst U2, P2, 0x40
xvst U3, P2, 0x60
addi.d S1, S1, 0x40
addi.d S2, S2, 0x40
addi.d P2, P2, 0x80
.L_2N7:
andi I, N, 0x04
beq ZERO, I, .L_2N3
xvld U0, S1, 0x00
xvld U1, S2, 0x00
xvst U0, P3, 0x00
xvst U1, P3, 0x20
addi.d S1, S1, 0x20
addi.d S2, S2, 0x20
addi.d P3, P3, 0x40
.L_2N3:
andi I, N, 0x02
beq ZERO, I, .L_2N1
xvld U0, S1, 0x00
xvld U1, S2, 0x00
xvpermi.q U0, U1, 0x02
xvst U0, P4, 0x00
addi.d S1, S1, 0x10
addi.d S2, S2, 0x10
addi.d P4, P4, 0x20
.L_2N1:
andi I, N, 0x01
beq ZERO, I, .L_M1
fld.d F0, S1, 0x00
fld.d F1, S2, 0x00
fst.d F0, P5, 0x00
fst.d F1, P5, 0x08
addi.d S1, S1, 0x08
addi.d S2, S2, 0x08
addi.d P5, P5, 0x10
.L_M1:
// Remainder M & 1: single row.
andi J, M, 0x01
beq ZERO, J, .L_M0
move S1, S0
add.d S2, S0, TL // NOTE(review): S2 is set but unused on this path
move P1, P0
addi.d P0, P0, 0x80 // 1 row * 16 cols * 8 B
srai.d I, N, 0x04
beq ZERO, I, .L_1N15
.L_1I1: /* I-- */
xvld U0, S1, 0x00
xvld U1, S1, 0x20
xvld U2, S1, 0x40
xvld U3, S1, 0x60
xvst U0, P1, 0x00
xvst U1, P1, 0x20
xvst U2, P1, 0x40
xvst U3, P1, 0x60
addi.d S1, S1, 0x80
addi.d I, I, -1
add.d P1, P1, T1
blt ZERO, I, .L_1I1
.L_1N15:
andi I, N, 0x08
beq ZERO, I, .L_1N7
xvld U0, S1, 0x00
xvld U1, S1, 0x20
xvst U0, P2, 0x00
xvst U1, P2, 0x20
addi.d S1, S1, 0x40
addi.d P2, P2, 0x40
.L_1N7:
andi I, N, 0x04
beq ZERO, I, .L_1N3
xvld U0, S1, 0x00
xvst U0, P3, 0x00
addi.d S1, S1, 0x20
addi.d P3, P3, 0x20
.L_1N3:
andi I, N, 0x02
beq ZERO, I, .L_1N1
fld.d F0, S1, 0x00
fld.d F1, S1, 0x08
fst.d F0, P4, 0x00
fst.d F1, P4, 0x08
addi.d S1, S1, 0x10
addi.d P4, P4, 0x10
.L_1N1:
andi I, N, 0x01
beq ZERO, I, .L_M0
fld.d F0, S1, 0x00
fst.d F0, P5, 0x00
addi.d S1, S1, 0x08
addi.d P5, P5, 0x08
.L_M0:
// Restore callee-saved registers and return.
LDARG $r23, $sp, 0
LDARG $r24, $sp, 8
LDARG $r25, $sp, 16
LDARG $r26, $sp, 24
LDARG $r27, $sp, 32
LDARG $r28, $sp, 40
LDARG $r29, $sp, 48
addi.d $sp, $sp, 56
jirl $r0, $r1, 0x00 // return via $r1 (ra)
EPILOGUE

View File

@ -0,0 +1,270 @@
/*******************************************************************************
Copyright (c) 2021, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#define ASSEMBLER
#include "common.h"
/* Function parameters */
#define M $r4 // param 1: m
#define N $r5 // param 2: n
#define SRC $r6 // param 3: src
#define LDA $r7 // param 4: lda
#define DST $r8 // param 5: dst
/* Scratch registers */
#define I $r9 // inner (column) loop counter
#define J $r10 // outer (row-group) loop counter
#define S0 $r11 // source cursor: first unprocessed row
#define S1 $r12 // row pointer 1 of the current 4/2/1-row group
#define S2 $r13 // row pointer 2
#define S3 $r14 // row pointer 3
#define S4 $r15 // row pointer 4
#define P0 $r16 // dst cursor for the width-4 panels
#define P1 $r17 // per-group dst pointer inside a width-4 panel
#define P2 $r18 // dst cursor for the width-2 remainder panel
#define P3 $r19 // dst cursor for the width-1 remainder panel
#define T0 $r20 // temporary
#define T1 $r23 // temporary; $r23 is callee-saved and is spilled in PROLOGUE
#define TL $r7 // lda scaled to bytes; aliases LDA (LDA is dead after scaling)
#define ZERO $r0
#define F0 $f0 // scalar doubles for the width-1 remainder copies
#define F1 $f1
#define F2 $f2
#define F3 $f3
/* LASX vectors */
#define U0 $xr0 // 256-bit vectors: 4 doubles each
#define U1 $xr1
#define U2 $xr2
#define U3 $xr3
/* Packs an M x N double-precision block (row stride = lda elements) into
 * the contiguous panel layout used by the 4-wide GEMM kernel:
 *   DST                      : width-4 column panels (columns 0 .. (N/4)*4-1)
 *   DST + M*(N/4)*4*8 bytes  : width-2 remainder panel (if N & 2)
 *   DST + M*(N/2)*2*8 bytes  : width-1 remainder panel (if N & 1)
 * Rows are consumed in groups of 4, then 2, then 1 (M remainder).
 * NOTE(review): panel layout inferred from the P0/P2/P3 cursors below --
 * confirm against the matching dgemm_kernel_16x4 consumer.
 * Clobbers: I, J, S0-S4, P0-P3, T0, TL, F0-F3, U0-U3; saves/restores $r23. */
PROLOGUE

addi.d $sp, $sp, -8
SDARG $r23, $sp, 0 // T1 lives in callee-saved $r23

move S0, SRC
move P0, DST

srai.d T0, N, 0x02 // T0 = (N/4)*4, columns covered by width-4 panels
slli.d T0, T0, 0x02
srai.d T1, N, 0x01 // T1 = (N/2)*2, columns covered by width-4 + width-2 panels
slli.d T1, T1, 0x01
mul.d T0, M, T0
mul.d T1, M, T1
slli.d T0, T0, 0x03 // scale element counts to byte offsets
slli.d T1, T1, 0x03
add.d P2, DST, T0 // P2 -> start of width-2 remainder panel
add.d P3, DST, T1 // P3 -> start of width-1 remainder panel

slli.d TL, LDA, 0x03 // TL = lda in bytes (overwrites LDA's register)
srai.d J, M, 0x02 // J = number of 4-row groups
slli.d T0, TL, 0x01 // T0 = 2 * lda bytes
slli.d T1, M, 0x05 // T1 = M * 32 bytes: stride between 4-column blocks of a panel
beq ZERO, J, .L_M3

.L_J1: /* 4-row groups: J-- */
move S1, S0
add.d S2, S0, TL
add.d S3, S1, T0
add.d S4, S2, T0
add.d S0, S3, T0 // advance source cursor by 4 rows

move P1, P0
addi.d P0, P0, 0x80 // next 4-row group starts 4*4 doubles further in

srai.d I, N, 0x02
addi.d J, J, -1
beq ZERO, I, .L_N3

.L_I1: /* copy a 4x4 tile: I-- */
xvld U0, S1, 0x00
xvld U1, S2, 0x00
xvld U2, S3, 0x00
xvld U3, S4, 0x00

xvst U0, P1, 0x00
xvst U1, P1, 0x20
xvst U2, P1, 0x40
xvst U3, P1, 0x60

addi.d S1, S1, 0x20
addi.d S2, S2, 0x20
addi.d S3, S3, 0x20
addi.d S4, S4, 0x20
add.d P1, P1, T1 // hop to the next 4-column block of the panel

addi.d I, I, -1
blt ZERO, I, .L_I1

.L_N3: /* 4 rows x 2 remaining columns */
andi I, N, 0x02
beq ZERO, I, .L_N1

xvld U0, S1, 0x00
xvld U1, S2, 0x00
xvld U2, S3, 0x00
xvld U3, S4, 0x00

xvpermi.q U0, U1, 0x02 // pack the low 128 bits (2 doubles) of two rows
xvpermi.q U2, U3, 0x02

xvst U0, P2, 0x00
xvst U2, P2, 0x20

addi.d S1, S1, 0x10
addi.d S2, S2, 0x10
addi.d S3, S3, 0x10
addi.d S4, S4, 0x10
addi.d P2, P2, 0x40

.L_N1: /* 4 rows x 1 remaining column */
andi I, N, 0x01
beq ZERO, I, .L_N0

fld.d F0, S1, 0x00
fld.d F1, S2, 0x00
fld.d F2, S3, 0x00
fld.d F3, S4, 0x00

fst.d F0, P3, 0x00
fst.d F1, P3, 0x08
fst.d F2, P3, 0x10
fst.d F3, P3, 0x18

addi.d S1, S1, 0x08
addi.d S2, S2, 0x08
addi.d S3, S3, 0x08
addi.d S4, S4, 0x08
addi.d P3, P3, 0x20

.L_N0:
blt ZERO, J, .L_J1

.L_M3: /* 2 remaining rows */
andi J, M, 0x02
beq ZERO, J, .L_M1

move S1, S0
add.d S2, S0, TL
add.d S0, S0, T0 // advance source cursor by 2 rows

move P1, P0
addi.d P0, P0, 0x40

srai.d I, N, 0x02
beq ZERO, I, .L_2N3

.L_2I1: /* copy a 2x4 tile: I-- */
xvld U0, S1, 0x00
xvld U1, S2, 0x00

xvst U0, P1, 0x00
xvst U1, P1, 0x20

addi.d S1, S1, 0x20
addi.d S2, S2, 0x20
addi.d I, I, -1
add.d P1, P1, T1
blt ZERO, I, .L_2I1

.L_2N3: /* 2 rows x 2 remaining columns */
andi I, N, 0x02
beq ZERO, I, .L_2N1

xvld U0, S1, 0x00
xvld U1, S2, 0x00

xvpermi.q U0, U1, 0x02

xvst U0, P2, 0x00

addi.d S1, S1, 0x10
addi.d S2, S2, 0x10
addi.d P2, P2, 0x20

.L_2N1: /* 2 rows x 1 remaining column */
andi I, N, 0x01 // FIX: was "addi.d I, N, 0x01" (N+1, never zero), so the
                // width-1 copy below always ran, overrunning src/P3 when N is even
beq ZERO, I, .L_M1

fld.d F0, S1, 0x00
fld.d F1, S2, 0x00

fst.d F0, P3, 0x00
fst.d F1, P3, 0x08

addi.d S1, S1, 0x08
addi.d S2, S2, 0x08
addi.d P3, P3, 0x10

.L_M1: /* 1 remaining row */
andi J, M, 0x01
beq ZERO, J, .L_M0

move S1, S0
move P1, P0

srai.d I, N, 0x02
beq ZERO, I, .L_1N3

.L_1I1: /* copy a 1x4 tile */
xvld U0, S1, 0x00
xvst U0, P1, 0x00
addi.d S1, S1, 0x20
addi.d I, I, -1
add.d P1, P1, T1
blt ZERO, I, .L_1I1

.L_1N3: /* 1 row x 2 remaining columns */
andi I, N, 0x02
beq I, ZERO, .L_1N1

fld.d F0, S1, 0x00
fld.d F1, S1, 0x08
fst.d F0, P2, 0x00
fst.d F1, P2, 0x08
addi.d S1, S1, 0x10
addi.d P2, P2, 0x10

.L_1N1: /* 1 row x 1 remaining column */
andi I, N, 0x01
beq I, ZERO, .L_M0

fld.d F0, S1, 0x00
fst.d F0, P3, 0x00

.L_M0:
LDARG $r23, $sp, 0
addi.d $sp, $sp, 8
jirl $r0, $r1, 0x00 // return
EPILOGUE

10
param.h
View File

@ -2852,35 +2852,35 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define GEMM_DEFAULT_ALIGN 0x0ffffUL
#define SGEMM_DEFAULT_UNROLL_N 8
#define DGEMM_DEFAULT_UNROLL_N 8
#define DGEMM_DEFAULT_UNROLL_N 4
#define QGEMM_DEFAULT_UNROLL_N 2
#define CGEMM_DEFAULT_UNROLL_N 4
#define ZGEMM_DEFAULT_UNROLL_N 4
#define XGEMM_DEFAULT_UNROLL_N 1
#define SGEMM_DEFAULT_UNROLL_M 2
#define DGEMM_DEFAULT_UNROLL_M 2
#define DGEMM_DEFAULT_UNROLL_M 16
#define QGEMM_DEFAULT_UNROLL_M 2
#define CGEMM_DEFAULT_UNROLL_M 1
#define ZGEMM_DEFAULT_UNROLL_M 1
#define XGEMM_DEFAULT_UNROLL_M 1
#define SGEMM_DEFAULT_P sgemm_p
#define DGEMM_DEFAULT_P dgemm_p
#define DGEMM_DEFAULT_P 32
#define QGEMM_DEFAULT_P qgemm_p
#define CGEMM_DEFAULT_P cgemm_p
#define ZGEMM_DEFAULT_P zgemm_p
#define XGEMM_DEFAULT_P xgemm_p
#define SGEMM_DEFAULT_R sgemm_r
#define DGEMM_DEFAULT_R dgemm_r
#define DGEMM_DEFAULT_R 858
#define QGEMM_DEFAULT_R qgemm_r
#define CGEMM_DEFAULT_R cgemm_r
#define ZGEMM_DEFAULT_R zgemm_r
#define XGEMM_DEFAULT_R xgemm_r
#define SGEMM_DEFAULT_Q 128
#define DGEMM_DEFAULT_Q 128
#define DGEMM_DEFAULT_Q 152
#define QGEMM_DEFAULT_Q 128
#define CGEMM_DEFAULT_Q 128
#define ZGEMM_DEFAULT_Q 128