loongarch64: Add optimizations for sum and asum.

yancheng 2023-12-07 13:08:03 +08:00 committed by Shiyou Yin
parent d4c96a35a8
commit c80e7e27d1
10 changed files with 1136 additions and 0 deletions
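
For reference, ?sum (an OpenBLAS extension) returns the plain sum of a vector's elements, while ?asum is the standard BLAS sum of absolute values. A minimal scalar C sketch of what the vectorized kernels below compute (illustrative names, not the project's generic kernels):

#include <math.h>

/* Scalar reference for dsum/dasum semantics (sketch):
 * n elements read from x with stride incx. */
double ref_dsum(int n, const double *x, int incx)
{
    double s = 0.0;
    for (int i = 0; i < n; i++)
        s += x[i * incx];
    return s;
}

double ref_dasum(int n, const double *x, int incx)
{
    double s = 0.0;
    for (int i = 0; i < n; i++)
        s += fabs(x[i * incx]);
    return s;
}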

kernel/loongarch64/KERNEL.LOONGSON2K1000

@@ -43,4 +43,10 @@ DAXPYKERNEL = daxpy_lsx.S
SAXPBYKERNEL = saxpby_lsx.S
DAXPBYKERNEL = daxpby_lsx.S
SSUMKERNEL = ssum_lsx.S
DSUMKERNEL = dsum_lsx.S
SASUMKERNEL = sasum_lsx.S
DASUMKERNEL = dasum_lsx.S
endif

kernel/loongarch64/KERNEL.LOONGSON3R5

@@ -43,6 +43,12 @@ DAXPYKERNEL = daxpy_lasx.S
SAXPBYKERNEL = saxpby_lasx.S
DAXPBYKERNEL = daxpby_lasx.S
SSUMKERNEL = ssum_lasx.S
DSUMKERNEL = dsum_lasx.S
SASUMKERNEL = sasum_lasx.S
DASUMKERNEL = dasum_lasx.S
DGEMMKERNEL = dgemm_kernel_16x4.S
DGEMMINCOPY = dgemm_ncopy_16.S
DGEMMITCOPY = dgemm_tcopy_16.S

kernel/loongarch64/dasum_lasx.S

@@ -0,0 +1,148 @@
#define ASSEMBLER
#include "common.h"
#define N $r4
#define X $r5
#define INCX $r6
#define I $r17
#define TEMP $r18
#define t1 $r15
#define t2 $r12
#define t3 $r13
#define t4 $r14
#define VX0 $xr12
#define VX1 $xr13
#define VX2 $xr14
#define VX3 $xr15
#define VT0 $xr23
#define VT1 $xr22
#define res1 $xr16
#define res2 $xr17
#define res0 $xr18
#define neg1 $xr19
PROLOGUE
xvxor.v res1, res1, res1
xvxor.v res2, res2, res2
xvxor.v res0, res0, res0
bge $r0, N, .L999
bge $r0, INCX, .L999
li.d t1, -1
xvreplgr2vr.d neg1, t1
xvffint.d.l neg1, neg1
li.d TEMP, SIZE
slli.d INCX, INCX, BASE_SHIFT
srai.d I, N, 3
bne INCX, TEMP, .L20
bge $r0, I, .L13
.align 3
.L11:
xvld VX0, X, 0 * SIZE
xvld VX1, X, 4 * SIZE
xvfmul.d VX2, neg1, VX0
xvfmul.d VX3, neg1, VX1
xvfcmp.clt.d VT0, VX0, res0
xvfcmp.clt.d VT1, VX1, res0
xvbitsel.v VX0, VX0, VX2, VT0
xvbitsel.v VX1, VX1, VX3, VT1
xvfadd.d res2, VX0, VX1
xvfadd.d res1, res1, res2
addi.d X, X, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L11
.align 3
.L12:
xvpickve.d VX1, res1, 1
xvpickve.d VX2, res1, 2
xvpickve.d VX3, res1, 3
xvfadd.d res1, VX1, res1
xvfadd.d res1, VX2, res1
xvfadd.d res1, VX3, res1
.align 3
.L13:
andi I, N, 7
bge $r0, I, .L999
.align 3
.L14:
fld.d $f12, X, 0 * SIZE
fabs.d $f12, $f12
fadd.d $f16, $f12, $f16
addi.d I, I, -1
addi.d X, X, SIZE
blt $r0, I, .L14
b .L999
.align 3
.L20:
bge $r0, I, .L23
.align 3
.L21:
ld.d t1, X, 0 * SIZE
add.d X, X, INCX
ld.d t2, X, 0 * SIZE
add.d X, X, INCX
ld.d t3, X, 0 * SIZE
add.d X, X, INCX
ld.d t4, X, 0 * SIZE
add.d X, X, INCX
xvinsgr2vr.d VX0, t1, 0
xvinsgr2vr.d VX0, t2, 1
xvinsgr2vr.d VX0, t3, 2
xvinsgr2vr.d VX0, t4, 3
ld.d t1, X, 0 * SIZE
add.d X, X, INCX
ld.d t2, X, 0 * SIZE
add.d X, X, INCX
ld.d t3, X, 0 * SIZE
add.d X, X, INCX
ld.d t4, X, 0 * SIZE
add.d X, X, INCX
xvinsgr2vr.d VX1, t1, 0
xvinsgr2vr.d VX1, t2, 1
xvinsgr2vr.d VX1, t3, 2
xvinsgr2vr.d VX1, t4, 3
xvfmul.d VX2, neg1, VX0
xvfmul.d VX3, neg1, VX1
xvfcmp.clt.d VT0, VX0, res0
xvfcmp.clt.d VT1, VX1, res0
xvbitsel.v VX0, VX0, VX2, VT0
xvbitsel.v VX1, VX1, VX3, VT1
xvfadd.d res2, VX0, VX1
xvfadd.d res1, res1, res2
addi.d I, I, -1
blt $r0, I, .L21
.align 3
.L22:
xvpickve.d VX1, res1, 1
xvpickve.d VX2, res1, 2
xvpickve.d VX3, res1, 3
xvfadd.d res1, VX1, res1
xvfadd.d res1, VX2, res1
xvfadd.d res1, VX3, res1
.align 3
.L23:
andi I, N, 7
bge $r0, I, .L999
.align 3
.L24:
fld.d $f12, X, 0 * SIZE
fabs.d $f12, $f12
fadd.d $f16, $f12, $f16
addi.d I, I, -1
add.d X, X, INCX
blt $r0, I, .L24
.align 3
.L999:
fmov.d $f0, $f16
jirl $r0, $r1, 0x0
.align 3
EPILOGUE
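
The asum kernels compute |x| without branches: each lane is multiplied by -1.0 (neg1), compared against zero (xvfcmp.clt.d), and the negated copy is selected for negative lanes (xvbitsel.v). A per-lane C sketch of the same select, assuming IEEE doubles:

/* Branchless |x| via multiply/compare/select, mirroring the
 * xvfmul.d / xvfcmp.clt.d / xvbitsel.v sequence above (sketch). */
static inline double abs_by_select(double x)
{
    double neg = -1.0 * x;      /* xvfmul.d  VX2, neg1, VX0      */
    int lt_zero = (x < 0.0);    /* xvfcmp.clt.d VT0, VX0, res0   */
    return lt_zero ? neg : x;   /* xvbitsel.v VX0, VX0, VX2, VT0 */
}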

kernel/loongarch64/dasum_lsx.S

@@ -0,0 +1,158 @@
#define ASSEMBLER
#include "common.h"
#define N $r4
#define X $r5
#define INCX $r6
#define I $r17
#define TEMP $r18
#define t1 $r15
#define t2 $r12
#define t3 $r13
#define t4 $r14
#define VX0 $vr12
#define VX1 $vr13
#define VX2 $vr14
#define VX3 $vr15
#define VT0 $vr23
#define VT1 $vr22
#define res1 $vr16
#define res2 $vr17
#define res0 $vr18
#define neg1 $vr19
PROLOGUE
vxor.v res1, res1, res1
vxor.v res2, res2, res2
vxor.v res0, res0, res0
bge $r0, N, .L999
bge $r0, INCX, .L999
li.d t1, -1
vreplgr2vr.d neg1, t1
vffint.d.l neg1, neg1
li.d TEMP, SIZE
slli.d INCX, INCX, BASE_SHIFT
srai.d I, N, 3
bne INCX, TEMP, .L20
bge $r0, I, .L13
.align 3
.L11:
vld VX0, X, 0 * SIZE
vld VX1, X, 2 * SIZE
vfmul.d VX2, neg1, VX0
vfmul.d VX3, neg1, VX1
vfcmp.clt.d VT0, VX0, res0
vfcmp.clt.d VT1, VX1, res0
vbitsel.v VX0, VX0, VX2, VT0
vbitsel.v VX1, VX1, VX3, VT1
vfadd.d res2, VX0, VX1
vfadd.d res1, res1, res2
vld VX0, X, 4 * SIZE
vld VX1, X, 6 * SIZE
vfmul.d VX2, neg1, VX0
vfmul.d VX3, neg1, VX1
vfcmp.clt.d VT0, VX0, res0
vfcmp.clt.d VT1, VX1, res0
vbitsel.v VX0, VX0, VX2, VT0
vbitsel.v VX1, VX1, VX3, VT1
vfadd.d res2, VX0, VX1
vfadd.d res1, res1, res2
addi.d X, X, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L11
.align 3
.L12:
vreplvei.d VX1, res1, 1
vfadd.d res1, VX1, res1
.align 3
.L13:
andi I, N, 7
bge $r0, I, .L999
.align 3
.L14:
fld.d $f12, X, 0 * SIZE
fabs.d $f12, $f12
fadd.d $f16, $f12, $f16
addi.d I, I, -1
addi.d X, X, SIZE
blt $r0, I, .L14
b .L999
.align 3
.L20:
bge $r0, I, .L23
.align 3
.L21:
ld.d t1, X, 0 * SIZE
add.d X, X, INCX
ld.d t2, X, 0 * SIZE
add.d X, X, INCX
vinsgr2vr.d VX0, t1, 0
vinsgr2vr.d VX0, t2, 1
ld.d t1, X, 0 * SIZE
add.d X, X, INCX
ld.d t2, X, 0 * SIZE
vinsgr2vr.d VX1, t1, 0
vinsgr2vr.d VX1, t2, 1
add.d X, X, INCX
vfmul.d VX2, neg1, VX0
vfmul.d VX3, neg1, VX1
vfcmp.clt.d VT0, VX0, res0
vfcmp.clt.d VT1, VX1, res0
vbitsel.v VX0, VX0, VX2, VT0
vbitsel.v VX1, VX1, VX3, VT1
vfadd.d res2, VX0, VX1
vfadd.d res1, res1, res2
ld.d t3, X, 0 * SIZE
add.d X, X, INCX
ld.d t4, X, 0 * SIZE
add.d X, X, INCX
vinsgr2vr.d VX0, t3, 0
vinsgr2vr.d VX0, t4, 1
ld.d t3, X, 0 * SIZE
add.d X, X, INCX
ld.d t4, X, 0 * SIZE
vinsgr2vr.d VX1, t3, 0
vinsgr2vr.d VX1, t4, 1
add.d X, X, INCX
vfmul.d VX2, neg1, VX0
vfmul.d VX3, neg1, VX1
vfcmp.clt.d VT0, VX0, res0
vfcmp.clt.d VT1, VX1, res0
vbitsel.v VX0, VX0, VX2, VT0
vbitsel.v VX1, VX1, VX3, VT1
vfadd.d res2, VX0, VX1
vfadd.d res1, res1, res2
addi.d I, I, -1
blt $r0, I, .L21
.align 3
.L22:
vreplvei.d VX1, res1, 1
vfadd.d res1, VX1, res1
.align 3
.L23:
andi I, N, 7
bge $r0, I, .L999
.align 3
.L24:
fld.d $f12, X, 0 * SIZE
fabs.d $f12, $f12
fadd.d $f16, $f12, $f16
addi.d I, I, -1
add.d X, X, INCX
blt $r0, I, .L24
.align 3
.L999:
fmov.d $f0, $f16
jirl $r0, $r1, 0x0
.align 3
EPILOGUE
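
LSX registers ($vr) are 128 bits, half the LASX width, so the LSX double kernel above keeps the same unroll-by-8 loop but splits each iteration into four two-lane loads and adds, and the final reduction needs only one vreplvei.d/vfadd.d pair. One .L11 iteration, sketched in C:

#include <math.h>

/* Two-lane accumulation of the 128-bit LSX dasum loop (sketch):
 * eight elements per iteration, processed two doubles at a time. */
static void lsx_dasum_iter(const double *x, double acc[2])
{
    for (int k = 0; k < 8; k += 2) {  /* vld VX0/VX1 pairs */
        acc[0] += fabs(x[k]);         /* lane 0            */
        acc[1] += fabs(x[k + 1]);     /* lane 1            */
    }
}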

kernel/loongarch64/dsum_lasx.S

@@ -0,0 +1,125 @@
#define ASSEMBLER
#include "common.h"
#define N $r4
#define X $r5
#define INCX $r6
#define I $r17
#define TEMP $r18
#define t1 $r15
#define t2 $r12
#define t3 $r13
#define t4 $r14
#define VX0 $xr12
#define VX1 $xr13
#define VX2 $xr14
#define VX3 $xr15
#define res1 $xr16
#define res2 $xr17
PROLOGUE
xvxor.v res1, res1, res1
xvxor.v res2, res2, res2
bge $r0, N, .L999
bge $r0, INCX, .L999
li.d TEMP, SIZE
slli.d INCX, INCX, BASE_SHIFT
srai.d I, N, 3
bne INCX, TEMP, .L20
bge $r0, I, .L13
.align 3
.L11:
xvld VX0, X, 0 * SIZE
xvld VX1, X, 4 * SIZE
xvfadd.d res2, VX0, VX1
xvfadd.d res1, res1, res2
addi.d X, X, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L11
.align 3
.L12:
xvpickve.d VX1, res1, 1
xvpickve.d VX2, res1, 2
xvpickve.d VX3, res1, 3
xvfadd.d res1, VX1, res1
xvfadd.d res1, VX2, res1
xvfadd.d res1, VX3, res1
.align 3
.L13:
andi I, N, 7
bge $r0, I, .L999
.align 3
.L14:
fld.d $f12, X, 0 * SIZE
addi.d I, I, -1
fadd.d $f16, $f12, $f16
addi.d X, X, SIZE
blt $r0, I, .L14
b .L999
.align 3
.L20:
bge $r0, I, .L23
.align 3
.L21:
ld.d t1, X, 0 * SIZE
add.d X, X, INCX
ld.d t2, X, 0 * SIZE
add.d X, X, INCX
ld.d t3, X, 0 * SIZE
add.d X, X, INCX
ld.d t4, X, 0 * SIZE
add.d X, X, INCX
xvinsgr2vr.d VX0, t1, 0
xvinsgr2vr.d VX0, t2, 1
xvinsgr2vr.d VX0, t3, 2
xvinsgr2vr.d VX0, t4, 3
ld.d t1, X, 0 * SIZE
add.d X, X, INCX
ld.d t2, X, 0 * SIZE
add.d X, X, INCX
ld.d t3, X, 0 * SIZE
add.d X, X, INCX
ld.d t4, X, 0 * SIZE
add.d X, X, INCX
xvinsgr2vr.d VX1, t1, 0
xvinsgr2vr.d VX1, t2, 1
xvinsgr2vr.d VX1, t3, 2
xvinsgr2vr.d VX1, t4, 3
xvfadd.d res2, VX0, VX1
xvfadd.d res1, res1, res2
addi.d I, I, -1
blt $r0, I, .L21
.align 3
.L22:
xvpickve.d VX1, res1, 1
xvpickve.d VX2, res1, 2
xvpickve.d VX3, res1, 3
xvfadd.d res1, VX1, res1
xvfadd.d res1, VX2, res1
xvfadd.d res1, VX3, res1
.align 3
.L23:
andi I, N, 7
bge $r0, I, .L999
.align 3
.L24:
fld.d $f12, X, 0 * SIZE
fadd.d $f16, $f12, $f16
addi.d I, I, -1
add.d X, X, INCX
blt $r0, I, .L24
.align 3
.L999:
fmov.d $f0, $f16
jirl $r0, $r1, 0x0
.align 3
EPILOGUE
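
The .L12/.L22 blocks reduce the 256-bit accumulator to a scalar lane by lane: xvpickve.d moves lanes 1..3 of res1 into lane 0 of a temporary, and three xvfadd.d instructions fold them into lane 0 of res1. Equivalent C sketch for the four-lane double case:

/* Horizontal sum of a 4-lane double accumulator, as in .L12
 * (sketch): lanes folded into lane 0 one at a time. */
static double hsum4(const double acc[4])
{
    double s = acc[0];
    s += acc[1];    /* xvpickve.d VX1, res1, 1 ; xvfadd.d */
    s += acc[2];    /* xvpickve.d VX2, res1, 2 ; xvfadd.d */
    s += acc[3];    /* xvpickve.d VX3, res1, 3 ; xvfadd.d */
    return s;
}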

kernel/loongarch64/dsum_lsx.S

@@ -0,0 +1,123 @@
#define ASSEMBLER
#include "common.h"
#define N $r4
#define X $r5
#define INCX $r6
#define I $r17
#define TEMP $r18
#define t1 $r15
#define t2 $r12
#define t3 $r13
#define t4 $r14
#define VX0 $vr12
#define VX1 $vr13
#define VX2 $vr14
#define VX3 $vr15
#define res1 $vr16
#define res2 $vr17
PROLOGUE
vxor.v res1, res1, res1
vxor.v res2, res2, res2
bge $r0, N, .L999
bge $r0, INCX, .L999
li.d TEMP, SIZE
slli.d INCX, INCX, BASE_SHIFT
srai.d I, N, 3
bne INCX, TEMP, .L20
bge $r0, I, .L13
.align 3
.L11:
vld VX0, X, 0 * SIZE
vld VX1, X, 2 * SIZE
vfadd.d res2, VX0, VX1
vfadd.d res1, res1, res2
vld VX0, X, 4 * SIZE
vld VX1, X, 6 * SIZE
vfadd.d res2, VX0, VX1
vfadd.d res1, res1, res2
addi.d X, X, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L11
.align 3
.L12:
vreplvei.d VX1, res1, 1
vfadd.d res1, VX1, res1
.align 3
.L13:
andi I, N, 7
bge $r0, I, .L999
.align 3
.L14:
fld.d $f12, X, 0 * SIZE
fadd.d $f16, $f12, $f16
addi.d I, I, -1
addi.d X, X, SIZE
blt $r0, I, .L14
b .L999
.align 3
.L20:
bge $r0, I, .L23
.align 3
.L21:
ld.d t1, X, 0 * SIZE
add.d X, X, INCX
ld.d t2, X, 0 * SIZE
add.d X, X, INCX
vinsgr2vr.d VX0, t1, 0
vinsgr2vr.d VX0, t2, 1
ld.d t1, X, 0 * SIZE
add.d X, X, INCX
ld.d t2, X, 0 * SIZE
vinsgr2vr.d VX1, t1, 0
vinsgr2vr.d VX1, t2, 1
add.d X, X, INCX
vfadd.d res2, VX0, VX1
vfadd.d res1, res1, res2
ld.d t3, X, 0 * SIZE
add.d X, X, INCX
ld.d t4, X, 0 * SIZE
add.d X, X, INCX
vinsgr2vr.d VX0, t3, 0
vinsgr2vr.d VX0, t4, 1
ld.d t3, X, 0 * SIZE
add.d X, X, INCX
ld.d t4, X, 0 * SIZE
vinsgr2vr.d VX1, t3, 0
vinsgr2vr.d VX1, t4, 1
add.d X, X, INCX
vfadd.d res2, VX0, VX1
vfadd.d res1, res1, res2
addi.d I, I, -1
blt $r0, I, .L21
.align 3
.L22:
vreplvei.d VX1, res1, 1
vfadd.d res1, VX1, res1
.align 3
.L23:
andi I, N, 7
bge $r0, I, .L999
.align 3
.L24:
fld.d $f12, X, 0 * SIZE
fadd.d $f16, $f12, $f16
addi.d I, I, -1
add.d X, X, INCX
blt $r0, I, .L24
.align 3
.L999:
fmov.d $f0, $f16
jirl $r0, $r1, 0x0
.align 3
EPILOGUE
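
When INCX is not one element (bne INCX, TEMP, .L20), every kernel switches to the .L20 path: scalar loads walk the strided vector, and vinsgr2vr/xvinsgr2vr inserts each value into a vector lane before the same vector arithmetic runs. The gather step of one .L21 iteration, sketched in C:

/* Strided gather feeding the vector lanes in the .L20 path
 * (sketch); stride is INCX in elements, eight per iteration. */
static void gather8(const double *x, long stride, double block[8])
{
    for (int lane = 0; lane < 8; lane++)  /* ld.d tK, X, 0 ; add.d X, X, INCX */
        block[lane] = x[lane * stride];   /* (x)vinsgr2vr.d VXn, tK, lane     */
}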

kernel/loongarch64/sasum_lasx.S

@@ -0,0 +1,157 @@
#define ASSEMBLER
#include "common.h"
#define N $r4
#define X $r5
#define INCX $r6
#define I $r17
#define TEMP $r18
#define t1 $r15
#define t2 $r12
#define t3 $r13
#define t4 $r14
#define VX0 $xr12
#define VX1 $xr13
#define VX2 $xr14
#define VX3 $xr15
#define VT0 $xr23
#define VT1 $xr22
#define res1 $xr16
#define res2 $xr17
#define res0 $xr18
#define neg1 $xr19
PROLOGUE
xvxor.v res1, res1, res1
xvxor.v res2, res2, res2
xvxor.v res0, res0, res0
bge $r0, N, .L999
bge $r0, INCX, .L999
li.w t1, -1
xvreplgr2vr.w neg1, t1
xvffint.s.w neg1, neg1
li.d TEMP, SIZE
slli.d INCX, INCX, BASE_SHIFT
srai.d I, N, 3
bne INCX, TEMP, .L20
bge $r0, I, .L13
.align 3
.L11:
xvld VX0, X, 0 * SIZE
xvfmul.s VX2, neg1, VX0
xvfcmp.clt.s VT0, VX0, res0
xvbitsel.v VX0, VX0, VX2, VT0
xvfadd.s res1, VX0, res1
addi.d X, X, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L11
.align 3
.L12:
xvfadd.s res2, res1, res2
xvpickve.w VX1, res1, 1
xvpickve.w VX2, res1, 2
xvpickve.w VX3, res1, 3
xvfadd.s res1, VX1, res1
xvfadd.s res1, VX2, res1
xvfadd.s res1, VX3, res1
xvpickve.w VX0, res2, 4
xvpickve.w VX1, res2, 5
xvpickve.w VX2, res2, 6
xvpickve.w VX3, res2, 7
xvfadd.s res1, VX0, res1
xvfadd.s res1, VX1, res1
xvfadd.s res1, VX2, res1
xvfadd.s res1, VX3, res1
.align 3
.L13:
andi I, N, 7
bge $r0, I, .L999
.align 3
.L14:
fld.s $f12, X, 0 * SIZE
fabs.s $f12, $f12
fadd.s $f16, $f12, $f16
addi.d I, I, -1
addi.d X, X, SIZE
blt $r0, I, .L14
b .L999
.align 3
.L20:
bge $r0, I, .L23
.align 3
.L21:
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
add.d X, X, INCX
xvinsgr2vr.w VX0, t1, 0
xvinsgr2vr.w VX0, t2, 1
xvinsgr2vr.w VX0, t3, 2
xvinsgr2vr.w VX0, t4, 3
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
add.d X, X, INCX
xvinsgr2vr.w VX0, t1, 4
xvinsgr2vr.w VX0, t2, 5
xvinsgr2vr.w VX0, t3, 6
xvinsgr2vr.w VX0, t4, 7
xvfmul.s VX2, neg1, VX0
xvfcmp.clt.s VT0, VX0, res0
xvbitsel.v VX0, VX0, VX2, VT0
xvfadd.s res1, VX0, res1
addi.d I, I, -1
blt $r0, I, .L21
.align 3
.L22:
xvfadd.s res2, res1, res2
xvpickve.w VX1, res1, 1
xvpickve.w VX2, res1, 2
xvpickve.w VX3, res1, 3
xvfadd.s res1, VX1, res1
xvfadd.s res1, VX2, res1
xvfadd.s res1, VX3, res1
xvpickve.w VX0, res2, 4
xvpickve.w VX1, res2, 5
xvpickve.w VX2, res2, 6
xvpickve.w VX3, res2, 7
xvfadd.s res1, VX0, res1
xvfadd.s res1, VX1, res1
xvfadd.s res1, VX2, res1
xvfadd.s res1, VX3, res1
.align 3
.L23:
andi I, N, 7
bge $r0, I, .L999
.align 3
.L24:
fld.s $f12, X, 0 * SIZE
fabs.s $f12, $f12
fadd.s $f16, $f12, $f16
addi.d I, I, -1
add.d X, X, INCX
blt $r0, I, .L24
.align 3
.L999:
fmov.s $f0, $f16
jirl $r0, $r1, 0x0
.align 3
EPILOGUE
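
After the vector loop, andi I, N, 7 leaves the remainder, which .L14 (unit stride) and .L24 (strided) finish with scalar fld/fabs/fadd. Sketch of the unit-stride tail for single precision:

#include <math.h>

/* Scalar tail matching .L14 (sketch): elements past the last
 * multiple of 8 are added with fabsf, as fld.s/fabs.s/fadd.s do. */
static float sasum_tail(int n, const float *x, float partial)
{
    for (int i = n & ~7; i < n; i++)  /* andi I, N, 7 */
        partial += fabsf(x[i]);
    return partial;
}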

kernel/loongarch64/sasum_lsx.S

@@ -0,0 +1,148 @@
#define ASSEMBLER
#include "common.h"
#define N $r4
#define X $r5
#define INCX $r6
#define I $r17
#define TEMP $r18
#define t1 $r15
#define t2 $r12
#define t3 $r13
#define t4 $r14
#define VX0 $vr12
#define VX1 $vr13
#define VX2 $vr14
#define VX3 $vr15
#define VT0 $vr23
#define VT1 $vr22
#define res1 $vr16
#define res2 $vr17
#define res0 $vr18
#define neg1 $vr19
PROLOGUE
vxor.v res1, res1, res1
vxor.v res2, res2, res2
vxor.v res0, res0, res0
bge $r0, N, .L999
bge $r0, INCX, .L999
li.w t1, -1
vreplgr2vr.w neg1, t1
vffint.s.w neg1, neg1
li.d TEMP, SIZE
slli.d INCX, INCX, BASE_SHIFT
srai.d I, N, 3
bne INCX, TEMP, .L20
bge $r0, I, .L13
.align 3
.L11:
vld VX0, X, 0 * SIZE
vld VX1, X, 4 * SIZE
vfmul.s VX2, neg1, VX0
vfmul.s VX3, neg1, VX1
vfcmp.clt.s VT0, VX0, res0
vfcmp.clt.s VT1, VX1, res0
vbitsel.v VX0, VX0, VX2, VT0
vbitsel.v VX1, VX1, VX3, VT1
vfadd.s res2, VX0, VX1
vfadd.s res1, res1, res2
addi.d X, X, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L11
.align 3
.L12:
vreplvei.w VX1, res1, 1
vreplvei.w VX2, res1, 2
vreplvei.w VX3, res1, 3
vfadd.s res1, VX1, res1
vfadd.s res1, VX2, res1
vfadd.s res1, VX3, res1
.align 3
.L13:
andi I, N, 7
bge $r0, I, .L999
.align 3
.L14:
fld.s $f12, X, 0 * SIZE
fabs.s $f12, $f12
fadd.s $f16, $f12, $f16
addi.d I, I, -1
addi.d X, X, SIZE
blt $r0, I, .L14
b .L999
.align 3
.L20:
bge $r0, I, .L23
.align 3
.L21:
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
add.d X, X, INCX
vinsgr2vr.w VX0, t1, 0
vinsgr2vr.w VX0, t2, 1
vinsgr2vr.w VX0, t3, 2
vinsgr2vr.w VX0, t4, 3
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
add.d X, X, INCX
vinsgr2vr.w VX1, t1, 0
vinsgr2vr.w VX1, t2, 1
vinsgr2vr.w VX1, t3, 2
vinsgr2vr.w VX1, t4, 3
vfmul.s VX2, neg1, VX0
vfmul.s VX3, neg1, VX1
vfcmp.clt.s VT0, VX0, res0
vfcmp.clt.s VT1, VX1, res0
vbitsel.v VX0, VX0, VX2, VT0
vbitsel.v VX1, VX1, VX3, VT1
vfadd.s res2, VX0, VX1
vfadd.s res1, res1, res2
addi.d I, I, -1
blt $r0, I, .L21
.align 3
.L22:
vreplvei.w VX1, res1, 1
vreplvei.w VX2, res1, 2
vreplvei.w VX3, res1, 3
vfadd.s res1, VX1, res1
vfadd.s res1, VX2, res1
vfadd.s res1, VX3, res1
.align 3
.L23:
andi I, N, 7
bge $r0, I, .L999
.align 3
.L24:
fld.s $f12, X, 0 * SIZE
fabs.s $f12, $f12
fadd.s $f16, $f12, $f16
addi.d I, I, -1
add.d X, X, INCX
blt $r0, I, .L24
.align 3
.L999:
fmov.s $f0, $f16
jirl $r0, $r1, 0x0
.align 3
EPILOGUE

kernel/loongarch64/ssum_lasx.S

@@ -0,0 +1,140 @@
#define ASSEMBLER
#include "common.h"
#define N $r4
#define X $r5
#define INCX $r6
#define I $r17
#define TEMP $r18
#define t1 $r15
#define t2 $r12
#define t3 $r13
#define t4 $r14
#define VX0 $xr12
#define VX1 $xr13
#define VX2 $xr14
#define VX3 $xr15
#define res1 $xr16
#define res2 $xr17
PROLOGUE
xvxor.v res1, res1, res1
xvxor.v res2, res2, res2
bge $r0, N, .L999
bge $r0, INCX, .L999
li.d TEMP, SIZE
slli.d INCX, INCX, BASE_SHIFT
srai.d I, N, 3
bne INCX, TEMP, .L20
bge $r0, I, .L13
.align 3
.L11:
xvld VX0, X, 0 * SIZE
xvfadd.s res1, VX0, res1
addi.d X, X, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L11
.align 3
.L12:
xvfadd.s res2, res1, res2
xvpickve.w VX1, res1, 1
xvpickve.w VX2, res1, 2
xvpickve.w VX3, res1, 3
xvfadd.s res1, VX1, res1
xvfadd.s res1, VX2, res1
xvfadd.s res1, VX3, res1
xvpickve.w VX0, res2, 4
xvpickve.w VX1, res2, 5
xvpickve.w VX2, res2, 6
xvpickve.w VX3, res2, 7
xvfadd.s res1, VX0, res1
xvfadd.s res1, VX1, res1
xvfadd.s res1, VX2, res1
xvfadd.s res1, VX3, res1
.align 3
.L13:
andi I, N, 7
bge $r0, I, .L999
.align 3
.L14:
fld.s $f12, X, 0 * SIZE
fadd.s $f16, $f12, $f16
addi.d I, I, -1
addi.d X, X, SIZE
blt $r0, I, .L14
b .L999
.align 3
.L20:
bge $r0, I, .L23
.align 3
.L21:
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
add.d X, X, INCX
xvinsgr2vr.w VX0, t1, 0
xvinsgr2vr.w VX0, t2, 1
xvinsgr2vr.w VX0, t3, 2
xvinsgr2vr.w VX0, t4, 3
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
add.d X, X, INCX
xvinsgr2vr.w VX0, t1, 4
xvinsgr2vr.w VX0, t2, 5
xvinsgr2vr.w VX0, t3, 6
xvinsgr2vr.w VX0, t4, 7
xvfadd.s res1, VX0, res1
addi.d I, I, -1
blt $r0, I, .L21
.align 3
.L22:
xvfadd.s res2, res1, res2
xvpickve.w VX1, res1, 1
xvpickve.w VX2, res1, 2
xvpickve.w VX3, res1, 3
xvfadd.s res1, VX1, res1
xvfadd.s res1, VX2, res1
xvfadd.s res1, VX3, res1
xvpickve.w VX0, res2, 4
xvpickve.w VX1, res2, 5
xvpickve.w VX2, res2, 6
xvpickve.w VX3, res2, 7
xvfadd.s res1, VX0, res1
xvfadd.s res1, VX1, res1
xvfadd.s res1, VX2, res1
xvfadd.s res1, VX3, res1
.align 3
.L23:
andi I, N, 7
bge $r0, I, .L999
.align 3
.L24:
fld.s $f12, X, 0 * SIZE
fadd.s $f16, $f12, $f16
addi.d I, I, -1
add.d X, X, INCX
blt $r0, I, .L24
.align 3
.L999:
fmov.s $f0, $f16
jirl $r0, $r1, 0x0
.align 3
EPILOGUE

kernel/loongarch64/ssum_lsx.S

@@ -0,0 +1,125 @@
#define ASSEMBLER
#include "common.h"
#define N $r4
#define X $r5
#define INCX $r6
#define I $r17
#define TEMP $r18
#define t1 $r15
#define t2 $r12
#define t3 $r13
#define t4 $r14
#define VX0 $vr12
#define VX1 $vr13
#define VX2 $vr14
#define VX3 $vr15
#define res1 $vr16
#define res2 $vr17
PROLOGUE
vxor.v res1, res1, res1
vxor.v res2, res2, res2
bge $r0, N, .L999
bge $r0, INCX, .L999
li.d TEMP, SIZE
slli.d INCX, INCX, BASE_SHIFT
srai.d I, N, 3
bne INCX, TEMP, .L20
bge $r0, I, .L13
.align 3
.L11:
vld VX0, X, 0 * SIZE
vld VX1, X, 4 * SIZE
vfadd.s res2, VX0, VX1
vfadd.s res1, res1, res2
addi.d X, X, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L11
.align 3
.L12:
vreplvei.w VX1, res1, 1
vreplvei.w VX2, res1, 2
vreplvei.w VX3, res1, 3
vfadd.s res1, VX1, res1
vfadd.s res1, VX2, res1
vfadd.s res1, VX3, res1
.align 3
.L13:
andi I, N, 7
bge $r0, I, .L999
.align 3
.L14:
fld.s $f12, X, 0 * SIZE
fadd.s $f16, $f12, $f16
addi.d I, I, -1
addi.d X, X, SIZE
blt $r0, I, .L14
b .L999
.align 3
.L20:
bge $r0, I, .L23
.align 3
.L21:
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
add.d X, X, INCX
vinsgr2vr.w VX0, t1, 0
vinsgr2vr.w VX0, t2, 1
vinsgr2vr.w VX0, t3, 2
vinsgr2vr.w VX0, t4, 3
ld.w t1, X, 0 * SIZE
add.d X, X, INCX
ld.w t2, X, 0 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
add.d X, X, INCX
ld.w t4, X, 0 * SIZE
add.d X, X, INCX
vinsgr2vr.w VX1, t1, 0
vinsgr2vr.w VX1, t2, 1
vinsgr2vr.w VX1, t3, 2
vinsgr2vr.w VX1, t4, 3
vfadd.s res2, VX0, VX1
vfadd.s res1, res1, res2
addi.d I, I, -1
blt $r0, I, .L21
.align 3
.L22:
vreplvei.w VX1, res1, 1
vreplvei.w VX2, res1, 2
vreplvei.w VX3, res1, 3
vfadd.s res1, VX1, res1
vfadd.s res1, VX2, res1
vfadd.s res1, VX3, res1
.align 3
.L23:
andi I, N, 7
bge $r0, I, .L999
.align 3
.L24:
fld.s $f12, X, 0 * SIZE
fadd.s $f16, $f12, $f16
addi.d I, I, -1
add.d X, X, INCX
blt $r0, I, .L24
.align 3
.L999:
fmov.s $f0, $f16
jirl $r0, $r1, 0x0
.align 3
EPILOGUE
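
These files only replace the kernels behind the existing BLAS entry points; callers are unaffected. A small usage sketch through CBLAS, assuming an OpenBLAS build that exposes the ?sum extension alongside standard ?asum:

#include <stdio.h>
#include <cblas.h>

int main(void)
{
    double x[4] = {1.0, -2.0, 3.0, -4.0};
    printf("dasum = %f\n", cblas_dasum(4, x, 1)); /* |1|+|2|+|3|+|4| = 10 */
    printf("dsum  = %f\n", cblas_dsum(4, x, 1));  /* 1-2+3-4 = -2         */
    return 0;
}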